From e3e83b7e71c7d9f6a34b9d7b407b72ca3eda9b4e Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 17 Oct 2024 10:03:25 +0200 Subject: [PATCH 001/970] docs(trie): revealed sparse trie invariants (#11825) --- crates/trie/sparse/src/trie.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 2edaaf76b27..d8f4280e875 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -69,6 +69,13 @@ impl SparseTrie { } /// The representation of revealed sparse trie. +/// +/// ## Invariants +/// +/// - The root node is always present in `nodes` collection. +/// - Each leaf entry in `nodes` collection must have a corresponding entry in `values` collection. +/// The opposite is also true. +/// - All keys in `values` collection are full leaf paths. #[derive(PartialEq, Eq)] pub struct RevealedSparseTrie { /// All trie nodes. From 63a75fdd95ea71028d9d65c277a514f7e518676e Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 17 Oct 2024 10:07:45 +0200 Subject: [PATCH 002/970] fix(trie): intermediate trie node hashes (#11826) --- crates/trie/sparse/src/trie.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index d8f4280e875..e83522ca890 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -345,9 +345,7 @@ impl RevealedSparseTrie { } else { let value = self.values.get(&path).unwrap(); let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); - if let Some(node_hash) = rlp_node.as_hash() { - *hash = Some(node_hash); - } + *hash = rlp_node.as_hash(); rlp_node } } @@ -360,9 +358,7 @@ impl RevealedSparseTrie { let (_, child) = rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); - if let Some(node_hash) = rlp_node.as_hash() { - *hash = Some(node_hash); - } + *hash = 
rlp_node.as_hash(); rlp_node } else { path_stack.extend([path, child_path]); // need to get rlp node for child first @@ -400,9 +396,7 @@ impl RevealedSparseTrie { self.rlp_buf.clear(); let rlp_node = BranchNodeRef::new(&branch_value_stack_buf, *state_mask) .rlp(&mut self.rlp_buf); - if let Some(node_hash) = rlp_node.as_hash() { - *hash = Some(node_hash); - } + *hash = rlp_node.as_hash(); rlp_node } }; From 491f154c3437d36d3a8add91f76efce8eea28a63 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 17 Oct 2024 11:30:26 +0200 Subject: [PATCH 003/970] primitives-traits: rm redundant definitions of `EMPTY_OMMER_ROOT_HASH` (#11820) --- Cargo.lock | 4 ++++ crates/ethereum/consensus/Cargo.toml | 1 + crates/ethereum/consensus/src/lib.rs | 2 +- crates/ethereum/payload/Cargo.toml | 1 + crates/ethereum/payload/src/lib.rs | 3 ++- crates/optimism/consensus/Cargo.toml | 1 + crates/optimism/consensus/src/lib.rs | 5 ++--- crates/optimism/payload/Cargo.toml | 1 + crates/optimism/payload/src/builder.rs | 3 ++- crates/optimism/primitives/src/bedrock.rs | 3 +-- crates/primitives-traits/src/constants/mod.rs | 4 ---- crates/primitives/src/lib.rs | 4 ++-- crates/primitives/src/proofs.rs | 4 ++-- crates/rpc/rpc-eth-api/src/helpers/pending_block.rs | 4 ++-- crates/rpc/rpc-types-compat/src/engine/payload.rs | 3 ++- 15 files changed, 24 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 740810d1b1b..e434e80cfde 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7270,6 +7270,7 @@ dependencies = [ name = "reth-ethereum-consensus" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "reth-chainspec", "reth-consensus", @@ -7320,6 +7321,7 @@ dependencies = [ name = "reth-ethereum-payload-builder" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "reth-basic-payload-builder", "reth-chain-state", @@ -8086,6 +8088,7 @@ dependencies = [ name = "reth-optimism-consensus" version = 
"1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "reth-chainspec", "reth-consensus", @@ -8194,6 +8197,7 @@ dependencies = [ name = "reth-optimism-payload-builder" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", diff --git a/crates/ethereum/consensus/Cargo.toml b/crates/ethereum/consensus/Cargo.toml index 02d217b63b2..af934d3e2b6 100644 --- a/crates/ethereum/consensus/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -19,5 +19,6 @@ reth-consensus.workspace = true # alloy alloy-primitives.workspace = true +alloy-consensus.workspace = true tracing.workspace = true diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index e74f3498fa5..8f2a8a72042 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,6 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; @@ -19,7 +20,6 @@ use reth_consensus_common::validation::{ }; use reth_primitives::{ constants::MINIMUM_GAS_LIMIT, BlockWithSenders, Header, SealedBlock, SealedHeader, - EMPTY_OMMER_ROOT_HASH, }; use std::{fmt::Debug, sync::Arc, time::SystemTime}; diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index f169d58f7e8..ce37a4f8ea4 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -34,6 +34,7 @@ revm-primitives.workspace = true # alloy alloy-primitives.workspace = true +alloy-consensus.workspace = true # misc tracing.workspace = true diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 248aa3486de..dcf54fc0248 100644 --- a/crates/ethereum/payload/src/lib.rs +++ 
b/crates/ethereum/payload/src/lib.rs @@ -9,6 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, @@ -26,7 +27,7 @@ use reth_primitives::{ constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, proofs::{self, calculate_requests_root}, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, EthereumHardforks, Header, Receipt, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, EthereumHardforks, Header, Receipt, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index f5f061c5992..e2520c89340 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -25,6 +25,7 @@ reth-optimism-chainspec.workspace = true # ethereum alloy-primitives.workspace = true +alloy-consensus.workspace = true tracing.workspace = true diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index fe67ff1bcd9..16c1d5d37d7 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,6 +9,7 @@ // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")] +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; @@ -20,9 +21,7 @@ use reth_consensus_common::validation::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OptimismHardforks; -use reth_primitives::{ - BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, -}; +use reth_primitives::{BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader}; use std::{sync::Arc, time::SystemTime}; mod proof; diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index e1d6fe47d29..46cc82edb6c 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -41,6 +41,7 @@ alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true revm-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true # misc tracing.workspace = true diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 0a8dcdb1244..e590635f524 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::U256; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; @@ -16,7 +17,7 @@ use reth_primitives::{ constants::BEACON_NONCE, proofs, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Header, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, Header, Receipt, TxType, }; use reth_provider::StateProviderFactory; use reth_revm::database::StateProviderDatabase; diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index 1a347aecafe..bd42298588f 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ 
b/crates/optimism/primitives/src/bedrock.rs @@ -1,9 +1,8 @@ //! OP mainnet bedrock related data. -use alloy_consensus::EMPTY_ROOT_HASH; +use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, B256, B64, U256}; use reth_primitives::Header; -use reth_primitives_traits::constants::EMPTY_OMMER_ROOT_HASH; /// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, /// replayed in blocks: diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index d40abdd64ba..5d64b911b60 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -119,10 +119,6 @@ pub const DEV_GENESIS_HASH: B256 = pub const KECCAK_EMPTY: B256 = b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); -/// Ommer root of empty list: `0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347` -pub const EMPTY_OMMER_ROOT_HASH: B256 = - b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"); - /// From address from Optimism system txs: `0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001` pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeaddeaddeaddead0001"); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index ec65cbf20e5..a59e72bbd55 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -40,8 +40,8 @@ pub use block::{ #[cfg(feature = "reth-codec")] pub use compression::*; pub use constants::{ - DEV_GENESIS_HASH, EMPTY_OMMER_ROOT_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, - MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + DEV_GENESIS_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, MAINNET_GENESIS_HASH, + SEPOLIA_GENESIS_HASH, }; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, diff --git a/crates/primitives/src/proofs.rs 
b/crates/primitives/src/proofs.rs index dc814804ec8..4efbb588e10 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,10 +1,10 @@ //! Helper function for calculating Merkle proofs and hashes. use crate::{ - constants::EMPTY_OMMER_ROOT_HASH, Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, - Request, TransactionSigned, Withdrawal, + Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Request, TransactionSigned, Withdrawal, }; use alloc::vec::Vec; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_eips::{eip2718::Encodable2718, eip7685::Encodable7685}; use alloy_primitives::{keccak256, B256}; use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 81c6a567846..832cf17055a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -5,7 +5,7 @@ use std::time::{Duration, Instant}; use crate::{EthApiTypes, FromEthApiError, FromEvmError}; -use alloy_consensus::EMPTY_ROOT_HASH; +use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rpc_types::BlockNumberOrTag; use futures::Future; @@ -20,7 +20,7 @@ use reth_primitives::{ ResultAndState, SpecId, }, Block, BlockBody, Header, Receipt, Requests, SealedBlockWithSenders, SealedHeader, - TransactionSignedEcRecovered, EMPTY_OMMER_ROOT_HASH, + TransactionSignedEcRecovered, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 84943b60e20..e6f2f97ca75 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -1,6 +1,7 @@ //! 
Standalone Conversion Functions for Handling Different Versions of Execution Payloads in //! Ethereum's Engine +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ @@ -9,7 +10,7 @@ use alloy_rpc_types_engine::{ ExecutionPayloadV3, ExecutionPayloadV4, PayloadError, }; use reth_primitives::{ - constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE}, + constants::MAXIMUM_EXTRA_DATA_SIZE, proofs::{self}, Block, BlockBody, Header, Request, SealedBlock, TransactionSigned, Withdrawals, }; From bac244ae970f201afda63811b7b8d8b901fb94de Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 17 Oct 2024 11:20:56 +0100 Subject: [PATCH 004/970] feat(trie): sparse trie leaf removal (#11752) Co-authored-by: Roman Krasiuk --- Cargo.lock | 8 +- crates/trie/sparse/Cargo.toml | 11 +- crates/trie/sparse/src/trie.rs | 708 ++++++++++++++++++++++++++++++--- 3 files changed, 669 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e434e80cfde..d31239808a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4464,7 +4464,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -9248,9 +9248,13 @@ dependencies = [ "assert_matches", "criterion", "itertools 0.13.0", + "pretty_assertions", "proptest", + "rand 0.8.5", "rayon", "reth-primitives", + "reth-testing-utils", + "reth-tracing", "reth-trie", "reth-trie-common", "smallvec", @@ -11388,7 +11392,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 
c31bbe2df2f..4ba6ed0f2ec 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -15,6 +15,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-tracing.workspace = true reth-trie-common.workspace = true reth-trie.workspace = true @@ -26,18 +27,22 @@ alloy-rlp.workspace = true tracing.workspace = true # misc -thiserror.workspace = true rayon.workspace = true smallvec = { workspace = true, features = ["const_new"] } +thiserror.workspace = true [dev-dependencies] reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } -reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } +reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } + assert_matches.workspace = true +criterion.workspace = true itertools.workspace = true +pretty_assertions = "1.4" proptest.workspace = true -criterion.workspace = true +rand.workspace = true [[bench]] name = "root" diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index e83522ca890..0b9ffb5c0ed 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,6 +1,7 @@ use crate::{SparseTrieError, SparseTrieResult}; use alloy_primitives::{hex, keccak256, map::HashMap, B256}; use alloy_rlp::Decodable; +use reth_tracing::tracing::debug; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut}, RlpNode, @@ -264,8 +265,244 @@ impl RevealedSparseTrie { } /// Remove leaf node from the trie. 
- pub fn remove_leaf(&mut self, _path: Nibbles) { - unimplemented!() + pub fn remove_leaf(&mut self, path: Nibbles) -> SparseTrieResult<()> { + self.prefix_set.insert(path.clone()); + let existing = self.values.remove(&path); + if existing.is_none() { + // trie structure unchanged, return immediately + return Ok(()) + } + + let mut removed_nodes = self.take_nodes_for_path(&path)?; + debug!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); + // Pop the first node from the stack which is the leaf node we want to remove. + let mut child = removed_nodes.pop().expect("leaf exists"); + #[cfg(debug_assertions)] + { + let mut child_path = child.path.clone(); + let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") }; + child_path.extend_from_slice_unchecked(key); + assert_eq!(child_path, path); + } + + // If we don't have any other removed nodes, insert an empty node at the root. + if removed_nodes.is_empty() { + debug_assert!(self.nodes.is_empty()); + self.nodes.insert(Nibbles::default(), SparseNode::Empty); + + return Ok(()) + } + + // Walk the stack of removed nodes from the back and re-insert them back into the trie, + // adjusting the node type as needed. + while let Some(removed_node) = removed_nodes.pop() { + let removed_path = removed_node.path; + + let new_node = match &removed_node.node { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { path: removed_path, hash: *hash }) + } + SparseNode::Leaf { .. } => { + unreachable!("we already popped the leaf node") + } + SparseNode::Extension { key, .. } => { + // If the node is an extension node, we need to look at its child to see if we + // need to merge them. 
+ match &child.node { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { + path: child.path, + hash: *hash, + }) + } + // For a leaf node, we collapse the extension node into a leaf node, + // extending the key. While it's impossible to encounter an extension node + // followed by a leaf node in a complete trie, it's possible here because we + // could have downgraded the extension node's child into a leaf node from + // another node type. + SparseNode::Leaf { key: leaf_key, .. } => { + self.nodes.remove(&child.path); + + let mut new_key = key.clone(); + new_key.extend_from_slice_unchecked(leaf_key); + SparseNode::new_leaf(new_key) + } + // For an extension node, we collapse them into one extension node, + // extending the key + SparseNode::Extension { key: extension_key, .. } => { + self.nodes.remove(&child.path); + + let mut new_key = key.clone(); + new_key.extend_from_slice_unchecked(extension_key); + SparseNode::new_ext(new_key) + } + // For a branch node, we just leave the extension node as-is. + SparseNode::Branch { .. } => removed_node.node, + } + } + SparseNode::Branch { mut state_mask, hash: _ } => { + // If the node is a branch node, we need to check the number of children left + // after deleting the child at the given nibble. + + if let Some(removed_nibble) = removed_node.unset_branch_nibble { + state_mask.unset_bit(removed_nibble); + } + + // If only one child is left set in the branch node, we need to collapse it. + if state_mask.count_bits() == 1 { + let child_nibble = + state_mask.first_set_bit_index().expect("state mask is not empty"); + + // Get full path of the only child node left. + let mut child_path = removed_path.clone(); + child_path.push_unchecked(child_nibble); + + // Remove the only child node. 
+ let child = self.nodes.get(&child_path).unwrap(); + + debug!(target: "trie::sparse", ?removed_path, ?child_path, ?child, "Branch node has only one child"); + + let mut delete_child = false; + let new_node = match child { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { + path: child_path, + hash: *hash, + }) + } + // If the only child is a leaf node, we downgrade the branch node into a + // leaf node, prepending the nibble to the key, and delete the old + // child. + SparseNode::Leaf { key, .. } => { + delete_child = true; + + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend_from_slice_unchecked(key); + SparseNode::new_leaf(new_key) + } + // If the only child node is an extension node, we downgrade the branch + // node into an even longer extension node, prepending the nibble to the + // key, and delete the old child. + SparseNode::Extension { key, .. } => { + delete_child = true; + + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend_from_slice_unchecked(key); + SparseNode::new_ext(new_key) + } + // If the only child is a branch node, we downgrade the current branch + // node into a one-nibble extension node. + SparseNode::Branch { .. } => { + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble])) + } + }; + + if delete_child { + self.nodes.remove(&child_path); + } + + new_node + } + // If more than one child is left set in the branch, we just re-insert it + // as-is. + else { + SparseNode::new_branch(state_mask) + } + } + }; + + child = RemovedSparseNode { + path: removed_path.clone(), + node: new_node.clone(), + unset_branch_nibble: None, + }; + debug!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); + self.nodes.insert(removed_path, new_node); + } + + Ok(()) + } + + /// Traverse trie nodes down to the leaf node and collect all nodes along the path. 
+ fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult> { + let mut current = Nibbles::default(); // Start traversal from the root + let mut nodes = Vec::new(); // Collect traversed nodes + + while let Some(node) = self.nodes.remove(¤t) { + match &node { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { path: current, hash: *hash }) + } + SparseNode::Leaf { key: _key, .. } => { + // Leaf node is always the one that we're deleting, and no other leaf nodes can + // be found during traversal. + + #[cfg(debug_assertions)] + { + let mut current = current.clone(); + current.extend_from_slice_unchecked(_key); + assert_eq!(¤t, path); + } + + nodes.push(RemovedSparseNode { + path: current.clone(), + node, + unset_branch_nibble: None, + }); + break + } + SparseNode::Extension { key, .. } => { + #[cfg(debug_assertions)] + { + let mut current = current.clone(); + current.extend_from_slice_unchecked(key); + assert!(path.starts_with(¤t)); + } + + let path = current.clone(); + current.extend_from_slice_unchecked(key); + nodes.push(RemovedSparseNode { path, node, unset_branch_nibble: None }); + } + SparseNode::Branch { state_mask, .. } => { + let nibble = path[current.len()]; + debug_assert!(state_mask.is_bit_set(nibble)); + + // If the branch node has a child that is a leaf node that we're removing, + // we need to unset this nibble. + // Any other branch nodes will not require unsetting the nibble, because + // deleting one leaf node can not remove the whole path + // where the branch node is located. + let mut child_path = + Nibbles::from_nibbles([current.as_slice(), &[nibble]].concat()); + let unset_branch_nibble = self + .nodes + .get(&child_path) + .map_or(false, move |node| match node { + SparseNode::Leaf { key, .. 
} => { + // Get full path of the leaf node + child_path.extend_from_slice_unchecked(key); + &child_path == path + } + _ => false, + }) + .then_some(nibble); + + nodes.push(RemovedSparseNode { + path: current.clone(), + node, + unset_branch_nibble, + }); + + current.push_unchecked(nibble); + } + } + } + + Ok(nodes) } /// Return the root of the sparse trie. @@ -476,13 +713,87 @@ impl SparseNode { } } +#[derive(Debug)] +struct RemovedSparseNode { + path: Nibbles, + node: SparseNode, + unset_branch_nibble: Option, +} + #[cfg(test)] mod tests { + use std::collections::BTreeMap; + use super::*; use alloy_primitives::U256; use itertools::Itertools; use proptest::prelude::*; - use reth_trie_common::HashBuilder; + use rand::seq::IteratorRandom; + use reth_testing_utils::generators; + use reth_trie::{BranchNode, ExtensionNode, LeafNode}; + use reth_trie_common::{ + proof::{ProofNodes, ProofRetainer}, + HashBuilder, + }; + + /// Calculate the state root by feeding the provided state to the hash builder and retaining the + /// proofs for the provided targets. + /// + /// Returns the state root and the retained proof nodes. + fn hash_builder_root_with_proofs>( + state: impl IntoIterator, + proof_targets: impl IntoIterator, + ) -> (B256, ProofNodes) { + let mut hash_builder = + HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter(proof_targets)); + for (key, value) in state { + hash_builder.add_leaf(key, value.as_ref()); + } + (hash_builder.root(), hash_builder.take_proof_nodes()) + } + + /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal. 
+ fn assert_eq_sparse_trie_proof_nodes( + sparse_trie: &RevealedSparseTrie, + proof_nodes: ProofNodes, + ) { + let proof_nodes = proof_nodes + .into_nodes_sorted() + .into_iter() + .map(|(path, node)| (path, TrieNode::decode(&mut node.as_ref()).unwrap())); + + let sparse_nodes = sparse_trie.nodes.iter().sorted_by_key(|(path, _)| *path); + + for ((proof_node_path, proof_node), (sparse_node_path, sparse_node)) in + proof_nodes.zip(sparse_nodes) + { + assert_eq!(&proof_node_path, sparse_node_path); + + let equals = match (&proof_node, &sparse_node) { + // Both nodes are empty + (TrieNode::EmptyRoot, SparseNode::Empty) => true, + // Both nodes are branches and have the same state mask + ( + TrieNode::Branch(BranchNode { state_mask: proof_state_mask, .. }), + SparseNode::Branch { state_mask: sparse_state_mask, .. }, + ) => proof_state_mask == sparse_state_mask, + // Both nodes are extensions and have the same key + ( + TrieNode::Extension(ExtensionNode { key: proof_key, .. }), + SparseNode::Extension { key: sparse_key, .. }, + ) | + // Both nodes are leaves and have the same key + ( + TrieNode::Leaf(LeafNode { key: proof_key, .. }), + SparseNode::Leaf { key: sparse_key, .. 
}, + ) => proof_key == sparse_key, + // Empty and hash nodes are specific to the sparse trie, skip them + (_, SparseNode::Empty | SparseNode::Hash(_)) => continue, + _ => false, + }; + assert!(equals, "proof node: {:?}, sparse node: {:?}", proof_node, sparse_node); + } + } #[test] fn sparse_trie_is_blind() { @@ -495,14 +806,15 @@ mod tests { let path = Nibbles::unpack(B256::with_last_byte(42)); let value = alloy_rlp::encode_fixed_size(&U256::from(1)); - let mut hash_builder = HashBuilder::default(); - hash_builder.add_leaf(path.clone(), &value); - let expected = hash_builder.root(); + let (hash_builder_root, hash_builder_proof_nodes) = + hash_builder_root_with_proofs([(path.clone(), &value)], [path.clone()]); let mut sparse = RevealedSparseTrie::default(); sparse.update_leaf(path, value.to_vec()).unwrap(); - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -510,18 +822,19 @@ mod tests { let paths = (0..=16).map(|b| Nibbles::unpack(B256::with_last_byte(b))).collect::>(); let value = alloy_rlp::encode_fixed_size(&U256::from(1)); - let mut hash_builder = HashBuilder::default(); - for path in &paths { - hash_builder.add_leaf(path.clone(), &value); - } - let expected = hash_builder.root(); + let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( + paths.iter().cloned().zip(std::iter::repeat_with(|| value.clone())), + paths.clone(), + ); let mut sparse = RevealedSparseTrie::default(); for path in &paths { sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); } - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -529,18 +842,19 @@ mod tests { let paths = (239..=255).map(|b| 
Nibbles::unpack(B256::repeat_byte(b))).collect::>(); let value = alloy_rlp::encode_fixed_size(&U256::from(1)); - let mut hash_builder = HashBuilder::default(); - for path in &paths { - hash_builder.add_leaf(path.clone(), &value); - } - let expected = hash_builder.root(); + let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( + paths.iter().cloned().zip(std::iter::repeat_with(|| value.clone())), + paths.clone(), + ); let mut sparse = RevealedSparseTrie::default(); for path in &paths { sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); } - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -556,18 +870,19 @@ mod tests { .collect::>(); let value = alloy_rlp::encode_fixed_size(&U256::from(1)); - let mut hash_builder = HashBuilder::default(); - for path in paths.iter().sorted_unstable_by_key(|key| *key) { - hash_builder.add_leaf(path.clone(), &value); - } - let expected = hash_builder.root(); + let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( + paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(|| value.clone())), + paths.clone(), + ); let mut sparse = RevealedSparseTrie::default(); for path in &paths { sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); } - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -576,52 +891,339 @@ mod tests { let old_value = alloy_rlp::encode_fixed_size(&U256::from(1)); let new_value = alloy_rlp::encode_fixed_size(&U256::from(2)); - let mut hash_builder = HashBuilder::default(); - for path in paths.iter().sorted_unstable_by_key(|key| *key) { - hash_builder.add_leaf(path.clone(), &old_value); - } - let 
expected = hash_builder.root(); + let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( + paths.iter().cloned().zip(std::iter::repeat_with(|| old_value.clone())), + paths.clone(), + ); let mut sparse = RevealedSparseTrie::default(); for path in &paths { sparse.update_leaf(path.clone(), old_value.to_vec()).unwrap(); } - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); - let mut hash_builder = HashBuilder::default(); - for path in paths.iter().sorted_unstable_by_key(|key| *key) { - hash_builder.add_leaf(path.clone(), &new_value); - } - let expected = hash_builder.root(); + assert_eq!(sparse_root, hash_builder_root); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + + let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( + paths.iter().cloned().zip(std::iter::repeat_with(|| new_value.clone())), + paths.clone(), + ); for path in &paths { sparse.update_leaf(path.clone(), new_value.to_vec()).unwrap(); } - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + } + + #[test] + fn sparse_trie_remove_leaf() { + reth_tracing::init_test_tracing(); + + let mut sparse = RevealedSparseTrie::default(); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .unwrap(); + 
sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1011) + // ├── 0 -> Extension (Key = 23) + // │ └── Branch (Mask = 0101) + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) + // │ └── 3 -> Leaf (Key = 3, Path = 50233) + // ├── 2 -> Leaf (Key = 013, Path = 52013) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([ + (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1101.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]), + SparseNode::new_branch(0b1010.into()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::new()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::new()) + ), + ( + Nibbles::from_nibbles([0x5, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x1, 0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3])).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Extension (Key = 23) + // │ └── 
Branch (Mask = 0101) + // │ ├── 1 -> Leaf (Key = 0231, Path = 50231) + // │ └── 3 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([ + (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]), + SparseNode::new_branch(0b1010.into()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::new()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::new()) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1])).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([ + (Nibbles::new(), 
SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2])).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([ + (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3]), + SparseNode::new_ext(Nibbles::from_nibbles([0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0])).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + 
// └── 3 -> Leaf (Key = 3302, Path = 53302) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([ + (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x0, 0x2])) + ), + ]) + ); + + sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3])).unwrap(); + + // Leaf (Key = 53302) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([( + Nibbles::new(), + SparseNode::new_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])) + ),]) + ); + + sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])).unwrap(); + + // Empty + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([(Nibbles::new(), SparseNode::Empty),]) + ); } #[test] - fn sparse_trie_empty_update_fuzz() { + fn sparse_trie_fuzz() { proptest!(ProptestConfig::with_cases(10), |(updates: Vec>)| { - let mut state = std::collections::BTreeMap::default(); + let mut rng = generators::rng(); + + let mut state = BTreeMap::default(); + let mut unpacked_state = BTreeMap::default(); let mut sparse = RevealedSparseTrie::default(); for update in updates { - for (key, value) in &update { - sparse.update_leaf(Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec()).unwrap(); + let keys_to_delete_len = update.len() / 2; + + let unpacked_update = update.iter().map(|(key, value)| ( + Nibbles::unpack(key), + alloy_rlp::encode_fixed_size(value).to_vec() + )); + + // Insert state updates into the sparse trie and calculate the root + for (key, value) in unpacked_update.clone() { + sparse.update_leaf(key, value).unwrap(); } - let root = sparse.root(); + let 
sparse_root = sparse.root(); + // Insert state updates into the hash builder and calculate the root + unpacked_state.extend(unpacked_update); state.extend(update); - let mut hash_builder = HashBuilder::default(); - for (key, value) in &state { - hash_builder.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(value)); + let keys = state.keys().map(Nibbles::unpack).collect::>(); + let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( + unpacked_state.clone(), + keys, + ); + + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + + // Delete some keys from both the hash builder and the sparse trie and check + // that the sparse trie root still matches the hash builder root + + let keys_to_delete = state + .keys() + .choose_multiple(&mut rng, keys_to_delete_len) + .into_iter() + .copied() + .collect::>(); + for key in keys_to_delete { + state.remove(&key).unwrap(); + unpacked_state.remove(&Nibbles::unpack(key)).unwrap(); + sparse.remove_leaf(Nibbles::unpack(key)).unwrap(); } - let expected = hash_builder.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + + let keys = state.keys().map(Nibbles::unpack).collect::>(); + let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( + unpacked_state.clone(), + keys, + ); + + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } }); } From 4254b80a89630f1cb875d05c4b3c32f39af86b6b Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 17 Oct 2024 12:27:57 +0200 Subject: [PATCH 005/970] bench(trie): avoid unnecessary clones in hash builder 
repeated bench (#11827) --- crates/trie/sparse/benches/root.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index 4078eb7af31..248e3caeeee 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -90,12 +90,23 @@ pub fn calculate_root_from_leaves_repeated(c: &mut Criterion) { }, |(init_storage, storage_updates, mut trie_updates)| { let mut storage = init_storage; - for update in storage_updates { + let mut storage_updates = storage_updates.into_iter().peekable(); + while let Some(update) = storage_updates.next() { storage.extend(&update); let prefix_set = update.construct_prefix_set().freeze(); - let storage_sorted = storage.clone().into_sorted(); - let trie_updates_sorted = trie_updates.clone().into_sorted(); + let (storage_sorted, trie_updates_sorted) = + if storage_updates.peek().is_some() { + ( + storage.clone().into_sorted(), + trie_updates.clone().into_sorted(), + ) + } else { + ( + std::mem::take(&mut storage).into_sorted(), + std::mem::take(&mut trie_updates).into_sorted(), + ) + }; let walker = TrieWalker::new( InMemoryStorageTrieCursor::new( @@ -133,7 +144,9 @@ pub fn calculate_root_from_leaves_repeated(c: &mut Criterion) { } hb.root(); - trie_updates.finalize(node_iter.walker, hb); + if storage_updates.peek().is_some() { + trie_updates.finalize(node_iter.walker, hb); + } } }, ) From b47ce92d9a049f1069162d7d8e5c63b056d3c36e Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 17 Oct 2024 04:36:25 -0600 Subject: [PATCH 006/970] replace ChainSpec to use EthereumHardforks trait (#11824) --- crates/payload/primitives/src/lib.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 5d100405135..8173cae344a 100644 --- a/crates/payload/primitives/src/lib.rs 
+++ b/crates/payload/primitives/src/lib.rs @@ -26,7 +26,7 @@ pub use traits::{ mod payload; pub use payload::PayloadOrAttributes; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_chainspec::EthereumHardforks; /// The types that are used by the engine API. pub trait PayloadTypes: Send + Sync + Unpin + core::fmt::Debug + Clone + 'static { /// The built payload type. @@ -125,8 +125,8 @@ pub fn validate_payload_timestamp( /// Validates the presence of the `withdrawals` field according to the payload timestamp. /// After Shanghai, withdrawals field must be [Some]. /// Before Shanghai, withdrawals field must be [None]; -pub fn validate_withdrawals_presence( - chain_spec: &ChainSpec, +pub fn validate_withdrawals_presence( + chain_spec: &T, version: EngineApiMessageVersion, message_validation_kind: MessageValidationKind, timestamp: u64, @@ -210,8 +210,8 @@ pub fn validate_withdrawals_presence( /// `MessageValidationKind::Payload`, then the error code will be `-32602: Invalid params`. If the /// parameter is `MessageValidationKind::PayloadAttributes`, then the error code will be `-38003: /// Invalid payload attributes`. -pub fn validate_parent_beacon_block_root_presence( - chain_spec: &ChainSpec, +pub fn validate_parent_beacon_block_root_presence( + chain_spec: &T, version: EngineApiMessageVersion, validation_kind: MessageValidationKind, timestamp: u64, @@ -298,13 +298,14 @@ impl MessageValidationKind { /// either an execution payload, or payload attributes. /// /// The version is provided by the [`EngineApiMessageVersion`] argument. 
-pub fn validate_version_specific_fields( - chain_spec: &ChainSpec, +pub fn validate_version_specific_fields( + chain_spec: &T, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, Type>, ) -> Result<(), EngineObjectValidationError> where Type: PayloadAttributes, + T: EthereumHardforks, { validate_withdrawals_presence( chain_spec, From f8902b59f5ccc626469e419c273e724a6bb79bae Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Oct 2024 12:54:28 +0200 Subject: [PATCH 007/970] chore: pedantic style change (#11832) --- crates/primitives/src/transaction/mod.rs | 35 +++++++++++------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index aeee4232e05..da1a2d87139 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,18 +1,15 @@ //! Transaction types. use crate::BlockHashOrNumber; -use alloy_eips::eip7702::SignedAuthorization; -use alloy_primitives::{keccak256, Address, ChainId, TxKind, B256, U256}; - use alloy_consensus::{ - SignableTransaction, Transaction as AlloyTransaction, TxEip1559, TxEip2930, TxEip4844, - TxEip7702, TxLegacy, + SignableTransaction, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, eip2930::AccessList, + eip7702::SignedAuthorization, }; -use alloy_primitives::{Bytes, TxHash}; +use alloy_primitives::{keccak256, Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use core::mem; use derive_more::{AsRef, Deref}; @@ -709,7 +706,7 @@ impl Encodable for Transaction { } } -impl AlloyTransaction for Transaction { +impl alloy_consensus::Transaction for Transaction { fn chain_id(&self) -> Option { match self { Self::Legacy(tx) => tx.chain_id(), @@ -782,18 +779,6 @@ impl AlloyTransaction for 
Transaction { } } - fn priority_fee_or_price(&self) -> u128 { - match self { - Self::Legacy(tx) => tx.priority_fee_or_price(), - Self::Eip2930(tx) => tx.priority_fee_or_price(), - Self::Eip1559(tx) => tx.priority_fee_or_price(), - Self::Eip4844(tx) => tx.priority_fee_or_price(), - Self::Eip7702(tx) => tx.priority_fee_or_price(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.priority_fee_or_price(), - } - } - fn max_fee_per_blob_gas(&self) -> Option { match self { Self::Legacy(tx) => tx.max_fee_per_blob_gas(), @@ -806,6 +791,18 @@ impl AlloyTransaction for Transaction { } } + fn priority_fee_or_price(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.priority_fee_or_price(), + Self::Eip2930(tx) => tx.priority_fee_or_price(), + Self::Eip1559(tx) => tx.priority_fee_or_price(), + Self::Eip4844(tx) => tx.priority_fee_or_price(), + Self::Eip7702(tx) => tx.priority_fee_or_price(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.priority_fee_or_price(), + } + } + fn to(&self) -> TxKind { match self { Self::Legacy(tx) => tx.to(), From b77265e61b06cb6402ac210a01396ebf57c55418 Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Thu, 17 Oct 2024 18:59:27 +0800 Subject: [PATCH 008/970] reth-bench: rm redundant clone (#11829) --- bin/reth-bench/src/bench/new_payload_fcu.rs | 3 +-- bin/reth-bench/src/bench/new_payload_only.rs | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index a8c18b48a2b..ca5359fb8c2 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -37,9 +37,8 @@ pub struct Command { impl Command { /// Execute `benchmark new-payload-fcu` command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let cloned_args = self.benchmark.clone(); let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = - BenchContext::new(&cloned_args, 
self.rpc_url).await?; + BenchContext::new(&self.benchmark, self.rpc_url).await?; let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index e6392318a54..85342d1af76 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -35,11 +35,10 @@ pub struct Command { impl Command { /// Execute `benchmark new-payload-only` command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let cloned_args = self.benchmark.clone(); // TODO: this could be just a function I guess, but destructuring makes the code slightly // more readable than a 4 element tuple. let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = - BenchContext::new(&cloned_args, self.rpc_url).await?; + BenchContext::new(&self.benchmark, self.rpc_url).await?; let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { From 3bc3e7169990b9340e60c394b0e3241728fd841c Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 17 Oct 2024 13:21:40 +0200 Subject: [PATCH 009/970] primitives: use `EMPTY_OMMER_ROOT_HASH` const when possible (#11833) --- crates/consensus/common/src/validation.rs | 4 ++-- crates/net/eth-wire-types/src/header.rs | 11 ++++------- crates/storage/codecs/src/alloy/header.rs | 3 ++- crates/storage/provider/src/test_utils/blocks.rs | 5 ++--- 4 files changed, 10 insertions(+), 13 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index df66a00d1df..711e7772b66 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -300,7 +300,7 @@ pub fn validate_against_parent_4844( #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxEip4844, EMPTY_ROOT_HASH}; + use 
alloy_consensus::{TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{ hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, Parity, Sealable, U256, }; @@ -441,7 +441,7 @@ mod tests { let header = Header { parent_hash: hex!("859fad46e75d9be177c2584843501f2270c7e5231711e90848290d12d7c6dcdd").into(), - ommers_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: hex!("4675c7e5baafbffbca748158becba61ef3b0a263").into(), state_root: hex!("8337403406e368b3e40411138f4868f79f6d835825d55fd0c2f6e17b1a3948e9").into(), transactions_root: EMPTY_ROOT_HASH, diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs index 7ecfc802d8a..b25a7568b22 100644 --- a/crates/net/eth-wire-types/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -87,7 +87,7 @@ impl From for bool { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::EMPTY_ROOT_HASH; + use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, hex, Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::Header; @@ -124,7 +124,7 @@ mod tests { .unwrap(); let header = Header { parent_hash: b256!("e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2a"), - ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("ba5e000000000000000000000000000000000000"), state_root: b256!("ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7"), transactions_root: b256!("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf"), @@ -228,10 +228,7 @@ mod tests { "3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6", ) .unwrap(), - ommers_hash: B256::from_str( - "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - ) - .unwrap(), + ommers_hash: 
EMPTY_OMMER_ROOT_HASH, beneficiary: Address::from_str("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba").unwrap(), state_root: B256::from_str( "3c837fc158e3e93eafcaf2e658a02f5d8f99abc9f1c4c66cdea96c0ca26406ae", @@ -280,7 +277,7 @@ mod tests { "13a7ec98912f917b3e804654e37c9866092043c13eb8eab94eb64818e886cff5", ) .unwrap(), - ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("f97e180c050e5ab072211ad2c213eb5aee4df134"), state_root: b256!("ec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068a"), transactions_root: EMPTY_ROOT_HASH, diff --git a/crates/storage/codecs/src/alloy/header.rs b/crates/storage/codecs/src/alloy/header.rs index 3a17ed1fdcd..623eded9ee9 100644 --- a/crates/storage/codecs/src/alloy/header.rs +++ b/crates/storage/codecs/src/alloy/header.rs @@ -126,12 +126,13 @@ impl Compact for AlloyHeader { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::{address, b256, bloom, bytes, hex}; /// Holesky block #1947953 const HOLESKY_BLOCK: Header = Header { parent_hash: b256!("8605e0c46689f66b3deed82598e43d5002b71a929023b665228728f0c6e62a95"), - ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("c6e2459991bfe27cca6d86722f35da23a1e4cb97"), state_root: b256!("edad188ca5647d62f4cca417c11a1afbadebce30d23260767f6f587e9b3b9993"), transactions_root: b256!("4daf25dc08a841aa22aa0d3cb3e1f159d4dcaf6a6063d4d36bfac11d3fdb63ee"), diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 57e111d674b..2a70664b1b3 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -1,6 +1,6 @@ //! 
Dummy blocks and data for tests use crate::{DatabaseProviderRW, ExecutionOutcome}; -use alloy_consensus::TxLegacy; +use alloy_consensus::{TxLegacy, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{ b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, Parity, Sealable, TxKind, B256, U256, @@ -66,8 +66,7 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo Header { parent_hash: hex!("c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94f") .into(), - ommers_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") - .into(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: hex!("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba").into(), state_root: hex!("50554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583d") .into(), From 52407b18de50a07efde4d441d9107ab78f1c19ae Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 17 Oct 2024 15:14:48 +0200 Subject: [PATCH 010/970] chore(sdk): incorporate block module into `reth-primitives-traits` (#11835) --- crates/primitives-traits/src/block.rs | 99 ---------------------- crates/primitives-traits/src/block/body.rs | 72 ++-------------- crates/primitives-traits/src/block/mod.rs | 47 +++++----- crates/primitives-traits/src/lib.rs | 3 + 4 files changed, 39 insertions(+), 182 deletions(-) delete mode 100644 crates/primitives-traits/src/block.rs diff --git a/crates/primitives-traits/src/block.rs b/crates/primitives-traits/src/block.rs deleted file mode 100644 index 02f581801c9..00000000000 --- a/crates/primitives-traits/src/block.rs +++ /dev/null @@ -1,99 +0,0 @@ -//! Block abstraction. - -pub mod body; - -use alloc::fmt; -use core::ops; - -use alloy_consensus::BlockHeader; -use alloy_primitives::{Address, Sealable, B256}; - -use crate::{traits::BlockBody, BlockWithSenders, SealedBlock, SealedHeader}; - -/// Abstraction of block data type. 
-pub trait Block: - fmt::Debug - + Clone - + PartialEq - + Eq - + Default - + serde::Serialize - + for<'a> serde::Deserialize<'a> - + From<(Self::Header, Self::Body)> - + Into<(Self::Header, Self::Body)> -{ - /// Header part of the block. - type Header: BlockHeader + Sealable; - - /// The block's body contains the transactions in the block. - type Body: BlockBody; - - /// A block and block hash. - type SealedBlock; - - /// A block and addresses of senders of transactions in it. - type BlockWithSenders; - - /// Returns reference to [`BlockHeader`] type. - fn header(&self) -> &Self::Header; - - /// Returns reference to [`BlockBody`] type. - fn body(&self) -> &Self::Body; - - /// Calculate the header hash and seal the block so that it can't be changed. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn seal_slow(self) -> Self::SealedBlock; - - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn seal(self, hash: B256) -> Self::SealedBlock; - - /// Expensive operation that recovers transaction signer. See - /// [`SealedBlockWithSenders`](reth_primitives::SealedBlockWithSenders). - fn senders(&self) -> Option> { - self.body().recover_signers() - } - - /// Transform into a [`BlockWithSenders`]. - /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - fn with_senders_unchecked(self, senders: Vec
) -> Self::BlockWithSenders { - self.try_with_senders_unchecked(senders).expect("stored block is valid") - } - - /// Transform into a [`BlockWithSenders`] using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also [`TransactionSigned::recover_signer_unchecked`] - /// - /// Returns an error if a signature is invalid. - // todo: can be default impl if block with senders type is made generic over block and migrated - // to alloy - #[track_caller] - fn try_with_senders_unchecked( - self, - senders: Vec
, - ) -> Result; - - /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn with_recovered_senders(self) -> Option; - - /// Calculates a heuristic for the in-memory size of the [`Block`]. - fn size(&self) -> usize; -} diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 03246c68b45..85eeda166c4 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,11 +1,10 @@ //! Block body abstraction. -use alloc::fmt; -use core::ops; +use alloc::{fmt, vec::Vec}; -use alloy_consensus::{BlockHeader,Request, Transaction, TxType}; +use alloy_consensus::{BlockHeader, Request, Transaction, TxType}; +use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{Address, B256}; -use alloy_eips::eip1559::Withdrawal; use crate::Block; @@ -37,7 +36,7 @@ pub trait BlockBody: /// Returns reference to transactions in block. fn transactions(&self) -> &[Self::SignedTransaction]; - /// Returns [`Withdrawals`] in the block, if any. + /// Returns `Withdrawals` in the block, if any. // todo: branch out into extension trait fn withdrawals(&self) -> Option<&Self::Withdrawals>; @@ -60,13 +59,13 @@ pub trait BlockBody: /// Calculate the withdrawals root for the block body, if withdrawals exist. If there are no /// withdrawals, this will return `None`. - // todo: can be default impl if `calculate_withdrawals_root` made into a method on + // todo: can be default impl if `calculate_withdrawals_root` made into a method on // `Withdrawals` and `Withdrawals` moved to alloy fn calculate_withdrawals_root(&self) -> Option; /// Calculate the requests root for the block body, if requests exist. If there are no /// requests, this will return `None`. 
- // todo: can be default impl if `calculate_requests_root` made into a method on + // todo: can be default impl if `calculate_requests_root` made into a method on // `Requests` and `Requests` moved to alloy fn calculate_requests_root(&self) -> Option; @@ -75,17 +74,17 @@ pub trait BlockBody: /// Returns whether or not the block body contains any blob transactions. fn has_blob_transactions(&self) -> bool { - self.transactions().iter().any(|tx| tx.ty() as u8 == TxType::Eip4844 as u8) + self.transactions().iter().any(|tx| tx.ty() == TxType::Eip4844 as u8) } /// Returns whether or not the block body contains any EIP-7702 transactions. fn has_eip7702_transactions(&self) -> bool { - self.transactions().iter().any(|tx| tx.ty() as u8 == TxType::Eip7702 as u8) + self.transactions().iter().any(|tx| tx.ty() == TxType::Eip7702 as u8) } /// Returns an iterator over all blob transactions of the block fn blob_transactions_iter(&self) -> impl Iterator + '_ { - self.transactions().iter().filter(|tx| tx.ty() as u8 == TxType::Eip4844 as u8) + self.transactions().iter().filter(|tx| tx.ty() == TxType::Eip4844 as u8) } /// Returns only the blob transactions, if any, from the block body. @@ -104,56 +103,3 @@ pub trait BlockBody: /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. 
fn size(&self) -> usize; } - -impl BlockBody for T -where - T: ops::Deref - + Clone - + fmt::Debug - + PartialEq - + Eq - + Default - + serde::Serialize - + for<'de> serde::Deserialize<'de> - + alloy_rlp::Encodable - + alloy_rlp::Decodable, -{ - type Header = ::Header; - type SignedTransaction = ::SignedTransaction; - - fn transactions(&self) -> &Vec { - self.deref().transactions() - } - - fn withdrawals(&self) -> Option<&Withdrawals> { - self.deref().withdrawals() - } - - fn ommers(&self) -> &Vec { - self.deref().ommers() - } - - fn requests(&self) -> Option<&Requests> { - self.deref().requests() - } - - fn calculate_tx_root(&self) -> B256 { - self.deref().calculate_tx_root() - } - - fn calculate_ommers_root(&self) -> B256 { - self.deref().calculate_ommers_root() - } - - fn recover_signers(&self) -> Option> { - self.deref().recover_signers() - } - - fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { - self.deref().blob_versioned_hashes_iter() - } - - fn size(&self) -> usize { - self.deref().size() - } -} diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 02f581801c9..395cf61df14 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -2,15 +2,22 @@ pub mod body; -use alloc::fmt; -use core::ops; +use alloc::{fmt, vec::Vec}; use alloy_consensus::BlockHeader; use alloy_primitives::{Address, Sealable, B256}; -use crate::{traits::BlockBody, BlockWithSenders, SealedBlock, SealedHeader}; +use crate::BlockBody; + +/// Helper trait, unifies behaviour required of a block header. +pub trait Header: BlockHeader + Sealable {} + +impl Header for T where T: BlockHeader + Sealable {} /// Abstraction of block data type. 
+// todo: make sealable super-trait, depends on +// todo: make with senders extension trait, so block can be impl by block type already containing +// senders pub trait Block: fmt::Debug + Clone @@ -23,16 +30,16 @@ pub trait Block: + Into<(Self::Header, Self::Body)> { /// Header part of the block. - type Header: BlockHeader + Sealable; + type Header: Header; /// The block's body contains the transactions in the block. type Body: BlockBody; /// A block and block hash. - type SealedBlock; + type SealedBlock; /// A block and addresses of senders of transactions in it. - type BlockWithSenders; + type BlockWithSenders; /// Returns reference to [`BlockHeader`] type. fn header(&self) -> &Self::Header; @@ -41,24 +48,24 @@ pub trait Block: fn body(&self) -> &Self::Body; /// Calculate the header hash and seal the block so that it can't be changed. - // todo: can be default impl if sealed block type is made generic over header and body and + // todo: can be default impl if sealed block type is made generic over header and body and // migrated to alloy - fn seal_slow(self) -> Self::SealedBlock; + fn seal_slow(self) -> Self::SealedBlock; /// Seal the block with a known hash. /// /// WARNING: This method does not perform validation whether the hash is correct. - // todo: can be default impl if sealed block type is made generic over header and body and + // todo: can be default impl if sealed block type is made generic over header and body and // migrated to alloy - fn seal(self, hash: B256) -> Self::SealedBlock; + fn seal(self, hash: B256) -> Self::SealedBlock; /// Expensive operation that recovers transaction signer. See - /// [`SealedBlockWithSenders`](reth_primitives::SealedBlockWithSenders). + /// `SealedBlockWithSenders`. fn senders(&self) -> Option> { self.body().recover_signers() } - /// Transform into a [`BlockWithSenders`]. + /// Transform into a `BlockWithSenders`. 
/// /// # Panics /// @@ -67,32 +74,32 @@ pub trait Block: /// /// Note: this is expected to be called with blocks read from disk. #[track_caller] - fn with_senders_unchecked(self, senders: Vec
) -> Self::BlockWithSenders { + fn with_senders_unchecked(self, senders: Vec
) -> Self::BlockWithSenders { self.try_with_senders_unchecked(senders).expect("stored block is valid") } - /// Transform into a [`BlockWithSenders`] using the given senders. + /// Transform into a `BlockWithSenders` using the given senders. /// /// If the number of senders does not match the number of transactions in the block, this falls /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also [`TransactionSigned::recover_signer_unchecked`] + /// See also `SignedTransaction::recover_signer_unchecked`. /// /// Returns an error if a signature is invalid. - // todo: can be default impl if block with senders type is made generic over block and migrated + // todo: can be default impl if block with senders type is made generic over block and migrated // to alloy #[track_caller] fn try_with_senders_unchecked( self, senders: Vec
, - ) -> Result; + ) -> Result, Self>; - /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained + /// **Expensive**. Transform into a `BlockWithSenders` by recovering senders in the contained /// transactions. /// /// Returns `None` if a transaction is invalid. - // todo: can be default impl if sealed block type is made generic over header and body and + // todo: can be default impl if sealed block type is made generic over header and body and // migrated to alloy - fn with_recovered_senders(self) -> Option; + fn with_recovered_senders(self) -> Option>; /// Calculates a heuristic for the in-memory size of the [`Block`]. fn size(&self) -> usize; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index dd68607f591..8c54bd68c96 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -32,6 +32,9 @@ pub use integer_list::{IntegerList, IntegerListError}; pub mod request; pub use request::{Request, Requests}; +pub mod block; +pub use block::{body::BlockBody, Block}; + mod withdrawal; pub use withdrawal::{Withdrawal, Withdrawals}; From 6ba4bbe4aad49e483b0670903bdb0328d9ca1b57 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Oct 2024 15:54:32 +0200 Subject: [PATCH 011/970] chore: make op-evm compile with no-std (#11834) --- Cargo.lock | 2 +- crates/optimism/evm/Cargo.toml | 4 +++- crates/optimism/evm/src/error.rs | 13 ++++++++----- crates/optimism/evm/src/execute.rs | 3 ++- crates/optimism/evm/src/l1.rs | 2 +- crates/optimism/evm/src/lib.rs | 6 +++++- crates/optimism/evm/src/strategy.rs | 3 ++- 7 files changed, 22 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d31239808a2..855e3de5e1c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8108,6 +8108,7 @@ dependencies = [ "alloy-eips", "alloy-genesis", "alloy-primitives", + "derive_more 1.0.0", "op-alloy-consensus", "reth-chainspec", "reth-consensus", @@ -8123,7 +8124,6 @@ 
dependencies = [ "reth-revm", "revm", "revm-primitives", - "thiserror", "tracing", ] diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 0a22dcfddb4..a1e2021a4ad 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -37,7 +37,7 @@ revm.workspace = true revm-primitives.workspace = true # misc -thiserror.workspace = true +derive_more.workspace = true tracing.workspace = true [dev-dependencies] @@ -51,6 +51,8 @@ alloy-genesis.workspace = true alloy-consensus.workspace = true [features] +default = ["std"] +std = [] optimism = [ "reth-primitives/optimism", "reth-execution-types/optimism", diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index c5c6a0a4a3d..71f8709e1ad 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -1,27 +1,30 @@ //! Error types for the Optimism EVM module. +use alloc::string::String; use reth_evm::execute::BlockExecutionError; /// Optimism Block Executor Errors -#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, derive_more::Display)] pub enum OptimismBlockExecutionError { /// Error when trying to parse L1 block info - #[error("could not get L1 block info from L2 block: {message:?}")] + #[display("could not get L1 block info from L2 block: {message}")] L1BlockInfoError { /// The inner error message message: String, }, /// Thrown when force deploy of create2deployer code fails. - #[error("failed to force create2deployer account code")] + #[display("failed to force create2deployer account code")] ForceCreate2DeployerFail, /// Thrown when a blob transaction is included in a sequencer's block. - #[error("blob transaction included in sequencer block")] + #[display("blob transaction included in sequencer block")] BlobTransactionRejected, /// Thrown when a database account could not be loaded. 
- #[error("failed to load account {0}")] + #[display("failed to load account {_0}")] AccountLoadFailed(alloy_primitives::Address), } +impl core::error::Error for OptimismBlockExecutionError {} + impl From for BlockExecutionError { fn from(err: OptimismBlockExecutionError) -> Self { Self::other(err) diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index f7da1c250d9..26342062321 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -3,8 +3,10 @@ use crate::{ l1::ensure_create2_deployer, OpChainSpec, OptimismBlockExecutionError, OptimismEvmConfig, }; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; use alloy_primitives::{BlockNumber, U256}; +use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_evm::{ execute::{ @@ -27,7 +29,6 @@ use revm_primitives::{ db::{Database, DatabaseCommit}, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, }; -use std::{fmt::Display, sync::Arc}; use tracing::trace; /// Provides executors to execute regular optimism blocks diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 3412501eb99..e0668ab0204 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,6 +1,7 @@ //! 
Optimism-specific implementation and utilities for the executor use crate::OptimismBlockExecutionError; +use alloc::{string::ToString, sync::Arc}; use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256}; use reth_chainspec::ChainSpec; use reth_execution_errors::BlockExecutionError; @@ -11,7 +12,6 @@ use revm::{ primitives::{Bytecode, HashMap, SpecId}, DatabaseCommit, L1BlockInfo, }; -use std::sync::Arc; use tracing::trace; /// The address of the create2 deployer diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 4d0f9d89ff4..eb067da3256 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -6,9 +6,14 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] +#[macro_use] +extern crate alloc; + +use alloc::{sync::Arc, vec::Vec}; use alloy_primitives::{Address, U256}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; use reth_optimism_chainspec::OpChainSpec; @@ -18,7 +23,6 @@ use reth_primitives::{ Head, Header, TransactionSigned, }; use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; -use std::sync::Arc; mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_bedrock}; diff --git a/crates/optimism/evm/src/strategy.rs b/crates/optimism/evm/src/strategy.rs index fe8164cc7cf..9ba43604ea0 100644 --- a/crates/optimism/evm/src/strategy.rs +++ b/crates/optimism/evm/src/strategy.rs @@ -1,7 +1,9 @@ //! 
Optimism block execution strategy, use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; +use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_evm::{ @@ -24,7 +26,6 @@ use reth_revm::{ use revm_primitives::{ db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, }; -use std::{fmt::Display, sync::Arc}; use tracing::trace; /// Factory for [`OpExecutionStrategy`]. From 2131c87edb8f3c7459a73c19de9034fd1e28a77c Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Thu, 17 Oct 2024 22:40:10 +0800 Subject: [PATCH 012/970] refactor: rm redundant clones in tests (#11840) --- crates/prune/prune/src/segments/receipts.rs | 6 ++++-- crates/prune/prune/src/segments/user/receipts_by_logs.rs | 1 + crates/prune/prune/src/segments/user/sender_recovery.rs | 7 ++++--- crates/prune/prune/src/segments/user/transaction_lookup.rs | 6 ++++-- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index 05482d65953..c081bf88c7d 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -109,12 +109,14 @@ mod tests { let mut receipts = Vec::new(); for block in &blocks { + receipts.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { receipts .push((receipts.len() as u64, random_receipt(&mut rng, transaction, Some(0)))); } } - db.insert_receipts(receipts.clone()).expect("insert receipts"); + let receipts_len = receipts.len(); + db.insert_receipts(receipts).expect("insert receipts"); assert_eq!( db.table::().unwrap().len(), @@ -194,7 +196,7 @@ mod tests { assert_eq!( db.table::().unwrap().len(), - receipts.len() - (last_pruned_tx_number + 1) + receipts_len - (last_pruned_tx_number + 1) ); assert_eq!( 
db.factory diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index 05bc40b6c7b..ee2accee1b3 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -263,6 +263,7 @@ mod tests { let (deposit_contract_addr, _) = random_eoa_account(&mut rng); for block in &blocks { + receipts.reserve_exact(block.body.size()); for (txi, transaction) in block.body.transactions.iter().enumerate() { let mut receipt = random_receipt(&mut rng, transaction, Some(1)); receipt.logs.push(random_log( diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index bd86f3e6521..f189e6c36af 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -110,6 +110,7 @@ mod tests { let mut transaction_senders = Vec::new(); for block in &blocks { + transaction_senders.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { transaction_senders.push(( transaction_senders.len() as u64, @@ -117,8 +118,8 @@ mod tests { )); } } - db.insert_transaction_senders(transaction_senders.clone()) - .expect("insert transaction senders"); + let transaction_senders_len = transaction_senders.len(); + db.insert_transaction_senders(transaction_senders).expect("insert transaction senders"); assert_eq!( db.table::().unwrap().len(), @@ -202,7 +203,7 @@ mod tests { assert_eq!( db.table::().unwrap().len(), - transaction_senders.len() - (last_pruned_tx_number + 1) + transaction_senders_len - (last_pruned_tx_number + 1) ); assert_eq!( db.factory diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index bb8196cdb03..2df8cccf305 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ 
b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -140,11 +140,13 @@ mod tests { let mut tx_hash_numbers = Vec::new(); for block in &blocks { + tx_hash_numbers.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { tx_hash_numbers.push((transaction.hash, tx_hash_numbers.len() as u64)); } } - db.insert_tx_hash_numbers(tx_hash_numbers.clone()).expect("insert tx hash numbers"); + let tx_hash_numbers_len = tx_hash_numbers.len(); + db.insert_tx_hash_numbers(tx_hash_numbers).expect("insert tx hash numbers"); assert_eq!( db.table::().unwrap().len(), @@ -228,7 +230,7 @@ mod tests { assert_eq!( db.table::().unwrap().len(), - tx_hash_numbers.len() - (last_pruned_tx_number + 1) + tx_hash_numbers_len - (last_pruned_tx_number + 1) ); assert_eq!( db.factory From 9db28d91a47e309960f554479a5fbfa61a56dc4e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 17 Oct 2024 18:29:17 +0200 Subject: [PATCH 013/970] chore(sdk): Impl `alloy_consensus::Transaction` for `TransactionSigned` (#11843) --- crates/primitives/src/transaction/mod.rs | 66 ++++++++++++++++++- .../rpc-types-compat/src/transaction/mod.rs | 4 +- crates/rpc/rpc/src/eth/helpers/types.rs | 5 +- crates/rpc/rpc/src/txpool.rs | 2 +- crates/transaction-pool/src/traits.rs | 4 +- 5 files changed, 71 insertions(+), 10 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index da1a2d87139..10fddcb4c37 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1351,6 +1351,68 @@ impl TransactionSigned { } } +impl alloy_consensus::Transaction for TransactionSigned { + fn chain_id(&self) -> Option { + self.deref().chain_id() + } + + fn nonce(&self) -> u64 { + self.deref().nonce() + } + + fn gas_limit(&self) -> u64 { + self.deref().gas_limit() + } + + fn gas_price(&self) -> Option { + self.deref().gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + 
self.deref().max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.deref().max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.deref().max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.deref().priority_fee_or_price() + } + + fn to(&self) -> TxKind { + alloy_consensus::Transaction::to(self.deref()) + } + + fn value(&self) -> U256 { + self.deref().value() + } + + fn input(&self) -> &[u8] { + self.deref().input() + } + + fn ty(&self) -> u8 { + self.deref().ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.deref().access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + alloy_consensus::Transaction::blob_versioned_hashes(self.deref()) + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.deref().authorization_list() + } +} + impl From for TransactionSigned { fn from(recovered: TransactionSignedEcRecovered) -> Self { recovered.signed_transaction @@ -2181,8 +2243,8 @@ mod tests { let tx = TransactionSigned::decode_2718(&mut data.as_slice()).unwrap(); let sender = tx.recover_signer().unwrap(); assert_eq!(sender, address!("001e2b7dE757bA469a57bF6b23d982458a07eFcE")); - assert_eq!(tx.to(), Some(address!("D9e1459A7A482635700cBc20BBAF52D495Ab9C96"))); - assert_eq!(tx.input().as_ref(), hex!("1b55ba3a")); + assert_eq!(tx.to(), Some(address!("D9e1459A7A482635700cBc20BBAF52D495Ab9C96")).into()); + assert_eq!(tx.input(), hex!("1b55ba3a")); let encoded = tx.encoded_2718(); assert_eq!(encoded.as_ref(), data.to_vec()); } diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index a489a588617..7ffd48cb1f7 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -48,9 +48,7 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { // baseFee` let gas_price = base_fee .and_then(|base_fee| { - 
signed_tx - .effective_tip_per_gas(Some(base_fee)) - .map(|tip| tip + base_fee as u128) + signed_tx.effective_tip_per_gas(base_fee).map(|tip| tip + base_fee as u128) }) .unwrap_or_else(|| signed_tx.max_fee_per_gas()); diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 982afdcac0a..ab7b1a268e0 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -37,8 +37,9 @@ where let GasPrice { gas_price, max_fee_per_gas } = Self::gas_price(&signed_tx, base_fee.map(|fee| fee as u64)); + let input = signed_tx.input().to_vec().into(); let chain_id = signed_tx.chain_id(); - let blob_versioned_hashes = signed_tx.blob_versioned_hashes(); + let blob_versioned_hashes = signed_tx.blob_versioned_hashes().map(|hs| hs.to_vec()); let access_list = signed_tx.access_list().cloned(); let authorization_list = signed_tx.authorization_list().map(|l| l.to_vec()); @@ -60,7 +61,7 @@ where max_priority_fee_per_gas: signed_tx.max_priority_fee_per_gas(), signature: Some(signature), gas: signed_tx.gas_limit(), - input: signed_tx.input().clone(), + input, chain_id, access_list, transaction_type: Some(signed_tx.tx_type() as u8), diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 47aaac0bbfd..d40cdc5cdbb 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -101,7 +101,7 @@ where entry.insert( tx.nonce().to_string(), TxpoolInspectSummary { - to: tx.to(), + to: tx.to().into(), value: tx.value(), gas: tx.gas_limit() as u128, gas_price: tx.transaction.max_fee_per_gas(), diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index d19381935ec..ee4a5ada3d8 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1183,7 +1183,7 @@ impl PoolTransaction for EthPooledTransaction { /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. 
/// For legacy transactions: `gas_price - base_fee`. fn effective_tip_per_gas(&self, base_fee: u64) -> Option { - self.transaction.effective_tip_per_gas(Some(base_fee)) + self.transaction.effective_tip_per_gas(base_fee) } /// Returns the max priority fee per gas if the transaction is an EIP-1559 transaction, and @@ -1199,7 +1199,7 @@ impl PoolTransaction for EthPooledTransaction { } fn input(&self) -> &[u8] { - self.transaction.input().as_ref() + self.transaction.input() } /// Returns a measurement of the heap usage of this type and all its internals. From 1aa3ce1a5a99d9ffd050d18b8a2fc621c22e80b2 Mon Sep 17 00:00:00 2001 From: Kunal Arora <55632507+aroralanuk@users.noreply.github.com> Date: Thu, 17 Oct 2024 22:02:29 +0530 Subject: [PATCH 014/970] feat(cli): add ChainSpecParser type to rethCli (#11772) --- crates/cli/cli/src/lib.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/cli/cli/src/lib.rs b/crates/cli/cli/src/lib.rs index f7bf716ea37..e2c55057a48 100644 --- a/crates/cli/cli/src/lib.rs +++ b/crates/cli/cli/src/lib.rs @@ -15,6 +15,7 @@ use std::{borrow::Cow, ffi::OsString}; /// The chainspec module defines the different chainspecs that can be used by the node. pub mod chainspec; +use crate::chainspec::ChainSpecParser; /// Reth based node cli. /// @@ -23,6 +24,9 @@ pub mod chainspec; /// It provides commonly used functionality for running commands and information about the CL, such /// as the name and version. pub trait RethCli: Sized { + /// The associated `ChainSpecParser` type + type ChainSpecParser: ChainSpecParser; + /// The name of the implementation, eg. `reth`, `op-reth`, etc. 
fn name(&self) -> Cow<'static, str>; From 0a1473b6e7949875f480043fa6874bdc583786d2 Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Fri, 18 Oct 2024 00:40:59 +0800 Subject: [PATCH 015/970] perf(blockchain-tree:) use `Vec::reserve_exact` (#11839) --- crates/blockchain-tree/src/blockchain_tree.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 71a58aa5628..db43dffcd36 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -902,6 +902,7 @@ where // check unconnected block buffer for children of the chains let mut all_chain_blocks = Vec::new(); for chain in self.state.chains.values() { + all_chain_blocks.reserve_exact(chain.blocks().len()); for (&number, block) in chain.blocks() { all_chain_blocks.push(BlockNumHash { number, hash: block.hash() }) } From 76edc388237c18a586038924bc39dcbc6c72eaac Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 17 Oct 2024 13:48:05 -0400 Subject: [PATCH 016/970] fix(rpc): apply beacon root contract call in debug_traceTransaction (#11845) --- crates/rpc/rpc/src/debug.rs | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index acf215b3b2c..164f402e44c 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -16,6 +16,7 @@ use jsonrpsee::core::RpcResult; use reth_chainspec::EthereumHardforks; use reth_evm::{ execute::{BlockExecutorProvider, Executor}, + system_calls::SystemCaller, ConfigureEvmEnv, }; use reth_primitives::{Block, BlockId, BlockNumberOrTag, TransactionSignedEcRecovered}; @@ -26,7 +27,7 @@ use reth_provider::{ use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ - helpers::{Call, EthApiSpec, EthTransactions, TraceExt}, + helpers::{Call, EthApiSpec, 
EthTransactions, LoadState, TraceExt}, EthApiTypes, FromEthApiError, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; @@ -245,6 +246,7 @@ where // block the transaction is included in let state_at: BlockId = block.parent_hash.into(); let block_hash = block.hash(); + let parent_beacon_block_root = block.parent_beacon_block_root; let this = self.clone(); self.eth_api() @@ -255,6 +257,26 @@ where let tx = transaction.into_recovered(); let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // apply relevant system calls + let mut system_caller = SystemCaller::new( + Call::evm_config(this.eth_api()).clone(), + LoadState::provider(this.eth_api()).chain_spec(), + ); + + system_caller + .pre_block_beacon_root_contract_call( + &mut db, + &cfg, + &block_env, + parent_beacon_block_root, + ) + .map_err(|_| { + EthApiError::EvmCustom( + "failed to apply 4788 beacon root system call".to_string(), + ) + })?; + // replay all transactions prior to the targeted transaction let index = this.eth_api().replay_transactions_until( &mut db, From 52848a352a241cb1d16ef5040ef2c4b16b0cac15 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Oct 2024 20:13:05 +0200 Subject: [PATCH 017/970] fix: check for prague timestamp on pool init (#11847) --- crates/transaction-pool/src/validate/eth.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 49165f189a0..2594e569aa0 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -639,6 +639,7 @@ impl EthTransactionValidatorBuilder { pub fn with_head_timestamp(mut self, timestamp: u64) -> Self { self.cancun = self.chain_spec.is_cancun_active_at_timestamp(timestamp); self.shanghai = self.chain_spec.is_shanghai_active_at_timestamp(timestamp); + self.prague = self.chain_spec.is_prague_active_at_timestamp(timestamp); self } From a6c8bda029eb32703ae77a8eea4e69aa0ec00a10 Mon Sep 17
00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 17 Oct 2024 21:44:04 +0200 Subject: [PATCH 018/970] primitives: use alloy `MAINNET_GENESIS_HASH` constant (#11848) --- Cargo.lock | 5 +++++ crates/chainspec/Cargo.toml | 1 + crates/chainspec/src/spec.rs | 3 ++- crates/ethereum-forks/Cargo.toml | 1 + crates/ethereum-forks/src/forkid.rs | 11 ++++------- crates/ethereum/node/Cargo.toml | 1 + crates/ethereum/node/tests/e2e/blobs.rs | 8 +++----- crates/net/eth-wire-types/src/status.rs | 13 ++++--------- crates/primitives-traits/src/constants/mod.rs | 5 ----- crates/primitives/src/lib.rs | 5 +---- crates/storage/db-common/Cargo.toml | 1 + crates/storage/db-common/src/init.rs | 3 ++- examples/manual-p2p/Cargo.toml | 2 ++ examples/manual-p2p/src/main.rs | 3 ++- 14 files changed, 29 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 855e3de5e1c..600d61e97e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2857,6 +2857,7 @@ dependencies = [ name = "example-manual-p2p" version = "0.0.0" dependencies = [ + "alloy-consensus", "eyre", "futures", "reth-chainspec", @@ -6528,6 +6529,7 @@ name = "reth-chainspec" version = "1.1.0" dependencies = [ "alloy-chains", + "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", @@ -6813,6 +6815,7 @@ dependencies = [ name = "reth-db-common" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-genesis", "alloy-primitives", "boyer-moore-magiclen", @@ -7303,6 +7306,7 @@ name = "reth-ethereum-forks" version = "1.1.0" dependencies = [ "alloy-chains", + "alloy-consensus", "alloy-primitives", "alloy-rlp", "arbitrary", @@ -7931,6 +7935,7 @@ dependencies = [ name = "reth-node-ethereum" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-genesis", "alloy-primitives", "eyre", diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index b44a606b65b..2864427c2af 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -23,6 
+23,7 @@ alloy-eips = { workspace = true, features = ["serde"] } alloy-genesis.workspace = true alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-trie.workspace = true +alloy-consensus.workspace = true # misc auto_impl.workspace = true diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index a8bae966b58..deaca188a21 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -7,6 +7,7 @@ use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use alloy_trie::EMPTY_ROOT_HASH; use derive_more::From; +use alloy_consensus::constants::MAINNET_GENESIS_HASH; use reth_ethereum_forks::{ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Hardforks, Head, DEV_HARDFORKS, @@ -18,7 +19,7 @@ use reth_network_peers::{ use reth_primitives_traits::{ constants::{ DEV_GENESIS_HASH, EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, - HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH, }, Header, SealedHeader, }; diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index 08a0bc98dbc..62ea234cd5b 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -36,6 +36,7 @@ auto_impl.workspace = true [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true +alloy-consensus.workspace = true [features] default = ["std", "serde", "rustc-hash"] diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index 6876d0eb926..b612f3b0b1a 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -446,15 +446,12 @@ impl Cache { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::b256; - - const GENESIS_HASH: B256 = - 
b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); + use alloy_consensus::constants::MAINNET_GENESIS_HASH; // EIP test vectors. #[test] fn forkhash() { - let mut fork_hash = ForkHash::from(GENESIS_HASH); + let mut fork_hash = ForkHash::from(MAINNET_GENESIS_HASH); assert_eq!(fork_hash.0, hex!("fc64ec04")); fork_hash += 1_150_000u64; @@ -468,7 +465,7 @@ mod tests { fn compatibility_check() { let mut filter = ForkFilter::new( Head { number: 0, ..Default::default() }, - GENESIS_HASH, + MAINNET_GENESIS_HASH, 0, vec![ ForkFilterKey::Block(1_150_000), @@ -727,7 +724,7 @@ mod tests { let mut fork_filter = ForkFilter::new( Head { number: 0, ..Default::default() }, - GENESIS_HASH, + MAINNET_GENESIS_HASH, 0, vec![ForkFilterKey::Block(b1), ForkFilterKey::Block(b2)], ); diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 7a323f91d87..213071cbfb5 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -52,6 +52,7 @@ alloy-genesis.workspace = true tokio.workspace = true futures-util.workspace = true serde_json.workspace = true +alloy-consensus.workspace = true [features] default = [] diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index 9390b34f444..b4d9a532aeb 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -1,7 +1,7 @@ use std::sync::Arc; +use alloy_consensus::constants::MAINNET_GENESIS_HASH; use alloy_genesis::Genesis; -use alloy_primitives::b256; use reth::{ args::RpcServerArgs, builder::{NodeBuilder, NodeConfig, NodeHandle}, @@ -75,13 +75,11 @@ async fn can_handle_blobs() -> eyre::Result<()> { .submit_payload(blob_payload, blob_attr, PayloadStatusEnum::Valid, versioned_hashes.clone()) .await?; - let genesis_hash = b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); - let (_, _) = tokio::join!( // send fcu with blob hash - 
node.engine_api.update_forkchoice(genesis_hash, blob_block_hash), + node.engine_api.update_forkchoice(MAINNET_GENESIS_HASH, blob_block_hash), // send fcu with normal hash - node.engine_api.update_forkchoice(genesis_hash, payload.block().hash()) + node.engine_api.update_forkchoice(MAINNET_GENESIS_HASH, payload.block().hash()) ); // submit normal payload diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index a5e7530ec09..90e1731c90a 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -138,10 +138,10 @@ impl Default for Status { /// /// # Example /// ``` +/// use alloy_consensus::constants::MAINNET_GENESIS_HASH; /// use alloy_primitives::{B256, U256}; /// use reth_chainspec::{Chain, EthereumHardfork, MAINNET}; /// use reth_eth_wire_types::{EthVersion, Status}; -/// use reth_primitives::MAINNET_GENESIS_HASH; /// /// // this is just an example status message! /// let status = Status::builder() @@ -216,6 +216,7 @@ impl StatusBuilder { #[cfg(test)] mod tests { use crate::{EthVersion, Status}; + use alloy_consensus::constants::MAINNET_GENESIS_HASH; use alloy_genesis::Genesis; use alloy_primitives::{hex, B256, U256}; use alloy_rlp::{Decodable, Encodable}; @@ -235,10 +236,7 @@ mod tests { "feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d", ) .unwrap(), - genesis: B256::from_str( - "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", - ) - .unwrap(), + genesis: MAINNET_GENESIS_HASH, forkid: ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 }, }; @@ -258,10 +256,7 @@ mod tests { "feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d", ) .unwrap(), - genesis: B256::from_str( - "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", - ) - .unwrap(), + genesis: MAINNET_GENESIS_HASH, forkid: ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 }, }; let status = Status::decode(&mut &data[..]).unwrap(); diff --git 
a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 5d64b911b60..890287f8bc1 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -98,11 +98,6 @@ pub const FINNEY_TO_WEI: u128 = (GWEI_TO_WEI as u128) * 1_000_000; /// Multiplier for converting ether to wei. pub const ETH_TO_WEI: u128 = FINNEY_TO_WEI * 1000; -/// The Ethereum mainnet genesis hash: -/// `0x0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3` -pub const MAINNET_GENESIS_HASH: B256 = - b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); - /// Sepolia genesis hash: `0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9` pub const SEPOLIA_GENESIS_HASH: B256 = b256!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9"); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index a59e72bbd55..4a8d812ef83 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -39,10 +39,7 @@ pub use block::{ }; #[cfg(feature = "reth-codec")] pub use compression::*; -pub use constants::{ - DEV_GENESIS_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, MAINNET_GENESIS_HASH, - SEPOLIA_GENESIS_HASH, -}; +pub use constants::{DEV_GENESIS_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, SEPOLIA_GENESIS_HASH}; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 7fc48796986..9e4954357f8 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -42,6 +42,7 @@ tracing.workspace = true [dev-dependencies] reth-primitives-traits.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } +alloy-consensus.workspace = true [lints] workspace = true diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 
3962dfd6980..f0695421ec5 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -581,6 +581,7 @@ struct GenesisAccountWithAddress { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::constants::MAINNET_GENESIS_HASH; use alloy_genesis::Genesis; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; use reth_db::DatabaseEnv; @@ -591,7 +592,7 @@ mod tests { transaction::DbTx, Database, }; - use reth_primitives::{HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; + use reth_primitives::{HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; use reth_primitives_traits::IntegerList; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, diff --git a/examples/manual-p2p/Cargo.toml b/examples/manual-p2p/Cargo.toml index 2303bfbfeac..b1642f66ca0 100644 --- a/examples/manual-p2p/Cargo.toml +++ b/examples/manual-p2p/Cargo.toml @@ -14,6 +14,8 @@ reth-eth-wire.workspace = true reth-ecies.workspace = true reth-network-peers.workspace = true +alloy-consensus.workspace = true + secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } futures.workspace = true diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs index edaaf858489..857a8a1c126 100644 --- a/examples/manual-p2p/src/main.rs +++ b/examples/manual-p2p/src/main.rs @@ -8,6 +8,7 @@ use std::time::Duration; +use alloy_consensus::constants::MAINNET_GENESIS_HASH; use futures::StreamExt; use reth_chainspec::{Chain, MAINNET}; use reth_discv4::{DiscoveryUpdate, Discv4, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; @@ -17,7 +18,7 @@ use reth_eth_wire::{ }; use reth_network::config::rng_secret_key; use reth_network_peers::{mainnet_nodes, pk2id, NodeRecord}; -use reth_primitives::{EthereumHardfork, Head, MAINNET_GENESIS_HASH}; +use reth_primitives::{EthereumHardfork, Head}; use secp256k1::{SecretKey, SECP256K1}; use std::sync::LazyLock; use 
tokio::net::TcpStream; From 8eb5d4f04782ae6d22bc242fc0ec7db63115b315 Mon Sep 17 00:00:00 2001 From: "0xriazaka.eth" <168359025+0xriazaka@users.noreply.github.com> Date: Thu, 17 Oct 2024 21:28:13 +0100 Subject: [PATCH 019/970] Remove unsafe from impl Compact for ClientVersion (#11318) Co-authored-by: Emilia Hane Co-authored-by: DaniPopes <57450786+DaniPopes@users.noreply.github.com> --- crates/storage/codecs/src/lib.rs | 15 +++++++++++++++ crates/storage/db-models/src/client_version.rs | 18 +++++++----------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 8608c5eb8c1..54ca046cb71 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -78,6 +78,21 @@ pub trait Compact: Sized { } } +impl Compact for alloc::string::String { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.as_bytes().to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (vec, buf) = Vec::::from_compact(buf, len); + let string = Self::from_utf8(vec).unwrap(); // Safe conversion + (string, buf) + } +} + impl Compact for &T { fn to_compact(&self, buf: &mut B) -> usize where diff --git a/crates/storage/db-models/src/client_version.rs b/crates/storage/db-models/src/client_version.rs index de074ac88c6..a28e7385f65 100644 --- a/crates/storage/db-models/src/client_version.rs +++ b/crates/storage/db-models/src/client_version.rs @@ -28,20 +28,16 @@ impl Compact for ClientVersion { where B: bytes::BufMut + AsMut<[u8]>, { - self.version.as_bytes().to_compact(buf); - self.git_sha.as_bytes().to_compact(buf); - self.build_timestamp.as_bytes().to_compact(buf) + self.version.to_compact(buf); + self.git_sha.to_compact(buf); + self.build_timestamp.to_compact(buf) } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (version, buf) = Vec::::from_compact(buf, len); - let (git_sha, buf) = 
Vec::::from_compact(buf, len); - let (build_timestamp, buf) = Vec::::from_compact(buf, len); - let client_version = Self { - version: unsafe { String::from_utf8_unchecked(version) }, - git_sha: unsafe { String::from_utf8_unchecked(git_sha) }, - build_timestamp: unsafe { String::from_utf8_unchecked(build_timestamp) }, - }; + let (version, buf) = String::from_compact(buf, len); + let (git_sha, buf) = String::from_compact(buf, len); + let (build_timestamp, buf) = String::from_compact(buf, len); + let client_version = Self { version, git_sha, build_timestamp }; (client_version, buf) } } From b57cbfd21ba4a39d31c64cf4c7bbc5449a86afb5 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 17 Oct 2024 22:29:31 +0200 Subject: [PATCH 020/970] primitives: use alloy `DEV_GENESIS_HASH` constant (#11849) --- Cargo.lock | 1 + crates/chainspec/src/spec.rs | 4 ++-- crates/optimism/chainspec/Cargo.toml | 1 + crates/optimism/chainspec/src/dev.rs | 2 +- crates/primitives-traits/src/constants/mod.rs | 4 ---- crates/primitives/src/lib.rs | 2 +- 6 files changed, 6 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 600d61e97e2..1d9f74a207d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8032,6 +8032,7 @@ name = "reth-optimism-chainspec" version = "1.1.0" dependencies = [ "alloy-chains", + "alloy-consensus", "alloy-genesis", "alloy-primitives", "derive_more 1.0.0", diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index deaca188a21..adfee564d74 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -7,7 +7,7 @@ use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use alloy_trie::EMPTY_ROOT_HASH; use derive_more::From; -use alloy_consensus::constants::MAINNET_GENESIS_HASH; +use alloy_consensus::constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH}; use reth_ethereum_forks::{ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, 
ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Hardforks, Head, DEV_HARDFORKS, @@ -18,7 +18,7 @@ use reth_network_peers::{ }; use reth_primitives_traits::{ constants::{ - DEV_GENESIS_HASH, EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, + EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH, }, Header, SealedHeader, diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index c9f951c8d20..efc9bf0b012 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -25,6 +25,7 @@ reth-optimism-forks.workspace = true alloy-chains.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # op op-alloy-rpc-types.workspace = true diff --git a/crates/optimism/chainspec/src/dev.rs b/crates/optimism/chainspec/src/dev.rs index cb8163dfc52..eae25f73e01 100644 --- a/crates/optimism/chainspec/src/dev.rs +++ b/crates/optimism/chainspec/src/dev.rs @@ -3,10 +3,10 @@ use alloc::sync::Arc; use alloy_chains::Chain; +use alloy_consensus::constants::DEV_GENESIS_HASH; use alloy_primitives::U256; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_optimism_forks::DEV_HARDFORKS; -use reth_primitives_traits::constants::DEV_GENESIS_HASH; use crate::{LazyLock, OpChainSpec}; diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 890287f8bc1..eade399897b 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -106,10 +106,6 @@ pub const SEPOLIA_GENESIS_HASH: B256 = pub const HOLESKY_GENESIS_HASH: B256 = b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); -/// Testnet genesis hash: `0x2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c` -pub const DEV_GENESIS_HASH: B256 = - 
b256!("2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c"); - /// Keccak256 over empty array: `0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470` pub const KECCAK_EMPTY: B256 = b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 4a8d812ef83..a39139349ab 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -39,7 +39,7 @@ pub use block::{ }; #[cfg(feature = "reth-codec")] pub use compression::*; -pub use constants::{DEV_GENESIS_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, SEPOLIA_GENESIS_HASH}; +pub use constants::{HOLESKY_GENESIS_HASH, KECCAK_EMPTY, SEPOLIA_GENESIS_HASH}; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; From bc43613be35f316304f7ce4a2225855ac26b5923 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Thu, 17 Oct 2024 22:33:37 +0200 Subject: [PATCH 021/970] chore: disable SC2034 in check_wasm.sh (#11854) --- .github/assets/check_wasm.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 1b1c0641fc0..52d7009412c 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -3,7 +3,10 @@ set +e # Disable immediate exit on error # Array of crates to compile crates=($(cargo metadata --format-version=1 --no-deps | jq -r '.packages[].name' | grep '^reth' | sort)) + # Array of crates to exclude +# Used with the `contains` function. 
+# shellcheck disable=SC2034 exclude_crates=( # The following are not working yet, but known to be fixable reth-exex-types # https://github.com/paradigmxyz/reth/issues/9946 From 96ad6d5bd525c821ea95122290c76cf133f5d36d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Oct 2024 00:40:15 +0200 Subject: [PATCH 022/970] chore: rm unused reth-revm c-kzg feature (#11860) --- crates/revm/Cargo.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 7ffb06ce960..b2d3bccde6b 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -32,8 +32,7 @@ reth-ethereum-forks.workspace = true alloy-primitives.workspace = true [features] -default = ["std", "c-kzg"] +default = ["std"] std = [] -c-kzg = ["revm/c-kzg"] test-utils = ["dep:reth-trie"] serde = ["revm/serde"] From f3c0dda0d34960b6c28e27b41bdc2c86b712c09e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Oct 2024 00:40:33 +0200 Subject: [PATCH 023/970] perf: use existing block hash functions (#11858) --- crates/storage/storage-api/src/block_id.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crates/storage/storage-api/src/block_id.rs b/crates/storage/storage-api/src/block_id.rs index 3d9df2e329f..55cd6ab1c76 100644 --- a/crates/storage/storage-api/src/block_id.rs +++ b/crates/storage/storage-api/src/block_id.rs @@ -82,11 +82,10 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync { BlockNumberOrTag::Pending => self .pending_block_num_hash() .map(|res_opt| res_opt.map(|num_hash| num_hash.hash)), - _ => self - .convert_block_number(num)? 
- .map(|num| self.block_hash(num)) - .transpose() - .map(|maybe_hash| maybe_hash.flatten()), + BlockNumberOrTag::Finalized => self.finalized_block_hash(), + BlockNumberOrTag::Safe => self.safe_block_hash(), + BlockNumberOrTag::Earliest => self.block_hash(0), + BlockNumberOrTag::Number(num) => self.block_hash(num), }, } } From 62e7625b16549530beafef1e0c6a6d9ed2bde649 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 18 Oct 2024 00:41:59 +0200 Subject: [PATCH 024/970] primitives: use alloy `*_TX_TYPE_ID` constants (#11853) --- crates/primitives/src/lib.rs | 2 -- crates/primitives/src/receipt.rs | 8 ++++---- crates/primitives/src/transaction/mod.rs | 7 +++---- crates/primitives/src/transaction/pooled.rs | 3 ++- crates/primitives/src/transaction/sidecar.rs | 6 ++++-- crates/primitives/src/transaction/tx_type.rs | 20 ++++--------------- crates/transaction-pool/src/config.rs | 6 ++---- crates/transaction-pool/src/pool/txpool.rs | 10 +++++----- .../transaction-pool/src/test_utils/mock.rs | 6 ++++-- crates/transaction-pool/src/traits.rs | 6 ++++-- crates/transaction-pool/src/validate/eth.rs | 6 ++++-- 11 files changed, 36 insertions(+), 44 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index a39139349ab..796090d79e5 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -61,8 +61,6 @@ pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, InvalidTransactionError, Signature, Transaction, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHashOrNumber, TxType, - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, }; // Re-exports diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index cfd831ed0f7..9006a067e9c 100644 --- a/crates/primitives/src/receipt.rs +++ 
b/crates/primitives/src/receipt.rs @@ -1,10 +1,10 @@ #[cfg(feature = "reth-codec")] use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; -use crate::{ - logs_bloom, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, - EIP7702_TX_TYPE_ID, -}; +use crate::{logs_bloom, TxType}; use alloc::{vec, vec::Vec}; +use alloy_consensus::constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, +}; use alloy_primitives::{Bloom, Bytes, Log, B256}; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 10fddcb4c37..b7eeeadc897 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,6 +1,8 @@ //! Transaction types. use crate::BlockHashOrNumber; +#[cfg(any(test, feature = "reth-codec"))] +use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; use alloy_consensus::{ SignableTransaction, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, }; @@ -37,10 +39,7 @@ pub use compat::FillTxEnv; pub use signature::{ extract_chain_id, legacy_parity, recover_signer, recover_signer_unchecked, Signature, }; -pub use tx_type::{ - TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, -}; +pub use tx_type::TxType; pub use variant::TransactionSignedVariant; pub(crate) mod access_list; diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index ec49f44a680..32d4da65980 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -8,9 +8,10 @@ use super::{ }; use crate::{ BlobTransaction, BlobTransactionSidecar, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, EIP4844_TX_TYPE_ID, + TransactionSignedEcRecovered, }; 
use alloy_consensus::{ + constants::EIP4844_TX_TYPE_ID, transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, SignableTransaction, TxEip4844WithSidecar, }; diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 87b8c1fbf3e..edc1427d1fe 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,7 +1,9 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] -use crate::{Signature, Transaction, TransactionSigned, EIP4844_TX_TYPE_ID}; -use alloy_consensus::{transaction::TxEip4844, TxEip4844WithSidecar}; +use crate::{Signature, Transaction, TransactionSigned}; +use alloy_consensus::{ + constants::EIP4844_TX_TYPE_ID, transaction::TxEip4844, TxEip4844WithSidecar, +}; use alloy_primitives::{keccak256, TxHash}; use alloy_rlp::{Decodable, Error as RlpError, Header}; use serde::{Deserialize, Serialize}; diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index c55e0d3c619..b6e1ecbf226 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -1,3 +1,7 @@ +use alloy_consensus::constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, +}; use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; use serde::{Deserialize, Serialize}; @@ -23,22 +27,6 @@ pub(crate) const COMPACT_IDENTIFIER_EIP1559: usize = 2; #[cfg(any(test, feature = "reth-codec"))] pub(crate) const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; -/// Identifier for legacy transaction, however [`TxLegacy`](alloy_consensus::TxLegacy) this is -/// technically not typed. -pub const LEGACY_TX_TYPE_ID: u8 = 0; - -/// Identifier for [`TxEip2930`](alloy_consensus::TxEip2930) transaction. -pub const EIP2930_TX_TYPE_ID: u8 = 1; - -/// Identifier for [`TxEip1559`](alloy_consensus::TxEip1559) transaction. 
-pub const EIP1559_TX_TYPE_ID: u8 = 2; - -/// Identifier for [`TxEip4844`](alloy_consensus::TxEip4844) transaction. -pub const EIP4844_TX_TYPE_ID: u8 = 3; - -/// Identifier for [`TxEip7702`](alloy_consensus::TxEip7702) transaction. -pub const EIP7702_TX_TYPE_ID: u8 = 4; - /// Identifier for [`TxDeposit`](op_alloy_consensus::TxDeposit) transaction. #[cfg(feature = "optimism")] pub const DEPOSIT_TX_TYPE_ID: u8 = 126; diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 1b4b010a8e1..30703f888c3 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -2,11 +2,9 @@ use crate::{ pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, PoolSize, TransactionOrigin, }; +use alloy_consensus::constants::EIP4844_TX_TYPE_ID; use alloy_primitives::Address; -use reth_primitives::{ - constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, - EIP4844_TX_TYPE_ID, -}; +use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use std::{collections::HashSet, ops::Mul}; /// Guarantees max transactions for one sender, compatible with geth/erigon diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 9d284392db5..a85a9a1856b 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -18,14 +18,14 @@ use crate::{ PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; -use alloy_primitives::{Address, TxHash, B256}; -use reth_primitives::{ - constants::{ - eip4844::BLOB_TX_MIN_BLOB_GASPRICE, ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, - }, +use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; +use alloy_primitives::{Address, TxHash, B256}; +use reth_primitives::constants::{ + eip4844::BLOB_TX_MIN_BLOB_GASPRICE, 
ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, +}; use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index e2b5f373e44..474cf5cc8f8 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -7,7 +7,10 @@ use crate::{ CoinbaseTipOrdering, EthBlobTransactionSidecar, EthPoolTransaction, PoolTransaction, ValidPoolTransaction, }; -use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}; +use alloy_consensus::{ + constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID}, + TxEip1559, TxEip2930, TxEip4844, TxLegacy, +}; use alloy_eips::eip2930::AccessList; use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; use paste::paste; @@ -20,7 +23,6 @@ use reth_primitives::{ transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, BlobTransactionValidationError, PooledTransactionsElementEcRecovered, Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index ee4a5ada3d8..fcfdae4ed1b 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -7,7 +7,10 @@ use crate::{ validate::ValidPoolTransaction, AllTransactionsEvents, }; -use alloy_consensus::Transaction as _; +use alloy_consensus::{ + constants::{EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, + Transaction as _, +}; use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList, eip4844::BlobAndProofV1}; use alloy_primitives::{Address, TxHash, TxKind, B256, U256}; use futures_util::{ready, Stream}; @@ -17,7 +20,6 @@ use reth_primitives::{ kzg::KzgSettings, 
transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, BlobTransactionValidationError, PooledTransactionsElement, PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, - EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 2594e569aa0..22744c58a79 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -11,11 +11,13 @@ use crate::{ EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, PoolTransaction, TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, }; +use alloy_consensus::constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, +}; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_primitives::{ constants::eip4844::MAX_BLOBS_PER_BLOCK, GotExpected, InvalidTransactionError, SealedBlock, - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, }; use reth_storage_api::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; From dfcaad4608797ed05a8b896fcfad83a83a6292af Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Oct 2024 00:42:24 +0200 Subject: [PATCH 025/970] chore: remove some cfg imports (#11864) --- crates/primitives/src/block.rs | 4 +--- crates/primitives/src/receipt.rs | 6 +++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index de0817fb025..b7e11f7b92e 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -10,8 +10,6 @@ use alloy_primitives::{Address, Bytes, Sealable, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, 
DerefMut}; #[cfg(any(test, feature = "arbitrary"))] -use proptest::prelude::prop_compose; -#[cfg(any(test, feature = "arbitrary"))] pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; use reth_primitives_traits::Requests; use serde::{Deserialize, Serialize}; @@ -20,7 +18,7 @@ use serde::{Deserialize, Serialize}; // a block with `None` withdrawals and `Some` requests, in which case we end up trying to decode the // requests as withdrawals #[cfg(any(feature = "arbitrary", test))] -prop_compose! { +proptest::prelude::prop_compose! { pub fn empty_requests_strategy()(_ in 0..1) -> Option { None } diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 9006a067e9c..b117f8d9615 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -11,14 +11,14 @@ use bytes::{Buf, BufMut}; use core::{cmp::Ordering, ops::Deref}; use derive_more::{DerefMut, From, IntoIterator}; #[cfg(feature = "reth-codec")] -use reth_codecs::{Compact, CompactZstd}; +use reth_codecs::Compact; use serde::{Deserialize, Serialize}; /// Receipt containing result of transaction execution. #[derive( Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable, Serialize, Deserialize, )] -#[cfg_attr(any(test, feature = "reth-codec"), derive(CompactZstd))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::CompactZstd))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests)] #[rlp(trailing)] pub struct Receipt { @@ -130,7 +130,7 @@ impl From for ReceiptWithBloom { /// [`Receipt`] with calculated bloom filter. 
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), derive(Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct ReceiptWithBloom { /// Bloom filter build from logs. From 0c70f6bd3519d5c5bce85481bea728df89fdabf3 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 18 Oct 2024 10:25:22 +0200 Subject: [PATCH 026/970] primitives: use alloy `KECCAK_EMPTY` constant (#11851) --- Cargo.lock | 1 + crates/ethereum/evm/src/lib.rs | 3 ++- crates/optimism/evm/src/lib.rs | 3 ++- crates/primitives-traits/src/constants/mod.rs | 4 ---- crates/primitives/src/lib.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 3 ++- crates/storage/storage-api/Cargo.toml | 1 + crates/storage/storage-api/src/state.rs | 3 ++- crates/trie/common/src/proofs.rs | 5 +++-- 9 files changed, 14 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1d9f74a207d..2aa5675cd87 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9002,6 +9002,7 @@ dependencies = [ name = "reth-storage-api" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index ed18a24fb19..ac9bb5a0bbb 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -194,13 +194,14 @@ impl ConfigureEvm for EthEvmConfig { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::constants::KECCAK_EMPTY; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; use reth_chainspec::{Chain, ChainSpec, MAINNET}; use reth_evm::execute::ProviderError; use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Header, KECCAK_EMPTY, + Header, }; 
use reth_revm::{ db::{CacheDB, EmptyDBTyped}, diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index eb067da3256..3eda2878cae 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -208,6 +208,7 @@ impl ConfigureEvm for OptimismEvmConfig { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::constants::KECCAK_EMPTY; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; use reth_chainspec::ChainSpec; @@ -216,7 +217,7 @@ mod tests { use reth_optimism_chainspec::BASE_MAINNET; use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Header, Receipt, Receipts, SealedBlockWithSenders, TxType, KECCAK_EMPTY, + Header, Receipt, Receipts, SealedBlockWithSenders, TxType, }; use reth_revm::{ db::{CacheDB, EmptyDBTyped}, diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index eade399897b..33101b2c053 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -106,10 +106,6 @@ pub const SEPOLIA_GENESIS_HASH: B256 = pub const HOLESKY_GENESIS_HASH: B256 = b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); -/// Keccak256 over empty array: `0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470` -pub const KECCAK_EMPTY: B256 = - b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); - /// From address from Optimism system txs: `0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001` pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeaddeaddeaddead0001"); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 796090d79e5..a9e8c08203d 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -39,7 +39,7 @@ pub use block::{ }; #[cfg(feature = "reth-codec")] pub use compression::*; -pub use constants::{HOLESKY_GENESIS_HASH, KECCAK_EMPTY, SEPOLIA_GENESIS_HASH}; +pub use 
constants::{HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 7b11ce6afe6..f2fc13f5d03 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -1,13 +1,14 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. +use alloy_consensus::constants::KECCAK_EMPTY; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types::{serde_helpers::JsonStorageKey, Account, EIP1186AccountProofResponse}; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::ConfigureEvmEnv; -use reth_primitives::{BlockId, Header, KECCAK_EMPTY}; +use reth_primitives::{BlockId, Header}; use reth_provider::{ BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 51d8eabfc40..0ae8b284588 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -26,5 +26,6 @@ reth-trie.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true auto_impl.workspace = true diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 9a3b855ff14..d37940f0478 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -2,11 +2,12 @@ use super::{ AccountReader, BlockHashReader, BlockIdReader, StateProofProvider, StateRootProvider, StorageRootProvider, }; +use alloy_consensus::constants::KECCAK_EMPTY; use alloy_eips::{BlockId, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, 
BlockNumber, StorageKey, StorageValue, B256, U256}; use auto_impl::auto_impl; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{Bytecode, KECCAK_EMPTY}; +use reth_primitives::Bytecode; use reth_storage_errors::provider::{ProviderError, ProviderResult}; /// Type alias of boxed [`StateProvider`]. diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 8aca67f8d1a..a94b2b96fbd 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -1,6 +1,7 @@ //! Merkle trie proofs. use crate::{Nibbles, TrieAccount}; +use alloy_consensus::constants::KECCAK_EMPTY; use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use alloy_rlp::{encode_fixed_size, Decodable, EMPTY_STRING_CODE}; use alloy_trie::{ @@ -9,13 +10,13 @@ use alloy_trie::{ EMPTY_ROOT_HASH, }; use itertools::Itertools; -use reth_primitives_traits::{constants::KECCAK_EMPTY, Account}; +use reth_primitives_traits::Account; use serde::{Deserialize, Serialize}; use std::collections::HashMap; /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes -/// in the paths of target accounts. +/// in the paths of target accounts. #[derive(Clone, Default, Debug)] pub struct MultiProof { /// State trie multiproof for requested accounts. 
From 5859f93c56352a619aaae6598ee91dfcc11cd38d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 18 Oct 2024 10:53:21 +0200 Subject: [PATCH 027/970] primitives: use alloy `EMPTY_` constants (#11852) --- Cargo.lock | 1 + crates/chainspec/src/spec.rs | 5 +++-- crates/payload/basic/Cargo.toml | 1 + crates/payload/basic/src/lib.rs | 3 ++- crates/primitives-traits/src/constants/mod.rs | 10 ---------- crates/primitives/src/alloy_compat.rs | 8 +++++--- 6 files changed, 12 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2aa5675cd87..2f8a1057124 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6340,6 +6340,7 @@ dependencies = [ name = "reth-basic-payload-builder" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", "futures-core", diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index adfee564d74..59e1a5ce1e1 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -2,6 +2,7 @@ pub use alloy_eips::eip1559::BaseFeeParams; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; +use alloy_consensus::constants::EMPTY_WITHDRAWALS; use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use alloy_trie::EMPTY_ROOT_HASH; @@ -18,8 +19,8 @@ use reth_network_peers::{ }; use reth_primitives_traits::{ constants::{ - EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, - HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + EIP1559_INITIAL_BASE_FEE, ETHEREUM_BLOCK_GAS_LIMIT, HOLESKY_GENESIS_HASH, + SEPOLIA_GENESIS_HASH, }, Header, SealedHeader, }; diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 939eb5b54b7..f201df0c1bd 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -26,6 +26,7 @@ reth-tasks.workspace = true alloy-rlp.workspace = true alloy-primitives.workspace = true 
revm.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, features = ["sync", "time"] } diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index f9487ec784c..835f20f3ef8 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -9,6 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use crate::metrics::PayloadBuilderMetrics; +use alloy_consensus::constants::EMPTY_WITHDRAWALS; use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; @@ -18,7 +19,7 @@ use reth_payload_builder::{ }; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ - constants::{EMPTY_WITHDRAWALS, RETH_CLIENT_VERSION, SLOT_DURATION}, + constants::{RETH_CLIENT_VERSION, SLOT_DURATION}, proofs, BlockNumberOrTag, SealedBlock, Withdrawals, }; use reth_provider::{ diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 33101b2c053..a4918137e7c 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,6 +1,5 @@ //! Ethereum protocol-related constants -use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{address, b256, Address, B256, U256}; use core::time::Duration; @@ -112,15 +111,6 @@ pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeadde /// To address from Optimism system txs: `0x4200000000000000000000000000000000000015` pub const OP_SYSTEM_TX_TO_ADDR: Address = address!("4200000000000000000000000000000000000015"); -/// Transactions root of empty receipts set. -pub const EMPTY_RECEIPTS: B256 = EMPTY_ROOT_HASH; - -/// Transactions root of empty transactions set. -pub const EMPTY_TRANSACTIONS: B256 = EMPTY_ROOT_HASH; - -/// Withdrawals root of empty withdrawals set. 
-pub const EMPTY_WITHDRAWALS: B256 = EMPTY_ROOT_HASH; - /// The number of blocks to unwind during a reorg that already became a part of canonical chain. /// /// In reality, the node can end up in this particular situation very rarely. It would happen only diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index c9bdfad89f5..0ac1458c5ac 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -1,11 +1,13 @@ //! Common conversions from alloy types. use crate::{ - constants::EMPTY_TRANSACTIONS, transaction::extract_chain_id, Block, BlockBody, Signature, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, + transaction::extract_chain_id, Block, BlockBody, Signature, Transaction, TransactionSigned, + TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, }; use alloc::{string::ToString, vec::Vec}; -use alloy_consensus::{Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxLegacy}; +use alloy_consensus::{ + constants::EMPTY_TRANSACTIONS, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxLegacy, +}; use alloy_primitives::{Parity, TxKind}; use alloy_rlp::Error as RlpError; use alloy_serde::WithOtherFields; From cb604826b74859c41e2b39d5bfdcc1d4a12046e2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 18 Oct 2024 11:23:25 +0200 Subject: [PATCH 028/970] chore(sdk): Define `NodePrimitives::Block` (#11399) --- Cargo.lock | 2 ++ crates/ethereum/node/src/node.rs | 16 +++++++++++++--- crates/node/types/Cargo.toml | 4 +++- crates/node/types/src/lib.rs | 13 +++++++++---- crates/optimism/node/src/node.rs | 14 +++++++++++--- 5 files changed, 38 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f8a1057124..c8b4f76329e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8026,6 +8026,8 @@ dependencies = [ "reth-chainspec", "reth-db-api", "reth-engine-primitives", + "reth-primitives", + "reth-primitives-traits", ] [[package]] diff 
--git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 82f313fbb0b..a890810b00e 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -11,7 +11,9 @@ use reth_ethereum_engine_primitives::{ }; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_network::NetworkHandle; -use reth_node_api::{ConfigureEvm, EngineValidator, FullNodeComponents, NodeTypesWithDB}; +use reth_node_api::{ + ConfigureEvm, EngineValidator, FullNodeComponents, NodePrimitives, NodeTypesWithDB, +}; use reth_node_builder::{ components::{ ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, @@ -22,7 +24,7 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::Header; +use reth_primitives::{Block, Header}; use reth_provider::CanonStateSubscriptions; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; @@ -33,6 +35,14 @@ use reth_transaction_pool::{ use crate::{EthEngineTypes, EthEvmConfig}; +/// Ethereum primitive types. +#[derive(Debug)] +pub struct EthPrimitives; + +impl NodePrimitives for EthPrimitives { + type Block = Block; +} + /// Type configuration for a regular Ethereum node. 
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -69,7 +79,7 @@ impl EthereumNode { } impl NodeTypes for EthereumNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; } diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml index f04925d9cd4..b28dcfba591 100644 --- a/crates/node/types/Cargo.toml +++ b/crates/node/types/Cargo.toml @@ -14,4 +14,6 @@ workspace = true # reth reth-chainspec.workspace = true reth-db-api.workspace = true -reth-engine-primitives.workspace = true \ No newline at end of file +reth-engine-primitives.workspace = true +reth-primitives.workspace = true +reth-primitives-traits.workspace = true \ No newline at end of file diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 2c72e02d3ed..5ba03e6795a 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -8,6 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +pub use reth_primitives_traits::{Block, BlockBody}; + use std::marker::PhantomData; use reth_chainspec::EthChainSpec; @@ -18,11 +20,14 @@ use reth_db_api::{ use reth_engine_primitives::EngineTypes; /// Configures all the primitive types of the node. -// TODO(mattsse): this is currently a placeholder -pub trait NodePrimitives {} +pub trait NodePrimitives { + /// Block primitive. + type Block; +} -// TODO(mattsse): Placeholder -impl NodePrimitives for () {} +impl NodePrimitives for () { + type Block = reth_primitives::Block; +} /// The type that configures the essential types of an Ethereum-like node. 
/// diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 648da85d0bb..c2576d318dd 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -6,7 +6,7 @@ use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGenera use reth_chainspec::{EthChainSpec, Hardforks}; use reth_evm::ConfigureEvm; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager}; -use reth_node_api::{EngineValidator, FullNodeComponents, NodeAddOns}; +use reth_node_api::{EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives}; use reth_node_builder::{ components::{ ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, @@ -21,7 +21,7 @@ use reth_optimism_consensus::OptimismBeaconConsensus; use reth_optimism_evm::{OpExecutorProvider, OptimismEvmConfig}; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::Header; +use reth_primitives::{Block, Header}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -36,6 +36,14 @@ use crate::{ OptimismEngineTypes, }; +/// Optimism primitive types. +#[derive(Debug)] +pub struct OpPrimitives; + +impl NodePrimitives for OpPrimitives { + type Block = Block; +} + /// Type configuration for a regular Optimism node. 
#[derive(Debug, Default, Clone)] #[non_exhaustive] @@ -113,7 +121,7 @@ where } impl NodeTypes for OptimismNode { - type Primitives = (); + type Primitives = OpPrimitives; type ChainSpec = OpChainSpec; } From cfd066c0714e7b8375e667995a9acc812f7dfd15 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 18 Oct 2024 11:43:23 +0200 Subject: [PATCH 029/970] chore(sdk): `SignedTransaction` abstraction (#11432) Co-authored-by: Matthias Seitz --- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives-traits/src/mod.rs | 0 .../{transaction.rs => transaction/mod.rs} | 2 + .../src/transaction/signed.rs | 72 +++++++++++++++++++ crates/primitives/src/traits/mod.rs | 9 +++ 5 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 crates/primitives-traits/src/mod.rs rename crates/primitives-traits/src/{transaction.rs => transaction/mod.rs} (97%) create mode 100644 crates/primitives-traits/src/transaction/signed.rs create mode 100644 crates/primitives/src/traits/mod.rs diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 8c54bd68c96..dd10ac9c5f1 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -24,7 +24,7 @@ pub mod receipt; pub use receipt::Receipt; pub mod transaction; -pub use transaction::Transaction; +pub use transaction::{signed::SignedTransaction, Transaction}; mod integer_list; pub use integer_list::{IntegerList, IntegerListError}; diff --git a/crates/primitives-traits/src/mod.rs b/crates/primitives-traits/src/mod.rs new file mode 100644 index 00000000000..e69de29bb2d diff --git a/crates/primitives-traits/src/transaction.rs b/crates/primitives-traits/src/transaction/mod.rs similarity index 97% rename from crates/primitives-traits/src/transaction.rs rename to crates/primitives-traits/src/transaction/mod.rs index 93645ead82e..a306c5f76ed 100644 --- a/crates/primitives-traits/src/transaction.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -1,5 +1,7 @@ //! 
Transaction abstraction +pub mod signed; + use alloc::fmt; use reth_codecs::Compact; diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs new file mode 100644 index 00000000000..1bc8308b13f --- /dev/null +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -0,0 +1,72 @@ +//! API of a signed transaction. + +use alloc::fmt; +use core::hash::Hash; + +use alloy_consensus::Transaction; +use alloy_eips::eip2718::{Decodable2718, Encodable2718}; +use alloy_primitives::{keccak256, Address, TxHash, B256}; + +/// A signed transaction. +pub trait SignedTransaction: + fmt::Debug + + Clone + + PartialEq + + Eq + + Hash + + Send + + Sync + + serde::Serialize + + for<'a> serde::Deserialize<'a> + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + Encodable2718 + + Decodable2718 +{ + /// Transaction type that is signed. + type Transaction: Transaction; + + /// Signature type that results from signing transaction. + type Signature; + + /// Returns reference to transaction hash. + fn tx_hash(&self) -> &TxHash; + + /// Returns reference to transaction. + fn transaction(&self) -> &Self::Transaction; + + /// Returns reference to signature. + fn signature(&self) -> &Self::Signature; + + /// Recover signer from signature and hash. + /// + /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also `reth_primitives::transaction::recover_signer`. + /// + /// Note: + /// + /// This can fail for some early ethereum mainnet transactions pre EIP-2, use + /// [`Self::recover_signer_unchecked`] if you want to recover the signer without ensuring that + /// the signature has a low `s` value. + fn recover_signer(&self) -> Option
; + + /// Recover signer from signature and hash _without ensuring that the signature has a low `s` + /// value_. + /// + /// Returns `None` if the transaction's signature is invalid, see also + /// `reth_primitives::transaction::recover_signer_unchecked`. + fn recover_signer_unchecked(&self) -> Option
; + + /// Create a new signed transaction from a transaction and its signature. + /// + /// This will also calculate the transaction hash using its encoding. + fn from_transaction_and_signature( + transaction: Self::Transaction, + signature: Self::Signature, + ) -> Self; + + /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with + /// tx type. + fn recalculate_hash(&self) -> B256 { + keccak256(self.encoded_2718()) + } +} diff --git a/crates/primitives/src/traits/mod.rs b/crates/primitives/src/traits/mod.rs new file mode 100644 index 00000000000..49fb73ea555 --- /dev/null +++ b/crates/primitives/src/traits/mod.rs @@ -0,0 +1,9 @@ +//! Abstractions of primitive data types + +pub mod block; +pub mod transaction; + +pub use block::{body::BlockBody, Block}; +pub use transaction::signed::SignedTransaction; + +pub use alloy_consensus::BlockHeader; From 8d32fd788bfa8c2e041bd1b93ecca37093a09c5a Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 18 Oct 2024 14:45:51 +0400 Subject: [PATCH 030/970] feat: allow awaiting payload in progress (#11823) Co-authored-by: Matthias Seitz --- crates/e2e-test-utils/src/payload.rs | 2 +- crates/engine/local/src/miner.rs | 9 ++- crates/payload/basic/src/lib.rs | 15 +++- crates/payload/builder/src/lib.rs | 6 +- crates/payload/builder/src/noop.rs | 2 +- crates/payload/builder/src/service.rs | 79 +++++++++++----------- crates/payload/builder/src/test_utils.rs | 7 +- crates/payload/builder/src/traits.rs | 20 +++++- crates/payload/primitives/src/lib.rs | 22 ++++++ crates/payload/primitives/src/traits.rs | 40 +++++------ examples/custom-payload-builder/src/job.rs | 6 +- 11 files changed, 127 insertions(+), 81 deletions(-) diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 1f9a89307b6..946d9af5753 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -28,7 +28,7 @@ impl PayloadTestContext { ) -> eyre::Result { 
self.timestamp += 1; let attributes: E::PayloadBuilderAttributes = attributes_generator(self.timestamp); - self.payload_builder.new_payload(attributes.clone()).await.unwrap(); + self.payload_builder.send_new_payload(attributes.clone()).await.unwrap()?; Ok(attributes) } diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index f20d70b1489..8bcb7083aab 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -9,7 +9,7 @@ use reth_chainspec::EthereumHardforks; use reth_engine_primitives::EngineTypes; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{ - BuiltPayload, PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, + BuiltPayload, PayloadAttributesBuilder, PayloadBuilder, PayloadKind, PayloadTypes, }; use reth_provider::{BlockReader, ChainSpecProvider}; use reth_rpc_types_compat::engine::payload::block_to_payload; @@ -202,10 +202,9 @@ where let payload_id = res.payload_id.ok_or_eyre("No payload id")?; - // wait for some time to let the payload be built - tokio::time::sleep(Duration::from_millis(200)).await; - - let Some(Ok(payload)) = self.payload_builder.best_payload(payload_id).await else { + let Some(Ok(payload)) = + self.payload_builder.resolve_kind(payload_id, PayloadKind::WaitForPending).await + else { eyre::bail!("No payload") }; diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 835f20f3ef8..7416283c1f5 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -17,7 +17,9 @@ use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_payload_builder::{ database::CachedReads, KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator, }; -use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_primitives::{ + BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, +}; use reth_primitives::{ constants::{RETH_CLIENT_VERSION, 
SLOT_DURATION}, proofs, BlockNumberOrTag, SealedBlock, Withdrawals, @@ -474,7 +476,10 @@ where Ok(self.config.attributes.clone()) } - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + fn resolve_kind( + &mut self, + kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { let best_payload = self.best_payload.take(); if best_payload.is_none() && self.pending_block.is_none() { @@ -530,7 +535,11 @@ where }; } - let fut = ResolveBestPayload { best_payload, maybe_better, empty_payload }; + let fut = ResolveBestPayload { + best_payload, + maybe_better, + empty_payload: empty_payload.filter(|_| kind != PayloadKind::WaitForPending), + }; (fut, KeepPayloadJobAlive::No) } diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 70b4296da4e..0df15f5b0de 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -28,7 +28,7 @@ //! use std::pin::Pin; //! use std::task::{Context, Poll}; //! use alloy_primitives::U256; -//! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator}; +//! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, PayloadKind}; //! use reth_primitives::{Block, Header}; //! //! /// The generator type that creates new jobs that builds empty blocks. @@ -73,7 +73,7 @@ //! Ok(self.attributes.clone()) //! } //! -//! fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { +//! fn resolve_kind(&mut self, _kind: PayloadKind) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { //! let payload = self.best_payload(); //! (futures_util::future::ready(payload), KeepPayloadJobAlive::No) //! 
} @@ -112,7 +112,7 @@ pub mod noop; pub mod test_utils; pub use alloy_rpc_types::engine::PayloadId; -pub use reth_payload_primitives::PayloadBuilderError; +pub use reth_payload_primitives::{PayloadBuilderError, PayloadKind}; pub use service::{ PayloadBuilderHandle, PayloadBuilderService, PayloadServiceCommand, PayloadStore, }; diff --git a/crates/payload/builder/src/noop.rs b/crates/payload/builder/src/noop.rs index 06da7dcfada..cbf21f1cebf 100644 --- a/crates/payload/builder/src/noop.rs +++ b/crates/payload/builder/src/noop.rs @@ -51,7 +51,7 @@ where } PayloadServiceCommand::BestPayload(_, tx) => tx.send(None).ok(), PayloadServiceCommand::PayloadAttributes(_, tx) => tx.send(None).ok(), - PayloadServiceCommand::Resolve(_, tx) => tx.send(None).ok(), + PayloadServiceCommand::Resolve(_, _, tx) => tx.send(None).ok(), PayloadServiceCommand::Subscribe(_) => None, }; } diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 1ebf6770c99..853c69e90d8 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -11,7 +11,7 @@ use alloy_rpc_types::engine::PayloadId; use futures_util::{future::FutureExt, Stream, StreamExt}; use reth_payload_primitives::{ BuiltPayload, Events, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, - PayloadEvents, PayloadTypes, + PayloadEvents, PayloadKind, PayloadTypes, }; use reth_provider::CanonStateNotification; use std::{ @@ -45,11 +45,20 @@ where /// /// Note: depending on the installed [`PayloadJobGenerator`], this may or may not terminate the /// job, See [`PayloadJob::resolve`]. + pub async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option> { + self.inner.resolve_kind(id, kind).await + } + + /// Resolves the payload job and returns the best payload that has been built so far. 
pub async fn resolve( &self, id: PayloadId, ) -> Option> { - self.inner.resolve(id).await + self.resolve_kind(id, PayloadKind::Earliest).await } /// Returns the best payload for the given identifier. @@ -110,16 +119,13 @@ where type PayloadType = T; type Error = PayloadBuilderError; - async fn send_and_resolve_payload( + fn send_new_payload( &self, attr: ::PayloadBuilderAttributes, - ) -> Result::BuiltPayload>, Self::Error> { - let rx = self.send_new_payload(attr); - let id = rx.await??; - + ) -> Receiver> { let (tx, rx) = oneshot::channel(); - let _ = self.to_service.send(PayloadServiceCommand::Resolve(id, tx)); - rx.await?.ok_or(PayloadBuilderError::MissingPayload) + let _ = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx)); + rx } /// Note: this does not resolve the job if it's still in progress. @@ -132,21 +138,17 @@ where rx.await.ok()? } - fn send_new_payload( + async fn resolve_kind( &self, - attr: ::PayloadBuilderAttributes, - ) -> Receiver> { + id: PayloadId, + kind: PayloadKind, + ) -> Option> { let (tx, rx) = oneshot::channel(); - let _ = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx)); - rx - } - - /// Note: if there's already payload in progress with same identifier, it will be returned. - async fn new_payload( - &self, - attr: ::PayloadBuilderAttributes, - ) -> Result { - self.send_new_payload(attr).await? + self.to_service.send(PayloadServiceCommand::Resolve(id, kind, tx)).ok()?; + match rx.await.transpose()? { + Ok(fut) => Some(fut.await), + Err(e) => Some(Err(e.into())), + } } async fn subscribe(&self) -> Result, Self::Error> { @@ -168,19 +170,6 @@ where Self { to_service } } - /// Resolves the payload job and returns the best payload that has been built so far. - /// - /// Note: depending on the installed [`PayloadJobGenerator`], this may or may not terminate the - /// job, See [`PayloadJob::resolve`]. 
- async fn resolve(&self, id: PayloadId) -> Option> { - let (tx, rx) = oneshot::channel(); - self.to_service.send(PayloadServiceCommand::Resolve(id, tx)).ok()?; - match rx.await.transpose()? { - Ok(fut) => Some(fut.await), - Err(e) => Some(Err(e.into())), - } - } - /// Returns the payload attributes associated with the given identifier. /// /// Note: this returns the attributes of the payload and does not resolve the job. @@ -296,11 +285,15 @@ where /// Returns the best payload for the given identifier that has been built so far and terminates /// the job if requested. - fn resolve(&mut self, id: PayloadId) -> Option> { + fn resolve( + &mut self, + id: PayloadId, + kind: PayloadKind, + ) -> Option> { trace!(%id, "resolving payload job"); let job = self.payload_jobs.iter().position(|(_, job_id)| *job_id == id)?; - let (fut, keep_alive) = self.payload_jobs[job].0.resolve(); + let (fut, keep_alive) = self.payload_jobs[job].0.resolve_kind(kind); if keep_alive == KeepPayloadJobAlive::No { let (_, id) = self.payload_jobs.swap_remove(job); @@ -437,8 +430,8 @@ where let attributes = this.payload_attributes(id); let _ = tx.send(attributes); } - PayloadServiceCommand::Resolve(id, tx) => { - let _ = tx.send(this.resolve(id)); + PayloadServiceCommand::Resolve(id, strategy, tx) => { + let _ = tx.send(this.resolve(id, strategy)); } PayloadServiceCommand::Subscribe(tx) => { let new_rx = this.payload_events.subscribe(); @@ -469,7 +462,11 @@ pub enum PayloadServiceCommand { oneshot::Sender>>, ), /// Resolve the payload and return the payload - Resolve(PayloadId, oneshot::Sender>>), + Resolve( + PayloadId, + /* kind: */ PayloadKind, + oneshot::Sender>>, + ), /// Payload service events Subscribe(oneshot::Sender>>), } @@ -489,7 +486,7 @@ where Self::PayloadAttributes(f0, f1) => { f.debug_tuple("PayloadAttributes").field(&f0).field(&f1).finish() } - Self::Resolve(f0, _f1) => f.debug_tuple("Resolve").field(&f0).finish(), + Self::Resolve(f0, f1, _f2) => 
f.debug_tuple("Resolve").field(&f0).field(&f1).finish(), Self::Subscribe(f0) => f.debug_tuple("Subscribe").field(&f0).finish(), } } diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 55b9b84f45e..6990dc9b174 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -7,7 +7,7 @@ use crate::{ use alloy_primitives::U256; use reth_chain_state::ExecutedBlock; -use reth_payload_primitives::{PayloadBuilderError, PayloadTypes}; +use reth_payload_primitives::{PayloadBuilderError, PayloadKind, PayloadTypes}; use reth_primitives::Block; use reth_provider::CanonStateNotification; use std::{ @@ -96,7 +96,10 @@ impl PayloadJob for TestPayloadJob { Ok(self.attr.clone()) } - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + fn resolve_kind( + &mut self, + _kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { let fut = futures_util::future::ready(self.best_payload()); (fut, KeepPayloadJobAlive::No) } diff --git a/crates/payload/builder/src/traits.rs b/crates/payload/builder/src/traits.rs index 8d448eeff5a..62dadeb45d7 100644 --- a/crates/payload/builder/src/traits.rs +++ b/crates/payload/builder/src/traits.rs @@ -1,6 +1,8 @@ //! Trait abstractions used by the payload crate. -use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_primitives::{ + BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, +}; use reth_provider::CanonStateNotification; use std::future::Future; @@ -53,7 +55,21 @@ pub trait PayloadJob: Future> + Send + /// If this returns [`KeepPayloadJobAlive::Yes`], then the [`PayloadJob`] will be polled /// once more. If this returns [`KeepPayloadJobAlive::No`] then the [`PayloadJob`] will be /// dropped after this call. 
- fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive); + /// + /// The [`PayloadKind`] determines how the payload should be resolved in the + /// `ResolvePayloadFuture`. [`PayloadKind::Earliest`] should return the earliest available + /// payload (as fast as possible), e.g. racing an empty payload job against a pending job if + /// there's no payload available yet. [`PayloadKind::WaitForPending`] is allowed to wait + /// until a built payload is available. + fn resolve_kind( + &mut self, + kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive); + + /// Resolves the payload as fast as possible. + fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + self.resolve_kind(PayloadKind::Earliest) + } } /// Whether the payload job should be kept alive or terminated after the payload was requested by diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 8173cae344a..08aa428000e 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -342,6 +342,28 @@ pub enum EngineApiMessageVersion { V4, } +/// Determines how we should choose the payload to return. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum PayloadKind { + /// Returns the next best available payload (the earliest available payload). + /// This does not wait for a real for pending job to finish if there's no best payload yet and + /// is allowed to race various payload jobs (empty, pending best) against each other and + /// returns whichever job finishes faster. + /// + /// This should be used when it's more important to return a valid payload as fast as possible. + /// For example, the engine API timeout for `engine_getPayload` is 1s and clients should rather + /// return an empty payload than indefinitely waiting for the pending payload job to finish and + /// risk missing the deadline. 
+ #[default] + Earliest, + /// Only returns once we have at least one built payload. + /// + /// Compared to [`PayloadKind::Earliest`] this does not race an empty payload job against the + /// already in progress one, and returns the best available built payload or awaits the job in + /// progress. + WaitForPending, +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 494ed68aa4e..ce98fcad32e 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,4 +1,4 @@ -use crate::{PayloadBuilderError, PayloadEvents, PayloadTypes}; +use crate::{PayloadEvents, PayloadKind, PayloadTypes}; use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types::{ engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}, @@ -7,12 +7,8 @@ use alloy_rpc_types::{ use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_chain_state::ExecutedBlock; use reth_primitives::{SealedBlock, Withdrawals}; -use std::{future::Future, pin::Pin}; use tokio::sync::oneshot; -pub(crate) type PayloadFuture

= - Pin> + Send + Sync>>; - /// A type that can request, subscribe to and resolve payloads. #[async_trait::async_trait] pub trait PayloadBuilder: Send + Unpin { @@ -21,12 +17,13 @@ pub trait PayloadBuilder: Send + Unpin { /// The error type returned by the builder. type Error; - /// Sends a message to the service to start building a new payload for the given payload - /// attributes and returns a future that resolves to the payload. - async fn send_and_resolve_payload( + /// Sends a message to the service to start building a new payload for the given payload. + /// + /// Returns a receiver that will receive the payload id. + fn send_new_payload( &self, attr: ::PayloadBuilderAttributes, - ) -> Result::BuiltPayload>, Self::Error>; + ) -> oneshot::Receiver>; /// Returns the best payload for the given identifier. async fn best_payload( @@ -34,22 +31,21 @@ pub trait PayloadBuilder: Send + Unpin { id: PayloadId, ) -> Option::BuiltPayload, Self::Error>>; - /// Sends a message to the service to start building a new payload for the given payload. - /// - /// This is the same as [`PayloadBuilder::new_payload`] but does not wait for the result - /// and returns the receiver instead - fn send_new_payload( + /// Resolves the payload job and returns the best payload that has been built so far. + async fn resolve_kind( &self, - attr: ::PayloadBuilderAttributes, - ) -> oneshot::Receiver>; + id: PayloadId, + kind: PayloadKind, + ) -> Option::BuiltPayload, Self::Error>>; - /// Starts building a new payload for the given payload attributes. - /// - /// Returns the identifier of the payload. - async fn new_payload( + /// Resolves the payload job as fast and possible and returns the best payload that has been + /// built so far. 
+ async fn resolve( &self, - attr: ::PayloadBuilderAttributes, - ) -> Result; + id: PayloadId, + ) -> Option::BuiltPayload, Self::Error>> { + self.resolve_kind(id, PayloadKind::Earliest).await + } /// Sends a message to the service to subscribe to payload events. /// Returns a receiver that will receive them. diff --git a/examples/custom-payload-builder/src/job.rs b/examples/custom-payload-builder/src/job.rs index 26b594be94b..01419825959 100644 --- a/examples/custom-payload-builder/src/job.rs +++ b/examples/custom-payload-builder/src/job.rs @@ -3,6 +3,7 @@ use reth::{ providers::StateProviderFactory, tasks::TaskSpawner, transaction_pool::TransactionPool, }; use reth_basic_payload_builder::{PayloadBuilder, PayloadConfig}; +use reth_node_api::PayloadKind; use reth_payload_builder::{KeepPayloadJobAlive, PayloadBuilderError, PayloadJob}; use std::{ @@ -52,7 +53,10 @@ where Ok(self.config.attributes.clone()) } - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + fn resolve_kind( + &mut self, + _kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { let payload = self.best_payload(); (futures_util::future::ready(payload), KeepPayloadJobAlive::No) } From 9c8f5d89d884e3a4c0b2e8120b5f4c6121e626f1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Oct 2024 13:47:56 +0200 Subject: [PATCH 031/970] chore: rm v2 get bodies functions (#11870) --- crates/rpc/rpc-api/src/engine.rs | 23 ++------ crates/rpc/rpc-engine-api/src/capabilities.rs | 2 - crates/rpc/rpc-engine-api/src/engine_api.rs | 52 ++----------------- crates/rpc/rpc-engine-api/src/metrics.rs | 4 -- .../rpc-types-compat/src/engine/payload.rs | 50 +----------------- 5 files changed, 8 insertions(+), 123 deletions(-) diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 50181d23a75..eedada8ffa7 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -10,9 +10,9 @@ use alloy_rpc_types::{ 
state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; use alloy_rpc_types_engine::{ - ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, - ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, - PayloadId, PayloadStatus, TransitionConfiguration, + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, + ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, + PayloadStatus, TransitionConfiguration, }; use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_serde::JsonStorageKey; @@ -146,13 +146,6 @@ pub trait EngineApi { block_hashes: Vec, ) -> RpcResult; - /// See also - #[method(name = "getPayloadBodiesByHashV2")] - async fn get_payload_bodies_by_hash_v2( - &self, - block_hashes: Vec, - ) -> RpcResult; - /// See also /// /// Returns the execution payload bodies by the range starting at `start`, containing `count` @@ -172,16 +165,6 @@ pub trait EngineApi { count: U64, ) -> RpcResult; - /// See also - /// - /// Similar to `getPayloadBodiesByRangeV1`, but returns [`ExecutionPayloadBodiesV2`] - #[method(name = "getPayloadBodiesByRangeV2")] - async fn get_payload_bodies_by_range_v2( - &self, - start: U64, - count: U64, - ) -> RpcResult; - /// See also /// /// Note: This method will be deprecated after the cancun hardfork: diff --git a/crates/rpc/rpc-engine-api/src/capabilities.rs b/crates/rpc/rpc-engine-api/src/capabilities.rs index de4d9623153..af0609b0d1f 100644 --- a/crates/rpc/rpc-engine-api/src/capabilities.rs +++ b/crates/rpc/rpc-engine-api/src/capabilities.rs @@ -17,8 +17,6 @@ pub const CAPABILITIES: &[&str] = &[ "engine_newPayloadV4", "engine_getPayloadBodiesByHashV1", "engine_getPayloadBodiesByRangeV1", - "engine_getPayloadBodiesByHashV2", - "engine_getPayloadBodiesByRangeV2", "engine_getBlobsV1", ]; diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs 
b/crates/rpc/rpc-engine-api/src/engine_api.rs index 252808c14a7..fb7f98ed203 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -5,9 +5,8 @@ use alloy_eips::eip4844::BlobAndProofV1; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, - ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, - ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, - TransitionConfiguration, + ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, }; use async_trait::async_trait; use jsonrpsee_core::RpcResult; @@ -23,7 +22,7 @@ use reth_payload_primitives::{ use reth_primitives::{Block, BlockHashOrNumber, EthereumHardfork}; use reth_rpc_api::EngineApiServer; use reth_rpc_types_compat::engine::payload::{ - convert_payload_input_v2_to_payload, convert_to_payload_body_v1, convert_to_payload_body_v2, + convert_payload_input_v2_to_payload, convert_to_payload_body_v1, }; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -451,18 +450,6 @@ where self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v1).await } - /// Returns the execution payload bodies by the range starting at `start`, containing `count` - /// blocks. - /// - /// Same as [`Self::get_payload_bodies_by_range_v1`] but as [`ExecutionPayloadBodiesV2`]. - pub async fn get_payload_bodies_by_range_v2( - &self, - start: BlockNumber, - count: u64, - ) -> EngineApiResult { - self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v2).await - } - /// Called to retrieve execution payload bodies by hashes. 
async fn get_payload_bodies_by_hash_with( &self, @@ -509,16 +496,6 @@ where self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v1).await } - /// Called to retrieve execution payload bodies by hashes. - /// - /// Same as [`Self::get_payload_bodies_by_hash_v1`] but as [`ExecutionPayloadBodiesV2`]. - pub async fn get_payload_bodies_by_hash_v2( - &self, - hashes: Vec, - ) -> EngineApiResult { - self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v2).await - } - /// Called to verify network configuration parameters and ensure that Consensus and Execution /// layers are using the latest configuration. pub fn exchange_transition_configuration( @@ -846,17 +823,6 @@ where Ok(res.await?) } - async fn get_payload_bodies_by_hash_v2( - &self, - block_hashes: Vec, - ) -> RpcResult { - trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV2"); - let start = Instant::now(); - let res = Self::get_payload_bodies_by_hash_v2(self, block_hashes); - self.inner.metrics.latency.get_payload_bodies_by_hash_v2.record(start.elapsed()); - Ok(res.await?) - } - /// Handler for `engine_getPayloadBodiesByRangeV1` /// /// See also @@ -885,18 +851,6 @@ where Ok(res?) } - async fn get_payload_bodies_by_range_v2( - &self, - start: U64, - count: U64, - ) -> RpcResult { - trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV2"); - let start_time = Instant::now(); - let res = Self::get_payload_bodies_by_range_v2(self, start.to(), count.to()).await; - self.inner.metrics.latency.get_payload_bodies_by_range_v2.record(start_time.elapsed()); - Ok(res?) 
- } - /// Handler for `engine_exchangeTransitionConfigurationV1` /// See also async fn exchange_transition_configuration( diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 2c4216664ae..8d0106f9dd9 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -44,12 +44,8 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) get_payload_v4: Histogram, /// Latency for `engine_getPayloadBodiesByRangeV1` pub(crate) get_payload_bodies_by_range_v1: Histogram, - /// Latency for `engine_getPayloadBodiesByRangeV2` - pub(crate) get_payload_bodies_by_range_v2: Histogram, /// Latency for `engine_getPayloadBodiesByHashV1` pub(crate) get_payload_bodies_by_hash_v1: Histogram, - /// Latency for `engine_getPayloadBodiesByHashV2` - pub(crate) get_payload_bodies_by_hash_v2: Histogram, /// Latency for `engine_exchangeTransitionConfigurationV1` pub(crate) exchange_transition_configuration: Histogram, } diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index e6f2f97ca75..cd9ce1cbf7d 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -6,8 +6,8 @@ use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadBodyV2, ExecutionPayloadV1, ExecutionPayloadV2, - ExecutionPayloadV3, ExecutionPayloadV4, PayloadError, + ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, + ExecutionPayloadV4, PayloadError, }; use reth_primitives::{ constants::MAXIMUM_EXTRA_DATA_SIZE, @@ -381,52 +381,6 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { } } -/// Converts [`Block`] to [`ExecutionPayloadBodyV2`] -pub fn convert_to_payload_body_v2(value: 
Block) -> ExecutionPayloadBodyV2 { - let transactions = value.body.transactions.into_iter().map(|tx| { - let mut out = Vec::new(); - tx.encode_2718(&mut out); - out.into() - }); - - let mut payload = ExecutionPayloadBodyV2 { - transactions: transactions.collect(), - withdrawals: value.body.withdrawals.map(Withdrawals::into_inner), - deposit_requests: None, - withdrawal_requests: None, - consolidation_requests: None, - }; - - if let Some(requests) = value.body.requests { - let (deposit_requests, withdrawal_requests, consolidation_requests) = - requests.into_iter().fold( - (Vec::new(), Vec::new(), Vec::new()), - |(mut deposits, mut withdrawals, mut consolidation_requests), request| { - match request { - Request::DepositRequest(r) => { - deposits.push(r); - } - Request::WithdrawalRequest(r) => { - withdrawals.push(r); - } - Request::ConsolidationRequest(r) => { - consolidation_requests.push(r); - } - _ => {} - }; - - (deposits, withdrawals, consolidation_requests) - }, - ); - - payload.deposit_requests = Some(deposit_requests); - payload.withdrawal_requests = Some(withdrawal_requests); - payload.consolidation_requests = Some(consolidation_requests); - } - - payload -} - /// Transforms a [`SealedBlock`] into a [`ExecutionPayloadV1`] pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPayloadV1 { let transactions = value.raw_transactions(); From 587c91f1cf988cf9f99bb251bf5da7f86ff5600d Mon Sep 17 00:00:00 2001 From: Ayodeji Akinola Date: Fri, 18 Oct 2024 16:17:11 +0100 Subject: [PATCH 032/970] Optimize Sender Recovery Process (#11385) --- .../stages/src/stages/sender_recovery.rs | 146 +++++++++++------- 1 file changed, 86 insertions(+), 60 deletions(-) diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index a85b0bc60cc..a4eda6394c0 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -29,6 +29,9 @@ const 
BATCH_SIZE: usize = 100_000; /// Maximum number of senders to recover per rayon worker job. const WORKER_CHUNK_SIZE: usize = 100; +/// Type alias for a sender that transmits the result of sender recovery. +type RecoveryResultSender = mpsc::Sender>>; + /// The sender recovery stage iterates over existing transactions, /// recovers the transaction signer and stores them /// in [`TransactionSenders`][reth_db::tables::TransactionSenders] table. @@ -100,8 +103,10 @@ where .map(|start| start..std::cmp::min(start + BATCH_SIZE as u64, tx_range.end)) .collect::>>(); + let tx_batch_sender = setup_range_recovery(provider); + for range in batch { - recover_range(range, provider, &mut senders_cursor)?; + recover_range(range, provider, tx_batch_sender.clone(), &mut senders_cursor)?; } Ok(ExecOutput { @@ -136,15 +141,16 @@ where fn recover_range( tx_range: Range, provider: &Provider, + tx_batch_sender: mpsc::Sender, RecoveryResultSender)>>, senders_cursor: &mut CURSOR, ) -> Result<(), StageError> where Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, CURSOR: DbCursorRW, { - debug!(target: "sync::stages::sender_recovery", ?tx_range, "Recovering senders batch"); + debug!(target: "sync::stages::sender_recovery", ?tx_range, "Sending batch for processing"); - // Preallocate channels + // Preallocate channels for each chunks in the batch let (chunks, receivers): (Vec<_>, Vec<_>) = tx_range .clone() .step_by(WORKER_CHUNK_SIZE) @@ -156,62 +162,9 @@ where }) .unzip(); - let static_file_provider = provider.static_file_provider(); - - // We do not use `tokio::task::spawn_blocking` because, during a shutdown, - // there will be a timeout grace period in which Tokio does not allow spawning - // additional blocking tasks. This would cause this function to return - // `SenderRecoveryStageError::RecoveredSendersMismatch` at the end. 
- // - // However, using `std::thread::spawn` allows us to utilize the timeout grace - // period to complete some work without throwing errors during the shutdown. - std::thread::spawn(move || { - for (chunk_range, recovered_senders_tx) in chunks { - // Read the raw value, and let the rayon worker to decompress & decode. - let chunk = match static_file_provider.fetch_range_with_predicate( - StaticFileSegment::Transactions, - chunk_range.clone(), - |cursor, number| { - Ok(cursor - .get_one::>>( - number.into(), - )? - .map(|tx| (number, tx))) - }, - |_| true, - ) { - Ok(chunk) => chunk, - Err(err) => { - // We exit early since we could not process this chunk. - let _ = recovered_senders_tx - .send(Err(Box::new(SenderRecoveryStageError::StageError(err.into())))); - break - } - }; - - // Spawn the task onto the global rayon pool - // This task will send the results through the channel after it has read the transaction - // and calculated the sender. - rayon::spawn(move || { - let mut rlp_buf = Vec::with_capacity(128); - for (number, tx) in chunk { - let res = tx - .value() - .map_err(|err| Box::new(SenderRecoveryStageError::StageError(err.into()))) - .and_then(|tx| recover_sender((number, tx), &mut rlp_buf)); - - let is_err = res.is_err(); - - let _ = recovered_senders_tx.send(res); - - // Finish early - if is_err { - break - } - } - }); - } - }); + if let Some(err) = tx_batch_sender.send(chunks).err() { + return Err(StageError::Fatal(err.into())); + } debug!(target: "sync::stages::sender_recovery", ?tx_range, "Appending recovered senders to the database"); @@ -235,6 +188,7 @@ where provider.sealed_header(block_number)?.ok_or_else(|| { ProviderError::HeaderNotFound(block_number.into()) })?; + Err(StageError::Block { block: Box::new(sealed_header), error: BlockErrorKind::Validation( @@ -269,10 +223,82 @@ where .into(), )); } - Ok(()) } +/// Spawns a thread to handle the recovery of transaction senders for +/// specified chunks of a given batch. 
It processes incoming ranges, fetching and recovering +/// transactions in parallel using global rayon pool +fn setup_range_recovery( + provider: &Provider, +) -> mpsc::Sender, RecoveryResultSender)>> +where + Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, +{ + let (tx_sender, tx_receiver) = mpsc::channel::, RecoveryResultSender)>>(); + let static_file_provider = provider.static_file_provider(); + + // We do not use `tokio::task::spawn_blocking` because, during a shutdown, + // there will be a timeout grace period in which Tokio does not allow spawning + // additional blocking tasks. This would cause this function to return + // `SenderRecoveryStageError::RecoveredSendersMismatch` at the end. + // + // However, using `std::thread::spawn` allows us to utilize the timeout grace + // period to complete some work without throwing errors during the shutdown. + std::thread::spawn(move || { + while let Ok(chunks) = tx_receiver.recv() { + for (chunk_range, recovered_senders_tx) in chunks { + // Read the raw value, and let the rayon worker to decompress & decode. + let chunk = match static_file_provider.fetch_range_with_predicate( + StaticFileSegment::Transactions, + chunk_range.clone(), + |cursor, number| { + Ok(cursor + .get_one::>>( + number.into(), + )? + .map(|tx| (number, tx))) + }, + |_| true, + ) { + Ok(chunk) => chunk, + Err(err) => { + // We exit early since we could not process this chunk. + let _ = recovered_senders_tx + .send(Err(Box::new(SenderRecoveryStageError::StageError(err.into())))); + break + } + }; + + // Spawn the task onto the global rayon pool + // This task will send the results through the channel after it has read the + // transaction and calculated the sender. 
+ rayon::spawn(move || { + let mut rlp_buf = Vec::with_capacity(128); + for (number, tx) in chunk { + let res = tx + .value() + .map_err(|err| { + Box::new(SenderRecoveryStageError::StageError(err.into())) + }) + .and_then(|tx| recover_sender((number, tx), &mut rlp_buf)); + + let is_err = res.is_err(); + + let _ = recovered_senders_tx.send(res); + + // Finish early + if is_err { + break + } + } + }); + } + } + }); + tx_sender +} + #[inline] fn recover_sender( (tx_id, tx): (TxNumber, TransactionSignedNoHash), From a908f977736ebd0d609fc3d2f4a6904a14f0bd73 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Oct 2024 20:21:55 +0200 Subject: [PATCH 033/970] chore: simplify update fn (#11880) --- crates/transaction-pool/src/pool/pending.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index ff3ecf65a49..ff5269014c4 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -197,7 +197,7 @@ impl PendingPool { } } else { self.size_of += tx.transaction.size(); - self.update_independents_and_highest_nonces(&tx, &id); + self.update_independents_and_highest_nonces(&tx); self.all.insert(tx.clone()); self.by_id.insert(id, tx); } @@ -243,7 +243,7 @@ impl PendingPool { tx.priority = self.ordering.priority(&tx.transaction.transaction, base_fee); self.size_of += tx.transaction.size(); - self.update_independents_and_highest_nonces(&tx, &id); + self.update_independents_and_highest_nonces(&tx); self.all.insert(tx.clone()); self.by_id.insert(id, tx); } @@ -254,12 +254,8 @@ impl PendingPool { /// Updates the independent transaction and highest nonces set, assuming the given transaction /// is being _added_ to the pool. 
- fn update_independents_and_highest_nonces( - &mut self, - tx: &PendingTransaction, - tx_id: &TransactionId, - ) { - let ancestor_id = tx_id.unchecked_ancestor(); + fn update_independents_and_highest_nonces(&mut self, tx: &PendingTransaction) { + let ancestor_id = tx.transaction.id().unchecked_ancestor(); if let Some(ancestor) = ancestor_id.and_then(|id| self.by_id.get(&id)) { // the transaction already has an ancestor, so we only need to ensure that the // highest nonces set actually contains the highest nonce for that sender @@ -305,7 +301,7 @@ impl PendingPool { let priority = self.ordering.priority(&tx.transaction, base_fee); let tx = PendingTransaction { submission_id, transaction: tx, priority }; - self.update_independents_and_highest_nonces(&tx, &tx_id); + self.update_independents_and_highest_nonces(&tx); self.all.insert(tx.clone()); // send the new transaction to any existing pendingpool static file iterators From eee5e0d41f601dca62339b09b666432377717bd0 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 18 Oct 2024 22:08:09 +0200 Subject: [PATCH 034/970] bump rust to 1.82 (#11876) --- .github/workflows/lint.yml | 4 ++-- Cargo.toml | 6 ++---- README.md | 2 +- clippy.toml | 18 ++++++++++++++++-- .../src/providers/static_file/manager.rs | 1 + 5 files changed, 22 insertions(+), 9 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index efa38857e06..65c01c3a491 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -103,7 +103,7 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master with: - toolchain: "1.81" # MSRV + toolchain: "1.82" # MSRV - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -163,7 +163,7 @@ jobs: - uses: dtolnay/rust-toolchain@nightly - uses: dtolnay/rust-toolchain@master with: - toolchain: "1.81" # MSRV + toolchain: "1.82" # MSRV - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true diff --git 
a/Cargo.toml b/Cargo.toml index e3ec1c1fb4a..54111096902 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [workspace.package] version = "1.1.0" edition = "2021" -rust-version = "1.81" +rust-version = "1.82" license = "MIT OR Apache-2.0" homepage = "https://paradigmxyz.github.io/reth" repository = "https://github.com/paradigmxyz/reth" @@ -410,9 +410,7 @@ reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } # revm -revm = { version = "14.0.3", features = [ - "std", -], default-features = false } +revm = { version = "14.0.3", features = ["std"], default-features = false } revm-inspectors = "0.8.1" revm-primitives = { version = "10.0.0", features = [ "std", diff --git a/README.md b/README.md index 7fae4d0b62c..8a6b8ddb42f 100644 --- a/README.md +++ b/README.md @@ -87,7 +87,7 @@ When updating this, also update: - .github/workflows/lint.yml --> -The Minimum Supported Rust Version (MSRV) of this project is [1.81.0](https://blog.rust-lang.org/2024/09/05/Rust-1.81.0.html). +The Minimum Supported Rust Version (MSRV) of this project is [1.82.0](https://blog.rust-lang.org/2024/10/17/Rust-1.82.0.html). See the book for detailed instructions on how to [build from source](https://paradigmxyz.github.io/reth/installation/source.html). 
diff --git a/clippy.toml b/clippy.toml index cdfa4bc93a2..862c568634e 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,3 +1,17 @@ -msrv = "1.81" +msrv = "1.82" too-large-for-stack = 128 -doc-valid-idents = ["P2P", "ExEx", "ExExes", "IPv4", "IPv6", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "WAL", "MessagePack"] +doc-valid-idents = [ + "P2P", + "ExEx", + "ExExes", + "IPv4", + "IPv6", + "KiB", + "MiB", + "GiB", + "TiB", + "PiB", + "EiB", + "WAL", + "MessagePack", +] diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e233332a0e9..6f5cf07c95c 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -143,6 +143,7 @@ impl StaticFileProvider { // appending/truncating rows for segment in event.paths { // Ensure it's a file with the .conf extension + #[allow(clippy::nonminimal_bool)] if !segment .extension() .is_some_and(|s| s.to_str() == Some(CONFIG_FILE_EXTENSION)) From 655fc1a55a6dae5dfd522f1981142524c0ef6924 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 19 Oct 2024 00:13:02 +0200 Subject: [PATCH 035/970] rpc: add unit tests for `RpcModuleSelection` (#11883) --- crates/rpc/rpc-server-types/src/module.rs | 235 +++++++++++++++++++++- 1 file changed, 232 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index 72a5e7c8583..56417dda701 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -199,9 +199,12 @@ impl FromStr for RpcModuleSelection { } let mut modules = s.split(',').map(str::trim).peekable(); let first = modules.peek().copied().ok_or(ParseError::VariantNotFound)?; - match first { - "all" | "All" => Ok(Self::All), - "none" | "None" => Ok(Self::Selection(Default::default())), + // We convert to lowercase to 
make the comparison case-insensitive + // + // This is a way to allow typing "all" and "ALL" and "All" and "aLl" etc. + match first.to_lowercase().as_str() { + "all" => Ok(Self::All), + "none" => Ok(Self::Selection(Default::default())), _ => Self::try_from_selection(modules), } } @@ -329,3 +332,229 @@ impl Serialize for RethRpcModule { s.serialize_str(self.as_ref()) } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_all_modules() { + let all_modules = RpcModuleSelection::all_modules(); + assert_eq!(all_modules.len(), RethRpcModule::variant_count()); + } + + #[test] + fn test_standard_modules() { + let standard_modules = RpcModuleSelection::standard_modules(); + let expected_modules: HashSet = + HashSet::from([RethRpcModule::Eth, RethRpcModule::Net, RethRpcModule::Web3]); + assert_eq!(standard_modules, expected_modules); + } + + #[test] + fn test_default_ipc_modules() { + let default_ipc_modules = RpcModuleSelection::default_ipc_modules(); + assert_eq!(default_ipc_modules, RpcModuleSelection::all_modules()); + } + + #[test] + fn test_try_from_selection_success() { + let selection = vec!["eth", "admin"]; + let config = RpcModuleSelection::try_from_selection(selection).unwrap(); + assert_eq!(config, RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin])); + } + + #[test] + fn test_rpc_module_selection_len() { + let all_modules = RpcModuleSelection::All; + let standard = RpcModuleSelection::Standard; + let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + + assert_eq!(all_modules.len(), RethRpcModule::variant_count()); + assert_eq!(standard.len(), 3); + assert_eq!(selection.len(), 2); + } + + #[test] + fn test_rpc_module_selection_is_empty() { + let empty_selection = RpcModuleSelection::from(HashSet::new()); + assert!(empty_selection.is_empty()); + + let non_empty_selection = RpcModuleSelection::from([RethRpcModule::Eth]); + assert!(!non_empty_selection.is_empty()); + } + + #[test] + fn 
test_rpc_module_selection_iter_selection() { + let all_modules = RpcModuleSelection::All; + let standard = RpcModuleSelection::Standard; + let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + + assert_eq!(all_modules.iter_selection().count(), RethRpcModule::variant_count()); + assert_eq!(standard.iter_selection().count(), 3); + assert_eq!(selection.iter_selection().count(), 2); + } + + #[test] + fn test_rpc_module_selection_to_selection() { + let all_modules = RpcModuleSelection::All; + let standard = RpcModuleSelection::Standard; + let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + + assert_eq!(all_modules.to_selection(), RpcModuleSelection::all_modules()); + assert_eq!(standard.to_selection(), RpcModuleSelection::standard_modules()); + assert_eq!( + selection.to_selection(), + HashSet::from([RethRpcModule::Eth, RethRpcModule::Admin]) + ); + } + + #[test] + fn test_rpc_module_selection_are_identical() { + // Test scenario: both selections are `All` + // + // Since both selections include all possible RPC modules, they should be considered + // identical. + let all_modules = RpcModuleSelection::All; + assert!(RpcModuleSelection::are_identical(Some(&all_modules), Some(&all_modules))); + + // Test scenario: both `http` and `ws` are `None` + // + // When both arguments are `None`, the function should return `true` because no modules are + // selected. + assert!(RpcModuleSelection::are_identical(None, None)); + + // Test scenario: both selections contain identical sets of specific modules + // + // In this case, both selections contain the same modules (`Eth` and `Admin`), + // so they should be considered identical. 
+ let selection1 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + let selection2 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + assert!(RpcModuleSelection::are_identical(Some(&selection1), Some(&selection2))); + + // Test scenario: one selection is `All`, the other is `Standard` + // + // `All` includes all possible modules, while `Standard` includes a specific set of modules. + // Since `Standard` does not cover all modules, these two selections should not be + // considered identical. + let standard = RpcModuleSelection::Standard; + assert!(!RpcModuleSelection::are_identical(Some(&all_modules), Some(&standard))); + + // Test scenario: one is `None`, the other is an empty selection + // + // When one selection is `None` and the other is an empty selection (no modules), + // they should be considered identical because neither selects any modules. + let empty_selection = RpcModuleSelection::Selection(HashSet::new()); + assert!(RpcModuleSelection::are_identical(None, Some(&empty_selection))); + assert!(RpcModuleSelection::are_identical(Some(&empty_selection), None)); + + // Test scenario: one is `None`, the other is a non-empty selection + // + // If one selection is `None` and the other contains modules, they should not be considered + // identical because `None` represents no selection, while the other explicitly + // selects modules. + let non_empty_selection = RpcModuleSelection::from([RethRpcModule::Eth]); + assert!(!RpcModuleSelection::are_identical(None, Some(&non_empty_selection))); + assert!(!RpcModuleSelection::are_identical(Some(&non_empty_selection), None)); + + // Test scenario: `All` vs. non-full selection + // + // If one selection is `All` (which includes all modules) and the other contains only a + // subset of modules, they should not be considered identical. 
+ let partial_selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]); + assert!(!RpcModuleSelection::are_identical(Some(&all_modules), Some(&partial_selection))); + + // Test scenario: full selection vs `All` + // + // If the other selection explicitly selects all available modules, it should be identical + // to `All`. + let full_selection = + RpcModuleSelection::from(RethRpcModule::modules().into_iter().collect::>()); + assert!(RpcModuleSelection::are_identical(Some(&all_modules), Some(&full_selection))); + + // Test scenario: different non-empty selections + // + // If the two selections contain different sets of modules, they should not be considered + // identical. + let selection3 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]); + let selection4 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Web3]); + assert!(!RpcModuleSelection::are_identical(Some(&selection3), Some(&selection4))); + + // Test scenario: `Standard` vs an equivalent selection + // The `Standard` selection includes a predefined set of modules. If we explicitly create + // a selection with the same set of modules, they should be considered identical. + let matching_standard = + RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net, RethRpcModule::Web3]); + assert!(RpcModuleSelection::are_identical(Some(&standard), Some(&matching_standard))); + + // Test scenario: `Standard` vs non-matching selection + // + // If the selection does not match the modules included in `Standard`, they should not be + // considered identical. 
+ let non_matching_standard = + RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]); + assert!(!RpcModuleSelection::are_identical(Some(&standard), Some(&non_matching_standard))); + } + + #[test] + fn test_rpc_module_selection_from_str() { + // Test empty string returns default selection + let result = RpcModuleSelection::from_str(""); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + // Test "all" (case insensitive) returns All variant + let result = RpcModuleSelection::from_str("all"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::All); + + let result = RpcModuleSelection::from_str("All"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::All); + + let result = RpcModuleSelection::from_str("ALL"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::All); + + // Test "none" (case insensitive) returns empty selection + let result = RpcModuleSelection::from_str("none"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + let result = RpcModuleSelection::from_str("None"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + let result = RpcModuleSelection::from_str("NONE"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + // Test valid selections: "eth,admin" + let result = RpcModuleSelection::from_str("eth,admin"); + assert!(result.is_ok()); + let expected_selection = + RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + assert_eq!(result.unwrap(), expected_selection); + + // Test valid selection with extra spaces: " eth , admin " + let result = RpcModuleSelection::from_str(" eth , admin "); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), expected_selection); + + // Test invalid selection should 
return error + let result = RpcModuleSelection::from_str("invalid,unknown"); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), ParseError::VariantNotFound); + + // Test single valid selection: "eth" + let result = RpcModuleSelection::from_str("eth"); + assert!(result.is_ok()); + let expected_selection = RpcModuleSelection::from([RethRpcModule::Eth]); + assert_eq!(result.unwrap(), expected_selection); + + // Test single invalid selection: "unknown" + let result = RpcModuleSelection::from_str("unknown"); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), ParseError::VariantNotFound); + } +} From a4126b3a53f900483fe8946c8067aade68cf5cad Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Sat, 19 Oct 2024 00:15:08 +0200 Subject: [PATCH 036/970] feat: tasks executor metrics in grafana (#11815) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- etc/docker-compose.yml | 2 +- etc/grafana/dashboards/overview.json | 399 +++++++++++++++++++++------ 2 files changed, 315 insertions(+), 86 deletions(-) diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml index 618aa6f5ae6..cd7dd6dd263 100644 --- a/etc/docker-compose.yml +++ b/etc/docker-compose.yml @@ -65,7 +65,7 @@ services: sh -c "cp -r /etc/grafana/provisioning_temp/dashboards/. 
/etc/grafana/provisioning/dashboards && find /etc/grafana/provisioning/dashboards/ -name '*.json' -exec sed -i 's/$${DS_PROMETHEUS}/Prometheus/g' {} \+ && /run.sh" - + volumes: mainnet_data: driver: local diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 15786764f42..8c77f5979fe 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -1007,13 +1007,242 @@ "title": "Sync progress (stage progress as highest block number reached)", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of critical tasks currently ran by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "semi-dark-red", + "value": 0 + } + ] + }, + "unit": "tasks" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 248, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": 
"code", + "expr": "reth_executor_spawn_critical_tasks_total{instance=\"$instance\"}- reth_executor_spawn_finished_critical_tasks_total{instance=\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], + "title": "Task Executor critical tasks", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of regular tasks currently ran by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "semi-dark-red", + "value": 80 + } + ] + }, + "unit": "tasks/s" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "unit", + "value": "tasks" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 247, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, 
+ "expr": "rate(reth_executor_spawn_regular_tasks_total{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Tasks started", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_executor_spawn_regular_tasks_total{instance=\"$instance\"}- reth_executor_spawn_finished_regular_tasks_total{instance=\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], + "title": "Task Executor regular tasks", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 20 + "y": 28 }, "id": 38, "panels": [], @@ -1085,7 +1314,7 @@ "h": 8, "w": 12, "x": 0, - "y": 21 + "y": 29 }, "id": 40, "options": { @@ -1145,7 +1374,7 @@ "h": 8, "w": 12, "x": 12, - "y": 21 + "y": 29 }, "id": 42, "maxDataPoints": 25, @@ -1273,7 +1502,7 @@ "h": 8, "w": 12, "x": 0, - "y": 29 + "y": 37 }, "id": 117, "options": { @@ -1370,7 +1599,7 @@ "h": 8, "w": 12, "x": 12, - "y": 29 + "y": 37 }, "id": 116, "options": { @@ -1471,7 +1700,7 @@ "h": 8, "w": 12, "x": 0, - "y": 37 + "y": 45 }, "id": 119, "options": { @@ -1572,7 +1801,7 @@ "h": 8, "w": 12, "x": 12, - "y": 37 + "y": 45 }, "id": 118, "options": { @@ -1634,7 +1863,7 @@ "h": 8, "w": 12, "x": 0, - "y": 45 + "y": 53 }, "id": 48, "options": { @@ -1746,7 +1975,7 @@ "h": 8, "w": 12, "x": 12, - "y": 45 + "y": 53 }, "id": 52, "options": { @@ -1804,7 +2033,7 @@ "h": 8, "w": 12, "x": 0, - "y": 53 + "y": 61 }, "id": 50, "options": { @@ -1972,7 +2201,7 @@ "h": 8, "w": 12, "x": 12, - "y": 53 + "y": 61 }, "id": 58, "options": { @@ -2073,7 +2302,7 @@ "h": 8, "w": 12, "x": 0, - "y": 61 + "y": 69 }, "id": 113, "options": { @@ -2110,7 +2339,7 @@ "h": 1, "w": 24, "x": 0, - "y": 69 + "y": 77 }, "id": 203, "panels": [], @@ -2144,7 +2373,7 @@ "h": 
8, "w": 8, "x": 0, - "y": 70 + "y": 78 }, "id": 202, "options": { @@ -2305,7 +2534,7 @@ "h": 8, "w": 8, "x": 8, - "y": 70 + "y": 78 }, "id": 204, "options": { @@ -2455,7 +2684,7 @@ "h": 8, "w": 8, "x": 16, - "y": 70 + "y": 78 }, "id": 205, "options": { @@ -2556,7 +2785,7 @@ "h": 8, "w": 12, "x": 0, - "y": 78 + "y": 86 }, "id": 206, "options": { @@ -2653,7 +2882,7 @@ "h": 8, "w": 12, "x": 12, - "y": 78 + "y": 86 }, "id": 207, "options": { @@ -2690,7 +2919,7 @@ "h": 1, "w": 24, "x": 0, - "y": 86 + "y": 94 }, "id": 46, "panels": [], @@ -2761,7 +2990,7 @@ "h": 8, "w": 24, "x": 0, - "y": 87 + "y": 95 }, "id": 56, "options": { @@ -2857,7 +3086,7 @@ "h": 11, "w": 24, "x": 0, - "y": 95 + "y": 103 }, "id": 240, "options": { @@ -2916,7 +3145,7 @@ "h": 1, "w": 24, "x": 0, - "y": 106 + "y": 114 }, "id": 24, "panels": [], @@ -3014,7 +3243,7 @@ "h": 8, "w": 12, "x": 0, - "y": 107 + "y": 115 }, "id": 26, "options": { @@ -3148,7 +3377,7 @@ "h": 8, "w": 12, "x": 12, - "y": 107 + "y": 115 }, "id": 33, "options": { @@ -3268,7 +3497,7 @@ "h": 8, "w": 12, "x": 0, - "y": 115 + "y": 123 }, "id": 36, "options": { @@ -3317,7 +3546,7 @@ "h": 1, "w": 24, "x": 0, - "y": 123 + "y": 131 }, "id": 32, "panels": [], @@ -3425,7 +3654,7 @@ "h": 8, "w": 12, "x": 0, - "y": 124 + "y": 132 }, "id": 30, "options": { @@ -3591,7 +3820,7 @@ "h": 8, "w": 12, "x": 12, - "y": 124 + "y": 132 }, "id": 28, "options": { @@ -3711,7 +3940,7 @@ "h": 8, "w": 12, "x": 0, - "y": 132 + "y": 140 }, "id": 35, "options": { @@ -3837,7 +4066,7 @@ "h": 8, "w": 12, "x": 12, - "y": 132 + "y": 140 }, "id": 73, "options": { @@ -3964,7 +4193,7 @@ "h": 8, "w": 12, "x": 0, - "y": 140 + "y": 148 }, "id": 102, "options": { @@ -4027,7 +4256,7 @@ "h": 1, "w": 24, "x": 0, - "y": 148 + "y": 156 }, "id": 79, "panels": [], @@ -4101,7 +4330,7 @@ "h": 8, "w": 12, "x": 0, - "y": 149 + "y": 157 }, "id": 74, "options": { @@ -4198,7 +4427,7 @@ "h": 8, "w": 12, "x": 12, - "y": 149 + "y": 157 }, "id": 80, "options": { @@ -4295,7 +4524,7 @@ "h": 8, 
"w": 12, "x": 0, - "y": 157 + "y": 165 }, "id": 81, "options": { @@ -4392,7 +4621,7 @@ "h": 8, "w": 12, "x": 12, - "y": 157 + "y": 165 }, "id": 114, "options": { @@ -4489,7 +4718,7 @@ "h": 8, "w": 12, "x": 12, - "y": 165 + "y": 173 }, "id": 190, "options": { @@ -4527,7 +4756,7 @@ "h": 1, "w": 24, "x": 0, - "y": 173 + "y": 181 }, "id": 87, "panels": [], @@ -4601,7 +4830,7 @@ "h": 8, "w": 12, "x": 0, - "y": 174 + "y": 182 }, "id": 83, "options": { @@ -4697,7 +4926,7 @@ "h": 8, "w": 12, "x": 12, - "y": 174 + "y": 182 }, "id": 84, "options": { @@ -4805,7 +5034,7 @@ "h": 8, "w": 12, "x": 0, - "y": 182 + "y": 190 }, "id": 85, "options": { @@ -4902,7 +5131,7 @@ "h": 8, "w": 12, "x": 12, - "y": 182 + "y": 190 }, "id": 210, "options": { @@ -5227,7 +5456,7 @@ "h": 8, "w": 12, "x": 0, - "y": 190 + "y": 198 }, "id": 211, "options": { @@ -5552,7 +5781,7 @@ "h": 8, "w": 12, "x": 12, - "y": 190 + "y": 198 }, "id": 212, "options": { @@ -5775,9 +6004,9 @@ "h": 8, "w": 24, "x": 0, - "y": 198 + "y": 206 }, - "id": 213, + "id": 213, "options": { "legend": { "calcs": [], @@ -5811,7 +6040,7 @@ "h": 1, "w": 24, "x": 0, - "y": 198 + "y": 214 }, "id": 214, "panels": [], @@ -5883,7 +6112,7 @@ "h": 8, "w": 12, "x": 0, - "y": 199 + "y": 215 }, "id": 215, "options": { @@ -5979,7 +6208,7 @@ "h": 8, "w": 12, "x": 12, - "y": 199 + "y": 215 }, "id": 216, "options": { @@ -6030,7 +6259,7 @@ "h": 1, "w": 24, "x": 0, - "y": 207 + "y": 223 }, "id": 68, "panels": [], @@ -6104,7 +6333,7 @@ "h": 8, "w": 12, "x": 0, - "y": 208 + "y": 224 }, "id": 60, "options": { @@ -6200,7 +6429,7 @@ "h": 8, "w": 12, "x": 12, - "y": 208 + "y": 224 }, "id": 62, "options": { @@ -6296,7 +6525,7 @@ "h": 8, "w": 12, "x": 0, - "y": 216 + "y": 232 }, "id": 64, "options": { @@ -6333,7 +6562,7 @@ "h": 1, "w": 24, "x": 0, - "y": 224 + "y": 240 }, "id": 97, "panels": [], @@ -6418,7 +6647,7 @@ "h": 8, "w": 12, "x": 0, - "y": 225 + "y": 241 }, "id": 98, "options": { @@ -6581,7 +6810,7 @@ "h": 8, "w": 12, "x": 12, - "y": 225 + "y": 241 
}, "id": 101, "options": { @@ -6679,7 +6908,7 @@ "h": 8, "w": 12, "x": 0, - "y": 233 + "y": 249 }, "id": 99, "options": { @@ -6777,7 +7006,7 @@ "h": 8, "w": 12, "x": 12, - "y": 233 + "y": 249 }, "id": 100, "options": { @@ -6815,7 +7044,7 @@ "h": 1, "w": 24, "x": 0, - "y": 241 + "y": 257 }, "id": 105, "panels": [], @@ -6888,7 +7117,7 @@ "h": 8, "w": 12, "x": 0, - "y": 242 + "y": 258 }, "id": 106, "options": { @@ -6986,7 +7215,7 @@ "h": 8, "w": 12, "x": 12, - "y": 242 + "y": 258 }, "id": 107, "options": { @@ -7083,7 +7312,7 @@ "h": 8, "w": 12, "x": 0, - "y": 250 + "y": 266 }, "id": 217, "options": { @@ -7121,7 +7350,7 @@ "h": 1, "w": 24, "x": 0, - "y": 258 + "y": 274 }, "id": 108, "panels": [], @@ -7219,7 +7448,7 @@ "h": 8, "w": 12, "x": 0, - "y": 259 + "y": 275 }, "id": 109, "options": { @@ -7281,7 +7510,7 @@ "h": 8, "w": 12, "x": 12, - "y": 259 + "y": 275 }, "id": 111, "maxDataPoints": 25, @@ -7411,7 +7640,7 @@ "h": 8, "w": 12, "x": 0, - "y": 267 + "y": 283 }, "id": 120, "options": { @@ -7469,7 +7698,7 @@ "h": 8, "w": 12, "x": 12, - "y": 267 + "y": 283 }, "id": 112, "maxDataPoints": 25, @@ -7623,7 +7852,7 @@ "h": 8, "w": 12, "x": 0, - "y": 275 + "y": 291 }, "id": 198, "options": { @@ -7809,9 +8038,9 @@ "h": 8, "w": 12, "x": 12, - "y": 275 + "y": 291 }, - "id": 213, + "id": 246, "options": { "legend": { "calcs": [], @@ -7848,7 +8077,7 @@ "h": 1, "w": 24, "x": 0, - "y": 283 + "y": 299 }, "id": 236, "panels": [], @@ -7920,7 +8149,7 @@ "h": 8, "w": 12, "x": 0, - "y": 284 + "y": 300 }, "id": 237, "options": { @@ -8017,7 +8246,7 @@ "h": 8, "w": 12, "x": 12, - "y": 284 + "y": 300 }, "id": 238, "options": { @@ -8114,7 +8343,7 @@ "h": 8, "w": 12, "x": 0, - "y": 292 + "y": 308 }, "id": 239, "options": { @@ -8223,7 +8452,7 @@ "h": 8, "w": 12, "x": 12, - "y": 292 + "y": 308 }, "id": 219, "options": { @@ -8288,7 +8517,7 @@ "h": 8, "w": 12, "x": 0, - "y": 300 + "y": 316 }, "id": 220, "options": { @@ -8332,7 +8561,7 @@ "h": 1, "w": 24, "x": 0, - "y": 308 + "y": 324 }, "id": 241, 
"panels": [], @@ -8405,7 +8634,7 @@ "h": 8, "w": 12, "x": 0, - "y": 309 + "y": 325 }, "id": 243, "options": { @@ -8517,7 +8746,7 @@ "h": 8, "w": 12, "x": 12, - "y": 309 + "y": 325 }, "id": 244, "options": { @@ -8630,7 +8859,7 @@ "h": 8, "w": 12, "x": 0, - "y": 317 + "y": 333 }, "id": 245, "options": { @@ -8669,7 +8898,7 @@ "h": 1, "w": 24, "x": 0, - "y": 325 + "y": 341 }, "id": 226, "panels": [], @@ -8767,7 +8996,7 @@ "h": 8, "w": 12, "x": 0, - "y": 326 + "y": 342 }, "id": 225, "options": { @@ -8896,7 +9125,7 @@ "h": 8, "w": 12, "x": 12, - "y": 326 + "y": 342 }, "id": 227, "options": { @@ -9025,7 +9254,7 @@ "h": 8, "w": 12, "x": 0, - "y": 334 + "y": 350 }, "id": 235, "options": { @@ -9154,7 +9383,7 @@ "h": 8, "w": 12, "x": 12, - "y": 334 + "y": 350 }, "id": 234, "options": { From a6daafc6a4f087101a849a28a334df34275ffcb6 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 19 Oct 2024 10:27:29 +0200 Subject: [PATCH 037/970] refactor(txpool): small refactor for `InMemoryBlobStore` impl (#11886) --- crates/transaction-pool/src/blobstore/mem.rs | 28 +++++--------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index 15160c2c3fa..c98a01b88c1 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -76,42 +76,26 @@ impl BlobStore for InMemoryBlobStore { // Retrieves the decoded blob data for the given transaction hash. 
fn get(&self, tx: B256) -> Result, BlobStoreError> { - let store = self.inner.store.read(); - Ok(store.get(&tx).cloned()) + Ok(self.inner.store.read().get(&tx).cloned()) } fn contains(&self, tx: B256) -> Result { - let store = self.inner.store.read(); - Ok(store.contains_key(&tx)) + Ok(self.inner.store.read().contains_key(&tx)) } fn get_all( &self, txs: Vec, ) -> Result, BlobStoreError> { - let mut items = Vec::with_capacity(txs.len()); let store = self.inner.store.read(); - for tx in txs { - if let Some(item) = store.get(&tx) { - items.push((tx, item.clone())); - } - } - - Ok(items) + Ok(txs.into_iter().filter_map(|tx| store.get(&tx).map(|item| (tx, item.clone()))).collect()) } fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { - let mut items = Vec::with_capacity(txs.len()); let store = self.inner.store.read(); - for tx in txs { - if let Some(item) = store.get(&tx) { - items.push(item.clone()); - } else { - return Err(BlobStoreError::MissingSidecar(tx)) - } - } - - Ok(items) + txs.into_iter() + .map(|tx| store.get(&tx).cloned().ok_or_else(|| BlobStoreError::MissingSidecar(tx))) + .collect() } fn get_by_versioned_hashes( From 2f559c62bf94e0fed23e500336ceed1e941e0ed5 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 19 Oct 2024 10:28:10 +0200 Subject: [PATCH 038/970] primitives: use alloy `MAXIMUM_EXTRA_DATA_SIZE` constant (#11881) --- Cargo.lock | 1 + crates/consensus/common/Cargo.toml | 1 + crates/consensus/common/src/validation.rs | 6 ++---- crates/node/core/Cargo.toml | 5 ++--- crates/node/core/src/args/payload_builder.rs | 5 ++--- crates/primitives-traits/src/constants/mod.rs | 3 --- crates/rpc/rpc-types-compat/src/engine/payload.rs | 3 +-- 7 files changed, 9 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c8b4f76329e..a97c01e01bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7886,6 +7886,7 @@ dependencies = [ name = "reth-node-core" version = "1.1.0" dependencies = [ + 
"alloy-consensus", "alloy-primitives", "alloy-rpc-types-engine", "clap", diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 66a92270dba..eaae1301b46 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -19,6 +19,7 @@ reth-consensus.workspace = true # ethereum alloy-primitives.workspace = true revm-primitives.workspace = true +alloy-consensus.workspace = true [dev-dependencies] reth-storage-api.workspace = true diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 711e7772b66..4c2e6b192e7 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,12 +1,10 @@ //! Collection of methods for block validation. +use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ - constants::{ - eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, - MAXIMUM_EXTRA_DATA_SIZE, - }, + constants::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader, }; use revm_primitives::calc_excess_blob_gas; diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 3ac90a88870..d7d95751cc5 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -39,6 +39,7 @@ reth-stages-types.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jwt"] } +alloy-consensus.workspace = true # misc eyre.workspace = true @@ -76,9 +77,7 @@ tokio.workspace = true tempfile.workspace = true [features] -optimism = [ - "reth-primitives/optimism" -] +optimism = ["reth-primitives/optimism"] # Features for vergen to generate correct env vars jemalloc = [] asm-keccak = [] diff --git a/crates/node/core/src/args/payload_builder.rs 
b/crates/node/core/src/args/payload_builder.rs index aec35253af2..4a18fd5b0b7 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -1,12 +1,11 @@ use crate::{cli::config::PayloadBuilderConfig, version::default_extradata}; +use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; use clap::{ builder::{RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; use reth_cli_util::{parse_duration_from_secs, parse_duration_from_secs_or_ms}; -use reth_primitives::constants::{ - ETHEREUM_BLOCK_GAS_LIMIT, MAXIMUM_EXTRA_DATA_SIZE, SLOT_DURATION, -}; +use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, SLOT_DURATION}; use std::{borrow::Cow, ffi::OsStr, time::Duration}; /// Parameters for configuring the Payload Builder diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index a4918137e7c..482852bdccd 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -13,9 +13,6 @@ pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION" /// The first four bytes of the call data for a function call specifies the function to be called. pub const SELECTOR_LEN: usize = 4; -/// Maximum extra data size in a block after genesis -pub const MAXIMUM_EXTRA_DATA_SIZE: usize = 32; - /// An EPOCH is a series of 32 slots. pub const EPOCH_SLOTS: u64 = 32; diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index cd9ce1cbf7d..3bbee2b00ea 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -1,7 +1,7 @@ //! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in //! 
Ethereum's Engine -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ @@ -10,7 +10,6 @@ use alloy_rpc_types_engine::{ ExecutionPayloadV4, PayloadError, }; use reth_primitives::{ - constants::MAXIMUM_EXTRA_DATA_SIZE, proofs::{self}, Block, BlockBody, Header, Request, SealedBlock, TransactionSigned, Withdrawals, }; From da5079d11fa7f1595323b5636b928fb61f1e61cf Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 19 Oct 2024 10:28:42 +0200 Subject: [PATCH 039/970] test(txpool): add unit test for `BlobStoreCanonTracker` (#11885) --- crates/transaction-pool/Cargo.toml | 6 +- .../transaction-pool/src/blobstore/tracker.rs | 88 +++++++++++++++++++ 2 files changed, 93 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 887543b521a..cdac6a1aae6 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -74,7 +74,11 @@ serde_json.workspace = true default = ["serde"] serde = ["dep:serde"] test-utils = ["rand", "paste", "serde"] -arbitrary = ["proptest", "reth-primitives/arbitrary", "proptest-arbitrary-interop"] +arbitrary = [ + "proptest", + "reth-primitives/arbitrary", + "proptest-arbitrary-interop", +] [[bench]] name = "truncate" diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index e6041fa12e1..f22dcf5706e 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -81,6 +81,13 @@ pub enum BlobStoreUpdates { #[cfg(test)] mod tests { + use alloy_consensus::Header; + use reth_execution_types::Chain; + use reth_primitives::{ + BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + 
TransactionSigned, + }; + use super::*; #[test] @@ -101,4 +108,85 @@ mod tests { BlobStoreUpdates::Finalized(block2.into_iter().chain(block3).collect::>()) ); } + + #[test] + fn test_add_new_chain_blocks() { + let mut tracker = BlobStoreCanonTracker::default(); + + // Create sample transactions + let tx1_hash = B256::random(); // EIP-4844 transaction + let tx2_hash = B256::random(); // EIP-4844 transaction + let tx3_hash = B256::random(); // Non-EIP-4844 transaction + + // Creating a first block with EIP-4844 transactions + let block1 = SealedBlockWithSenders { + block: SealedBlock { + header: SealedHeader::new( + Header { number: 10, ..Default::default() }, + B256::random(), + ), + body: BlockBody { + transactions: vec![ + TransactionSigned { + hash: tx1_hash, + transaction: Transaction::Eip4844(Default::default()), + ..Default::default() + }, + TransactionSigned { + hash: tx2_hash, + transaction: Transaction::Eip4844(Default::default()), + ..Default::default() + }, + // Another transaction that is not EIP-4844 + TransactionSigned { + hash: B256::random(), + transaction: Transaction::Eip7702(Default::default()), + ..Default::default() + }, + ], + ..Default::default() + }, + }, + ..Default::default() + }; + + // Creating a second block with EIP-1559 and EIP-2930 transactions + // Note: This block does not contain any EIP-4844 transactions + let block2 = SealedBlockWithSenders { + block: SealedBlock { + header: SealedHeader::new( + Header { number: 11, ..Default::default() }, + B256::random(), + ), + body: BlockBody { + transactions: vec![ + TransactionSigned { + hash: tx3_hash, + transaction: Transaction::Eip1559(Default::default()), + ..Default::default() + }, + TransactionSigned { + hash: tx2_hash, + transaction: Transaction::Eip2930(Default::default()), + ..Default::default() + }, + ], + ..Default::default() + }, + }, + ..Default::default() + }; + + // Extract blocks from the chain + let chain = Chain::new(vec![block1, block2], Default::default(), None); + let 
blocks = chain.into_inner().0; + + // Add new chain blocks to the tracker + tracker.add_new_chain_blocks(&blocks); + + // Tx1 and tx2 should be in the block containing EIP-4844 transactions + assert_eq!(tracker.blob_txs_in_blocks.get(&10).unwrap(), &vec![tx1_hash, tx2_hash]); + // No transactions should be in the block containing non-EIP-4844 transactions + assert!(tracker.blob_txs_in_blocks.get(&11).unwrap().is_empty()); + } } From 2ae93682b4978bf80bf27c777334ead30d3e04f5 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 19 Oct 2024 14:08:34 +0400 Subject: [PATCH 040/970] refactor: move `EngineValidator` setup to `RpcAddOns` (#11850) --- Cargo.lock | 1 - crates/ethereum/node/src/node.rs | 19 ++-- crates/exex/test-utils/Cargo.toml | 1 - crates/exex/test-utils/src/lib.rs | 11 +- crates/node/api/src/node.rs | 7 -- crates/node/builder/src/builder/states.rs | 5 - crates/node/builder/src/components/builder.rs | 104 ++++-------------- crates/node/builder/src/components/engine.rs | 38 ------- crates/node/builder/src/components/mod.rs | 29 +---- crates/node/builder/src/rpc.rs | 70 ++++++++++-- crates/optimism/node/src/node.rs | 37 ++++--- examples/custom-engine-types/src/main.rs | 37 +++++-- 12 files changed, 140 insertions(+), 219 deletions(-) delete mode 100644 crates/node/builder/src/components/engine.rs diff --git a/Cargo.lock b/Cargo.lock index a97c01e01bc..98bfe1f8e00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7495,7 +7495,6 @@ dependencies = [ "reth-consensus", "reth-db", "reth-db-common", - "reth-ethereum-engine-primitives", "reth-evm", "reth-execution-types", "reth-exex", diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index a890810b00e..d3301b2082e 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -12,15 +12,16 @@ use reth_ethereum_engine_primitives::{ use reth_evm_ethereum::execute::EthExecutorProvider; use reth_network::NetworkHandle; use reth_node_api::{ - ConfigureEvm, 
EngineValidator, FullNodeComponents, NodePrimitives, NodeTypesWithDB, + AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodePrimitives, + NodeTypesWithDB, }; use reth_node_builder::{ components::{ - ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, - NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, + PayloadServiceBuilder, PoolBuilder, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - rpc::RpcAddOns, + rpc::{EngineValidatorBuilder, RpcAddOns}, BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; @@ -57,7 +58,6 @@ impl EthereumNode { EthereumNetworkBuilder, EthereumExecutorBuilder, EthereumConsensusBuilder, - EthereumEngineValidatorBuilder, > where Node: FullNodeTypes>, @@ -74,7 +74,6 @@ impl EthereumNode { .network(EthereumNetworkBuilder::default()) .executor(EthereumExecutorBuilder::default()) .consensus(EthereumConsensusBuilder::default()) - .engine_validator(EthereumEngineValidatorBuilder::default()) } } @@ -96,6 +95,7 @@ pub type EthereumAddOns = RpcAddOns< NetworkHandle, ::Evm, >, + EthereumEngineValidatorBuilder, >; impl Node for EthereumNode @@ -110,7 +110,6 @@ where EthereumNetworkBuilder, EthereumExecutorBuilder, EthereumConsensusBuilder, - EthereumEngineValidatorBuilder, >; type AddOns = EthereumAddOns< @@ -347,12 +346,12 @@ pub struct EthereumEngineValidatorBuilder; impl EngineValidatorBuilder for EthereumEngineValidatorBuilder where Types: NodeTypesWithEngine, - Node: FullNodeTypes, + Node: FullNodeComponents, EthereumEngineValidator: EngineValidator, { type Validator = EthereumEngineValidator; - async fn build_validator(self, ctx: &BuilderContext) -> eyre::Result { - Ok(EthereumEngineValidator::new(ctx.chain_spec())) + async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { + 
Ok(EthereumEngineValidator::new(ctx.config.chain.clone())) } } diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index 8488cdb8b73..b850295a332 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -31,7 +31,6 @@ reth-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } -reth-ethereum-engine-primitives.workspace = true ## async futures-util.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 9e17013c4a5..9b86da7c77a 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -24,7 +24,6 @@ use reth_db::{ DatabaseEnv, }; use reth_db_common::init::init_genesis; -use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_evm::test_utils::MockExecutorProvider; use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal}; @@ -41,10 +40,7 @@ use reth_node_builder::{ }; use reth_node_core::node_config::NodeConfig; use reth_node_ethereum::{ - node::{ - EthereumAddOns, EthereumEngineValidatorBuilder, EthereumNetworkBuilder, - EthereumPayloadBuilder, - }, + node::{EthereumAddOns, EthereumNetworkBuilder, EthereumPayloadBuilder}, EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; @@ -140,7 +136,6 @@ where EthereumNetworkBuilder, TestExecutorBuilder, TestConsensusBuilder, - EthereumEngineValidatorBuilder, >; type AddOns = EthereumAddOns< NodeAdapter>::Components>, @@ -154,7 +149,6 @@ where .network(EthereumNetworkBuilder::default()) .executor(TestExecutorBuilder::default()) .consensus(TestConsensusBuilder::default()) - .engine_validator(EthereumEngineValidatorBuilder::default()) } fn add_ons(&self) -> Self::AddOns { @@ -284,8 +278,6 @@ pub async fn test_exex_context_with_chain_spec( let 
tasks = TaskManager::current(); let task_executor = tasks.executor(); - let engine_validator = EthereumEngineValidator::new(chain_spec.clone()); - let components = NodeAdapter::, _>, _> { components: Components { transaction_pool, @@ -294,7 +286,6 @@ pub async fn test_exex_context_with_chain_spec( consensus, network, payload_builder, - engine_validator, }, task_executor, provider, diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 40c2a3a60b0..3173fd2b398 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -5,7 +5,6 @@ use std::{future::Future, marker::PhantomData}; use alloy_rpc_types_engine::JwtSecret; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_consensus::Consensus; -use reth_engine_primitives::EngineValidator; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; @@ -64,9 +63,6 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// Network API. type Network: FullNetwork; - /// Validator for the engine API. - type EngineValidator: EngineValidator<::Engine>; - /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -87,9 +83,6 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { &self, ) -> &PayloadBuilderHandle<::Engine>; - /// Returns the engine validator. - fn engine_validator(&self) -> &Self::EngineValidator; - /// Returns the provider of the node. 
fn provider(&self) -> &Self::Provider; diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index c4da466f23e..e75a07802a6 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -97,7 +97,6 @@ impl> FullNodeComponents for NodeAdapter< type Executor = C::Executor; type Network = C::Network; type Consensus = C::Consensus; - type EngineValidator = C::EngineValidator; fn pool(&self) -> &Self::Pool { self.components.pool() @@ -130,10 +129,6 @@ impl> FullNodeComponents for NodeAdapter< fn consensus(&self) -> &Self::Consensus { self.components.consensus() } - - fn engine_validator(&self) -> &Self::EngineValidator { - self.components.engine_validator() - } } impl> Clone for NodeAdapter { diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index ab8e29929a9..48a0ba9b5fd 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -4,7 +4,6 @@ use std::{future::Future, marker::PhantomData}; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::{EngineValidator, NodeTypesWithEngine}; use reth_primitives::Header; use reth_transaction_pool::TransactionPool; @@ -16,8 +15,6 @@ use crate::{ BuilderContext, ConfigureEvm, FullNodeTypes, }; -use super::EngineValidatorBuilder; - /// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. /// /// This type is stateful and captures the configuration of the node's components. @@ -38,23 +35,22 @@ use super::EngineValidatorBuilder; /// All component builders are captured in the builder state and will be consumed once the node is /// launched. 
#[derive(Debug)] -pub struct ComponentsBuilder { +pub struct ComponentsBuilder { pool_builder: PoolB, payload_builder: PayloadB, network_builder: NetworkB, executor_builder: ExecB, consensus_builder: ConsB, - engine_validator_builder: EVB, _marker: PhantomData, } -impl - ComponentsBuilder +impl + ComponentsBuilder { /// Configures the node types. pub fn node_types( self, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where Types: FullNodeTypes, { @@ -64,7 +60,6 @@ impl network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } = self; ComponentsBuilder { @@ -73,7 +68,6 @@ impl payload_builder, network_builder, consensus_builder, - engine_validator_builder, _marker: Default::default(), } } @@ -86,7 +80,6 @@ impl network_builder: self.network_builder, executor_builder: self.executor_builder, consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, _marker: self._marker, } } @@ -99,7 +92,6 @@ impl network_builder: self.network_builder, executor_builder: self.executor_builder, consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, _marker: self._marker, } } @@ -112,7 +104,6 @@ impl network_builder: f(self.network_builder), executor_builder: self.executor_builder, consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, _marker: self._marker, } } @@ -125,7 +116,6 @@ impl network_builder: self.network_builder, executor_builder: f(self.executor_builder), consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, _marker: self._marker, } } @@ -138,14 +128,13 @@ impl network_builder: self.network_builder, executor_builder: self.executor_builder, consensus_builder: f(self.consensus_builder), - engine_validator_builder: self.engine_validator_builder, _marker: self._marker, } } } -impl - ComponentsBuilder +impl + ComponentsBuilder where Node: 
FullNodeTypes, { @@ -156,7 +145,7 @@ where pub fn pool( self, pool_builder: PB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where PB: PoolBuilder, { @@ -166,7 +155,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } = self; ComponentsBuilder { @@ -175,14 +163,13 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } } } -impl - ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, @@ -194,7 +181,7 @@ where pub fn network( self, network_builder: NB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where NB: NetworkBuilder, { @@ -204,7 +191,6 @@ where network_builder: _, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } = self; ComponentsBuilder { @@ -213,7 +199,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } } @@ -225,7 +210,7 @@ where pub fn payload( self, payload_builder: PB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where PB: PayloadServiceBuilder, { @@ -235,7 +220,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } = self; ComponentsBuilder { @@ -244,7 +228,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } } @@ -256,7 +239,7 @@ where pub fn executor( self, executor_builder: EB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where EB: ExecutorBuilder, { @@ -266,7 +249,6 @@ where network_builder, executor_builder: _, consensus_builder, - engine_validator_builder, _marker, } = self; ComponentsBuilder { @@ -275,7 +257,6 @@ where network_builder, executor_builder, consensus_builder, - engine_validator_builder, _marker, } } @@ -287,7 +268,7 @@ where pub fn consensus( self, consensus_builder: CB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where CB: 
ConsensusBuilder, { @@ -297,38 +278,7 @@ where network_builder, executor_builder, consensus_builder: _, - engine_validator_builder, - _marker, - } = self; - ComponentsBuilder { - pool_builder, - payload_builder, - network_builder, - executor_builder, - consensus_builder, - engine_validator_builder, - _marker, - } - } - /// Configures the consensus builder. - /// - /// This accepts a [`ConsensusBuilder`] instance that will be used to create the node's - /// components for consensus. - pub fn engine_validator( - self, - engine_validator_builder: EngineVB, - ) -> ComponentsBuilder - where - EngineVB: EngineValidatorBuilder, - { - let Self { - pool_builder, - payload_builder, - network_builder, - executor_builder, - consensus_builder, - engine_validator_builder: _, _marker, } = self; ComponentsBuilder { @@ -337,14 +287,13 @@ where network_builder, executor_builder, consensus_builder, - engine_validator_builder, _marker, } } } -impl NodeComponentsBuilder - for ComponentsBuilder +impl NodeComponentsBuilder + for ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, @@ -352,16 +301,8 @@ where PayloadB: PayloadServiceBuilder, ExecB: ExecutorBuilder, ConsB: ConsensusBuilder, - EVB: EngineValidatorBuilder, { - type Components = Components< - Node, - PoolB::Pool, - ExecB::EVM, - ExecB::Executor, - ConsB::Consensus, - EVB::Validator, - >; + type Components = Components; async fn build_components( self, @@ -373,7 +314,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } = self; @@ -382,7 +322,6 @@ where let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; let consensus = consensus_builder.build_consensus(context).await?; - let engine_validator = engine_validator_builder.build_validator(context).await?; Ok(Components { transaction_pool: pool, @@ -391,12 +330,11 @@ where payload_builder, 
executor, consensus, - engine_validator, }) } } -impl Default for ComponentsBuilder<(), (), (), (), (), (), ()> { +impl Default for ComponentsBuilder<(), (), (), (), (), ()> { fn default() -> Self { Self { pool_builder: (), @@ -404,7 +342,6 @@ impl Default for ComponentsBuilder<(), (), (), (), (), (), ()> { network_builder: (), executor_builder: (), consensus_builder: (), - engine_validator_builder: (), _marker: Default::default(), } } @@ -430,18 +367,17 @@ pub trait NodeComponentsBuilder: Send { ) -> impl Future> + Send; } -impl NodeComponentsBuilder for F +impl NodeComponentsBuilder for F where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future>> + Send, + Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm

, Executor: BlockExecutorProvider, Cons: Consensus + Clone + Unpin + 'static, - Val: EngineValidator<::Engine> + Clone + Unpin + 'static, { - type Components = Components; + type Components = Components; fn build_components( self, diff --git a/crates/node/builder/src/components/engine.rs b/crates/node/builder/src/components/engine.rs deleted file mode 100644 index b3ee7cbbbf2..00000000000 --- a/crates/node/builder/src/components/engine.rs +++ /dev/null @@ -1,38 +0,0 @@ -//! Consensus component for the node builder. -use reth_node_api::{EngineValidator, NodeTypesWithEngine}; - -use crate::{BuilderContext, FullNodeTypes}; -use std::future::Future; - -/// A type that knows how to build the engine validator. -pub trait EngineValidatorBuilder: Send { - /// The consensus implementation to build. - type Validator: EngineValidator<::Engine> - + Clone - + Unpin - + 'static; - - /// Creates the engine validator. - fn build_validator( - self, - ctx: &BuilderContext, - ) -> impl Future> + Send; -} - -impl EngineValidatorBuilder for F -where - Node: FullNodeTypes, - Validator: - EngineValidator<::Engine> + Clone + Unpin + 'static, - F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future> + Send, -{ - type Validator = Validator; - - fn build_validator( - self, - ctx: &BuilderContext, - ) -> impl Future> { - self(ctx) - } -} diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index ff1646593ed..42001fc1005 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -9,7 +9,6 @@ mod builder; mod consensus; -mod engine; mod execute; mod network; mod payload; @@ -17,7 +16,6 @@ mod pool; pub use builder::*; pub use consensus::*; -pub use engine::*; pub use execute::*; pub use network::*; pub use payload::*; @@ -27,7 +25,7 @@ use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; -use 
reth_node_api::{EngineValidator, NodeTypesWithEngine}; +use reth_node_api::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::Header; use reth_transaction_pool::TransactionPool; @@ -55,9 +53,6 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati /// Network API. type Network: FullNetwork; - /// Validator for the engine API. - type EngineValidator: EngineValidator<::Engine>; - /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -75,16 +70,13 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati /// Returns the handle to the payload builder service. fn payload_builder(&self) -> &PayloadBuilderHandle<::Engine>; - - /// Returns the engine validator. - fn engine_validator(&self) -> &Self::EngineValidator; } /// All the components of the node. /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct Components { +pub struct Components { /// The transaction pool of the node. pub transaction_pool: Pool, /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. @@ -97,26 +89,22 @@ pub struct Components::Engine>, - /// The validator for the engine API. - pub engine_validator: Validator, } -impl NodeComponents - for Components +impl NodeComponents + for Components where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm
, Executor: BlockExecutorProvider, Cons: Consensus + Clone + Unpin + 'static, - Val: EngineValidator<::Engine> + Clone + Unpin + 'static, { type Pool = Pool; type Evm = EVM; type Executor = Executor; type Consensus = Cons; type Network = NetworkHandle; - type EngineValidator = Val; fn pool(&self) -> &Self::Pool { &self.transaction_pool @@ -143,21 +131,15 @@ where ) -> &PayloadBuilderHandle<::Engine> { &self.payload_builder } - - fn engine_validator(&self) -> &Self::EngineValidator { - &self.engine_validator - } } -impl Clone - for Components +impl Clone for Components where Node: FullNodeTypes, Pool: TransactionPool, EVM: ConfigureEvm
, Executor: BlockExecutorProvider, Cons: Consensus + Clone, - Val: EngineValidator<::Engine>, { fn clone(&self) -> Self { Self { @@ -167,7 +149,6 @@ where consensus: self.consensus.clone(), network: self.network.clone(), payload_builder: self.payload_builder.clone(), - engine_validator: self.engine_validator.clone(), } } } diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index d8cce9217ef..18293118dc6 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -2,6 +2,7 @@ use std::{ fmt::{self, Debug}, + future::Future, marker::PhantomData, ops::{Deref, DerefMut}, }; @@ -9,7 +10,7 @@ use std::{ use alloy_rpc_types::engine::ClientVersionV1; use futures::TryFutureExt; use reth_node_api::{ - AddOnsContext, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, }; use reth_node_core::{ node_config::NodeConfig, @@ -327,31 +328,38 @@ where /// Node add-ons containing RPC server configuration, with customizable eth API handler. #[allow(clippy::type_complexity)] -pub struct RpcAddOns { +pub struct RpcAddOns { /// Additional RPC add-ons. pub hooks: RpcHooks, /// Builder for `EthApi` eth_api_builder: Box) -> EthApi + Send + Sync>, + /// Engine validator + engine_validator_builder: EV, _pd: PhantomData<(Node, EthApi)>, } -impl Debug for RpcAddOns { +impl Debug + for RpcAddOns +{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RpcAddOns") .field("hooks", &self.hooks) .field("eth_api_builder", &"...") + .field("engine_validator_builder", &self.engine_validator_builder) .finish() } } -impl RpcAddOns { +impl RpcAddOns { /// Creates a new instance of the RPC add-ons. 
pub fn new( eth_api_builder: impl FnOnce(&EthApiBuilderCtx) -> EthApi + Send + Sync + 'static, + engine_validator_builder: EV, ) -> Self { Self { hooks: RpcHooks::default(), eth_api_builder: Box::new(eth_api_builder), + engine_validator_builder, _pd: PhantomData, } } @@ -377,23 +385,28 @@ impl RpcAddOns { } } -impl> Default - for RpcAddOns +impl Default for RpcAddOns +where + Node: FullNodeComponents, + EthApi: EthApiTypes + EthApiBuilder, + EV: Default, { fn default() -> Self { - Self::new(EthApi::build) + Self::new(EthApi::build, EV::default()) } } -impl NodeAddOns for RpcAddOns +impl NodeAddOns for RpcAddOns where N: FullNodeComponents, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, + EV: EngineValidatorBuilder, { type Handle = RpcHandle; async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { let AddOnsContext { node, config, beacon_engine_handle, jwt_secret } = ctx; + let Self { eth_api_builder, engine_validator_builder, hooks, _pd: _ } = self; let client = ClientVersionV1 { code: CLIENT_CODE, @@ -411,7 +424,7 @@ where Box::new(node.task_executor().clone()), client, EngineCapabilities::default(), - node.engine_validator().clone(), + engine_validator_builder.build(&ctx).await?, ); info!(target: "reth::cli", "Engine API handler initialized"); @@ -427,7 +440,7 @@ where .with_executor(node.task_executor().clone()) .with_evm_config(node.evm_config().clone()) .with_block_executor(node.block_executor().clone()) - .build_with_auth_server(module_config, engine_api, self.eth_api_builder); + .build_with_auth_server(module_config, engine_api, eth_api_builder); // in dev mode we generate 20 random dev-signer accounts if config.dev.dev { @@ -443,7 +456,7 @@ where auth_module: &mut auth_module, }; - let RpcHooks { on_rpc_started, extend_rpc_modules } = self.hooks; + let RpcHooks { on_rpc_started, extend_rpc_modules } = hooks; extend_rpc_modules.extend_rpc_modules(ctx)?; @@ -503,7 +516,7 @@ pub trait RethRpcAddOns: fn 
hooks_mut(&mut self) -> &mut RpcHooks; } -impl RethRpcAddOns for RpcAddOns +impl RethRpcAddOns for RpcAddOns where Self: NodeAddOns>, { @@ -525,3 +538,36 @@ impl EthApiBuilder for EthApi: Send { + /// The consensus implementation to build. + type Validator: EngineValidator<::Engine> + + Clone + + Unpin + + 'static; + + /// Creates the engine validator. + fn build( + self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future> + Send; +} + +impl EngineValidatorBuilder for F +where + Node: FullNodeComponents, + Validator: + EngineValidator<::Engine> + Clone + Unpin + 'static, + F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send, + Fut: Future> + Send, +{ + type Validator = Validator; + + fn build( + self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future> { + self(ctx) + } +} diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index c2576d318dd..175b2d4bf41 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -6,14 +6,16 @@ use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGenera use reth_chainspec::{EthChainSpec, Hardforks}; use reth_evm::ConfigureEvm; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager}; -use reth_node_api::{EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives}; +use reth_node_api::{ + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, +}; use reth_node_builder::{ components::{ - ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, - NetworkBuilder, PayloadServiceBuilder, PoolBuilder, PoolBuilderConfigOverrides, + ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, + PayloadServiceBuilder, PoolBuilder, PoolBuilderConfigOverrides, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - rpc::{RethRpcAddOns, RpcAddOns, RpcHandle}, + rpc::{EngineValidatorBuilder, RethRpcAddOns, RpcAddOns, RpcHandle}, BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, 
PayloadBuilderConfig, }; use reth_optimism_chainspec::OpChainSpec; @@ -68,7 +70,6 @@ impl OptimismNode { OptimismNetworkBuilder, OptimismExecutorBuilder, OptimismConsensusBuilder, - OptimismEngineValidatorBuilder, > where Node: FullNodeTypes< @@ -86,7 +87,6 @@ impl OptimismNode { }) .executor(OptimismExecutorBuilder::default()) .consensus(OptimismConsensusBuilder::default()) - .engine_validator(OptimismEngineValidatorBuilder::default()) } } @@ -103,7 +103,6 @@ where OptimismNetworkBuilder, OptimismExecutorBuilder, OptimismConsensusBuilder, - OptimismEngineValidatorBuilder, >; type AddOns = OptimismAddOns< @@ -131,7 +130,9 @@ impl NodeTypesWithEngine for OptimismNode { /// Add-ons w.r.t. optimism. #[derive(Debug)] -pub struct OptimismAddOns(pub RpcAddOns>); +pub struct OptimismAddOns( + pub RpcAddOns, OptimismEngineValidatorBuilder>, +); impl Default for OptimismAddOns { fn default() -> Self { @@ -142,12 +143,14 @@ impl Default for OptimismAddOns { impl OptimismAddOns { /// Create a new instance with the given `sequencer_http` URL. 
pub fn new(sequencer_http: Option) -> Self { - Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http))) + Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http), Default::default())) } } -impl>> NodeAddOns - for OptimismAddOns +impl NodeAddOns for OptimismAddOns +where + N: FullNodeComponents>, + OptimismEngineValidator: EngineValidator<::Engine>, { type Handle = RpcHandle>; @@ -159,8 +162,10 @@ impl>> NodeAddOn } } -impl>> RethRpcAddOns - for OptimismAddOns +impl RethRpcAddOns for OptimismAddOns +where + N: FullNodeComponents>, + OptimismEngineValidator: EngineValidator<::Engine>, { type EthApi = OpEthApi; @@ -458,12 +463,12 @@ pub struct OptimismEngineValidatorBuilder; impl EngineValidatorBuilder for OptimismEngineValidatorBuilder where Types: NodeTypesWithEngine, - Node: FullNodeTypes, + Node: FullNodeComponents, OptimismEngineValidator: EngineValidator, { type Validator = OptimismEngineValidator; - async fn build_validator(self, ctx: &BuilderContext) -> eyre::Result { - Ok(OptimismEngineValidator::new(ctx.chain_spec())) + async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { + Ok(OptimismEngineValidator::new(ctx.config.chain.clone())) } } diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index f833da86236..135c4f3f247 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -34,12 +34,15 @@ use alloy_rpc_types::{ use reth::{ api::PayloadTypes, builder::{ - components::{ComponentsBuilder, EngineValidatorBuilder, PayloadServiceBuilder}, + components::{ComponentsBuilder, PayloadServiceBuilder}, node::{NodeTypes, NodeTypesWithEngine}, + rpc::{EngineValidatorBuilder, RpcAddOns}, BuilderContext, FullNodeTypes, Node, NodeAdapter, NodeBuilder, NodeComponentsBuilder, PayloadBuilderConfig, }, + network::NetworkHandle, providers::{CanonStateSubscriptions, StateProviderFactory}, + rpc::eth::EthApi, tasks::TaskManager, 
transaction_pool::TransactionPool, }; @@ -50,13 +53,13 @@ use reth_basic_payload_builder::{ use reth_chainspec::{Chain, ChainSpec, ChainSpecProvider}; use reth_node_api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes}, - validate_version_specific_fields, EngineTypes, EngineValidator, PayloadAttributes, - PayloadBuilderAttributes, + validate_version_specific_fields, AddOnsContext, EngineTypes, EngineValidator, + FullNodeComponents, PayloadAttributes, PayloadBuilderAttributes, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{ node::{ - EthereumAddOns, EthereumConsensusBuilder, EthereumExecutorBuilder, EthereumNetworkBuilder, + EthereumConsensusBuilder, EthereumExecutorBuilder, EthereumNetworkBuilder, EthereumPoolBuilder, }, EthEvmConfig, @@ -202,12 +205,14 @@ pub struct CustomEngineValidatorBuilder; impl EngineValidatorBuilder for CustomEngineValidatorBuilder where - N: FullNodeTypes>, + N: FullNodeComponents< + Types: NodeTypesWithEngine, + >, { type Validator = CustomEngineValidator; - async fn build_validator(self, ctx: &BuilderContext) -> eyre::Result { - Ok(CustomEngineValidator { chain_spec: ctx.chain_spec() }) + async fn build(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + Ok(CustomEngineValidator { chain_spec: ctx.config.chain.clone() }) } } @@ -226,6 +231,18 @@ impl NodeTypesWithEngine for MyCustomNode { type Engine = CustomEngineTypes; } +/// Custom addons configuring RPC types +pub type MyNodeAddOns = RpcAddOns< + N, + EthApi< + ::Provider, + ::Pool, + NetworkHandle, + ::Evm, + >, + CustomEngineValidatorBuilder, +>; + /// Implement the Node trait for the custom node /// /// This provides a preset configuration for the node @@ -240,9 +257,8 @@ where EthereumNetworkBuilder, EthereumExecutorBuilder, EthereumConsensusBuilder, - CustomEngineValidatorBuilder, >; - type AddOns = EthereumAddOns< + type AddOns = MyNodeAddOns< NodeAdapter>::Components>, >; @@ -254,11 +270,10 
@@ where .network(EthereumNetworkBuilder::default()) .executor(EthereumExecutorBuilder::default()) .consensus(EthereumConsensusBuilder::default()) - .engine_validator(CustomEngineValidatorBuilder::default()) } fn add_ons(&self) -> Self::AddOns { - EthereumAddOns::default() + MyNodeAddOns::default() } } From 3bd695ee630f3979a2ea53f2308bdb1c9b2f18c1 Mon Sep 17 00:00:00 2001 From: Oliver Date: Sat, 19 Oct 2024 14:48:35 +0200 Subject: [PATCH 041/970] feat: update el requests for devnet 4 (#11865) Co-authored-by: Matthias Seitz --- Cargo.lock | 229 ++++++++++++------ Cargo.toml | 127 +++++----- bin/reth-bench/src/authenticated_transport.rs | 3 +- bin/reth-bench/src/valid_payload.rs | 8 - .../src/commands/debug_cmd/replay_engine.rs | 4 +- crates/blockchain-tree/src/blockchain_tree.rs | 1 - crates/chain-state/src/in_memory.rs | 3 +- crates/chain-state/src/test_utils.rs | 6 +- crates/chainspec/Cargo.toml | 1 - crates/chainspec/src/spec.rs | 10 +- crates/cli/commands/src/stage/drop.rs | 1 - crates/consensus/auto-seal/Cargo.toml | 1 + crates/consensus/auto-seal/src/lib.rs | 10 +- crates/consensus/beacon/src/engine/handle.rs | 11 +- crates/consensus/beacon/src/engine/message.rs | 5 + crates/consensus/beacon/src/engine/mod.rs | 23 +- .../consensus/beacon/src/engine/test_utils.rs | 2 +- crates/consensus/common/src/validation.rs | 27 +-- crates/consensus/consensus/Cargo.toml | 1 + crates/consensus/consensus/src/lib.rs | 27 ++- crates/e2e-test-utils/src/transaction.rs | 3 +- .../engine/invalid-block-hooks/src/witness.rs | 2 +- crates/engine/local/src/miner.rs | 2 + crates/engine/tree/src/tree/mod.rs | 26 +- crates/engine/util/src/engine_store.rs | 8 +- crates/engine/util/src/reorg.rs | 74 +++--- crates/engine/util/src/skip_new_payload.rs | 14 +- crates/ethereum/consensus/Cargo.toml | 1 + crates/ethereum/consensus/src/lib.rs | 8 +- crates/ethereum/consensus/src/validation.rs | 19 +- .../ethereum/engine-primitives/src/payload.rs | 19 +- crates/ethereum/evm/src/eip6110.rs | 91 
+++---- crates/ethereum/evm/src/execute.rs | 61 +++-- crates/ethereum/evm/src/strategy.rs | 52 ++-- crates/ethereum/payload/Cargo.toml | 3 +- crates/ethereum/payload/src/lib.rs | 17 +- crates/evm/execution-types/Cargo.toml | 7 +- crates/evm/execution-types/src/execute.rs | 6 +- .../execution-types/src/execution_outcome.rs | 86 ++----- crates/evm/src/execute.rs | 20 +- crates/evm/src/system_calls/eip7002.rs | 49 +--- crates/evm/src/system_calls/eip7251.rs | 53 +--- crates/evm/src/system_calls/mod.rs | 19 +- crates/evm/src/test_utils.rs | 6 +- crates/exex/exex/src/backfill/test_utils.rs | 4 +- crates/net/eth-wire-types/src/blocks.rs | 10 +- crates/net/eth-wire-types/src/header.rs | 6 +- crates/net/p2p/src/full_block.rs | 16 -- crates/optimism/evm/Cargo.toml | 3 +- crates/optimism/evm/src/execute.rs | 7 +- crates/optimism/evm/src/lib.rs | 1 - crates/optimism/evm/src/strategy.rs | 9 +- crates/optimism/payload/src/builder.rs | 4 +- crates/optimism/payload/src/payload.rs | 6 +- crates/optimism/primitives/src/bedrock.rs | 2 +- crates/optimism/rpc/src/eth/receipt.rs | 2 - crates/optimism/storage/src/lib.rs | 3 +- crates/payload/validator/Cargo.toml | 2 + crates/payload/validator/src/lib.rs | 13 +- crates/primitives-traits/Cargo.toml | 2 +- crates/primitives-traits/src/block/body.rs | 17 +- .../src/header/test_utils.rs | 2 +- crates/primitives-traits/src/lib.rs | 3 - crates/primitives-traits/src/mod.rs | 0 crates/primitives-traits/src/request.rs | 58 ----- crates/primitives/Cargo.toml | 2 - crates/primitives/src/alloy_compat.rs | 3 - crates/primitives/src/block.rs | 71 +----- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/proofs.rs | 11 +- crates/primitives/src/transaction/mod.rs | 40 +-- crates/revm/Cargo.toml | 3 + crates/revm/src/batch.rs | 7 +- crates/rpc/rpc-api/src/engine.rs | 7 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 42 +++- crates/rpc/rpc-engine-api/tests/it/payload.rs | 15 +- .../rpc-eth-api/src/helpers/pending_block.rs | 20 +- 
crates/rpc/rpc-eth-types/src/receipt.rs | 2 - crates/rpc/rpc-types-compat/src/block.rs | 4 +- .../rpc-types-compat/src/engine/payload.rs | 137 ++--------- crates/rpc/rpc/src/debug.rs | 2 +- crates/rpc/rpc/src/otterscan.rs | 1 - crates/rpc/rpc/src/txpool.rs | 2 +- crates/stages/stages/src/stages/bodies.rs | 14 -- .../codecs/src/alloy/authorization_list.rs | 4 +- crates/storage/codecs/src/alloy/header.rs | 10 +- crates/storage/codecs/src/alloy/mod.rs | 3 - crates/storage/codecs/src/alloy/request.rs | 40 --- crates/storage/db-api/src/models/mod.rs | 4 +- crates/storage/db/src/tables/mod.rs | 7 +- .../src/providers/blockchain_provider.rs | 55 +---- .../src/providers/database/metrics.rs | 4 - .../provider/src/providers/database/mod.rs | 14 +- .../src/providers/database/provider.rs | 150 +++--------- crates/storage/provider/src/providers/mod.rs | 12 +- .../src/providers/static_file/manager.rs | 15 +- .../storage/provider/src/test_utils/blocks.rs | 1 - .../storage/provider/src/test_utils/mock.rs | 16 +- .../storage/provider/src/test_utils/noop.rs | 12 +- crates/storage/storage-api/src/block.rs | 5 +- crates/storage/storage-api/src/lib.rs | 3 - crates/storage/storage-api/src/requests.rs | 14 -- docs/crates/db.md | 4 +- testing/ef-tests/src/models.rs | 4 +- testing/testing-utils/Cargo.toml | 4 +- testing/testing-utils/src/generators.rs | 47 +--- 106 files changed, 800 insertions(+), 1329 deletions(-) delete mode 100644 crates/primitives-traits/src/mod.rs delete mode 100644 crates/primitives-traits/src/request.rs delete mode 100644 crates/storage/codecs/src/alloy/request.rs delete mode 100644 crates/storage/storage-api/src/requests.rs diff --git a/Cargo.lock b/Cargo.lock index 98bfe1f8e00..82f42b07b16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e" +checksum = "42642aed67f938363d9c7543e5ca4163cfb4205d9ec15fe933dc4e865d2932dd" dependencies = [ "alloy-eips", "alloy-primitives", @@ -161,9 +161,9 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +checksum = "eeffd2590ce780ddfaa9d0ae340eb2b4e08627650c4676eef537cef0b4bf535d" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -176,9 +176,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85" +checksum = "9fbc52a30df46f9831ed74557dfad0d94b12420393662a8b9ef90e2d6c8cb4b0" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -188,6 +188,8 @@ dependencies = [ "arbitrary", "c-kzg", "derive_more 1.0.0", + "ethereum_ssz", + "ethereum_ssz_derive", "once_cell", "serde", "sha2 0.10.8", @@ -195,9 +197,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" +checksum = "0787d1688b9806290313cc335d416cc7ee39b11e3245f3d218544c62572d92ba" dependencies = [ "alloy-primitives", "alloy-serde", @@ -218,9 +220,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" +checksum = "d55a16a5f9ca498a217c060414bcd1c43e934235dc8058b31b87dcd69ff4f105" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -232,9 +234,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.4.2" +version = 
"0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd" +checksum = "3d236a8c3e1d5adc09b1b63c81815fc9b757d9a4ba9482cc899f9679b55dd437" dependencies = [ "alloy-consensus", "alloy-eips", @@ -253,9 +255,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd" +checksum = "cd15a0990fa8a56d85a42d6a689719aa4eebf5e2f1a5c5354658c0bfc52cac9a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -266,9 +268,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" +checksum = "2249f3c3ce446cf4063fe3d1aa7530823643c2706a1cc63045e0683ebc497a0a" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -315,9 +317,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6" +checksum = "316f522bb6f9ac3805132112197957013b570e20cfdad058e8339dae6030c849" dependencies = [ "alloy-chains", "alloy-consensus", @@ -341,21 +343,24 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", + "parking_lot 0.12.3", "pin-project", "reqwest", + "schnellru", "serde", "serde_json", "thiserror", "tokio", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-pubsub" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f32cef487122ae75c91eb50154c70801d71fabdb976fec6c49e0af5e6486ab15" +checksum = "222cd9b17b1c5ad48de51a88ffbdb17f17145170288f22662f80ac88739125e6" dependencies = [ 
"alloy-json-rpc", "alloy-primitives", @@ -394,9 +399,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" +checksum = "5b2ab59712c594c9624aaa69e38e4d38f180cb569f1fa46cdaf8c21fd50793e5" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -414,13 +419,14 @@ dependencies = [ "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-rpc-types" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06" +checksum = "ba21284319e12d053baa204d438db6c1577aedd94c1298e4becefdac1f9cec87" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -431,9 +437,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb520ed46cc5b7d8c014a73fdd77b6a310383a2a5c0a5ae3c9b8055881f062b7" +checksum = "416cc9f391d0b876c4c8da85f7131e771a88a55b917cc9a35e1724d9409e3b1c" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -443,9 +449,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07" +checksum = "ba40bea86c3102b9ed9b3be579e32e0b3e54e766248d873de5fc0437238c8df2" dependencies = [ "alloy-primitives", "alloy-serde", @@ -454,9 +460,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8dc5980fe30203d698627cddb5f0cedc57f900c8b5e1229c8b9448e37acb4a" +checksum = 
"b535781fe224c101c3d957b514cb9f438d165ff0280e5c0b2f87a0d9a2950593" dependencies = [ "alloy-eips", "alloy-primitives", @@ -468,9 +474,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59d8f8c5bfb160081a772f1f68eb9a37e8929c4ef74e5d01f5b78c2b645a5c5e" +checksum = "4303deacf4cbf12ed4431a5a1bbc3284f0defb4b8b72d9aa2b888656cc5ae657" dependencies = [ "alloy-primitives", "serde", @@ -478,9 +484,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0285c4c09f838ab830048b780d7f4a4f460f309aa1194bb049843309524c64c" +checksum = "44848fced3b42260b9cb61f22102246636dfe5a2d0132f8d10a617df3cb1a74b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -488,6 +494,8 @@ dependencies = [ "alloy-rlp", "alloy-serde", "derive_more 1.0.0", + "ethereum_ssz", + "ethereum_ssz_derive", "jsonrpsee-types", "jsonwebtoken", "rand 0.8.5", @@ -497,9 +505,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87" +checksum = "35894711990019fafff0012b82b9176cbb744516eb2a9bbe6b8e5cae522163ee" dependencies = [ "alloy-consensus", "alloy-eips", @@ -517,9 +525,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cec23ce56c869eec5f6b6fd6a8a92b5aa0cfaf8d7be3a96502e537554dc7430" +checksum = "cac6250cad380a005ecb5ffc6d2facf03df0e72628d819a63dd8c3ade7a766ff" dependencies = [ "alloy-eips", "alloy-primitives", @@ -530,9 +538,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.4.2" +version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "017cad3e5793c5613588c1f9732bcbad77e820ba7d0feaba3527749f856fdbc5" +checksum = "f568c5624881896d8a25e19acbdcbabadd8df339427ea2f10b2ee447d57c4509" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -544,9 +552,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b230e321c416be7f50530159392b4c41a45596d40d97e185575bcd0b545e521" +checksum = "d4a37d2e1ed9b7daf20ad0b3e0092613cbae46737e0e988b23caa556c7067ce6" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -556,9 +564,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600" +checksum = "2843c195675f06b29c09a4315cccdc233ab5bdc7c0a3775909f9f0cab5e9ae0f" dependencies = [ "alloy-primitives", "arbitrary", @@ -568,9 +576,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504" +checksum = "88b2a00d9803dfef99963303ffe41a7bf2221f3342f0a503d6741a9f4a18e5e5" dependencies = [ "alloy-primitives", "async-trait", @@ -582,9 +590,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f" +checksum = "5a2505d4f8c98dcae86152d58d549cb4bcf953f8352fca903410e0a0ef535571" dependencies = [ "alloy-consensus", "alloy-network", @@ -670,9 +678,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.4.2" +version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" +checksum = "9dc2c8f6b8c227ef0398f702d954c4ab572c2ead3c1ed4a5157aa1cbaf959747" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -685,13 +693,14 @@ dependencies = [ "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-transport-http" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" +checksum = "dd328e990d57f4c4e63899fb2c26877597d6503f8e0022a3d71b2d753ecbfc0c" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -704,9 +713,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b90cf9cde7f2fce617da52768ee28f522264b282d148384a4ca0ea85af04fa3a" +checksum = "89aea26aaf1d67904a7ff95ec4a24ddd5e7d419a6945f641b885962d7c2803e2" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -723,9 +732,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.4.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7153b88690de6a50bba81c11e1d706bc41dbb90126d607404d60b763f6a3947f" +checksum = "e222e950ecc4ea12fbfb524b9a2275cac2cd5f57c8ce25bcaf1bd3ff80dd8fc8" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -2665,6 +2674,47 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "ethereum_serde_utils" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70cbccfccf81d67bff0ab36e591fa536c8a935b078a7b0e58c1d00d418332fc9" +dependencies = [ + "alloy-primitives", + "hex", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "ethereum_ssz" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bfbba28f4f3f32d92c06a64f5bf6c4537b5d4e21f28c689bd2bbaecfea4e0d3e" +dependencies = [ + "alloy-primitives", + "derivative", + "ethereum_serde_utils", + "itertools 0.13.0", + "serde", + "serde_derive", + "smallvec", + "typenum", +] + +[[package]] +name = "ethereum_ssz_derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d37845ba7c16bf4be8be4b5786f03a2ba5f2fda0d7f9e7cb2282f69cff420d7" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "event-listener" version = "2.5.3" @@ -5157,9 +5207,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea7162170c6f3cad8f67f4dd7108e3f78349fd553da5b8bebff1e7ef8f38896" +checksum = "99d49163f952491820088dd0e66f3a35d63337c3066eceff0a931bf83a8e2101" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5175,9 +5225,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3d31dfbbd8dd898c7512f8ce7d30103980485416f668566100b0ed0994b958" +checksum = "8e46c2ab105f679f0cbfbc3fb762f3456d4b8556c841e667fc8f3c2226eb6c1e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5189,9 +5239,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d113b325527ba7da271a8793f1c14bdf7f035ce9e0611e668c36fc6812568c7f" +checksum = "75ff1ea317441b9eb6317b24d13f9088e3b14ef48b15bfb6a125ca404df036d8" dependencies = [ "alloy-consensus", "alloy-network", @@ -5203,9 +5253,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"310873e4fbfc41986716c4fb6000a8b49d025d932d2c261af58271c434b05288" +checksum = "6c439457b2a1791325603fc18a94cc175e0b4b1127f11ff8a45071f05d044dcb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5220,9 +5270,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "323c65880e2561aa87f74f8af260fd15b9cc930c448c88a60ae95af86c88c634" +checksum = "9c9556293835232b019ec9c6fd84e4265a3151111af60ea09b5b513e3dbed41c" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5237,16 +5287,18 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349e7b420f45d1a00216ec4c65fcf3f0057a841bc39732c405c85ae782b94121" +checksum = "8a42a5ac4e07ed226b6a2aeefaad9b2cc7ec160e372ba626a4214d681a355fc2" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", "alloy-serde", "derive_more 1.0.0", + "ethereum_ssz", "op-alloy-protocol", "serde", + "snap", ] [[package]] @@ -6310,6 +6362,7 @@ dependencies = [ name = "reth-auto-seal-consensus" version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "futures-util", @@ -6700,6 +6753,7 @@ dependencies = [ name = "reth-consensus" version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "auto_impl", "derive_more 1.0.0", @@ -7275,6 +7329,7 @@ name = "reth-ethereum-consensus" version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "reth-chainspec", "reth-consensus", @@ -7327,6 +7382,7 @@ name = "reth-ethereum-payload-builder" version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "reth-basic-payload-builder", "reth-chain-state", @@ -8343,6 +8399,8 @@ dependencies = [ name = "reth-payload-validator" version = "1.1.0" dependencies = [ + "alloy-eips", + "alloy-primitives", 
"alloy-rpc-types", "reth-chainspec", "reth-primitives", @@ -8524,6 +8582,7 @@ dependencies = [ name = "reth-revm" version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "reth-chainspec", "reth-consensus-common", @@ -9274,9 +9333,9 @@ dependencies = [ [[package]] name = "revm" -version = "14.0.3" +version = "16.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "641702b12847f9ed418d552f4fcabe536d867a2c980e96b6e7e25d7b992f929f" +checksum = "34e44692d5736cc44c697a372e507890f8797f06d1541c5f4b9bec594d90fd8a" dependencies = [ "auto_impl", "cfg-if", @@ -9289,9 +9348,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43c44af0bf801f48d25f7baf25cf72aff4c02d610f83b428175228162fef0246" +checksum = "a64e2246ad480167548724eb9c9c66945241b867c7d50894de3ca860c9823a45" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9308,9 +9367,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "10.0.3" +version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5e14002afae20b5bf1566f22316122f42f57517000e559c55b25bf7a49cba2" +checksum = "6f89940d17d5d077570de1977f52f69049595322e237cb6c754c3d47f668f023" dependencies = [ "revm-primitives", "serde", @@ -9318,9 +9377,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "11.0.3" +version = "13.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3198c06247e8d4ad0d1312591edf049b0de4ddffa9fecb625c318fd67db8639b" +checksum = "d8f816aaea3245cbdbe7fdd84955df33597f9322c7912c3e3ba7bc855e03211f" dependencies = [ "aurora-engine-modexp", "blst", @@ -9338,9 +9397,9 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "10.0.0" +version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6f1525851a03aff9a9d6a1d018b414d76252d6802ab54695b27093ecd7e7a101" +checksum = "532411bbde45a46707c1d434dcdc29866cf261c1b748fb01b303ce3b4310b361" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -11355,6 +11414,20 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmtimer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7ed9d8b15c7fb594d72bfb4b5a276f3d2029333cd93a932f376f5937f6f80ee" +dependencies = [ + "futures", + "js-sys", + "parking_lot 0.12.3", + "pin-utils", + "slab", + "wasm-bindgen", +] + [[package]] name = "web-sys" version = "0.3.72" diff --git a/Cargo.toml b/Cargo.toml index 54111096902..6c66e501ef4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -410,9 +410,9 @@ reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } # revm -revm = { version = "14.0.3", features = ["std"], default-features = false } -revm-inspectors = "0.8.1" -revm-primitives = { version = "10.0.0", features = [ +revm = { version = "16.0.0", features = ["std"], default-features = false } +revm-inspectors = "0.9.0" +revm-primitives = { version = "12.0.0", features = [ "std", ], default-features = false } @@ -424,45 +424,45 @@ alloy-rlp = "0.3.4" alloy-sol-types = "0.8.0" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.4.2", default-features = false } -alloy-eips = { version = "0.4.2", default-features = false } -alloy-genesis = { version = "0.4.2", default-features = false } -alloy-json-rpc = { version = "0.4.2", default-features = false } -alloy-network = { version = "0.4.2", default-features = false } -alloy-network-primitives = { version = "0.4.2", default-features = false } -alloy-node-bindings = { version = "0.4.2", default-features = false } -alloy-provider = { version = "0.4.2", features = [ +alloy-consensus = { version = "0.5.2", default-features = false } +alloy-eips = { version = "0.5.2", default-features = false } +alloy-genesis = { 
version = "0.5.2", default-features = false } +alloy-json-rpc = { version = "0.5.2", default-features = false } +alloy-network = { version = "0.5.2", default-features = false } +alloy-network-primitives = { version = "0.5.2", default-features = false } +alloy-node-bindings = { version = "0.5.2", default-features = false } +alloy-provider = { version = "0.5.2", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.4.2", default-features = false } -alloy-rpc-client = { version = "0.4.2", default-features = false } -alloy-rpc-types = { version = "0.4.2", features = [ +alloy-pubsub = { version = "0.5.2", default-features = false } +alloy-rpc-client = { version = "0.5.2", default-features = false } +alloy-rpc-types = { version = "0.5.2", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.4.2", default-features = false } -alloy-rpc-types-anvil = { version = "0.4.2", default-features = false } -alloy-rpc-types-beacon = { version = "0.4.2", default-features = false } -alloy-rpc-types-debug = { version = "0.4.2", default-features = false } -alloy-rpc-types-engine = { version = "0.4.2", default-features = false } -alloy-rpc-types-eth = { version = "0.4.2", default-features = false } -alloy-rpc-types-mev = { version = "0.4.2", default-features = false } -alloy-rpc-types-trace = { version = "0.4.2", default-features = false } -alloy-rpc-types-txpool = { version = "0.4.2", default-features = false } -alloy-serde = { version = "0.4.2", default-features = false } -alloy-signer = { version = "0.4.2", default-features = false } -alloy-signer-local = { version = "0.4.2", default-features = false } -alloy-transport = { version = "0.4.2" } -alloy-transport-http = { version = "0.4.2", features = [ +alloy-rpc-types-admin = { version = "0.5.2", default-features = false } +alloy-rpc-types-anvil = { version = "0.5.2", default-features = false } +alloy-rpc-types-beacon = { version = "0.5.2", default-features = false } 
+alloy-rpc-types-debug = { version = "0.5.2", default-features = false } +alloy-rpc-types-engine = { version = "0.5.2", default-features = false } +alloy-rpc-types-eth = { version = "0.5.2", default-features = false } +alloy-rpc-types-mev = { version = "0.5.2", default-features = false } +alloy-rpc-types-trace = { version = "0.5.2", default-features = false } +alloy-rpc-types-txpool = { version = "0.5.2", default-features = false } +alloy-serde = { version = "0.5.2", default-features = false } +alloy-signer = { version = "0.5.2", default-features = false } +alloy-signer-local = { version = "0.5.2", default-features = false } +alloy-transport = { version = "0.5.2" } +alloy-transport-http = { version = "0.5.2", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.4.2", default-features = false } -alloy-transport-ws = { version = "0.4.2", default-features = false } +alloy-transport-ipc = { version = "0.5.2", default-features = false } +alloy-transport-ws = { version = "0.5.2", default-features = false } # op -op-alloy-rpc-types = "0.4" -op-alloy-rpc-types-engine = "0.4" -op-alloy-network = "0.4" -op-alloy-consensus = "0.4" +op-alloy-rpc-types = "0.5" +op-alloy-rpc-types-engine = "0.5" +op-alloy-network = "0.5" +op-alloy-consensus = "0.5" # misc aquamarine = "0.5" @@ -593,30 +593,35 @@ tikv-jemalloc-ctl = "0.6" tikv-jemallocator = "0.6" tracy-client = "0.17.3" -[patch.crates-io] -#alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-provider = { git = 
"https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-serde = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} +#[patch.crates-io] +#alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-json-rpc = { git = 
"https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-serde = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } + +#op-alloy-rpc-types = { git = 
"https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } +#op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } +#op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } +#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } diff --git a/bin/reth-bench/src/authenticated_transport.rs b/bin/reth-bench/src/authenticated_transport.rs index c946d244de9..72c4fd29889 100644 --- a/bin/reth-bench/src/authenticated_transport.rs +++ b/bin/reth-bench/src/authenticated_transport.rs @@ -84,7 +84,8 @@ impl InnerTransport { let (auth, claims) = build_auth(jwt).map_err(|e| AuthenticatedTransportError::InvalidJwt(e.to_string()))?; - let inner = WsConnect { url: url.to_string(), auth: Some(auth) } + let inner = WsConnect::new(url.clone()) + .with_auth(auth) .into_service() .await .map(Self::Ws) diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs index 6353aea7123..b00f4ddcd64 100644 --- a/bin/reth-bench/src/valid_payload.rs +++ b/bin/reth-bench/src/valid_payload.rs @@ -215,14 +215,6 @@ pub(crate) async fn call_new_payload>( versioned_hashes: Vec, ) -> TransportResult { match payload { - ExecutionPayload::V4(_payload) => { - todo!("V4 payloads not supported yet"); - // auth_provider - // .new_payload_v4_wait(payload, versioned_hashes, parent_beacon_block_root, ...) 
- // .await?; - // - // Ok(EngineApiMessageVersion::V4) - } ExecutionPayload::V3(payload) => { // We expect the caller let parent_beacon_block_root = parent_beacon_block_root diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index cbffa1f0e07..de497cbe007 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -171,7 +171,9 @@ impl> Command { debug!(target: "reth::cli", ?response, "Received for forkchoice updated"); } StoredEngineApiMessage::NewPayload { payload, cancun_fields } => { - let response = beacon_engine_handle.new_payload(payload, cancun_fields).await?; + // todo: prague (last arg) + let response = + beacon_engine_handle.new_payload(payload, cancun_fields, None).await?; debug!(target: "reth::cli", ?response, "Received for new payload"); } }; diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index db43dffcd36..1e2ed2a4a2e 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1635,7 +1635,6 @@ mod tests { transactions: body.clone().into_iter().map(|tx| tx.into_signed()).collect(), ommers: Vec::new(), withdrawals: Some(Withdrawals::default()), - requests: None, }, }, body.iter().map(|tx| tx.signer()).collect(), diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index f157da5ff45..be33e1fd79a 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -865,10 +865,11 @@ impl NewCanonicalChain { mod tests { use super::*; use crate::test_utils::TestBlockBuilder; + use alloy_eips::eip7685::Requests; use alloy_primitives::{map::HashSet, BlockNumber, Bytes, StorageKey, StorageValue}; use rand::Rng; use reth_errors::ProviderResult; - use reth_primitives::{Account, Bytecode, Receipt, Requests}; + use reth_primitives::{Account, Bytecode, Receipt}; use 
reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index a820bb5cf01..f1648ab6bff 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -3,6 +3,7 @@ use crate::{ CanonStateSubscriptions, }; use alloy_consensus::{Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; +use alloy_eips::eip7685::Requests; use alloy_primitives::{Address, BlockNumber, Sealable, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; @@ -12,8 +13,8 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ constants::EIP1559_INITIAL_BASE_FEE, proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, Header, Receipt, Receipts, Requests, SealedBlock, SealedBlockWithSenders, - SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, + BlockBody, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, + Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; @@ -169,7 +170,6 @@ impl TestBlockBuilder { transactions: transactions.into_iter().map(|tx| tx.into_signed()).collect(), ommers: Vec::new(), withdrawals: Some(vec![].into()), - requests: None, }, }; diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index 2864427c2af..87df28322a6 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -22,7 +22,6 @@ alloy-chains = { workspace = true, features = ["serde", "rlp"] } alloy-eips = { workspace = true, features = ["serde"] } alloy-genesis.workspace = true alloy-primitives = { workspace = true, features = ["rand", "rlp"] } -alloy-trie.workspace = true alloy-consensus.workspace = true 
# misc diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 59e1a5ce1e1..a7f45727dd8 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -3,9 +3,9 @@ pub use alloy_eips::eip1559::BaseFeeParams; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; use alloy_consensus::constants::EMPTY_WITHDRAWALS; +use alloy_eips::eip7685::EMPTY_REQUESTS_HASH; use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; -use alloy_trie::EMPTY_ROOT_HASH; use derive_more::From; use alloy_consensus::constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH}; @@ -284,8 +284,9 @@ impl ChainSpec { }; // If Prague is activated at genesis we set requests root to an empty trie root. - let requests_root = - self.is_prague_active_at_timestamp(self.genesis.timestamp).then_some(EMPTY_ROOT_HASH); + let requests_hash = self + .is_prague_active_at_timestamp(self.genesis.timestamp) + .then_some(EMPTY_REQUESTS_HASH); Header { gas_limit: self.genesis.gas_limit, @@ -301,7 +302,7 @@ impl ChainSpec { parent_beacon_block_root, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), - requests_root, + requests_hash, ..Default::default() } } @@ -940,6 +941,7 @@ mod tests { use alloy_chains::Chain; use alloy_genesis::{ChainConfig, GenesisAccount}; use alloy_primitives::{b256, hex}; + use alloy_trie::EMPTY_ROOT_HASH; use reth_ethereum_forks::{ForkCondition, ForkHash, ForkId, Head}; use reth_trie_common::TrieAccount; diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 9e0396404b3..3a277cabd18 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -81,7 +81,6 @@ impl> Command tx.clear::()?; tx.clear::()?; tx.clear::()?; - tx.clear::()?; reset_stage_checkpoint(tx, StageId::Bodies)?; insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; diff 
--git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index b4b28123033..24985871141 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -31,6 +31,7 @@ reth-tokio-util.workspace = true reth-trie.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true revm-primitives.workspace = true alloy-rpc-types-engine.workspace = true diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 261227f1074..16299e19ba4 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -15,6 +15,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_beacon_consensus::BeaconEngineMessage; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -25,7 +26,7 @@ use reth_execution_errors::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - proofs, Block, BlockBody, BlockHashOrNumber, BlockWithSenders, Header, Requests, SealedBlock, + proofs, Block, BlockBody, BlockHashOrNumber, BlockWithSenders, Header, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, }; use reth_provider::{BlockReaderIdExt, StateProviderFactory, StateRootProvider}; @@ -301,7 +302,7 @@ impl StorageInner { timestamp, base_fee_per_gas, blob_gas_used, - requests_root: requests.map(|r| proofs::calculate_requests_root(&r.0)), + requests_hash: requests.map(|r| r.requests_hash()), ..Default::default() }; @@ -366,7 +367,6 @@ impl StorageInner { transactions, ommers: ommers.clone(), withdrawals: withdrawals.clone(), - requests: requests.clone(), }, } .with_recovered_senders() @@ -390,7 +390,7 @@ impl StorageInner { // root here let Block { mut header, body, .. 
} = block.block; - let body = BlockBody { transactions: body.transactions, ommers, withdrawals, requests }; + let body = BlockBody { transactions: body.transactions, ommers, withdrawals }; trace!(target: "consensus::auto", ?execution_outcome, ?header, ?body, "executed block, calculating state root and completing header"); @@ -682,7 +682,7 @@ mod tests { timestamp, base_fee_per_gas: None, blob_gas_used: Some(0), - requests_root: None, + requests_hash: None, excess_blob_gas: Some(0), ..Default::default() } diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 65b7c38df91..1f444590164 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -4,6 +4,7 @@ use crate::{ engine::message::OnForkChoiceUpdated, BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, }; +use alloy_primitives::Bytes; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; @@ -47,9 +48,17 @@ where &self, payload: ExecutionPayload, cancun_fields: Option, + execution_requests: Option>, ) -> Result { let (tx, rx) = oneshot::channel(); - let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }); + // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary + // workaround. + let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { + payload, + cancun_fields, + execution_requests, + tx, + }); rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? 
} diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index fdaad0cc4b0..45d0c57f45e 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -1,4 +1,5 @@ use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; +use alloy_primitives::Bytes; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, @@ -146,6 +147,10 @@ pub enum BeaconEngineMessage { payload: ExecutionPayload, /// The cancun-related newPayload fields, if any. cancun_fields: Option, + // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary + // workaround. + /// The pectra EIP-7685 execution requests. + execution_requests: Option>, /// The sender for returning payload status result. tx: oneshot::Sender>, }, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index ccea982bfbd..edd3d6db323 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::{BlockNumber, Bytes, B256}; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, @@ -1085,6 +1085,9 @@ where &mut self, payload: ExecutionPayload, cancun_fields: Option, + // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary + // workaround. + execution_requests: Option>, ) -> Result, BeaconOnNewPayloadError> { self.metrics.new_payload_messages.increment(1); @@ -1114,10 +1117,11 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. 
let parent_hash = payload.parent_hash(); - let block = match self - .payload_validator - .ensure_well_formed_payload(payload, cancun_fields.into()) - { + let block = match self.payload_validator.ensure_well_formed_payload( + payload, + cancun_fields.into(), + execution_requests, + ) { Ok(block) => block, Err(error) => { error!(target: "consensus::engine", %error, "Invalid payload"); @@ -1862,8 +1866,13 @@ where BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { this.on_forkchoice_updated(state, payload_attrs, tx); } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - match this.on_new_payload(payload, cancun_fields) { + BeaconEngineMessage::NewPayload { + payload, + cancun_fields, + execution_requests, + tx, + } => { + match this.on_new_payload(payload, cancun_fields, execution_requests) { Ok(Either::Right(block)) => { this.set_blockchain_tree_action( BlockchainTreeAction::InsertNewPayload { block, tx }, diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 633ae03d8ad..7e9e1ec6b26 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -70,7 +70,7 @@ impl TestEnv { payload: T, cancun_fields: Option, ) -> Result { - self.engine_handle.new_payload(payload.into(), cancun_fields).await + self.engine_handle.new_payload(payload.into(), cancun_fields, None).await } /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 4c2e6b192e7..dabb8c3c34d 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -75,24 +75,6 @@ pub fn validate_cancun_gas(block: &SealedBlock) -> Result<(), ConsensusError> { Ok(()) } -/// Validate that requests root is present if Prague is active. 
-/// -/// See [EIP-7685]: General purpose execution layer requests -/// -/// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685 -#[inline] -pub fn validate_prague_request(block: &SealedBlock) -> Result<(), ConsensusError> { - let requests_root = - block.body.calculate_requests_root().ok_or(ConsensusError::BodyRequestsMissing)?; - let header_requests_root = block.requests_root.ok_or(ConsensusError::RequestsRootMissing)?; - if requests_root != *header_requests_root { - return Err(ConsensusError::BodyRequestsRootDiff( - GotExpected { got: requests_root, expected: header_requests_root }.into(), - )); - } - Ok(()) -} - /// Validate a block without regard for state: /// /// - Compares the ommer hash in the block header to the block body @@ -125,10 +107,6 @@ pub fn validate_block_pre_execution( validate_cancun_gas(block)?; } - if chain_spec.is_prague_active_at_timestamp(block.timestamp) { - validate_prague_request(block)?; - } - Ok(()) } @@ -458,7 +436,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }; // size: 0x9b5 @@ -478,7 +456,7 @@ mod tests { ( SealedBlock { header: SealedHeader::new(header, seal), - body: BlockBody { transactions, ommers, withdrawals: None, requests: None }, + body: BlockBody { transactions, ommers, withdrawals: None }, }, parent, ) @@ -550,7 +528,6 @@ mod tests { transactions: vec![transaction], ommers: vec![], withdrawals: Some(Withdrawals::default()), - requests: None, }; let block = SealedBlock::new(header, body); diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 660b43865ea..1736caab543 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -15,6 +15,7 @@ workspace = true reth-primitives.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true # misc diff --git a/crates/consensus/consensus/src/lib.rs 
b/crates/consensus/consensus/src/lib.rs index 91b93c8a75e..4bf5da3b152 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -12,10 +12,11 @@ extern crate alloc; use alloc::{fmt::Debug, vec::Vec}; +use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_primitives::{ constants::MINIMUM_GAS_LIMIT, BlockWithSenders, GotExpected, GotExpectedBoxed, Header, - InvalidTransactionError, Receipt, Request, SealedBlock, SealedHeader, + InvalidTransactionError, Receipt, SealedBlock, SealedHeader, }; /// A consensus implementation that does nothing. @@ -31,12 +32,12 @@ pub struct PostExecutionInput<'a> { /// Receipts of the block. pub receipts: &'a [Receipt], /// EIP-7685 requests of the block. - pub requests: &'a [Request], + pub requests: &'a Requests, } impl<'a> PostExecutionInput<'a> { /// Creates a new instance of `PostExecutionInput`. - pub const fn new(receipts: &'a [Receipt], requests: &'a [Request]) -> Self { + pub const fn new(receipts: &'a [Receipt], requests: &'a Requests) -> Self { Self { receipts, requests } } } @@ -170,10 +171,10 @@ pub enum ConsensusError { #[display("mismatched block withdrawals root: {_0}")] BodyWithdrawalsRootDiff(GotExpectedBoxed), - /// Error when the requests root in the block is different from the expected requests - /// root. - #[display("mismatched block requests root: {_0}")] - BodyRequestsRootDiff(GotExpectedBoxed), + /// Error when the requests hash in the block is different from the expected requests + /// hash. + #[display("mismatched block requests hash: {_0}")] + BodyRequestsHashDiff(GotExpectedBoxed), /// Error when a block with a specific hash and number is already known. #[display("block with [hash={hash}, number={number}] is already known")] @@ -248,17 +249,17 @@ pub enum ConsensusError { #[display("missing withdrawals root")] WithdrawalsRootMissing, - /// Error when the requests root is missing. 
- #[display("missing requests root")] - RequestsRootMissing, + /// Error when the requests hash is missing. + #[display("missing requests hash")] + RequestsHashMissing, /// Error when an unexpected withdrawals root is encountered. #[display("unexpected withdrawals root")] WithdrawalsRootUnexpected, - /// Error when an unexpected requests root is encountered. - #[display("unexpected requests root")] - RequestsRootUnexpected, + /// Error when an unexpected requests hash is encountered. + #[display("unexpected requests hash")] + RequestsHashUnexpected, /// Error when withdrawals are missing. #[display("missing withdrawals")] diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index 04960304442..58a25dc1257 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -56,8 +56,7 @@ impl TransactionTestContext { delegate_to: Address, wallet: PrivateKeySigner, ) -> TxEnvelope { - let authorization = - Authorization { chain_id: U256::from(chain_id), address: delegate_to, nonce: 0 }; + let authorization = Authorization { chain_id, address: delegate_to, nonce: 0 }; let signature = wallet .sign_hash_sync(&authorization.signature_hash()) .expect("could not sign authorization"); diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index bb227e30419..ab73a81904d 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -162,7 +162,7 @@ where let response = ExecutionWitness { state: HashMap::from_iter(state), codes: Default::default(), - keys: Some(state_preimages), + keys: state_preimages, }; let re_executed_witness_path = self.save_file( format!("{}_{}.witness.re_executed.json", block.number, block.hash()), diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 8bcb7083aab..552cbd04776 100644 --- a/crates/engine/local/src/miner.rs +++ 
b/crates/engine/local/src/miner.rs @@ -222,6 +222,8 @@ where self.to_engine.send(BeaconEngineMessage::NewPayload { payload: block_to_payload(payload.block().clone()), cancun_fields, + // todo: prague + execution_requests: None, tx, })?; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 3eadbbd522d..c63c8fbe291 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -7,7 +7,7 @@ use crate::{ use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, - BlockNumber, B256, U256, + BlockNumber, Bytes, B256, U256, }; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, @@ -721,6 +721,7 @@ where &mut self, payload: ExecutionPayload, cancun_fields: Option, + execution_requests: Option>, ) -> Result, InsertBlockFatalError> { trace!(target: "engine::tree", "invoked new payload"); self.metrics.engine.new_payload_messages.increment(1); @@ -751,10 +752,11 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. 
let parent_hash = payload.parent_hash(); - let block = match self - .payload_validator - .ensure_well_formed_payload(payload, cancun_fields.into()) - { + let block = match self.payload_validator.ensure_well_formed_payload( + payload, + cancun_fields.into(), + execution_requests, + ) { Ok(block) => block, Err(error) => { error!(target: "engine::tree", %error, "Invalid payload"); @@ -1236,8 +1238,14 @@ where error!(target: "engine::tree", "Failed to send event: {err:?}"); } } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - let output = self.on_new_payload(payload, cancun_fields); + BeaconEngineMessage::NewPayload { + payload, + cancun_fields, + execution_requests, + tx, + } => { + let output = + self.on_new_payload(payload, cancun_fields, execution_requests); if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { reth_beacon_consensus::BeaconOnNewPayloadError::Internal( Box::new(e), @@ -2852,6 +2860,7 @@ mod tests { parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), versioned_hashes: vec![], }), + None, ) .unwrap(); } @@ -3114,7 +3123,7 @@ mod tests { let mut test_harness = TestHarness::new(HOLESKY.clone()); - let outcome = test_harness.tree.on_new_payload(payload.into(), None).unwrap(); + let outcome = test_harness.tree.on_new_payload(payload.into(), None, None).unwrap(); assert!(outcome.outcome.is_syncing()); // ensure block is buffered @@ -3159,6 +3168,7 @@ mod tests { BeaconEngineMessage::NewPayload { payload: payload.clone().into(), cancun_fields: None, + execution_requests: None, tx, } .into(), diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs index 1f344519961..de193bf3bbe 100644 --- a/crates/engine/util/src/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -73,7 +73,13 @@ impl EngineMessageStore { })?, )?; } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx: _tx } => { + // todo(onbjerg): execution requests + BeaconEngineMessage::NewPayload 
{ + payload, + cancun_fields, + execution_requests: _, + tx: _tx, + } => { let filename = format!("{}-new_payload-{}.json", timestamp, payload.block_hash()); fs::write( self.path.join(filename), diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index abfa23a57b3..90b5c90aa95 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -1,7 +1,7 @@ //! Stream wrapper that simulates reorgs. use alloy_consensus::Transaction; -use alloy_primitives::U256; +use alloy_primitives::{Bytes, U256}; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, }; @@ -147,7 +147,12 @@ where let next = ready!(this.stream.poll_next_unpin(cx)); let item = match (next, &this.last_forkchoice_state) { ( - Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }), + Some(BeaconEngineMessage::NewPayload { + payload, + cancun_fields, + execution_requests, + tx, + }), Some(last_forkchoice_state), ) if this.forkchoice_states_forwarded > this.frequency && // Only enter reorg state if new payload attaches to current head. @@ -162,26 +167,29 @@ where // forkchoice state. We will rely on CL to reorg us back to canonical chain. // TODO: This is an expensive blocking operation, ideally it's spawned as a task // so that the stream could yield the control back. 
- let (reorg_payload, reorg_cancun_fields) = match create_reorg_head( - this.provider, - this.evm_config, - this.payload_validator, - *this.depth, - payload.clone(), - cancun_fields.clone(), - ) { - Ok(result) => result, - Err(error) => { - error!(target: "engine::stream::reorg", %error, "Error attempting to create reorg head"); - // Forward the payload and attempt to create reorg on top of - // the next one - return Poll::Ready(Some(BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - tx, - })) - } - }; + let (reorg_payload, reorg_cancun_fields, reorg_execution_requests) = + match create_reorg_head( + this.provider, + this.evm_config, + this.payload_validator, + *this.depth, + payload.clone(), + cancun_fields.clone(), + execution_requests.clone(), + ) { + Ok(result) => result, + Err(error) => { + error!(target: "engine::stream::reorg", %error, "Error attempting to create reorg head"); + // Forward the payload and attempt to create reorg on top of + // the next one + return Poll::Ready(Some(BeaconEngineMessage::NewPayload { + payload, + cancun_fields, + execution_requests, + tx, + })) + } + }; let reorg_forkchoice_state = ForkchoiceState { finalized_block_hash: last_forkchoice_state.finalized_block_hash, safe_block_hash: last_forkchoice_state.safe_block_hash, @@ -197,11 +205,17 @@ where let queue = VecDeque::from([ // Current payload - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }, + BeaconEngineMessage::NewPayload { + payload, + cancun_fields, + execution_requests, + tx, + }, // Reorg payload BeaconEngineMessage::NewPayload { payload: reorg_payload, cancun_fields: reorg_cancun_fields, + execution_requests: reorg_execution_requests, tx: reorg_payload_tx, }, // Reorg forkchoice state @@ -236,7 +250,8 @@ fn create_reorg_head( mut depth: usize, next_payload: ExecutionPayload, next_cancun_fields: Option, -) -> RethResult<(ExecutionPayload, Option)> + next_execution_requests: Option>, +) -> RethResult<(ExecutionPayload, Option, Option>)> 
where Provider: BlockReader + StateProviderFactory, Evm: ConfigureEvm
, @@ -246,7 +261,11 @@ where // Ensure next payload is valid. let next_block = payload_validator - .ensure_well_formed_payload(next_payload, next_cancun_fields.into()) + .ensure_well_formed_payload( + next_payload, + next_cancun_fields.into(), + next_execution_requests, + ) .map_err(RethError::msg)?; // Fetch reorg target block depending on its depth and its parent. @@ -401,7 +420,7 @@ where transactions_root: proofs::calculate_transaction_root(&transactions), receipts_root: outcome.receipts_root_slow(reorg_target.header.number).unwrap(), logs_bloom: outcome.block_logs_bloom(reorg_target.header.number).unwrap(), - requests_root: None, // TODO(prague) + requests_hash: None, // TODO(prague) gas_used: cumulative_gas_used, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), @@ -411,7 +430,6 @@ where transactions, ommers: reorg_target.body.ommers, withdrawals: reorg_target.body.withdrawals, - requests: None, // TODO(prague) }, } .seal_slow(); @@ -422,5 +440,7 @@ where .header .parent_beacon_block_root .map(|root| CancunPayloadFields { parent_beacon_block_root: root, versioned_hashes }), + // todo(prague) + None, )) } diff --git a/crates/engine/util/src/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs index d2450711ecf..47c48282eef 100644 --- a/crates/engine/util/src/skip_new_payload.rs +++ b/crates/engine/util/src/skip_new_payload.rs @@ -41,7 +41,12 @@ where loop { let next = ready!(this.stream.poll_next_unpin(cx)); let item = match next { - Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) => { + Some(BeaconEngineMessage::NewPayload { + payload, + cancun_fields, + execution_requests, + tx, + }) => { if this.skipped < this.threshold { *this.skipped += 1; tracing::warn!( @@ -56,7 +61,12 @@ where continue } *this.skipped = 0; - Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) + Some(BeaconEngineMessage::NewPayload { + payload, + cancun_fields, + execution_requests, + tx, + 
}) } next => next, }; diff --git a/crates/ethereum/consensus/Cargo.toml b/crates/ethereum/consensus/Cargo.toml index af934d3e2b6..bace4195ca6 100644 --- a/crates/ethereum/consensus/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -18,6 +18,7 @@ reth-primitives.workspace = true reth-consensus.workspace = true # alloy +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 8f2a8a72042..dd286584a59 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -121,11 +121,11 @@ impl Consensu } if self.chain_spec.is_prague_active_at_timestamp(header.timestamp) { - if header.requests_root.is_none() { - return Err(ConsensusError::RequestsRootMissing) + if header.requests_hash.is_none() { + return Err(ConsensusError::RequestsHashMissing) } - } else if header.requests_root.is_some() { - return Err(ConsensusError::RequestsRootUnexpected) + } else if header.requests_hash.is_some() { + return Err(ConsensusError::RequestsHashUnexpected) } Ok(()) diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index e510a91ab96..f990ecc57d8 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,7 +1,8 @@ +use alloy_eips::eip7685::Requests; use alloy_primitives::{Bloom, B256}; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; -use reth_primitives::{gas_spent_by_transactions, BlockWithSenders, GotExpected, Receipt, Request}; +use reth_primitives::{gas_spent_by_transactions, BlockWithSenders, GotExpected, Receipt}; /// Validate a block with regard to execution results: /// @@ -11,7 +12,7 @@ pub fn validate_block_post_execution( block: &BlockWithSenders, chain_spec: &ChainSpec, receipts: &[Receipt], - requests: &[Request], + requests: &Requests, ) -> Result<(), ConsensusError> { // Check 
if gas used matches the value set in header. let cumulative_gas_used = @@ -36,15 +37,15 @@ pub fn validate_block_post_execution( } } - // Validate that the header requests root matches the calculated requests root + // Validate that the header requests hash matches the calculated requests hash if chain_spec.is_prague_active_at_timestamp(block.timestamp) { - let Some(header_requests_root) = block.header.requests_root else { - return Err(ConsensusError::RequestsRootMissing) + let Some(header_requests_hash) = block.header.requests_hash else { + return Err(ConsensusError::RequestsHashMissing) }; - let requests_root = reth_primitives::proofs::calculate_requests_root(requests); - if requests_root != header_requests_root { - return Err(ConsensusError::BodyRequestsRootDiff( - GotExpected::new(requests_root, header_requests_root).into(), + let requests_hash = requests.requests_hash(); + if requests_hash != header_requests_hash { + return Err(ConsensusError::BodyRequestsHashDiff( + GotExpected::new(requests_hash, header_requests_hash).into(), )) } } diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index ae370fdb9d7..1ad9c5450ee 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -1,6 +1,6 @@ //! Contains types required for building a payload. 
-use alloy_eips::eip4844::BlobTransactionSidecar; +use alloy_eips::{eip4844::BlobTransactionSidecar, eip7685::Requests}; use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ @@ -11,8 +11,7 @@ use reth_chain_state::ExecutedBlock; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{SealedBlock, Withdrawals}; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, - convert_block_to_payload_field_v2, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use std::convert::Infallible; @@ -142,10 +141,17 @@ impl From for ExecutionPayloadEnvelopeV3 { impl From for ExecutionPayloadEnvelopeV4 { fn from(value: EthBuiltPayload) -> Self { - let EthBuiltPayload { block, fees, sidecars, .. } = value; - + let EthBuiltPayload { block, fees, sidecars, executed_block, .. } = value; + + // if we have an executed block, we pop off the first set of requests from the execution + // outcome. the assumption here is that there will always only be one block in the execution + // outcome. + let execution_requests = executed_block + .and_then(|block| block.execution_outcome().requests.first().cloned()) + .map(Requests::take) + .unwrap_or_default(); Self { - execution_payload: block_to_payload_v4(block), + execution_payload: block_to_payload_v3(block), block_value: fees, // From the engine API spec: // @@ -157,6 +163,7 @@ impl From for ExecutionPayloadEnvelopeV4 { // should_override_builder: false, blobs_bundle: sidecars.into_iter().map(Into::into).collect::>().into(), + execution_requests, } } } diff --git a/crates/ethereum/evm/src/eip6110.rs b/crates/ethereum/evm/src/eip6110.rs index e78becd960c..4cf1c6ae9da 100644 --- a/crates/ethereum/evm/src/eip6110.rs +++ b/crates/ethereum/evm/src/eip6110.rs @@ -1,11 +1,11 @@ //! 
EIP-6110 deposit requests parsing use alloc::{string::ToString, vec::Vec}; -use alloy_eips::eip6110::{DepositRequest, MAINNET_DEPOSIT_CONTRACT_ADDRESS}; -use alloy_primitives::Log; +use alloy_eips::eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS; +use alloy_primitives::{Bytes, Log}; use alloy_sol_types::{sol, SolEvent}; use reth_chainspec::ChainSpec; use reth_evm::execute::BlockValidationError; -use reth_primitives::{Receipt, Request}; +use reth_primitives::Receipt; sol! { #[allow(missing_docs)] @@ -20,73 +20,57 @@ sol! { /// Parse [deposit contract](https://etherscan.io/address/0x00000000219ab540356cbb839cbe05303d7705fa) /// (address is from the passed [`ChainSpec`]) deposits from receipts, and return them as a -/// [vector](Vec) of (requests)[Request]. +/// [vector](Vec) of (requests)[`alloy_eips::eip7685::Requests`]. pub fn parse_deposits_from_receipts<'a, I>( chain_spec: &ChainSpec, receipts: I, -) -> Result, BlockValidationError> +) -> Result where I: IntoIterator, { + let mut requests = Vec::new(); let deposit_contract_address = chain_spec .deposit_contract .as_ref() .map_or(MAINNET_DEPOSIT_CONTRACT_ADDRESS, |contract| contract.address); - receipts + let logs: Vec<_> = receipts .into_iter() - .flat_map(|receipt| receipt.logs.iter()) - // No need to filter for topic because there's only one event and that's the Deposit event - // in the deposit contract. + .flat_map(|receipt| &receipt.logs) + // No need to filter for topic because there's only one event and that's the Deposit + // event in the deposit contract. 
.filter(|log| log.address == deposit_contract_address) - .map(|log| { - let decoded_log = DepositEvent::decode_log(log, false)?; - let deposit = parse_deposit_from_log(&decoded_log); - Ok(Request::DepositRequest(deposit)) - }) - .collect::, _>>() - .map_err(|err: alloy_sol_types::Error| { - BlockValidationError::DepositRequestDecode(err.to_string()) - }) + .collect(); + + for log in &logs { + let decoded_log = + DepositEvent::decode_log(log, false).map_err(|err: alloy_sol_types::Error| { + BlockValidationError::DepositRequestDecode(err.to_string()) + })?; + requests.extend(parse_deposit_from_log(&decoded_log).as_ref()) + } + + Ok(requests.into()) } -fn parse_deposit_from_log(log: &Log) -> DepositRequest { +fn parse_deposit_from_log(log: &Log) -> Bytes { // SAFETY: These `expect` https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/solidity_deposit_contract/deposit_contract.sol#L107-L110 // are safe because the `DepositEvent` is the only event in the deposit contract and the length // checks are done there. 
- DepositRequest { - pubkey: log - .pubkey - .as_ref() - .try_into() - .expect("pubkey length should be enforced in deposit contract"), - withdrawal_credentials: log - .withdrawal_credentials - .as_ref() - .try_into() - .expect("withdrawal_credentials length should be enforced in deposit contract"), - amount: u64::from_le_bytes( - log.amount - .as_ref() - .try_into() - .expect("amount length should be enforced in deposit contract"), - ), - signature: log - .signature - .as_ref() - .try_into() - .expect("signature length should be enforced in deposit contract"), - index: u64::from_le_bytes( - log.index - .as_ref() - .try_into() - .expect("deposit index length should be enforced in deposit contract"), - ), - } + [ + log.pubkey.as_ref(), + log.withdrawal_credentials.as_ref(), + log.amount.as_ref(), + log.signature.as_ref(), + log.index.as_ref(), + ] + .concat() + .into() } #[cfg(test)] mod tests { use super::*; + use alloy_primitives::bytes; use reth_chainspec::MAINNET; use reth_primitives::TxType; @@ -119,9 +103,12 @@ mod tests { }, ]; - let requests = parse_deposits_from_receipts(&MAINNET, &receipts).unwrap(); - assert_eq!(requests.len(), 2); - assert_eq!(requests[0].as_deposit_request().unwrap().amount, 32e9 as u64); - assert_eq!(requests[1].as_deposit_request().unwrap().amount, 32e9 as u64); + let request_data = parse_deposits_from_receipts(&MAINNET, &receipts).unwrap(); + assert_eq!( + request_data, + bytes!( + 
"998c8086669bf65e24581cda47d8537966e9f5066fc6ffdcba910a1bfb91eae7a4873fcce166a1c4ea217e6b1afd396201000000000000000000000001c340fb72ed14d4eaa71f7633ee9e33b88d4f39004059730700000098ddbffd700c1aac324cfdf0492ff289223661eb26718ce3651ba2469b22f480d56efab432ed91af05a006bde0c1ea68134e0acd8cacca0c13ad1f716db874b44abfcc966368019753174753bca3af2ea84bc569c46f76592a91e97f311eddece474160000000000a1a2ba870a90e889aa594a0cc1c6feffb94c2d8f65646c937f1f456a315ef649533e25a4614d8f4f66ebdb06481b90af0100000000000000000000000a0f04a231efbc29e1db7d086300ff550211c2f60040597307000000ad416d590e1a7f52baff770a12835b68904efad22cc9f8ba531e50cbbd26f32b9c7373cf6538a0577f501e4d3e3e63e208767bcccaae94e1e3720bfb734a286f9c017d17af46536545ccb7ca94d71f295e71f6d25bf978c09ada6f8d3f7ba039e374160000000000" + ) + ); } } diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 9c7748a561f..428458fcd04 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -4,8 +4,9 @@ use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; -use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; +use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockNumber, U256}; use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardforks, MAINNET}; @@ -19,7 +20,7 @@ use reth_evm::{ ConfigureEvm, }; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockWithSenders, EthereumHardfork, Header, Receipt, Request}; +use reth_primitives::{BlockWithSenders, EthereumHardfork, Header, Receipt}; use reth_prune_types::PruneModes; use reth_revm::{ batch::BlockBatchRecord, @@ -104,7 +105,7 @@ where #[derive(Debug, Clone)] struct EthExecuteOutput { receipts: Vec, - requests: Vec, + requests: Requests, gas_used: u64, } @@ -122,7 +123,7 @@ where EvmConfig: ConfigureEvm
, { /// Executes the transactions in the block and returns the receipts of the transactions in the - /// block, the total gas used and the list of EIP-7685 [requests](Request). + /// block, the total gas used and the list of EIP-7685 [requests](Requests). /// /// This applies the pre-execution and post-execution changes that require an [EVM](Evm), and /// executes the transactions. @@ -205,11 +206,11 @@ where let deposit_requests = crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, &receipts)?; - let post_execution_requests = system_caller.apply_post_execution_changes(&mut evm)?; - - [deposit_requests, post_execution_requests].concat() + let mut requests = Requests::new(vec![deposit_requests]); + requests.extend(system_caller.apply_post_execution_changes(&mut evm)?); + requests } else { - vec![] + Requests::default() }; Ok(EthExecuteOutput { receipts, requests, gas_used: cumulative_gas_used }) @@ -283,7 +284,7 @@ where /// Execute a single block and apply the state changes to the internal state. /// /// Returns the receipts of the transactions in the block, the total gas used and the list of - /// EIP-7685 [requests](Request). + /// EIP-7685 [requests](Requests). /// /// Returns an error if execution fails. 
fn execute_without_verification_with_state_hook( @@ -494,11 +495,12 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxLegacy, EMPTY_ROOT_HASH}; + use alloy_consensus::TxLegacy; use alloy_eips::{ eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, + eip7685::EMPTY_REQUESTS_HASH, }; use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256}; use reth_chainspec::{ChainSpecBuilder, ForkCondition}; @@ -583,7 +585,6 @@ mod tests { transactions: vec![], ommers: vec![], withdrawals: None, - requests: None, }, }, senders: vec![], @@ -612,12 +613,7 @@ mod tests { &BlockWithSenders { block: Block { header: header.clone(), - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - requests: None, - }, + body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, }, senders: vec![], }, @@ -684,7 +680,6 @@ mod tests { transactions: vec![], ommers: vec![], withdrawals: None, - requests: None, }, }, senders: vec![], @@ -739,7 +734,6 @@ mod tests { transactions: vec![], ommers: vec![], withdrawals: None, - requests: None, }, }, senders: vec![], @@ -1016,7 +1010,7 @@ mod tests { parent_hash: B256::random(), timestamp: 1, number: fork_activation_block, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; let provider = executor_provider(chain_spec); @@ -1075,7 +1069,7 @@ mod tests { parent_hash: B256::random(), timestamp: 1, number: fork_activation_block, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; @@ -1121,7 +1115,7 @@ mod tests { ); let mut header = chain_spec.genesis_header().clone(); - header.requests_root = Some(EMPTY_ROOT_HASH); + header.requests_hash = Some(EMPTY_REQUESTS_HASH); let header_hash = header.hash_slow(); let 
provider = executor_provider(chain_spec); @@ -1159,7 +1153,7 @@ mod tests { parent_hash: header_hash, timestamp: 1, number: 1, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; let header_hash = header.hash_slow(); @@ -1196,7 +1190,7 @@ mod tests { parent_hash: header_hash, timestamp: 1, number: 2, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; @@ -1254,15 +1248,16 @@ mod tests { HashMap::default(), ); - // https://github.com/lightclient/7002asm/blob/e0d68e04d15f25057af7b6d180423d94b6b3bdb3/test/Contract.t.sol.in#L49-L64 + // https://github.com/lightclient/sys-asm/blob/9282bdb9fd64e024e27f60f507486ffb2183cba2/test/Withdrawal.t.sol.in#L36 let validator_public_key = fixed_bytes!("111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); - let withdrawal_amount = fixed_bytes!("2222222222222222"); + let withdrawal_amount = fixed_bytes!("0203040506070809"); let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); assert_eq!(input.len(), 56); let mut header = chain_spec.genesis_header().clone(); header.gas_limit = 1_500_000; - header.gas_used = 134_807; + // measured + header.gas_used = 135_856; header.receipts_root = b256!("b31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); @@ -1272,10 +1267,10 @@ mod tests { chain_id: Some(chain_spec.chain.id()), nonce: 1, gas_price: header.base_fee_per_gas.unwrap().into(), - gas_limit: 134_807, + gas_limit: header.gas_used, to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), // `MIN_WITHDRAWAL_REQUEST_FEE` - value: U256::from(1), + value: U256::from(2), input, }), ); @@ -1302,11 +1297,9 @@ mod tests { let receipt = receipts.first().unwrap(); assert!(receipt.success); - let request = requests.first().unwrap(); - let withdrawal_request = request.as_withdrawal_request().unwrap(); - assert_eq!(withdrawal_request.source_address, 
sender_address); - assert_eq!(withdrawal_request.validator_pubkey, validator_public_key); - assert_eq!(withdrawal_request.amount, u64::from_be_bytes(withdrawal_amount.into())); + assert!(requests[0].is_empty(), "there should be no deposits"); + assert!(!requests[1].is_empty(), "there should be a withdrawal"); + assert!(requests[2].is_empty(), "there should be no consolidations"); } #[test] diff --git a/crates/ethereum/evm/src/strategy.rs b/crates/ethereum/evm/src/strategy.rs index 7a297be498a..714f673c858 100644 --- a/crates/ethereum/evm/src/strategy.rs +++ b/crates/ethereum/evm/src/strategy.rs @@ -6,6 +6,7 @@ use crate::{ }; use alloc::sync::Arc; use alloy_consensus::Transaction as _; +use alloy_eips::eip7685::Requests; use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; use reth_consensus::ConsensusError; @@ -18,7 +19,7 @@ use reth_evm::{ system_calls::{OnStateHook, SystemCaller}, ConfigureEvm, ConfigureEvmEnv, }; -use reth_primitives::{BlockWithSenders, Header, Receipt, Request}; +use reth_primitives::{BlockWithSenders, Header, Receipt}; use reth_revm::{ db::{states::bundle_state::BundleRetention, BundleState}, state_change::post_block_balance_increments, @@ -194,7 +195,7 @@ where block: &BlockWithSenders, total_difficulty: U256, receipts: &[Receipt], - ) -> Result, Self::Error> { + ) -> Result { let env = self.evm_env_for_block(&block.header, total_difficulty); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); @@ -203,12 +204,11 @@ where let deposit_requests = crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, receipts)?; - let post_execution_requests = - self.system_caller.apply_post_execution_changes(&mut evm)?; - - [deposit_requests, post_execution_requests].concat() + let mut requests = Requests::new(vec![deposit_requests]); + requests.extend(self.system_caller.apply_post_execution_changes(&mut evm)?); + requests } else { - vec![] + Requests::default() }; drop(evm); @@ -257,7 
+257,7 @@ where &self, block: &BlockWithSenders, receipts: &[Receipt], - requests: &[Request], + requests: &Requests, ) -> Result<(), ConsensusError> { validate_block_post_execution(block, &self.chain_spec.clone(), receipts, requests) } @@ -266,11 +266,12 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxLegacy, EMPTY_ROOT_HASH}; + use alloy_consensus::TxLegacy; use alloy_eips::{ eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, + eip7685::EMPTY_REQUESTS_HASH, }; use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256}; use reth_chainspec::{ChainSpecBuilder, ForkCondition}; @@ -365,7 +366,6 @@ mod tests { transactions: vec![], ommers: vec![], withdrawals: None, - requests: None, }, }, senders: vec![], @@ -397,7 +397,6 @@ mod tests { transactions: vec![], ommers: vec![], withdrawals: None, - requests: None, }, }, senders: vec![], @@ -468,7 +467,6 @@ mod tests { transactions: vec![], ommers: vec![], withdrawals: None, - requests: None, }, }, senders: vec![], @@ -523,7 +521,6 @@ mod tests { transactions: vec![], ommers: vec![], withdrawals: None, - requests: None, }, }, senders: vec![], @@ -797,7 +794,7 @@ mod tests { parent_hash: B256::random(), timestamp: 1, number: fork_activation_block, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; let provider = executor_provider(chain_spec); @@ -855,7 +852,7 @@ mod tests { parent_hash: B256::random(), timestamp: 1, number: fork_activation_block, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; @@ -901,7 +898,7 @@ mod tests { ); let mut header = chain_spec.genesis_header().clone(); - header.requests_root = Some(EMPTY_ROOT_HASH); + header.requests_hash = Some(EMPTY_REQUESTS_HASH); let header_hash = 
header.hash_slow(); let provider = executor_provider(chain_spec); @@ -938,7 +935,7 @@ mod tests { parent_hash: header_hash, timestamp: 1, number: 1, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; let header_hash = header.hash_slow(); @@ -977,7 +974,7 @@ mod tests { parent_hash: header_hash, timestamp: 1, number: 2, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; @@ -1039,15 +1036,16 @@ mod tests { HashMap::default(), ); - // https://github.com/lightclient/7002asm/blob/e0d68e04d15f25057af7b6d180423d94b6b3bdb3/test/Contract.t.sol.in#L49-L64 + // https://github.com/lightclient/sys-asm/blob/9282bdb9fd64e024e27f60f507486ffb2183cba2/test/Withdrawal.t.sol.in#L36 let validator_public_key = fixed_bytes!("111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); - let withdrawal_amount = fixed_bytes!("2222222222222222"); + let withdrawal_amount = fixed_bytes!("0203040506070809"); let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); assert_eq!(input.len(), 56); let mut header = chain_spec.genesis_header().clone(); header.gas_limit = 1_500_000; - header.gas_used = 134_807; + // measured + header.gas_used = 135_856; header.receipts_root = b256!("b31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); @@ -1057,10 +1055,10 @@ mod tests { chain_id: Some(chain_spec.chain.id()), nonce: 1, gas_price: header.base_fee_per_gas.unwrap().into(), - gas_limit: 134_807, + gas_limit: header.gas_used, to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), // `MIN_WITHDRAWAL_REQUEST_FEE` - value: U256::from(1), + value: U256::from(2), input, }), ); @@ -1087,11 +1085,9 @@ mod tests { let receipt = receipts.first().unwrap(); assert!(receipt.success); - let request = requests.first().unwrap(); - let withdrawal_request = request.as_withdrawal_request().unwrap(); - 
assert_eq!(withdrawal_request.source_address, sender_address); - assert_eq!(withdrawal_request.validator_pubkey, validator_public_key); - assert_eq!(withdrawal_request.amount, u64::from_be_bytes(withdrawal_amount.into())); + assert!(requests[0].is_empty(), "there should be no deposits"); + assert!(!requests[1].is_empty(), "there should be a withdrawal"); + assert!(requests[2].is_empty(), "there should be no consolidations"); } #[test] diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index ce37a4f8ea4..443e837b2ed 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -33,8 +33,9 @@ revm.workspace = true revm-primitives.workspace = true # alloy -alloy-primitives.workspace = true +alloy-eips.workspace = true alloy-consensus.workspace = true +alloy-primitives.workspace = true # misc tracing.workspace = true diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index dcf54fc0248..e09228302e4 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -10,6 +10,7 @@ #![allow(clippy::useless_let_if_seq)] use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_eips::eip7685::Requests; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, @@ -25,7 +26,7 @@ use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, - proofs::{self, calculate_requests_root}, + proofs::{self}, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, Block, BlockBody, EthereumHardforks, Header, Receipt, }; @@ -308,9 +309,7 @@ where } // calculate the requests and the requests root - let (requests, requests_root) = if chain_spec - .is_prague_active_at_timestamp(attributes.timestamp) - { + let requests = 
if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { let deposit_requests = parse_deposits_from_receipts(&chain_spec, receipts.iter().flatten()) .map_err(|err| PayloadBuilderError::Internal(RethError::Execution(err.into())))?; let withdrawal_requests = system_caller @@ -328,11 +327,9 @@ where ) .map_err(|err| PayloadBuilderError::Internal(err.into()))?; - let requests = [deposit_requests, withdrawal_requests, consolidation_requests].concat(); - let requests_root = calculate_requests_root(&requests); - (Some(requests.into()), Some(requests_root)) + Some(Requests::new(vec![deposit_requests, withdrawal_requests, consolidation_requests])) } else { - (None, None) + None }; let WithdrawalsOutcome { withdrawals_root, withdrawals } = @@ -414,13 +411,13 @@ where parent_beacon_block_root: attributes.parent_beacon_block_root, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), - requests_root, + requests_hash: requests.map(|r| r.requests_hash()), }; // seal the block let block = Block { header, - body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals, requests }, + body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals }, }; let sealed_block = block.seal_slow(); diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 9bd6537326b..49e9623021e 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -25,7 +25,6 @@ serde = { workspace = true, optional = true } serde_with = { workspace = true, optional = true } [dev-dependencies] -alloy-eips.workspace = true arbitrary.workspace = true bincode.workspace = true rand.workspace = true @@ -35,5 +34,9 @@ reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } default = ["std"] optimism = ["reth-primitives/optimism", "revm/optimism"] serde = ["dep:serde", "reth-trie/serde", "revm/serde"] -serde-bincode-compat = 
["reth-primitives/serde-bincode-compat", "reth-trie/serde-bincode-compat", "serde_with"] +serde-bincode-compat = [ + "reth-primitives/serde-bincode-compat", + "reth-trie/serde-bincode-compat", + "serde_with", +] std = [] diff --git a/crates/evm/execution-types/src/execute.rs b/crates/evm/execution-types/src/execute.rs index 0cf5d705079..ae5ad2c0b7c 100644 --- a/crates/evm/execution-types/src/execute.rs +++ b/crates/evm/execution-types/src/execute.rs @@ -1,5 +1,5 @@ +use alloy_eips::eip7685::Requests; use alloy_primitives::U256; -use reth_primitives::Request; use revm::db::BundleState; /// A helper type for ethereum block inputs that consists of a block and the total difficulty. @@ -33,8 +33,8 @@ pub struct BlockExecutionOutput { pub state: BundleState, /// All the receipts of the transactions in the block. pub receipts: Vec, - /// All the EIP-7685 requests of the transactions in the block. - pub requests: Vec, + /// All the EIP-7685 requests in the block. + pub requests: Requests, /// The total gas used by the block. 
pub gas_used: u64, } diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 08ddf9e4167..0fde01547f7 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,6 +1,7 @@ use crate::BlockExecutionOutput; +use alloy_eips::eip7685::Requests; use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; -use reth_primitives::{logs_bloom, Account, Bytecode, Receipt, Receipts, Requests, StorageEntry}; +use reth_primitives::{logs_bloom, Account, Bytecode, Receipt, Receipts, StorageEntry}; use reth_trie::HashedPostState; use revm::{ db::{states::BundleState, BundleAccount}, @@ -357,7 +358,7 @@ impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { bundle: value.0.state, receipts: Receipts::from(value.0.receipts), first_block: value.1, - requests: vec![Requests::from(value.0.requests)], + requests: vec![value.0.requests], } } } @@ -365,9 +366,9 @@ impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { #[cfg(test)] mod tests { use super::*; - use alloy_eips::{eip6110::DepositRequest, eip7002::WithdrawalRequest}; - use alloy_primitives::{Address, FixedBytes, LogData, B256}; - use reth_primitives::{Receipts, Request, Requests, TxType}; + use alloy_eips::eip7685::Requests; + use alloy_primitives::{bytes, Address, LogData, B256}; + use reth_primitives::{Receipts, TxType}; use std::collections::HashMap; #[test] @@ -393,29 +394,8 @@ mod tests { })]], }; - // Create a Requests object with a vector of requests, including DepositRequest and - // WithdrawalRequest - let requests = vec![Requests(vec![ - Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }), - Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([23; 48]), - 
withdrawal_credentials: B256::from([0; 32]), - amount: 34343, - signature: FixedBytes::<96>::from([43; 96]), - index: 1212, - }), - Request::WithdrawalRequest(WithdrawalRequest { - source_address: Address::from([1; 20]), - validator_pubkey: FixedBytes::<48>::from([10; 48]), - amount: 72, - }), - ])]; + // Create a Requests object with a vector of requests + let requests = vec![Requests::new(vec![bytes!("dead"), bytes!("beef"), bytes!("beebee")])]; // Define the first block number let first_block = 123; @@ -657,17 +637,12 @@ mod tests { // Define the first block number let first_block = 123; - // Create a DepositRequest object with specific attributes. - let request = Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }); + // Create a request. + let request = bytes!("deadbeef"); // Create a vector of Requests containing the request. - let requests = vec![Requests(vec![request]), Requests(vec![request])]; + let requests = + vec![Requests::new(vec![request.clone()]), Requests::new(vec![request.clone()])]; // Create a ExecutionOutcome object with the created bundle, receipts, requests, and // first_block @@ -681,7 +656,7 @@ mod tests { assert_eq!(exec_res.receipts, Receipts { receipt_vec: vec![vec![Some(receipt)]] }); // Assert that the requests are properly cut after reverting to the initial block number. - assert_eq!(exec_res.requests, vec![Requests(vec![request])]); + assert_eq!(exec_res.requests, vec![Requests::new(vec![request])]); // Assert that the revert_to method returns false when attempting to revert to a block // number greater than the initial block number. @@ -709,17 +684,11 @@ mod tests { // Create a Receipts object containing the receipt. let receipts = Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }; - // Create a DepositRequest object with specific attributes. 
- let request = Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }); + // Create a request. + let request = bytes!("deadbeef"); // Create a vector of Requests containing the request. - let requests = vec![Requests(vec![request])]; + let requests = vec![Requests::new(vec![request.clone()])]; // Define the initial block number. let first_block = 123; @@ -739,7 +708,7 @@ mod tests { receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]] }, - requests: vec![Requests(vec![request]), Requests(vec![request])], + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], first_block: 123, } ); @@ -771,18 +740,15 @@ mod tests { // Define the first block number let first_block = 123; - // Create a DepositRequest object with specific attributes. - let request = Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }); + // Create a request. + let request = bytes!("deadbeef"); // Create a vector of Requests containing the request. 
- let requests = - vec![Requests(vec![request]), Requests(vec![request]), Requests(vec![request])]; + let requests = vec![ + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + ]; // Create a ExecutionOutcome object with the created bundle, receipts, requests, and // first_block @@ -796,7 +762,7 @@ mod tests { let lower_execution_outcome = ExecutionOutcome { bundle: Default::default(), receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }, - requests: vec![Requests(vec![request])], + requests: vec![Requests::new(vec![request.clone()])], first_block, }; @@ -806,7 +772,7 @@ mod tests { receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]], }, - requests: vec![Requests(vec![request]), Requests(vec![request])], + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], first_block: 124, }; diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index f52325b43e8..d7c8590eea8 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -9,10 +9,11 @@ pub use reth_storage_errors::provider::ProviderError; use crate::system_calls::OnStateHook; use alloc::{boxed::Box, vec::Vec}; +use alloy_eips::eip7685::Requests; use alloy_primitives::BlockNumber; use core::{fmt::Display, marker::PhantomData}; use reth_consensus::ConsensusError; -use reth_primitives::{BlockWithSenders, Receipt, Request}; +use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_revm::batch::BlockBatchRecord; use revm::{db::BundleState, State}; @@ -190,7 +191,7 @@ pub trait BlockExecutionStrategy { block: &BlockWithSenders, total_difficulty: U256, receipts: &[Receipt], - ) -> Result, Self::Error>; + ) -> Result; /// Returns a reference to the current state. 
fn state_ref(&self) -> &State; @@ -209,7 +210,7 @@ pub trait BlockExecutionStrategy { &self, block: &BlockWithSenders, receipts: &[Receipt], - requests: &[Request], + requests: &Requests, ) -> Result<(), ConsensusError>; } @@ -450,10 +451,10 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_eips::eip6110::DepositRequest; use alloy_primitives::U256; use reth_chainspec::{ChainSpec, MAINNET}; use revm::db::{CacheDB, EmptyDBTyped}; + use revm_primitives::bytes; use std::sync::Arc; #[derive(Clone, Default)] @@ -545,14 +546,14 @@ mod tests { _evm_config: EvmConfig, state: State, execute_transactions_result: (Vec, u64), - apply_post_execution_changes_result: Vec, + apply_post_execution_changes_result: Requests, finish_result: BundleState, } #[derive(Clone)] struct TestExecutorStrategyFactory { execute_transactions_result: (Vec, u64), - apply_post_execution_changes_result: Vec, + apply_post_execution_changes_result: Requests, finish_result: BundleState, } @@ -607,7 +608,7 @@ mod tests { _block: &BlockWithSenders, _total_difficulty: U256, _receipts: &[Receipt], - ) -> Result, Self::Error> { + ) -> Result { Ok(self.apply_post_execution_changes_result.clone()) } @@ -629,7 +630,7 @@ mod tests { &self, _block: &BlockWithSenders, _receipts: &[Receipt], - _requests: &[Request], + _requests: &Requests, ) -> Result<(), ConsensusError> { Ok(()) } @@ -651,8 +652,7 @@ mod tests { let expected_gas_used = 10; let expected_receipts = vec![Receipt::default()]; let expected_execute_transactions_result = (expected_receipts.clone(), expected_gas_used); - let expected_apply_post_execution_changes_result = - vec![Request::DepositRequest(DepositRequest::default())]; + let expected_apply_post_execution_changes_result = Requests::new(vec![bytes!("deadbeef")]); let expected_finish_result = BundleState::default(); let strategy_factory = TestExecutorStrategyFactory { diff --git a/crates/evm/src/system_calls/eip7002.rs b/crates/evm/src/system_calls/eip7002.rs index 9af944e42a5..5e36f2bdeb9 
100644 --- a/crates/evm/src/system_calls/eip7002.rs +++ b/crates/evm/src/system_calls/eip7002.rs @@ -1,10 +1,10 @@ //! [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) system call implementation. use crate::ConfigureEvm; -use alloc::{boxed::Box, format, string::ToString, vec::Vec}; -use alloy_eips::eip7002::{WithdrawalRequest, WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS}; -use alloy_primitives::{bytes::Buf, Address, Bytes, FixedBytes}; +use alloc::{boxed::Box, format}; +use alloy_eips::eip7002::WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS; +use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{Header, Request}; +use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; @@ -62,52 +62,23 @@ where Ok(res) } -/// Parses the withdrawal requests from the execution output. +/// Calls the withdrawals requests system contract, and returns the requests from the execution +/// output. #[inline] -pub(crate) fn post_commit(result: ExecutionResult) -> Result, BlockExecutionError> { - let mut data = match result { +pub(crate) fn post_commit(result: ExecutionResult) -> Result { + match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), ExecutionResult::Revert { output, .. } => { Err(BlockValidationError::WithdrawalRequestsContractCall { message: format!("execution reverted: {output}"), - }) + } + .into()) } ExecutionResult::Halt { reason, .. 
} => { Err(BlockValidationError::WithdrawalRequestsContractCall { message: format!("execution halted: {reason:?}"), - }) - } - }?; - - // Withdrawals are encoded as a series of withdrawal requests, each with the following - // format: - // - // +------+--------+--------+ - // | addr | pubkey | amount | - // +------+--------+--------+ - // 20 48 8 - - const WITHDRAWAL_REQUEST_SIZE: usize = 20 + 48 + 8; - let mut withdrawal_requests = Vec::with_capacity(data.len() / WITHDRAWAL_REQUEST_SIZE); - while data.has_remaining() { - if data.remaining() < WITHDRAWAL_REQUEST_SIZE { - return Err(BlockValidationError::WithdrawalRequestsContractCall { - message: "invalid withdrawal request length".to_string(), } .into()) } - - let mut source_address = Address::ZERO; - data.copy_to_slice(source_address.as_mut_slice()); - - let mut validator_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(validator_pubkey.as_mut_slice()); - - let amount = data.get_u64(); - - withdrawal_requests - .push(WithdrawalRequest { source_address, validator_pubkey, amount }.into()); } - - Ok(withdrawal_requests) } diff --git a/crates/evm/src/system_calls/eip7251.rs b/crates/evm/src/system_calls/eip7251.rs index f09d4be81af..7a55c7a5aea 100644 --- a/crates/evm/src/system_calls/eip7251.rs +++ b/crates/evm/src/system_calls/eip7251.rs @@ -1,10 +1,10 @@ //! [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) system call implementation. 
use crate::ConfigureEvm; -use alloc::{boxed::Box, format, string::ToString, vec::Vec}; -use alloy_eips::eip7251::{ConsolidationRequest, CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS}; -use alloy_primitives::{bytes::Buf, Address, Bytes, FixedBytes}; +use alloc::{boxed::Box, format}; +use alloy_eips::eip7251::CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS; +use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{Header, Request}; +use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; @@ -64,56 +64,23 @@ where Ok(res) } -/// Parses the consolidation requests from the execution output. +/// Calls the consolidation requests system contract, and returns the requests from the execution +/// output. #[inline] -pub(crate) fn post_commit(result: ExecutionResult) -> Result, BlockExecutionError> { - let mut data = match result { +pub(crate) fn post_commit(result: ExecutionResult) -> Result { + match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), ExecutionResult::Revert { output, .. } => { Err(BlockValidationError::ConsolidationRequestsContractCall { message: format!("execution reverted: {output}"), - }) + } + .into()) } ExecutionResult::Halt { reason, .. 
} => { Err(BlockValidationError::ConsolidationRequestsContractCall { message: format!("execution halted: {reason:?}"), - }) - } - }?; - - // Consolidations are encoded as a series of consolidation requests, each with the following - // format: - // - // +------+--------+---------------+ - // | addr | pubkey | target pubkey | - // +------+--------+---------------+ - // 20 48 48 - - const CONSOLIDATION_REQUEST_SIZE: usize = 20 + 48 + 48; - let mut consolidation_requests = Vec::with_capacity(data.len() / CONSOLIDATION_REQUEST_SIZE); - while data.has_remaining() { - if data.remaining() < CONSOLIDATION_REQUEST_SIZE { - return Err(BlockValidationError::ConsolidationRequestsContractCall { - message: "invalid consolidation request length".to_string(), } .into()) } - - let mut source_address = Address::ZERO; - data.copy_to_slice(source_address.as_mut_slice()); - - let mut source_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(source_pubkey.as_mut_slice()); - - let mut target_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(target_pubkey.as_mut_slice()); - - consolidation_requests.push(Request::ConsolidationRequest(ConsolidationRequest { - source_address, - source_pubkey, - target_pubkey, - })); } - - Ok(consolidation_requests) } diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 5dc3f35bd3a..d71dcfedabb 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -1,11 +1,13 @@ //! System contract call functions. 
use crate::ConfigureEvm; -use alloc::{boxed::Box, vec::Vec}; +use alloc::{boxed::Box, vec}; +use alloy_eips::eip7685::Requests; +use alloy_primitives::Bytes; use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_execution_errors::BlockExecutionError; -use reth_primitives::{Block, Header, Request}; +use reth_primitives::{Block, Header}; use revm::{Database, DatabaseCommit, Evm}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; @@ -119,17 +121,18 @@ where pub fn apply_post_execution_changes( &mut self, evm: &mut Evm<'_, Ext, DB>, - ) -> Result, BlockExecutionError> + ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, { + // todo // Collect all EIP-7685 requests let withdrawal_requests = self.apply_withdrawal_requests_contract_call(evm)?; // Collect all EIP-7251 requests let consolidation_requests = self.apply_consolidation_requests_contract_call(evm)?; - Ok([withdrawal_requests, consolidation_requests].concat()) + Ok(Requests::new(vec![withdrawal_requests, consolidation_requests])) } /// Applies the pre-block call to the EIP-2935 blockhashes contract. 
@@ -247,7 +250,7 @@ where db: &mut DB, initialized_cfg: &CfgEnvWithHandlerCfg, initialized_block_env: &BlockEnv, - ) -> Result, BlockExecutionError> + ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, @@ -263,7 +266,7 @@ where pub fn apply_withdrawal_requests_contract_call( &mut self, evm: &mut Evm<'_, Ext, DB>, - ) -> Result, BlockExecutionError> + ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, @@ -285,7 +288,7 @@ where db: &mut DB, initialized_cfg: &CfgEnvWithHandlerCfg, initialized_block_env: &BlockEnv, - ) -> Result, BlockExecutionError> + ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, @@ -301,7 +304,7 @@ where pub fn apply_consolidation_requests_contract_call( &mut self, evm: &mut Evm<'_, Ext, DB>, - ) -> Result, BlockExecutionError> + ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index 261b36420b4..c20f43dca9d 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -7,6 +7,7 @@ use crate::{ }, system_calls::OnStateHook, }; +use alloy_eips::eip7685::Requests; use alloy_primitives::BlockNumber; use parking_lot::Mutex; use reth_execution_errors::BlockExecutionError; @@ -62,7 +63,10 @@ impl Executor for MockExecutorProvider { Ok(BlockExecutionOutput { state: bundle, receipts: receipts.into_iter().flatten().flatten().collect(), - requests: requests.into_iter().flatten().collect(), + requests: requests.into_iter().fold(Requests::default(), |mut reqs, req| { + reqs.extend(req); + reqs + }), gas_used: 0, }) } diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 1c793975c75..5eaf92bfefc 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -10,7 +10,7 @@ use reth_evm::execute::{ }; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::{ - 
constants::ETH_TO_WEI, Block, BlockBody, BlockWithSenders, Header, Receipt, Requests, + constants::ETH_TO_WEI, Block, BlockBody, BlockWithSenders, Header, Receipt, SealedBlockWithSenders, Transaction, }; use reth_provider::{ @@ -29,7 +29,7 @@ pub(crate) fn to_execution_outcome( bundle: block_execution_output.state.clone(), receipts: block_execution_output.receipts.clone().into(), first_block: block_number, - requests: vec![Requests(block_execution_output.requests.clone())], + requests: vec![block_execution_output.requests.clone()], } } diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 6e5483f3a0e..d60c63fc1f6 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -277,7 +277,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }, ]), }.encode(&mut data); @@ -312,7 +312,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }, ]), }; @@ -412,11 +412,10 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }, ], withdrawals: None, - requests: None } ]), }; @@ -488,11 +487,10 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }, ], withdrawals: None, - requests: None } ]), }; diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs index b25a7568b22..8c11bfa82bb 100644 --- a/crates/net/eth-wire-types/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -143,7 +143,7 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None }; assert_eq!(header.hash_slow(), expected_hash); } @@ -256,7 +256,7 @@ mod tests { blob_gas_used: 
Some(0x020000), excess_blob_gas: Some(0), parent_beacon_block_root: None, - requests_root: None, + requests_hash: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); @@ -296,7 +296,7 @@ mod tests { parent_beacon_block_root: None, blob_gas_used: Some(0), excess_blob_gas: Some(0x1600000), - requests_root: None, + requests_hash: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 0116f134881..e5129b68674 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -347,22 +347,6 @@ fn ensure_valid_body_response( _ => return Err(ConsensusError::WithdrawalsRootUnexpected), } - match (header.requests_root, &block.requests) { - (Some(header_requests_root), Some(requests)) => { - let requests = requests.0.as_slice(); - let requests_root = reth_primitives::proofs::calculate_requests_root(requests); - if requests_root != header_requests_root { - return Err(ConsensusError::BodyRequestsRootDiff( - GotExpected { got: requests_root, expected: header_requests_root }.into(), - )) - } - } - (None, None) => { - // this is ok because we assume the fork is not active in this case - } - _ => return Err(ConsensusError::RequestsRootUnexpected), - } - Ok(()) } diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index a1e2021a4ad..f251347c58b 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -23,6 +23,7 @@ reth-prune-types.workspace = true reth-consensus.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true op-alloy-consensus.workspace = true alloy-consensus.workspace = true @@ -41,8 +42,6 @@ derive_more.workspace = true tracing.workspace = true [dev-dependencies] -alloy-eips.workspace = true - reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, 
features = ["test-utils"] } diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 26342062321..3a86f5bbae4 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -5,6 +5,7 @@ use crate::{ }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; +use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockNumber, U256}; use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardforks}; @@ -380,7 +381,7 @@ where Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, - requests: vec![], + requests: Requests::default(), gas_used, }) } @@ -403,7 +404,7 @@ where Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, - requests: vec![], + requests: Requests::default(), gas_used, }) } @@ -429,7 +430,7 @@ where Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, - requests: vec![], + requests: Requests::default(), gas_used, }) } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 3eda2878cae..ffc82fde43c 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -10,7 +10,6 @@ // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")] -#[macro_use] extern crate alloc; use alloc::{sync::Arc, vec::Vec}; diff --git a/crates/optimism/evm/src/strategy.rs b/crates/optimism/evm/src/strategy.rs index 9ba43604ea0..199a8a4d327 100644 --- a/crates/optimism/evm/src/strategy.rs +++ b/crates/optimism/evm/src/strategy.rs @@ -3,6 +3,7 @@ use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; +use alloy_eips::eip7685::Requests; use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; @@ -17,7 +18,7 @@ use reth_evm::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OptimismHardfork; -use reth_primitives::{BlockWithSenders, Header, Receipt, Request, TxType}; +use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; use reth_revm::{ db::{states::bundle_state::BundleRetention, BundleState}, state_change::post_block_balance_increments, @@ -235,7 +236,7 @@ where block: &BlockWithSenders, total_difficulty: U256, _receipts: &[Receipt], - ) -> Result, Self::Error> { + ) -> Result { let balance_increments = post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); // increment balances @@ -243,7 +244,7 @@ where .increment_balances(balance_increments) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - Ok(vec![]) + Ok(Requests::default()) } fn state_ref(&self) -> &State { @@ -267,7 +268,7 @@ where &self, block: &BlockWithSenders, receipts: &[Receipt], - _requests: &[Request], + _requests: &Requests, ) -> Result<(), ConsensusError> { validate_block_post_execution(block, &self.chain_spec.clone(), receipts) } diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index e590635f524..b6ab9b87956 100644 --- a/crates/optimism/payload/src/builder.rs +++ 
b/crates/optimism/payload/src/builder.rs @@ -507,13 +507,13 @@ where parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, blob_gas_used, excess_blob_gas: excess_blob_gas.map(Into::into), - requests_root: None, + requests_hash: None, }; // seal the block let block = Block { header, - body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals, requests: None }, + body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals }, }; let sealed_block = block.seal_slow(); diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 122c2fde526..d5d1620e54b 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -18,8 +18,7 @@ use reth_primitives::{ transaction::WithEncoded, BlobTransactionSidecar, SealedBlock, TransactionSigned, Withdrawals, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, - convert_block_to_payload_field_v2, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use std::sync::Arc; @@ -249,7 +248,7 @@ impl From for OpExecutionPayloadEnvelopeV4 { B256::ZERO }; Self { - execution_payload: block_to_payload_v4(block), + execution_payload: block_to_payload_v3(block), block_value: fees, // From the engine API spec: // @@ -262,6 +261,7 @@ impl From for OpExecutionPayloadEnvelopeV4 { should_override_builder: false, blobs_bundle: sidecars.into_iter().map(Into::into).collect::>().into(), parent_beacon_block_root, + execution_requests: vec![], } } } diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index bd42298588f..7153ae3155c 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ b/crates/optimism/primitives/src/bedrock.rs @@ -85,7 +85,7 @@ pub const BEDROCK_HEADER: Header = Header { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None, + 
requests_hash: None, }; /// Bedrock total difficulty on Optimism Mainnet. diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 200b626d8c3..f2f09cdc7ff 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -227,7 +227,6 @@ impl OpReceiptBuilder { from, to, contract_address, - state_root, authorization_list, } = core_receipt; @@ -265,7 +264,6 @@ impl OpReceiptBuilder { from, to, contract_address, - state_root, authorization_list, }; diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index d435ed1d884..347b690c5c7 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -16,7 +16,7 @@ mod tests { CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }; - use reth_primitives::{Account, Receipt, ReceiptWithBloom, Requests, Withdrawals}; + use reth_primitives::{Account, Receipt, ReceiptWithBloom, Withdrawals}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, @@ -74,6 +74,5 @@ mod tests { validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Requests, UnusedBits::Zero); } } diff --git a/crates/payload/validator/Cargo.toml b/crates/payload/validator/Cargo.toml index 2662b987f88..a96799d7bce 100644 --- a/crates/payload/validator/Cargo.toml +++ b/crates/payload/validator/Cargo.toml @@ -18,4 +18,6 @@ reth-primitives.workspace = true reth-rpc-types-compat.workspace = true # alloy +alloy-eips.workspace = true +alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } diff --git 
a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 55002b0a98b..fdcd9244a43 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -8,6 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_eips::eip7685::Requests; +use alloy_primitives::Bytes; use alloy_rpc_types::engine::{ExecutionPayload, MaybeCancunPayloadFields, PayloadError}; use reth_chainspec::EthereumHardforks; use reth_primitives::SealedBlock; @@ -112,12 +114,17 @@ impl ExecutionPayloadValidator { &self, payload: ExecutionPayload, cancun_fields: MaybeCancunPayloadFields, + execution_requests: Option>, ) -> Result { let expected_hash = payload.block_hash(); // First parse the block - let sealed_block = - try_into_block(payload, cancun_fields.parent_beacon_block_root())?.seal_slow(); + let sealed_block = try_into_block( + payload, + cancun_fields.parent_beacon_block_root(), + execution_requests.map(Requests::new), + )? 
+ .seal_slow(); // Ensure the hash included in the payload matches the block hash if expected_hash != sealed_block.hash() { @@ -162,7 +169,7 @@ impl ExecutionPayloadValidator { let shanghai_active = self.is_shanghai_active_at_timestamp(sealed_block.timestamp); if !shanghai_active && sealed_block.body.withdrawals.is_some() { // shanghai not active but withdrawals present - return Err(PayloadError::PreShanghaiBlockWithWitdrawals) + return Err(PayloadError::PreShanghaiBlockWithWithdrawals) } if !self.is_prague_active_at_timestamp(sealed_block.timestamp) && diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index b34987327ee..2fec7566656 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -64,4 +64,4 @@ arbitrary = [ "dep:proptest", "dep:proptest-arbitrary-interop", ] -serde-bincode-compat = ["serde_with", "alloy-consensus/serde-bincode-compat"] \ No newline at end of file +serde-bincode-compat = ["serde_with", "alloy-consensus/serde-bincode-compat"] diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 85eeda166c4..c9b673ec724 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -2,8 +2,8 @@ use alloc::{fmt, vec::Vec}; -use alloy_consensus::{BlockHeader, Request, Transaction, TxType}; -use alloy_eips::eip4895::Withdrawal; +use alloy_consensus::{BlockHeader, Transaction, TxType}; +use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; use alloy_primitives::{Address, B256}; use crate::Block; @@ -30,9 +30,6 @@ pub trait BlockBody: /// Withdrawals in block. type Withdrawals: Iterator; - /// Requests in block. - type Requests: Iterator; - /// Returns reference to transactions in block. fn transactions(&self) -> &[Self::SignedTransaction]; @@ -43,8 +40,8 @@ pub trait BlockBody: /// Returns reference to uncle block headers. 
fn ommers(&self) -> &[Self::Header]; - /// Returns [`Request`] in block, if any. - fn requests(&self) -> Option<&Self::Requests>; + /// Returns [`Requests`] in block, if any. + fn requests(&self) -> Option<&Requests>; /// Create a [`Block`] from the body and its header. fn into_block>(self, header: Self::Header) -> T { @@ -63,12 +60,6 @@ pub trait BlockBody: // `Withdrawals` and `Withdrawals` moved to alloy fn calculate_withdrawals_root(&self) -> Option; - /// Calculate the requests root for the block body, if requests exist. If there are no - /// requests, this will return `None`. - // todo: can be default impl if `calculate_requests_root` made into a method on - // `Requests` and `Requests` moved to alloy - fn calculate_requests_root(&self) -> Option; - /// Recover signer addresses for all transactions in the block body. fn recover_signers(&self) -> Option>; diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index ef5c0d02536..c5f6e86b9db 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -37,7 +37,7 @@ pub const fn generate_valid_header( } // Placeholder for future EIP adjustments - header.requests_root = None; + header.requests_hash = None; header } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index dd10ac9c5f1..a77669ec367 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -29,9 +29,6 @@ pub use transaction::{signed::SignedTransaction, Transaction}; mod integer_list; pub use integer_list::{IntegerList, IntegerListError}; -pub mod request; -pub use request::{Request, Requests}; - pub mod block; pub use block::{body::BlockBody, Block}; diff --git a/crates/primitives-traits/src/mod.rs b/crates/primitives-traits/src/mod.rs deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/crates/primitives-traits/src/request.rs 
b/crates/primitives-traits/src/request.rs deleted file mode 100644 index c08af3fd622..00000000000 --- a/crates/primitives-traits/src/request.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! EIP-7685 requests. - -use alloc::vec::Vec; -pub use alloy_consensus::Request; -use alloy_eips::eip7685::{Decodable7685, Encodable7685}; -use alloy_rlp::{Decodable, Encodable}; -use derive_more::{Deref, DerefMut, From, IntoIterator}; -use reth_codecs::{add_arbitrary_tests, Compact}; -use revm_primitives::Bytes; -use serde::{Deserialize, Serialize}; - -/// A list of EIP-7685 requests. -#[derive( - Debug, - Clone, - PartialEq, - Eq, - Default, - Hash, - Deref, - DerefMut, - From, - IntoIterator, - Serialize, - Deserialize, - Compact, -)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] -pub struct Requests(pub Vec); - -impl Encodable for Requests { - fn encode(&self, out: &mut dyn bytes::BufMut) { - let mut h = alloy_rlp::Header { list: true, payload_length: 0 }; - - let mut encoded = Vec::new(); - for req in &self.0 { - let encoded_req = req.encoded_7685(); - h.payload_length += encoded_req.len(); - encoded.push(Bytes::from(encoded_req)); - } - - h.encode(out); - for req in encoded { - req.encode(out); - } - } -} - -impl Decodable for Requests { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok( as Decodable>::decode(buf)? - .into_iter() - .map(|bytes| Request::decode_7685(&mut bytes.as_ref())) - .collect::, alloy_eips::eip7685::Eip7685Error>>() - .map(Self)?) 
- } -} diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 8596f8d766c..05ccd9081a2 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -60,7 +60,6 @@ zstd = { workspace = true, features = ["experimental"], optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } -proptest = { workspace = true, optional = true } [dev-dependencies] # eth @@ -97,7 +96,6 @@ reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] asm-keccak = ["alloy-primitives/asm-keccak"] arbitrary = [ "dep:arbitrary", - "dep:proptest", "alloy-eips/arbitrary", "rand", "reth-codec", diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 0ac1458c5ac..917baef6661 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -48,9 +48,6 @@ impl TryFrom Option { - None - } -} - /// Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. @@ -120,7 +109,6 @@ mod block_rlp { transactions: Vec, ommers: Vec
, withdrawals: Option, - requests: Option, } #[derive(RlpEncodable)] @@ -130,50 +118,34 @@ mod block_rlp { transactions: &'a Vec, ommers: &'a Vec
, withdrawals: Option<&'a Withdrawals>, - requests: Option<&'a Requests>, } impl<'a> From<&'a Block> for HelperRef<'a, Header> { fn from(block: &'a Block) -> Self { - let Block { header, body: BlockBody { transactions, ommers, withdrawals, requests } } = - block; - Self { - header, - transactions, - ommers, - withdrawals: withdrawals.as_ref(), - requests: requests.as_ref(), - } + let Block { header, body: BlockBody { transactions, ommers, withdrawals } } = block; + Self { header, transactions, ommers, withdrawals: withdrawals.as_ref() } } } impl<'a> From<&'a SealedBlock> for HelperRef<'a, SealedHeader> { fn from(block: &'a SealedBlock) -> Self { - let SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - } = block; - Self { - header, - transactions, - ommers, - withdrawals: withdrawals.as_ref(), - requests: requests.as_ref(), - } + let SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } } = + block; + Self { header, transactions, ommers, withdrawals: withdrawals.as_ref() } } } impl Decodable for Block { fn decode(b: &mut &[u8]) -> alloy_rlp::Result { - let Helper { header, transactions, ommers, withdrawals, requests } = Helper::decode(b)?; - Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals, requests } }) + let Helper { header, transactions, ommers, withdrawals } = Helper::decode(b)?; + Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals } }) } } impl Decodable for SealedBlock { fn decode(b: &mut &[u8]) -> alloy_rlp::Result { - let Helper { header, transactions, ommers, withdrawals, requests } = Helper::decode(b)?; - Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals, requests } }) + let Helper { header, transactions, ommers, withdrawals } = Helper::decode(b)?; + Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals } }) } } @@ -215,13 +187,7 @@ impl<'a> arbitrary::Arbitrary<'a> for Block { Ok(Self { header: u.arbitrary()?, - body: 
BlockBody { - transactions, - ommers, - // for now just generate empty requests, see HACK above - requests: u.arbitrary()?, - withdrawals: u.arbitrary()?, - }, + body: BlockBody { transactions, ommers, withdrawals: u.arbitrary()? }, }) } } @@ -570,8 +536,6 @@ pub struct BlockBody { pub ommers: Vec
, /// Withdrawals in the block. pub withdrawals: Option, - /// Requests in the block. - pub requests: Option, } impl BlockBody { @@ -596,12 +560,6 @@ impl BlockBody { self.withdrawals.as_ref().map(|w| crate::proofs::calculate_withdrawals_root(w)) } - /// Calculate the requests root for the block body, if requests exist. If there are no - /// requests, this will return `None`. - pub fn calculate_requests_root(&self) -> Option { - self.requests.as_ref().map(|r| crate::proofs::calculate_requests_root(&r.0)) - } - /// Recover signer addresses for all transactions in the block body. pub fn recover_signers(&self) -> Option> { TransactionSigned::recover_signers(&self.transactions, self.transactions.len()) @@ -670,7 +628,6 @@ impl From for BlockBody { transactions: block.body.transactions, ommers: block.body.ommers, withdrawals: block.body.withdrawals, - requests: block.body.requests, } } } @@ -692,8 +649,7 @@ impl<'a> arbitrary::Arbitrary<'a> for BlockBody { }) .collect::>>()?; - // for now just generate empty requests, see HACK above - Ok(Self { transactions, ommers, requests: None, withdrawals: u.arbitrary()? }) + Ok(Self { transactions, ommers, withdrawals: u.arbitrary()? 
}) } } @@ -703,7 +659,7 @@ pub(super) mod serde_bincode_compat { use alloc::{borrow::Cow, vec::Vec}; use alloy_consensus::serde_bincode_compat::Header; use alloy_primitives::Address; - use reth_primitives_traits::{serde_bincode_compat::SealedHeader, Requests, Withdrawals}; + use reth_primitives_traits::{serde_bincode_compat::SealedHeader, Withdrawals}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -729,7 +685,6 @@ pub(super) mod serde_bincode_compat { transactions: Vec>, ommers: Vec>, withdrawals: Cow<'a, Option>, - requests: Cow<'a, Option>, } impl<'a> From<&'a super::BlockBody> for BlockBody<'a> { @@ -738,7 +693,6 @@ pub(super) mod serde_bincode_compat { transactions: value.transactions.iter().map(Into::into).collect(), ommers: value.ommers.iter().map(Into::into).collect(), withdrawals: Cow::Borrowed(&value.withdrawals), - requests: Cow::Borrowed(&value.requests), } } } @@ -749,7 +703,6 @@ pub(super) mod serde_bincode_compat { transactions: value.transactions.into_iter().map(Into::into).collect(), ommers: value.ommers.into_iter().map(Into::into).collect(), withdrawals: value.withdrawals.into_owned(), - requests: value.requests.into_owned(), } } } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index a9e8c08203d..7a9a6bf457f 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -45,7 +45,7 @@ pub use receipt::{ }; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, - LogData, Request, Requests, SealedHeader, StorageEntry, Withdrawal, Withdrawals, + LogData, SealedHeader, StorageEntry, Withdrawal, Withdrawals, }; pub use static_file::StaticFileSegment; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 4efbb588e10..1697246702a 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,11 +1,11 @@ //! 
Helper function for calculating Merkle proofs and hashes. use crate::{ - Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Request, TransactionSigned, Withdrawal, + Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned, Withdrawal, }; use alloc::vec::Vec; use alloy_consensus::EMPTY_OMMER_ROOT_HASH; -use alloy_eips::{eip2718::Encodable2718, eip7685::Encodable7685}; +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{keccak256, B256}; use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; @@ -29,13 +29,6 @@ pub fn calculate_receipt_root(receipts: &[ReceiptWithBloom]) -> B256 { ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) } -/// Calculate [EIP-7685](https://eips.ethereum.org/EIPS/eip-7685) requests root. -/// -/// NOTE: The requests are encoded as `id + request` -pub fn calculate_requests_root(requests: &[Request]) -> B256 { - ordered_trie_root_with_encoder(requests, |item, buf| item.encode_7685(buf)) -} - /// Calculates the receipt root for a header. 
pub fn calculate_receipt_root_ref(receipts: &[ReceiptWithBloomRef<'_>]) -> B256 { ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index b7eeeadc897..1d410da1ea8 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -802,18 +802,6 @@ impl alloy_consensus::Transaction for Transaction { } } - fn to(&self) -> TxKind { - match self { - Self::Legacy(tx) => tx.to(), - Self::Eip2930(tx) => tx.to(), - Self::Eip1559(tx) => tx.to(), - Self::Eip4844(tx) => tx.to(), - Self::Eip7702(tx) => tx.to(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.to(), - } - } - fn value(&self) -> U256 { match self { Self::Legacy(tx) => tx.value(), @@ -826,7 +814,7 @@ impl alloy_consensus::Transaction for Transaction { } } - fn input(&self) -> &[u8] { + fn input(&self) -> &Bytes { match self { Self::Legacy(tx) => tx.input(), Self::Eip2930(tx) => tx.input(), @@ -885,6 +873,18 @@ impl alloy_consensus::Transaction for Transaction { Self::Deposit(tx) => tx.authorization_list(), } } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.kind(), + Self::Eip2930(tx) => tx.kind(), + Self::Eip1559(tx) => tx.kind(), + Self::Eip4844(tx) => tx.kind(), + Self::Eip7702(tx) => tx.kind(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.kind(), + } + } } /// Signed transaction without its Hash. Used type for inserting into the DB. 
@@ -1383,15 +1383,11 @@ impl alloy_consensus::Transaction for TransactionSigned { self.deref().priority_fee_or_price() } - fn to(&self) -> TxKind { - alloy_consensus::Transaction::to(self.deref()) - } - fn value(&self) -> U256 { self.deref().value() } - fn input(&self) -> &[u8] { + fn input(&self) -> &Bytes { self.deref().input() } @@ -1410,6 +1406,10 @@ impl alloy_consensus::Transaction for TransactionSigned { fn authorization_list(&self) -> Option<&[SignedAuthorization]> { self.deref().authorization_list() } + + fn kind(&self) -> TxKind { + self.deref().kind() + } } impl From for TransactionSigned { @@ -2242,8 +2242,8 @@ mod tests { let tx = TransactionSigned::decode_2718(&mut data.as_slice()).unwrap(); let sender = tx.recover_signer().unwrap(); assert_eq!(sender, address!("001e2b7dE757bA469a57bF6b23d982458a07eFcE")); - assert_eq!(tx.to(), Some(address!("D9e1459A7A482635700cBc20BBAF52D495Ab9C96")).into()); - assert_eq!(tx.input(), hex!("1b55ba3a")); + assert_eq!(tx.to(), Some(address!("D9e1459A7A482635700cBc20BBAF52D495Ab9C96"))); + assert_eq!(tx.input().as_ref(), hex!("1b55ba3a")); let encoded = tx.encoded_2718(); assert_eq!(encoded.as_ref(), data.to_vec()); } diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index b2d3bccde6b..5772af0dc79 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -21,6 +21,9 @@ reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-storage-api.workspace = true reth-trie = { workspace = true, optional = true } + +# alloy +alloy-eips.workspace = true alloy-primitives.workspace = true # revm diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index a63681aa132..be3ef0a3782 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,9 +1,10 @@ //! Helper for handling execution of multiple blocks. 
use alloc::vec::Vec; +use alloy_eips::eip7685::Requests; use alloy_primitives::{map::HashSet, Address, BlockNumber}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; -use reth_primitives::{Receipt, Receipts, Request, Requests}; +use reth_primitives::{Receipt, Receipts}; use reth_prune_types::{PruneMode, PruneModes, PruneSegmentError, MINIMUM_PRUNING_DISTANCE}; use revm::db::states::bundle_state::BundleRetention; @@ -170,8 +171,8 @@ impl BlockBatchRecord { } /// Save EIP-7685 requests to the executor. - pub fn save_requests(&mut self, requests: Vec) { - self.requests.push(requests.into()); + pub fn save_requests(&mut self, requests: Requests) { + self.requests.push(requests); } } diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index eedada8ffa7..e89f7b8d398 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -11,8 +11,8 @@ use alloy_rpc_types::{ }; use alloy_rpc_types_engine::{ ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, - ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, - PayloadStatus, TransitionConfiguration, + ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + TransitionConfiguration, }; use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_serde::JsonStorageKey; @@ -54,9 +54,10 @@ pub trait EngineApi { #[method(name = "newPayloadV4")] async fn new_payload_v4( &self, - payload: ExecutionPayloadV4, + payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, + execution_requests: Vec, ) -> RpcResult; /// See also diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index fb7f98ed203..d0b19a7b4d6 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -2,11 +2,11 @@ use crate::{ capabilities::EngineCapabilities, 
metrics::EngineApiMetrics, EngineApiError, EngineApiResult, }; use alloy_eips::eip4844::BlobAndProofV1; -use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; +use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, - ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, - ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, + ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, + ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, }; use async_trait::async_trait; use jsonrpsee_core::RpcResult; @@ -140,7 +140,7 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V1, payload_or_attrs)?; - Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) + Ok(self.inner.beacon_consensus.new_payload(payload, None, None).await?) } /// See also @@ -156,7 +156,7 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V2, payload_or_attrs)?; - Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) + Ok(self.inner.beacon_consensus.new_payload(payload, None, None).await?) } /// See also @@ -178,15 +178,18 @@ where let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; - Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) + Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields), None).await?) } /// See also pub async fn new_payload_v4( &self, - payload: ExecutionPayloadV4, + payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, + // TODO(onbjerg): Figure out why we even get these here, since we'll check the requests + // from execution against the requests root in the header. 
+ execution_requests: Vec, ) -> EngineApiResult { let payload = ExecutionPayload::from(payload); let payload_or_attrs = @@ -200,7 +203,13 @@ where let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; - Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) + // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary + // workaround. + Ok(self + .inner + .beacon_consensus + .new_payload(payload, Some(cancun_fields), Some(execution_requests)) + .await?) } /// Sends a message to the beacon consensus engine to update the fork choice _without_ @@ -370,7 +379,7 @@ where .map_err(|_| EngineApiError::UnknownPayload)? .try_into() .map_err(|_| { - warn!("could not transform built payload into ExecutionPayloadV4"); + warn!("could not transform built payload into ExecutionPayloadV3"); EngineApiError::UnknownPayload }) } @@ -665,15 +674,22 @@ where /// See also async fn new_payload_v4( &self, - payload: ExecutionPayloadV4, + payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, + execution_requests: Vec, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV4"); let start = Instant::now(); - let gas_used = payload.payload_inner.payload_inner.payload_inner.gas_used; - let res = - Self::new_payload_v4(self, payload, versioned_hashes, parent_beacon_block_root).await; + let gas_used = payload.payload_inner.payload_inner.gas_used; + let res = Self::new_payload_v4( + self, + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ) + .await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v4.record(elapsed); self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index c08c30c1de0..007a62db045 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ 
b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -75,7 +75,7 @@ fn payload_validation() { b }); - assert_matches!(try_into_sealed_block(block_with_valid_extra_data, None), Ok(_)); + assert_matches!(try_into_sealed_block(block_with_valid_extra_data, None, None), Ok(_)); // Invalid extra data let block_with_invalid_extra_data = Bytes::from_static(&[0; 33]); @@ -84,7 +84,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(invalid_extra_data_block,None), + try_into_sealed_block(invalid_extra_data_block, None, None), Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data ); @@ -94,8 +94,7 @@ fn payload_validation() { b }); assert_matches!( - - try_into_sealed_block(block_with_zero_base_fee,None), + try_into_sealed_block(block_with_zero_base_fee, None, None), Err(PayloadError::BaseFee(val)) if val.is_zero() ); @@ -114,8 +113,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_ommers.clone(),None), - + try_into_sealed_block(block_with_ommers.clone(), None, None), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_ommers.block_hash() ); @@ -126,9 +124,8 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_difficulty.clone(),None), + try_into_sealed_block(block_with_difficulty.clone(), None, None), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_difficulty.block_hash() - ); // None zero nonce @@ -137,7 +134,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_nonce.clone(),None), + try_into_sealed_block(block_with_nonce.clone(), None, None), Err(PayloadError::BlockHash { consensus, .. 
}) if consensus == block_with_nonce.block_hash() ); diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 832cf17055a..5e12e41e550 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -5,7 +5,8 @@ use std::time::{Duration, Instant}; use crate::{EthApiTypes, FromEthApiError, FromEvmError}; -use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_eips::eip7685::EMPTY_REQUESTS_HASH; use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rpc_types::BlockNumberOrTag; use futures::Future; @@ -19,7 +20,7 @@ use reth_primitives::{ BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, ResultAndState, SpecId, }, - Block, BlockBody, Header, Receipt, Requests, SealedBlockWithSenders, SealedHeader, + Block, BlockBody, Header, Receipt, SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, }; use reth_provider::{ @@ -417,14 +418,9 @@ pub trait LoadPendingBlock: EthApiTypes { let blob_gas_used = (cfg.handler_cfg.spec_id >= SpecId::CANCUN).then_some(sum_blob_gas_used); - // note(onbjerg): the rpc spec has not been changed to include requests, so for now we just - // set these to empty - let (requests, requests_root) = - if chain_spec.is_prague_active_at_timestamp(block_env.timestamp.to::()) { - (Some(Requests::default()), Some(EMPTY_ROOT_HASH)) - } else { - (None, None) - }; + let requests_hash = chain_spec + .is_prague_active_at_timestamp(block_env.timestamp.to::()) + .then_some(EMPTY_REQUESTS_HASH); let header = Header { parent_hash, @@ -447,7 +443,7 @@ pub trait LoadPendingBlock: EthApiTypes { excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), extra_data: Default::default(), parent_beacon_block_root, - requests_root, + requests_hash, }; // Convert Vec> to Vec @@ -456,7 +452,7 @@ pub trait LoadPendingBlock: 
EthApiTypes { // seal the block let block = Block { header, - body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals, requests }, + body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals }, }; Ok((SealedBlockWithSenders { block: block.seal_slow(), senders }, receipts)) } diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 2668291e2c8..c3232f2383b 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -102,8 +102,6 @@ impl ReceiptBuilder { gas_used: gas_used as u128, contract_address, effective_gas_price: transaction.effective_gas_price(meta.base_fee), - // TODO pre-byzantium receipts have a post-transaction state root - state_root: None, // EIP-4844 fields blob_gas_price, blob_gas_used: blob_gas_used.map(u128::from), diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index fc8ea9e1c48..a650a69c1c1 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -124,7 +124,7 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) blob_gas_used, excess_blob_gas, parent_beacon_block_root, - requests_root, + requests_hash, } = header; Header { @@ -150,7 +150,7 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) excess_blob_gas, parent_beacon_block_root, total_difficulty: None, - requests_root, + requests_hash, } } diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 3bbee2b00ea..b63b7453aeb 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -2,16 +2,18 @@ //! 
Ethereum's Engine use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, EMPTY_OMMER_ROOT_HASH}; -use alloy_eips::eip2718::{Decodable2718, Encodable2718}; +use alloy_eips::{ + eip2718::{Decodable2718, Encodable2718}, + eip7685::Requests, +}; use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, - ExecutionPayloadV4, PayloadError, + ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, }; use reth_primitives::{ proofs::{self}, - Block, BlockBody, Header, Request, SealedBlock, TransactionSigned, Withdrawals, + Block, BlockBody, Header, SealedBlock, TransactionSigned, Withdrawals, }; /// Converts [`ExecutionPayloadV1`] to [`Block`] @@ -67,7 +69,7 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result Result Result { - let ExecutionPayloadV4 { - payload_inner, - deposit_requests, - withdrawal_requests, - consolidation_requests, - } = payload; - let mut block = try_payload_v3_to_block(payload_inner)?; - - // attach requests with asc type identifiers - let requests = deposit_requests - .into_iter() - .map(Request::DepositRequest) - .chain(withdrawal_requests.into_iter().map(Request::WithdrawalRequest)) - .chain(consolidation_requests.into_iter().map(Request::ConsolidationRequest)) - .collect::>(); - - let requests_root = proofs::calculate_requests_root(&requests); - block.header.requests_root = Some(requests_root); - block.body.requests = Some(requests.into()); - - Ok(block) -} - /// Converts [`SealedBlock`] to [`ExecutionPayload`] pub fn block_to_payload(value: SealedBlock) -> ExecutionPayload { - if value.header.requests_root.is_some() { + if value.header.requests_hash.is_some() { // block with requests root: V3 - ExecutionPayload::V4(block_to_payload_v4(value)) + ExecutionPayload::V3(block_to_payload_v3(value)) } else if 
value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 ExecutionPayload::V3(block_to_payload_v3(value)) @@ -217,37 +194,6 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { } } -/// Converts [`SealedBlock`] to [`ExecutionPayloadV4`] -pub fn block_to_payload_v4(mut value: SealedBlock) -> ExecutionPayloadV4 { - let (deposit_requests, withdrawal_requests, consolidation_requests) = - value.body.requests.take().unwrap_or_default().into_iter().fold( - (Vec::new(), Vec::new(), Vec::new()), - |(mut deposits, mut withdrawals, mut consolidation_requests), request| { - match request { - Request::DepositRequest(r) => { - deposits.push(r); - } - Request::WithdrawalRequest(r) => { - withdrawals.push(r); - } - Request::ConsolidationRequest(r) => { - consolidation_requests.push(r); - } - _ => {} - }; - - (deposits, withdrawals, consolidation_requests) - }, - ); - - ExecutionPayloadV4 { - deposit_requests, - withdrawal_requests, - consolidation_requests, - payload_inner: block_to_payload_v3(value), - } -} - /// Converts [`SealedBlock`] to [`ExecutionPayloadFieldV2`] pub fn convert_block_to_payload_field_v2(value: SealedBlock) -> ExecutionPayloadFieldV2 { // if there are withdrawals, return V2 @@ -312,15 +258,16 @@ pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayload pub fn try_into_block( value: ExecutionPayload, parent_beacon_block_root: Option, + execution_requests: Option, ) -> Result { let mut base_payload = match value { ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload)?, ExecutionPayload::V2(payload) => try_payload_v2_to_block(payload)?, ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload)?, - ExecutionPayload::V4(payload) => try_payload_v4_to_block(payload)?, }; base_payload.header.parent_beacon_block_root = parent_beacon_block_root; + base_payload.header.requests_hash = execution_requests.map(|reqs| reqs.requests_hash()); Ok(base_payload) } @@ -338,9 +285,10 
@@ pub fn try_into_block( pub fn try_into_sealed_block( payload: ExecutionPayload, parent_beacon_block_root: Option, + execution_requests: Option, ) -> Result { let block_hash = payload.block_hash(); - let base_payload = try_into_block(payload, parent_beacon_block_root)?; + let base_payload = try_into_block(payload, parent_beacon_block_root, execution_requests)?; // validate block hash and return validate_block_hash(block_hash, base_payload) @@ -404,13 +352,12 @@ pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPaylo #[cfg(test)] mod tests { use super::{ - block_to_payload_v3, try_into_block, try_payload_v3_to_block, try_payload_v4_to_block, - validate_block_hash, + block_to_payload_v3, try_into_block, try_payload_v3_to_block, validate_block_hash, }; use alloy_primitives::{b256, hex, Bytes, U256}; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, - ExecutionPayloadV3, ExecutionPayloadV4, + ExecutionPayloadV3, }; #[test] @@ -628,60 +575,10 @@ mod tests { let cancun_fields = CancunPayloadFields { parent_beacon_block_root, versioned_hashes }; // convert into block - let block = try_into_block(payload, Some(cancun_fields.parent_beacon_block_root)).unwrap(); + let block = + try_into_block(payload, Some(cancun_fields.parent_beacon_block_root), None).unwrap(); // Ensure the actual hash is calculated if we set the fields to what they should be validate_block_hash(block_hash_with_blob_fee_fields, block).unwrap(); } - - #[test] - fn parse_payload_v4() { - let s = r#"{ - "baseFeePerGas": "0x2ada43", - "blobGasUsed": "0x0", - "blockHash": "0x86eeb2a4b656499f313b601e1dcaedfeacccab27131b6d4ea99bc69a57607f7d", - "blockNumber": "0x2c", - "depositRequests": [ - { - "amount": "0xe8d4a51000", - "index": "0x0", - "pubkey": "0xaab5f2b3aad5c2075faf0c1d8937c7de51a53b765a21b4173eb2975878cea05d9ed3428b77f16a981716aa32af74c464", - "signature": 
"0xa889cd238be2dae44f2a3c24c04d686c548f6f82eb44d4604e1bc455b6960efb72b117e878068a8f2cfb91ad84b7ebce05b9254207aa51a1e8a3383d75b5a5bd2439f707636ea5b17b2b594b989c93b000b33e5dff6e4bed9d53a6d2d6889b0c", - "withdrawalCredentials": "0x00ab9364f8bf7561862ea0fc3b69c424c94ace406c4dc36ddfbf8a9d72051c80" - }, - { - "amount": "0xe8d4a51000", - "index": "0x1", - "pubkey": "0xb0b1b3b51cf688ead965a954c5cc206ba4e76f3f8efac60656ae708a9aad63487a2ca1fb30ccaf2ebe1028a2b2886b1b", - "signature": "0xb9759766e9bb191b1c457ae1da6cdf71a23fb9d8bc9f845eaa49ee4af280b3b9720ac4d81e64b1b50a65db7b8b4e76f1176a12e19d293d75574600e99fbdfecc1ab48edaeeffb3226cd47691d24473821dad0c6ff3973f03e4aa89f418933a56", - "withdrawalCredentials": "0x002d2b75f4a27f78e585a4735a40ab2437eceb12ec39938a94dc785a54d62513" - } - ], - "excessBlobGas": "0x0", - "extraData": "0x726574682f76302e322e302d626574612e372f6c696e7578", - "feeRecipient": "0x8943545177806ed17b9f23f0a21ee5948ecaa776", - "gasLimit": "0x1855e85", - "gasUsed": "0x25f98", - "logsBloom": "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000", - "parentHash": "0xd753194ef19b5c566b7eca6e9ebcca03895b548e1e93a20a23d922ba0bc210d4", - "prevRandao": "0x8c52256fd491776dc32f531ad4c0dc1444684741bca15f54c9cd40c60142df90", - "receiptsRoot": "0x510e7fb94279897e5dcd6c1795f6137d8fe02e49e871bfea7999fd21a89f66aa", - "stateRoot": "0x59ae0706a2b47162666fc7af3e30ff7aa34154954b68cc6aed58c3af3d58c9c2", - "timestamp": "0x6643c5a9", - "transactions": [ - 
"0x02f9021e8330182480843b9aca0085174876e80083030d40944242424242424242424242424242424242424242893635c9adc5dea00000b901a422895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000012049f42823819771c6bbbd9cb6649850083fd3b6e5d0beb1069342c32d65a3b0990000000000000000000000000000000000000000000000000000000000000030aab5f2b3aad5c2075faf0c1d8937c7de51a53b765a21b4173eb2975878cea05d9ed3428b77f16a981716aa32af74c46400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000ab9364f8bf7561862ea0fc3b69c424c94ace406c4dc36ddfbf8a9d72051c800000000000000000000000000000000000000000000000000000000000000060a889cd238be2dae44f2a3c24c04d686c548f6f82eb44d4604e1bc455b6960efb72b117e878068a8f2cfb91ad84b7ebce05b9254207aa51a1e8a3383d75b5a5bd2439f707636ea5b17b2b594b989c93b000b33e5dff6e4bed9d53a6d2d6889b0cc080a0db786f0d89923949e533680524f003cebd66f32fbd30429a6b6bfbd3258dcf60a05241c54e05574765f7ddc1a742ae06b044edfe02bffb202bf172be97397eeca9", - 
"0x02f9021e8330182401843b9aca0085174876e80083030d40944242424242424242424242424242424242424242893635c9adc5dea00000b901a422895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000120d694d6a0b0103651aafd87db6c88297175d7317c6e6da53ccf706c3c991c91fd0000000000000000000000000000000000000000000000000000000000000030b0b1b3b51cf688ead965a954c5cc206ba4e76f3f8efac60656ae708a9aad63487a2ca1fb30ccaf2ebe1028a2b2886b1b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020002d2b75f4a27f78e585a4735a40ab2437eceb12ec39938a94dc785a54d625130000000000000000000000000000000000000000000000000000000000000060b9759766e9bb191b1c457ae1da6cdf71a23fb9d8bc9f845eaa49ee4af280b3b9720ac4d81e64b1b50a65db7b8b4e76f1176a12e19d293d75574600e99fbdfecc1ab48edaeeffb3226cd47691d24473821dad0c6ff3973f03e4aa89f418933a56c080a099dc5b94a51e9b91a6425b1fed9792863006496ab71a4178524819d7db0c5e88a0119748e62700234079d91ae80f4676f9e0f71b260e9b46ef9b4aff331d3c2318" - ], - "withdrawalRequests": [], - "withdrawals": [], - "consolidationRequests": [] - }"#; - - let payload = serde_json::from_str::(s).unwrap(); - let mut block = try_payload_v4_to_block(payload).unwrap(); - block.header.parent_beacon_block_root = - Some(b256!("d9851db05fa63593f75e2b12c4bba9f47740613ca57da3b523a381b8c27f3297")); - let hash = block.seal_slow().hash(); - assert_eq!(hash, b256!("86eeb2a4b656499f313b601e1dcaedfeacccab27131b6d4ea99bc69a57607f7d")) - } } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 164f402e44c..66465ef474a 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -686,7 +686,7 @@ where let state = state_provider.witness(Default::default(), hashed_state).map_err(Into::into)?; - Ok(ExecutionWitness { state: state.into_iter().collect(), codes, keys: Some(keys) }) + Ok(ExecutionWitness { state: 
state.into_iter().collect(), codes, keys }) }) .await } diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 45722978f9f..024bdd172fb 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -261,7 +261,6 @@ where from: receipt.from(), to: receipt.to(), contract_address: receipt.contract_address(), - state_root: receipt.state_root(), authorization_list: receipt .authorization_list() .map(<[SignedAuthorization]>::to_vec), diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index d40cdc5cdbb..47aaac0bbfd 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -101,7 +101,7 @@ where entry.insert( tx.nonce().to_string(), TxpoolInspectSummary { - to: tx.to().into(), + to: tx.to(), value: tx.value(), gas: tx.gas_limit() as u128, gas_price: tx.transaction.max_fee_per_gas(), diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 2d441dee292..93d8a122992 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -121,7 +121,6 @@ where let mut tx_block_cursor = tx.cursor_write::()?; let mut ommers_cursor = tx.cursor_write::()?; let mut withdrawals_cursor = tx.cursor_write::()?; - let mut requests_cursor = tx.cursor_write::()?; // Get id for the next tx_num of zero if there are no transactions. 
let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); @@ -234,13 +233,6 @@ where .append(block_number, StoredBlockWithdrawals { withdrawals })?; } } - - // Write requests if any - if let Some(requests) = block.body.requests { - if !requests.0.is_empty() { - requests_cursor.append(block_number, requests)?; - } - } } BlockResponse::Empty(_) => {} }; @@ -276,7 +268,6 @@ where let mut body_cursor = tx.cursor_write::()?; let mut ommers_cursor = tx.cursor_write::()?; let mut withdrawals_cursor = tx.cursor_write::()?; - let mut requests_cursor = tx.cursor_write::()?; // Cursors to unwind transitions let mut tx_block_cursor = tx.cursor_write::()?; @@ -296,11 +287,6 @@ where withdrawals_cursor.delete_current()?; } - // Delete the requests entry if any - if requests_cursor.seek_exact(number)?.is_some() { - requests_cursor.delete_current()?; - } - // Delete all transaction to block values. if !block_meta.is_empty() && tx_block_cursor.seek_exact(block_meta.last_tx_num())?.is_some() diff --git a/crates/storage/codecs/src/alloy/authorization_list.rs b/crates/storage/codecs/src/alloy/authorization_list.rs index 3efe1359062..2b013c0d3c5 100644 --- a/crates/storage/codecs/src/alloy/authorization_list.rs +++ b/crates/storage/codecs/src/alloy/authorization_list.rs @@ -13,7 +13,7 @@ use reth_codecs_derive::add_arbitrary_tests; #[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub(crate) struct Authorization { - chain_id: U256, + chain_id: u64, address: Address, nonce: u64, } @@ -78,7 +78,7 @@ mod tests { #[test] fn test_roundtrip_compact_authorization_list_item() { let authorization = AlloyAuthorization { - chain_id: U256::from(1), + chain_id: 1u64, address: address!("dac17f958d2ee523a2206206994597c13d831ec7"), nonce: 1, } diff --git a/crates/storage/codecs/src/alloy/header.rs b/crates/storage/codecs/src/alloy/header.rs index 623eded9ee9..90e67b1e312 100644 --- 
a/crates/storage/codecs/src/alloy/header.rs +++ b/crates/storage/codecs/src/alloy/header.rs @@ -45,7 +45,7 @@ pub(crate) struct Header { #[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] pub(crate) struct HeaderExt { - requests_root: Option, + requests_hash: Option, } impl HeaderExt { @@ -53,7 +53,7 @@ impl HeaderExt { /// /// Required since [`Header`] uses `Option` as a field. const fn into_option(self) -> Option { - if self.requests_root.is_some() { + if self.requests_hash.is_some() { Some(self) } else { None @@ -66,7 +66,7 @@ impl Compact for AlloyHeader { where B: bytes::BufMut + AsMut<[u8]>, { - let extra_fields = HeaderExt { requests_root: self.requests_root }; + let extra_fields = HeaderExt { requests_hash: self.requests_hash }; let header = Header { parent_hash: self.parent_hash, @@ -116,7 +116,7 @@ impl Compact for AlloyHeader { blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: header.parent_beacon_block_root, - requests_root: header.extra_fields.and_then(|h| h.requests_root), + requests_hash: header.extra_fields.and_then(|h| h.requests_hash), extra_data: header.extra_data, }; (alloy_header, buf) @@ -176,7 +176,7 @@ mod tests { #[test] fn test_extra_fields() { let mut header = HOLESKY_BLOCK; - header.extra_fields = Some(HeaderExt { requests_root: Some(B256::random()) }); + header.extra_fields = Some(HeaderExt { requests_hash: Some(B256::random()) }); let mut encoded_header = vec![]; let len = header.to_compact(&mut encoded_header); diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 942258d0647..ed77876c5ce 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -3,7 +3,6 @@ mod authorization_list; mod genesis_account; mod header; mod log; -mod request; mod signature; mod transaction; mod trie; @@ -14,7 +13,6 @@ mod withdrawal; mod tests 
{ use crate::{ alloy::{ - authorization_list::Authorization, genesis_account::{GenesisAccount, GenesisAccountRef, StorageEntries, StorageEntry}, header::{Header, HeaderExt}, transaction::{ @@ -38,7 +36,6 @@ mod tests { validate_bitflag_backwards_compat!(StorageEntries, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageEntry, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Authorization, UnusedBits::NotZero); validate_bitflag_backwards_compat!(GenesisAccountRef<'_>, UnusedBits::NotZero); validate_bitflag_backwards_compat!(GenesisAccount, UnusedBits::NotZero); validate_bitflag_backwards_compat!(TxEip1559, UnusedBits::NotZero); diff --git a/crates/storage/codecs/src/alloy/request.rs b/crates/storage/codecs/src/alloy/request.rs deleted file mode 100644 index 2447160beb6..00000000000 --- a/crates/storage/codecs/src/alloy/request.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! Native Compact codec impl for EIP-7685 requests. - -use crate::Compact; -use alloy_consensus::Request; -use alloy_eips::eip7685::{Decodable7685, Encodable7685}; -use alloy_primitives::Bytes; -use bytes::BufMut; - -impl Compact for Request { - fn to_compact(&self, buf: &mut B) -> usize - where - B: BufMut + AsMut<[u8]>, - { - let encoded: Bytes = self.encoded_7685().into(); - encoded.to_compact(buf) - } - - fn from_compact(buf: &[u8], _: usize) -> (Self, &[u8]) { - let (raw, buf) = Bytes::from_compact(buf, buf.len()); - - (Self::decode_7685(&mut raw.as_ref()).expect("invalid eip-7685 request in db"), buf) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use proptest::proptest; - use proptest_arbitrary_interop::arb; - - proptest! 
{ - #[test] - fn roundtrip(request in arb::()) { - let mut buf = Vec::::new(); - request.to_compact(&mut buf); - let (decoded, _) = Request::from_compact(&buf, buf.len()); - assert_eq!(request, decoded); - } - } -} diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 0f35a558a35..b077027f297 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -8,7 +8,7 @@ use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; use reth_primitives::{ - Account, Bytecode, Header, Receipt, Requests, StorageEntry, TransactionSignedNoHash, TxType, + Account, Bytecode, Header, Receipt, StorageEntry, TransactionSignedNoHash, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; @@ -230,7 +230,6 @@ impl_compression_for_compact!( StageCheckpoint, PruneCheckpoint, ClientVersion, - Requests, // Non-DB GenesisAccount ); @@ -366,6 +365,5 @@ mod tests { validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Requests, UnusedBits::Zero); } } diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 83a063903e0..27f58f8a1f3 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -30,9 +30,7 @@ use reth_db_api::{ }, table::{Decode, DupSort, Encode, Table}, }; -use reth_primitives::{ - Account, Bytecode, Header, Receipt, Requests, StorageEntry, TransactionSignedNoHash, -}; +use reth_primitives::{Account, Bytecode, Header, Receipt, StorageEntry, TransactionSignedNoHash}; use reth_primitives_traits::IntegerList; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use 
reth_stages_types::StageCheckpoint; @@ -404,9 +402,6 @@ tables! { /// Stores the history of client versions that have accessed the database with write privileges by unix timestamp in seconds. table VersionHistory; - /// Stores EIP-7685 EL -> CL requests, indexed by block number. - table BlockRequests; - /// Stores generic chain state info, like the last finalized block. table ChainState; } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 9e6f32b33a3..1866610e3f2 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -4,7 +4,7 @@ use crate::{ CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, DatabaseProviderRO, EvmEnvProvider, HeaderProvider, ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - RequestsProvider, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, + StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber}; @@ -1200,24 +1200,6 @@ impl WithdrawalsProvider for BlockchainProvider2 { } } -impl RequestsProvider for BlockchainProvider2 { - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - if !self.chain_spec().is_prague_active_at_timestamp(timestamp) { - return Ok(None) - } - - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.requests_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body.requests.clone()), - ) - } -} - impl StageCheckpointReader for BlockchainProvider2 { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { 
self.database.provider()?.get_stage_checkpoint(id) @@ -1747,8 +1729,8 @@ mod tests { use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, ReceiptProvider, - ReceiptProviderIdExt, RequestsProvider, StateProviderFactory, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, + ReceiptProviderIdExt, StateProviderFactory, TransactionVariant, TransactionsProvider, + WithdrawalsProvider, }; use reth_testing_utils::generators::{ self, random_block, random_block_range, random_changeset_range, random_eoa_accounts, @@ -2849,37 +2831,6 @@ mod tests { Ok(()) } - #[test] - fn test_requests_provider() -> eyre::Result<()> { - let mut rng = generators::rng(); - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().prague_activated().build()); - let (provider, database_blocks, in_memory_blocks, _) = - provider_with_chain_spec_and_random_blocks( - &mut rng, - chain_spec.clone(), - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { requests_count: Some(1..2), ..Default::default() }, - )?; - - let database_block = database_blocks.first().unwrap().clone(); - let in_memory_block = in_memory_blocks.last().unwrap().clone(); - - let prague_timestamp = - chain_spec.hardforks.fork(EthereumHardfork::Prague).as_timestamp().unwrap(); - - assert_eq!( - provider.requests_by_block(database_block.number.into(), prague_timestamp,)?, - database_block.body.requests.clone() - ); - assert_eq!( - provider.requests_by_block(in_memory_block.number.into(), prague_timestamp,)?, - in_memory_block.body.requests.clone() - ); - - Ok(()) - } - #[test] fn test_state_provider_factory() -> eyre::Result<()> { let mut rng = generators::rng(); diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index ba43298c36b..7e9ee7202c0 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ 
b/crates/storage/provider/src/providers/database/metrics.rs @@ -61,7 +61,6 @@ pub(crate) enum Action { InsertTransactions, InsertTransactionHashNumbers, InsertBlockWithdrawals, - InsertBlockRequests, InsertBlockBodyIndices, InsertTransactionBlocks, GetNextTxNum, @@ -106,8 +105,6 @@ struct DatabaseProviderMetrics { insert_tx_hash_numbers: Histogram, /// Duration of insert block withdrawals insert_block_withdrawals: Histogram, - /// Duration of insert block requests - insert_block_requests: Histogram, /// Duration of insert block body indices insert_block_body_indices: Histogram, /// Duration of insert transaction blocks @@ -139,7 +136,6 @@ impl DatabaseProviderMetrics { Action::InsertTransactions => self.insert_transactions.record(duration), Action::InsertTransactionHashNumbers => self.insert_tx_hash_numbers.record(duration), Action::InsertBlockWithdrawals => self.insert_block_withdrawals.record(duration), - Action::InsertBlockRequests => self.insert_block_requests.record(duration), Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration), Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration), Action::GetNextTxNum => self.get_next_tx_num.record(duration), diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 520b514527b..54186dca6f6 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -4,8 +4,8 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, ProviderError, - PruneCheckpointReader, RequestsProvider, StageCheckpointReader, StateProviderBox, - StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + PruneCheckpointReader, StageCheckpointReader, StateProviderBox, 
StaticFileProviderFactory, + TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; @@ -519,16 +519,6 @@ impl WithdrawalsProvider for ProviderFactory { } } -impl RequestsProvider for ProviderFactory { - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.provider()?.requests_by_block(id, timestamp) - } -} - impl StageCheckpointReader for ProviderFactory { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.provider()?.get_stage_checkpoint(id) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8140700faba..308fa364a3d 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -11,10 +11,10 @@ use crate::{ DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, - PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, RevertsInit, - StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, - StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, + StateChangeWriter, StateProviderBox, StateReader, StateWriter, StaticFileProviderFactory, + StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, + TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{keccak256, Address, BlockHash, 
BlockNumber, TxHash, TxNumber, B256, U256}; @@ -40,7 +40,7 @@ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; use reth_primitives::{ - Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, Requests, + Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, Withdrawal, Withdrawals, @@ -500,7 +500,6 @@ impl DatabaseProvider { Vec
, Vec
, Option, - Option, ) -> ProviderResult>, { let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; @@ -509,7 +508,6 @@ impl DatabaseProvider { let ommers = self.ommers(block_number.into())?.unwrap_or_default(); let withdrawals = self.withdrawals_by_block(block_number.into(), header.as_ref().timestamp)?; - let requests = self.requests_by_block(block_number.into(), header.as_ref().timestamp)?; // Get the block body // @@ -540,7 +538,7 @@ impl DatabaseProvider { }) .collect(); - construct_block(header, body, senders, ommers, withdrawals, requests) + construct_block(header, body, senders, ommers, withdrawals) } /// Returns a range of blocks from the database. @@ -551,7 +549,6 @@ impl DatabaseProvider { /// - Range of transaction numbers /// – Ommers /// – Withdrawals - /// – Requests /// – Senders fn block_range( &self, @@ -563,13 +560,7 @@ impl DatabaseProvider { Spec: EthereumHardforks, H: AsRef
, HF: FnOnce(RangeInclusive) -> ProviderResult>, - F: FnMut( - H, - Range, - Vec
, - Option, - Option, - ) -> ProviderResult, + F: FnMut(H, Range, Vec
, Option) -> ProviderResult, { if range.is_empty() { return Ok(Vec::new()) @@ -581,7 +572,6 @@ impl DatabaseProvider { let headers = headers_range(range)?; let mut ommers_cursor = self.tx.cursor_read::()?; let mut withdrawals_cursor = self.tx.cursor_read::()?; - let mut requests_cursor = self.tx.cursor_read::()?; let mut block_body_cursor = self.tx.cursor_read::()?; for header in headers { @@ -608,13 +598,6 @@ impl DatabaseProvider { } else { None }; - let requests = - if self.chain_spec.is_prague_active_at_timestamp(header_ref.timestamp) { - (requests_cursor.seek_exact(header_ref.number)?.unwrap_or_default().1) - .into() - } else { - None - }; let ommers = if self.chain_spec.final_paris_total_difficulty(header_ref.number).is_some() { Vec::new() @@ -625,7 +608,7 @@ impl DatabaseProvider { .unwrap_or_default() }; - if let Ok(b) = assemble_block(header, tx_range, ommers, withdrawals, requests) { + if let Ok(b) = assemble_block(header, tx_range, ommers, withdrawals) { blocks.push(b); } } @@ -643,7 +626,6 @@ impl DatabaseProvider { /// - Transactions /// – Ommers /// – Withdrawals - /// – Requests /// – Senders fn block_with_senders_range( &self, @@ -660,14 +642,13 @@ impl DatabaseProvider { Vec, Vec
, Option, - Option, Vec
, ) -> ProviderResult, { let mut tx_cursor = self.tx.cursor_read::()?; let mut senders_cursor = self.tx.cursor_read::()?; - self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals, requests| { + self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals| { let (body, senders) = if tx_range.is_empty() { (Vec::new(), Vec::new()) } else { @@ -699,7 +680,7 @@ impl DatabaseProvider { (body, senders) }; - assemble_block(header, body, ommers, withdrawals, requests, senders) + assemble_block(header, body, ommers, withdrawals, senders) }) } @@ -781,7 +762,6 @@ impl DatabaseProvider { // - Bodies (transactions) // - Uncles/ommers // - Withdrawals - // - Requests // - Signers let block_headers = self.get::(range.clone())?; @@ -792,7 +772,6 @@ impl DatabaseProvider { let block_header_hashes = self.get::(range.clone())?; let block_ommers = self.get::(range.clone())?; let block_withdrawals = self.get::(range.clone())?; - let block_requests = self.get::(range.clone())?; let block_tx = self.get_block_transaction_range(range)?; let mut blocks = Vec::with_capacity(block_headers.len()); @@ -805,10 +784,8 @@ impl DatabaseProvider { // Ommers can be empty for some blocks let mut block_ommers_iter = block_ommers.into_iter(); let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_requests_iter = block_requests.into_iter(); let mut block_ommers = block_ommers_iter.next(); let mut block_withdrawals = block_withdrawals_iter.next(); - let mut block_requests = block_requests_iter.next(); for ((main_block_number, header), (_, header_hash), (_, tx)) in izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) @@ -841,24 +818,10 @@ impl DatabaseProvider { withdrawals = None } - // requests can be missing - let prague_is_active = self.chain_spec.is_prague_active_at_timestamp(header.timestamp); - let mut requests = Some(Requests::default()); - if prague_is_active { - if let Some((block_number, _)) = block_requests.as_ref() { 
- if *block_number == main_block_number { - requests = Some(block_requests.take().unwrap().1); - block_requests = block_requests_iter.next(); - } - } - } else { - requests = None; - } - blocks.push(SealedBlockWithSenders { block: SealedBlock { header, - body: BlockBody { transactions, ommers, withdrawals, requests }, + body: BlockBody { transactions, ommers, withdrawals }, }, senders, }) @@ -1222,7 +1185,6 @@ impl DatabaseProvider { /// * [`CanonicalHeaders`](tables::CanonicalHeaders) /// * [`BlockOmmers`](tables::BlockOmmers) /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`BlockRequests`](tables::BlockRequests) /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) /// /// This will also remove transaction data according to @@ -1242,7 +1204,6 @@ impl DatabaseProvider { self.remove::(range.clone())?; self.remove::(range.clone())?; self.remove::(range.clone())?; - self.remove::(range.clone())?; self.remove_block_transaction_range(range.clone())?; self.remove::(range)?; @@ -1256,7 +1217,6 @@ impl DatabaseProvider { /// * [`CanonicalHeaders`](tables::CanonicalHeaders) /// * [`BlockOmmers`](tables::BlockOmmers) /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`BlockRequests`](tables::BlockRequests) /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) /// /// This will also remove transaction data according to @@ -1274,7 +1234,6 @@ impl DatabaseProvider { // - Bodies (transactions) // - Uncles/ommers // - Withdrawals - // - Requests // - Signers let block_headers = self.take::(range.clone())?; @@ -1288,7 +1247,6 @@ impl DatabaseProvider { let block_header_hashes = self.take::(range.clone())?; let block_ommers = self.take::(range.clone())?; let block_withdrawals = self.take::(range.clone())?; - let block_requests = self.take::(range.clone())?; let block_tx = self.take_block_transaction_range(range.clone())?; let mut blocks = Vec::with_capacity(block_headers.len()); @@ -1304,10 +1262,8 @@ impl 
DatabaseProvider { // Ommers can be empty for some blocks let mut block_ommers_iter = block_ommers.into_iter(); let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_requests_iter = block_requests.into_iter(); let mut block_ommers = block_ommers_iter.next(); let mut block_withdrawals = block_withdrawals_iter.next(); - let mut block_requests = block_requests_iter.next(); for ((main_block_number, header), (_, header_hash), (_, tx)) in izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) @@ -1340,24 +1296,10 @@ impl DatabaseProvider { withdrawals = None } - // requests can be missing - let prague_is_active = self.chain_spec.is_prague_active_at_timestamp(header.timestamp); - let mut requests = Some(Requests::default()); - if prague_is_active { - if let Some((block_number, _)) = block_requests.as_ref() { - if *block_number == main_block_number { - requests = Some(block_requests.take().unwrap().1); - block_requests = block_requests_iter.next(); - } - } - } else { - requests = None; - } - blocks.push(SealedBlockWithSenders { block: SealedBlock { header, - body: BlockBody { transactions, ommers, withdrawals, requests }, + body: BlockBody { transactions, ommers, withdrawals }, }, senders, }) @@ -1726,7 +1668,6 @@ impl BlockReader for DatabasePr if let Some(header) = self.header_by_number(number)? { let withdrawals = self.withdrawals_by_block(number.into(), header.timestamp)?; let ommers = self.ommers(number.into())?.unwrap_or_default(); - let requests = self.requests_by_block(number.into(), header.timestamp)?; // If the body indices are not found, this means that the transactions either do not // exist in the database yet, or they do exit but are not indexed. 
// If they exist but are not indexed, we don't have enough @@ -1738,7 +1679,7 @@ impl BlockReader for DatabasePr return Ok(Some(Block { header, - body: BlockBody { transactions, ommers, withdrawals, requests }, + body: BlockBody { transactions, ommers, withdrawals }, })) } } @@ -1798,8 +1739,8 @@ impl BlockReader for DatabasePr id, transaction_kind, |block_number| self.header_by_number(block_number), - |header, transactions, senders, ommers, withdrawals, requests| { - Block { header, body: BlockBody { transactions, ommers, withdrawals, requests } } + |header, transactions, senders, ommers, withdrawals| { + Block { header, body: BlockBody { transactions, ommers, withdrawals } } // Note: we're using unchecked here because we know the block contains valid txs // wrt to its height and can ignore the s value check so pre // EIP-2 txs are allowed @@ -1819,17 +1760,14 @@ impl BlockReader for DatabasePr id, transaction_kind, |block_number| self.sealed_header(block_number), - |header, transactions, senders, ommers, withdrawals, requests| { - SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - } - // Note: we're using unchecked here because we know the block contains valid txs - // wrt to its height and can ignore the s value check so pre - // EIP-2 txs are allowed - .try_with_senders_unchecked(senders) - .map(Some) - .map_err(|_| ProviderError::SenderRecoveryError) + |header, transactions, senders, ommers, withdrawals| { + SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } } + // Note: we're using unchecked here because we know the block contains valid txs + // wrt to its height and can ignore the s value check so pre + // EIP-2 txs are allowed + .try_with_senders_unchecked(senders) + .map(Some) + .map_err(|_| ProviderError::SenderRecoveryError) }, ) } @@ -1839,7 +1777,7 @@ impl BlockReader for DatabasePr self.block_range( range, |range| self.headers_range(range), - |header, tx_range, ommers, withdrawals, 
requests| { + |header, tx_range, ommers, withdrawals| { let transactions = if tx_range.is_empty() { Vec::new() } else { @@ -1848,10 +1786,7 @@ impl BlockReader for DatabasePr .map(Into::into) .collect() }; - Ok(Block { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - }) + Ok(Block { header, body: BlockBody { transactions, ommers, withdrawals } }) }, ) } @@ -1863,8 +1798,8 @@ impl BlockReader for DatabasePr self.block_with_senders_range( range, |range| self.headers_range(range), - |header, transactions, ommers, withdrawals, requests, senders| { - Block { header, body: BlockBody { transactions, ommers, withdrawals, requests } } + |header, transactions, ommers, withdrawals, senders| { + Block { header, body: BlockBody { transactions, ommers, withdrawals } } .try_with_senders_unchecked(senders) .map_err(|_| ProviderError::SenderRecoveryError) }, @@ -1878,12 +1813,9 @@ impl BlockReader for DatabasePr self.block_with_senders_range( range, |range| self.sealed_headers_range(range), - |header, transactions, ommers, withdrawals, requests, senders| { + |header, transactions, ommers, withdrawals, senders| { SealedBlockWithSenders::new( - SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - }, + SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } }, senders, ) .ok_or(ProviderError::SenderRecoveryError) @@ -2200,24 +2132,6 @@ impl WithdrawalsProvider } } -impl RequestsProvider - for DatabaseProvider -{ - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - if self.chain_spec.is_prague_active_at_timestamp(timestamp) { - if let Some(number) = self.convert_hash_or_number(id)? 
{ - let requests = self.tx.get::(number)?; - return Ok(requests) - } - } - Ok(None) - } -} - impl EvmEnvProvider for DatabaseProvider { @@ -3413,7 +3327,6 @@ impl(block_number, requests)?; - durations_recorder.record_relative(metrics::Action::InsertBlockRequests); - } - let block_indices = StoredBlockBodyIndices { first_tx_num, tx_count }; self.tx.put::(block_number, block_indices.clone())?; durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 561e1d97436..b98bbf5be47 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -3,7 +3,7 @@ use crate::{ BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, - ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, RequestsProvider, + ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, }; @@ -504,16 +504,6 @@ impl WithdrawalsProvider for BlockchainProvider { } } -impl RequestsProvider for BlockchainProvider { - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.database.requests_by_block(id, timestamp) - } -} - impl StageCheckpointReader for BlockchainProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.database.provider()?.get_stage_checkpoint(id) diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 6f5cf07c95c..20d6a1b184b 100644 --- 
a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -4,8 +4,8 @@ use super::{ }; use crate::{ to_range, BlockHashReader, BlockNumReader, BlockReader, BlockSource, HeaderProvider, - ReceiptProvider, RequestsProvider, StageCheckpointReader, StatsReader, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, + ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, + TransactionsProviderExt, WithdrawalsProvider, }; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; @@ -1642,17 +1642,6 @@ impl WithdrawalsProvider for StaticFileProvider { } } -impl RequestsProvider for StaticFileProvider { - fn requests_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - // Required data not present in static_files - Err(ProviderError::UnsupportedProvider) - } -} - impl StatsReader for StaticFileProvider { fn count_entries(&self) -> ProviderResult { match T::NAME { diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 2a70664b1b3..07486f5557c 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -40,7 +40,6 @@ pub fn assert_genesis_block( ); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index c7c94b939ac..08530acf0a7 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -2,9 +2,9 @@ use crate::{ 
traits::{BlockSource, ReceiptProvider}, AccountReader, BlockExecutionReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProvider, - EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, RequestsProvider, StateProvider, - StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, + EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, StateProvider, StateProviderBox, + StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, TransactionsProvider, + WithdrawalsProvider, }; use alloy_consensus::constants::EMPTY_ROOT_HASH; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; @@ -809,16 +809,6 @@ impl WithdrawalsProvider for MockEthProvider { } } -impl RequestsProvider for MockEthProvider { - fn requests_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - Ok(None) - } -} - impl ChangeSetReader for MockEthProvider { fn account_block_changeset( &self, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 0a205389c9b..f6f7e185de6 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -37,7 +37,7 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PruneCheckpointReader, - ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, StateProvider, StateProviderBox, + ReceiptProviderIdExt, StageCheckpointReader, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; @@ -539,16 +539,6 @@ impl WithdrawalsProvider for NoopProvider { } } -impl RequestsProvider for 
NoopProvider { - fn requests_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - Ok(None) - } -} - impl PruneCheckpointReader for NoopProvider { fn get_prune_checkpoint( &self, diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index a3b0cc7438f..01238be745e 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -1,6 +1,6 @@ use crate::{ - BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, RequestsProvider, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, Sealable, B256}; @@ -52,7 +52,6 @@ pub trait BlockReader: + HeaderProvider + TransactionsProvider + ReceiptProvider - + RequestsProvider + WithdrawalsProvider + Send + Sync diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 3f93bbbde2f..4e589242a91 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -31,9 +31,6 @@ pub use prune_checkpoint::*; mod receipts; pub use receipts::*; -mod requests; -pub use requests::*; - mod stage_checkpoint; pub use stage_checkpoint::*; diff --git a/crates/storage/storage-api/src/requests.rs b/crates/storage/storage-api/src/requests.rs deleted file mode 100644 index 02818c429b6..00000000000 --- a/crates/storage/storage-api/src/requests.rs +++ /dev/null @@ -1,14 +0,0 @@ -use alloy_eips::BlockHashOrNumber; -use reth_primitives::Requests; -use reth_storage_errors::provider::ProviderResult; - -/// Client trait for fetching EIP-7685 [Requests] for blocks. -#[auto_impl::auto_impl(&, Arc)] -pub trait RequestsProvider: Send + Sync { - /// Get withdrawals by block id. 
- fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult>; -} diff --git a/docs/crates/db.md b/docs/crates/db.md index 3ccfb72e344..79eeae5ee4f 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -61,7 +61,6 @@ There are many tables within the node, all used to store different types of data - StageCheckpointProgresses - PruneCheckpoints - VersionHistory -- BlockRequests - ChainState
@@ -283,7 +282,6 @@ fn unwind(&mut self, provider: &DatabaseProviderRW, input: UnwindInput) { let mut body_cursor = tx.cursor_write::()?; let mut ommers_cursor = tx.cursor_write::()?; let mut withdrawals_cursor = tx.cursor_write::()?; - let mut requests_cursor = tx.cursor_write::()?; // Cursors to unwind transitions let mut tx_block_cursor = tx.cursor_write::()?; @@ -322,7 +320,7 @@ fn unwind(&mut self, provider: &DatabaseProviderRW, input: UnwindInput) { } ``` -This function first grabs a mutable cursor for the `BlockBodyIndices`, `BlockOmmers`, `BlockWithdrawals`, `BlockRequests`, `TransactionBlocks` tables. +This function first grabs a mutable cursor for the `BlockBodyIndices`, `BlockOmmers`, `BlockWithdrawals`, `TransactionBlocks` tables. Then it gets a walker of the block body cursor, and then walk backwards through the cursor to delete the block body entries from the last block number to the block number specified in the `UnwindInput` struct. diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 3f3df15363a..30e5e5bb20c 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -87,7 +87,7 @@ pub struct Header { /// Parent beacon block root. pub parent_beacon_block_root: Option, /// Requests root. - pub requests_root: Option, + pub requests_hash: Option, } impl From
for SealedHeader { @@ -113,7 +113,7 @@ impl From
for SealedHeader { blob_gas_used: value.blob_gas_used.map(|v| v.to::()), excess_blob_gas: value.excess_blob_gas.map(|v| v.to::()), parent_beacon_block_root: value.parent_beacon_block_root, - requests_root: value.requests_root, + requests_hash: value.requests_hash, }; Self::new(header, value.hash) } diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index 49a59ecf6ae..98bfeabdfb1 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -14,10 +14,12 @@ workspace = true [dependencies] reth-primitives = { workspace = true, features = ["secp256k1"] } -alloy-eips.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } + +[dev-dependencies] +alloy-eips.workspace = true diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index d07af00ce4c..571727cb2fd 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,17 +1,14 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. 
use alloy_consensus::{Transaction as _, TxLegacy}; -use alloy_eips::{ - eip6110::DepositRequest, eip7002::WithdrawalRequest, eip7251::ConsolidationRequest, -}; use alloy_primitives::{Address, BlockNumber, Bytes, Parity, Sealable, TxKind, B256, U256}; pub use rand::Rng; use rand::{ distributions::uniform::SampleRange, rngs::StdRng, seq::SliceRandom, thread_rng, SeedableRng, }; use reth_primitives::{ - proofs, sign_message, Account, BlockBody, Header, Log, Receipt, Request, Requests, SealedBlock, - SealedHeader, StorageEntry, Transaction, TransactionSigned, Withdrawal, Withdrawals, + proofs, sign_message, Account, BlockBody, Header, Log, Receipt, SealedBlock, SealedHeader, + StorageEntry, Transaction, TransactionSigned, Withdrawal, Withdrawals, }; use secp256k1::{Keypair, Secp256k1}; use std::{ @@ -201,11 +198,6 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) let transactions_root = proofs::calculate_transaction_root(&transactions); let ommers_hash = proofs::calculate_ommers_root(&ommers); - let requests = block_params - .requests_count - .map(|count| (0..count).map(|_| random_request(rng)).collect::>()); - let requests_root = requests.as_ref().map(|requests| proofs::calculate_requests_root(requests)); - let withdrawals = block_params.withdrawals_count.map(|count| { (0..count) .map(|i| Withdrawal { @@ -226,7 +218,8 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) transactions_root, ommers_hash, base_fee_per_gas: Some(rng.gen()), - requests_root, + // TODO(onbjerg): Proper EIP-7685 request support + requests_hash: None, withdrawals_root, ..Default::default() } @@ -236,12 +229,7 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) SealedBlock { header: SealedHeader::new(header, seal), - body: BlockBody { - transactions, - ommers, - withdrawals: withdrawals.map(Withdrawals::new), - requests: requests.map(Requests), - }, + body: BlockBody { transactions, ommers, withdrawals: 
withdrawals.map(Withdrawals::new) }, } } @@ -470,31 +458,6 @@ pub fn random_log(rng: &mut R, address: Option
, topics_count: O ) } -/// Generate random request -pub fn random_request(rng: &mut R) -> Request { - let request_type = rng.gen_range(0..3); - match request_type { - 0 => Request::DepositRequest(DepositRequest { - pubkey: rng.gen(), - withdrawal_credentials: rng.gen(), - amount: rng.gen(), - signature: rng.gen(), - index: rng.gen(), - }), - 1 => Request::WithdrawalRequest(WithdrawalRequest { - source_address: rng.gen(), - validator_pubkey: rng.gen(), - amount: rng.gen(), - }), - 2 => Request::ConsolidationRequest(ConsolidationRequest { - source_address: rng.gen(), - source_pubkey: rng.gen(), - target_pubkey: rng.gen(), - }), - _ => panic!("invalid request type"), - } -} - #[cfg(test)] mod tests { use super::*; From 3793b907eadc620687f0b65327cdbd4c4e6b1abf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 19 Oct 2024 15:05:53 +0200 Subject: [PATCH 042/970] chore: better start finish persisted block logs (#11893) Co-authored-by: Oliver --- crates/engine/tree/src/tree/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index c63c8fbe291..02802dff66c 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1145,6 +1145,7 @@ where if blocks_to_persist.is_empty() { debug!(target: "engine::tree", "Returned empty set of blocks to persist"); } else { + debug!(target: "engine::tree", blocks = ?blocks_to_persist.iter().map(|block| block.block.num_hash()).collect::>(), "Persisting blocks"); let (tx, rx) = oneshot::channel(); let _ = self.persistence.save_blocks(blocks_to_persist, tx); self.persistence_state.start(rx); @@ -1173,7 +1174,7 @@ where return Ok(()) }; - trace!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, "Finished persisting, calling finish"); + debug!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, "Finished persisting, calling finish"); 
self.persistence_state .finish(last_persisted_block_hash, last_persisted_block_number); self.on_new_persisted_block()?; From ddc5ac3fa750580eda224d37c086c4a7443728d4 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 19 Oct 2024 15:12:28 +0200 Subject: [PATCH 043/970] refactor(rpc): small refactor in `trace_filter` (#11894) --- crates/rpc/rpc/src/trace.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 8ac532ff341..b9b15b5366d 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -314,13 +314,15 @@ where // add reward traces for all blocks for block in &blocks { if let Some(base_block_reward) = self.calculate_base_block_reward(&block.header)? { - let mut traces = self.extract_reward_traces( - &block.header, - &block.body.ommers, - base_block_reward, + all_traces.extend( + self.extract_reward_traces( + &block.header, + &block.body.ommers, + base_block_reward, + ) + .into_iter() + .filter(|trace| matcher.matches(&trace.trace)), ); - traces.retain(|trace| matcher.matches(&trace.trace)); - all_traces.extend(traces); } else { // no block reward, means we're past the Paris hardfork and don't expect any rewards // because the blocks in ascending order From 1a1aa2f8c3acb4ae32b0d1dadc9221fdf75fabdd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 19 Oct 2024 15:18:20 +0200 Subject: [PATCH 044/970] feat: add map_pool fn (#11890) --- crates/payload/basic/src/lib.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 7416283c1f5..70d22250ddb 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -799,6 +799,21 @@ impl BuildArguments(self, f: F) -> BuildArguments + where + F: FnOnce(Pool) -> P, + { + BuildArguments { + client: self.client, + pool: f(self.pool), + cached_reads: 
self.cached_reads, + config: self.config, + cancel: self.cancel, + best_payload: self.best_payload, + } + } } /// A trait for building payloads that encapsulate Ethereum transactions. From 1efa764b34f163e70890bbd54eac64ad2b6adcce Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 19 Oct 2024 09:29:29 -0400 Subject: [PATCH 045/970] chore(engine): rename enveloped associated types to envelope (#11812) Co-authored-by: Matthias Seitz --- crates/e2e-test-utils/src/engine_api.rs | 6 ++-- crates/e2e-test-utils/src/node.rs | 6 ++-- crates/engine/primitives/src/lib.rs | 30 ++++++++++++++++---- crates/ethereum/engine-primitives/src/lib.rs | 6 ++-- crates/optimism/node/src/engine.rs | 6 ++-- crates/rpc/rpc-api/src/engine.rs | 15 ++++++++-- crates/rpc/rpc-engine-api/src/engine_api.rs | 12 ++++---- examples/custom-engine-types/src/main.rs | 6 ++-- 8 files changed, 57 insertions(+), 30 deletions(-) diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 1b0ff9b54e7..f4aa8fdf5ff 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -29,7 +29,7 @@ impl EngineApiTestContext { pub async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> eyre::Result { + ) -> eyre::Result { Ok(EngineApiClient::::get_payload_v3(&self.engine_api_client, payload_id).await?) 
} @@ -50,10 +50,10 @@ impl EngineApiTestContext { versioned_hashes: Vec, ) -> eyre::Result where - E::ExecutionPayloadV3: From + PayloadEnvelopeExt, + E::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, { // setup payload for submission - let envelope_v3: ::ExecutionPayloadV3 = payload.into(); + let envelope_v3: ::ExecutionPayloadEnvelopeV3 = payload.into(); // submit payload to engine api let submission = EngineApiClient::::new_payload_v3( diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index c22913ba236..776a437a58e 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -88,7 +88,7 @@ where attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes + Copy, ) -> eyre::Result> where - Engine::ExecutionPayloadV3: From + PayloadEnvelopeExt, + Engine::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt + FullEthApiTypes, { let mut chain = Vec::with_capacity(length as usize); @@ -113,7 +113,7 @@ where attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes, ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where - ::ExecutionPayloadV3: + ::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, { // trigger new payload building draining the pool @@ -135,7 +135,7 @@ where attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes, ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where - ::ExecutionPayloadV3: + ::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, { let (payload, eth_attr) = self.new_payload(attributes_generator).await?; diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 2cf1366eb01..fab96b0d17e 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -24,9 +24,9 @@ use serde::{de::DeserializeOwned, ser::Serialize}; pub trait EngineTypes: PayloadTypes< 
BuiltPayload: TryInto - + TryInto - + TryInto - + TryInto, + + TryInto + + TryInto + + TryInto, > + DeserializeOwned + Serialize + 'static @@ -34,11 +34,29 @@ pub trait EngineTypes: /// Execution Payload V1 type. type ExecutionPayloadV1: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; /// Execution Payload V2 type. - type ExecutionPayloadV2: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; + type ExecutionPayloadEnvelopeV2: DeserializeOwned + + Serialize + + Clone + + Unpin + + Send + + Sync + + 'static; /// Execution Payload V3 type. - type ExecutionPayloadV3: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; + type ExecutionPayloadEnvelopeV3: DeserializeOwned + + Serialize + + Clone + + Unpin + + Send + + Sync + + 'static; /// Execution Payload V4 type. - type ExecutionPayloadV4: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; + type ExecutionPayloadEnvelopeV4: DeserializeOwned + + Serialize + + Clone + + Unpin + + Send + + Sync + + 'static; } /// Type that validates the payloads sent to the engine. 
diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 034a8c6bffb..20a55883680 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -44,9 +44,9 @@ where + TryInto, { type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = ExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; } /// A default payload type for [`EthEngineTypes`] diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index a83f4c696a1..cec609671a3 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -38,9 +38,9 @@ where + TryInto, { type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = OpExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = OpExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = OpExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = OpExecutionPayloadEnvelopeV4; } /// A default payload type for [`OptimismEngineTypes`] diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index e89f7b8d398..458768c38b1 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -118,7 +118,10 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. 
#[method(name = "getPayloadV2")] - async fn get_payload_v2(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v2( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// Post Cancun payload handler which also returns a blobs bundle. /// @@ -128,7 +131,10 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV3")] - async fn get_payload_v3(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v3( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// Post Prague payload handler. /// @@ -138,7 +144,10 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV4")] - async fn get_payload_v4(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// See also #[method(name = "getPayloadBodiesByHashV1")] diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index d0b19a7b4d6..9450b2c9217 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -289,7 +289,7 @@ where pub async fn get_payload_v2( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { // First we fetch the payload attributes to check the timestamp let attributes = self.get_payload_attributes(payload_id).await?; @@ -324,7 +324,7 @@ where pub async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { // First we fetch the payload attributes to check the timestamp let attributes = self.get_payload_attributes(payload_id).await?; @@ -359,7 +359,7 @@ where pub async fn get_payload_v4( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> 
EngineApiResult { // First we fetch the payload attributes to check the timestamp let attributes = self.get_payload_attributes(payload_id).await?; @@ -778,7 +778,7 @@ where async fn get_payload_v2( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV2"); let start = Instant::now(); let res = Self::get_payload_v2(self, payload_id).await; @@ -798,7 +798,7 @@ where async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV3"); let start = Instant::now(); let res = Self::get_payload_v3(self, payload_id).await; @@ -818,7 +818,7 @@ where async fn get_payload_v4( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV4"); let start = Instant::now(); let res = Self::get_payload_v4(self, payload_id).await; diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 135c4f3f247..46a7d7d9af9 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -157,9 +157,9 @@ impl PayloadTypes for CustomEngineTypes { impl EngineTypes for CustomEngineTypes { type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = ExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; } /// Custom engine validator From c803012085d65eb3111e942eea1c7af42ca75114 Mon Sep 17 00:00:00 2001 From: Oliver Date: Sat, 19 Oct 2024 17:17:14 +0200 Subject: [PATCH 046/970] chore: use `Requests` instead of `Vec` (#11895) --- Cargo.lock | 3 ++- crates/consensus/beacon/Cargo.toml | 1 + 
crates/consensus/beacon/src/engine/handle.rs | 4 ++-- crates/consensus/beacon/src/engine/message.rs | 4 ++-- crates/consensus/beacon/src/engine/mod.rs | 5 +++-- crates/engine/tree/src/tree/mod.rs | 5 +++-- crates/engine/util/Cargo.toml | 5 ++--- crates/engine/util/src/reorg.rs | 7 ++++--- crates/payload/validator/Cargo.toml | 1 - crates/payload/validator/src/lib.rs | 12 ++++-------- crates/rpc/rpc-api/src/engine.rs | 4 ++-- crates/rpc/rpc-engine-api/src/engine_api.rs | 8 ++++---- 12 files changed, 29 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82f42b07b16..b3a39cb5874 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6417,6 +6417,7 @@ dependencies = [ name = "reth-beacon-consensus" version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-genesis", "alloy-primitives", "alloy-rpc-types-engine", @@ -7217,6 +7218,7 @@ name = "reth-engine-util" version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "eyre", @@ -8400,7 +8402,6 @@ name = "reth-payload-validator" version = "1.1.0" dependencies = [ "alloy-eips", - "alloy-primitives", "alloy-rpc-types", "reth-chainspec", "reth-primitives", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index f1366812608..192ae2b93df 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -31,6 +31,7 @@ reth-node-types.workspace = true reth-chainspec = { workspace = true, optional = true } # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 1f444590164..bb5c4dee174 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -4,7 +4,7 @@ use crate::{ engine::message::OnForkChoiceUpdated, BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconForkChoiceUpdateError, 
BeaconOnNewPayloadError, }; -use alloy_primitives::Bytes; +use alloy_eips::eip7685::Requests; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; @@ -48,7 +48,7 @@ where &self, payload: ExecutionPayload, cancun_fields: Option, - execution_requests: Option>, + execution_requests: Option, ) -> Result { let (tx, rx) = oneshot::channel(); // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 45d0c57f45e..56328f03db0 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -1,5 +1,5 @@ use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; -use alloy_primitives::Bytes; +use alloy_eips::eip7685::Requests; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, @@ -150,7 +150,7 @@ pub enum BeaconEngineMessage { // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary // workaround. /// The pectra EIP-7685 execution requests. - execution_requests: Option>, + execution_requests: Option, /// The sender for returning payload status result. 
tx: oneshot::Sender>, }, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index edd3d6db323..cff648b2843 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,4 +1,5 @@ -use alloy_primitives::{BlockNumber, Bytes, B256}; +use alloy_eips::eip7685::Requests; +use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, @@ -1087,7 +1088,7 @@ where cancun_fields: Option, // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary // workaround. - execution_requests: Option>, + execution_requests: Option, ) -> Result, BeaconOnNewPayloadError> { self.metrics.new_payload_messages.increment(1); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 02802dff66c..a2abd3f531d 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -7,7 +7,7 @@ use crate::{ use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, - BlockNumber, Bytes, B256, U256, + BlockNumber, B256, U256, }; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, @@ -70,6 +70,7 @@ use crate::{ engine::{EngineApiKind, EngineApiRequest}, tree::metrics::EngineApiMetrics, }; +use alloy_eips::eip7685::Requests; pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; @@ -721,7 +722,7 @@ where &mut self, payload: ExecutionPayload, cancun_fields: Option, - execution_requests: Option>, + execution_requests: Option, ) -> Result, InsertBlockFatalError> { trace!(target: "engine::tree", "invoked new payload"); self.metrics.engine.new_payload_messages.increment(1); diff --git a/crates/engine/util/Cargo.toml 
b/crates/engine/util/Cargo.toml index c11948b9405..35a7e74bb21 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -27,6 +27,7 @@ revm-primitives.workspace = true reth-trie.workspace = true # alloy +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true @@ -49,6 +50,4 @@ itertools.workspace = true tracing.workspace = true [features] -optimism = [ - "reth-beacon-consensus/optimism", -] +optimism = ["reth-beacon-consensus/optimism"] diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 90b5c90aa95..e07b18b9250 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -1,7 +1,8 @@ //! Stream wrapper that simulates reorgs. use alloy_consensus::Transaction; -use alloy_primitives::{Bytes, U256}; +use alloy_eips::eip7685::Requests; +use alloy_primitives::U256; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, }; @@ -250,8 +251,8 @@ fn create_reorg_head( mut depth: usize, next_payload: ExecutionPayload, next_cancun_fields: Option, - next_execution_requests: Option>, -) -> RethResult<(ExecutionPayload, Option, Option>)> + next_execution_requests: Option, +) -> RethResult<(ExecutionPayload, Option, Option)> where Provider: BlockReader + StateProviderFactory, Evm: ConfigureEvm
, diff --git a/crates/payload/validator/Cargo.toml b/crates/payload/validator/Cargo.toml index a96799d7bce..619b99f28de 100644 --- a/crates/payload/validator/Cargo.toml +++ b/crates/payload/validator/Cargo.toml @@ -19,5 +19,4 @@ reth-rpc-types-compat.workspace = true # alloy alloy-eips.workspace = true -alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index fdcd9244a43..3ec7b206a5b 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -9,7 +9,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use alloy_eips::eip7685::Requests; -use alloy_primitives::Bytes; use alloy_rpc_types::engine::{ExecutionPayload, MaybeCancunPayloadFields, PayloadError}; use reth_chainspec::EthereumHardforks; use reth_primitives::SealedBlock; @@ -114,17 +113,14 @@ impl ExecutionPayloadValidator { &self, payload: ExecutionPayload, cancun_fields: MaybeCancunPayloadFields, - execution_requests: Option>, + execution_requests: Option, ) -> Result { let expected_hash = payload.block_hash(); // First parse the block - let sealed_block = try_into_block( - payload, - cancun_fields.parent_beacon_block_root(), - execution_requests.map(Requests::new), - )? - .seal_slow(); + let sealed_block = + try_into_block(payload, cancun_fields.parent_beacon_block_root(), execution_requests)? + .seal_slow(); // Ensure the hash included in the payload matches the block hash if expected_hash != sealed_block.hash() { diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 458768c38b1..ddf6d846119 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -3,7 +3,7 @@ //! This contains the `engine_` namespace and the subset of the `eth_` namespace that is exposed to //! the consensus client. 
-use alloy_eips::{eip4844::BlobAndProofV1, BlockId, BlockNumberOrTag}; +use alloy_eips::{eip4844::BlobAndProofV1, eip7685::Requests, BlockId, BlockNumberOrTag}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, BlockHash, Bytes, B256, U256, U64}; use alloy_rpc_types::{ @@ -57,7 +57,7 @@ pub trait EngineApi { payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, - execution_requests: Vec, + execution_requests: Requests, ) -> RpcResult; /// See also diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 9450b2c9217..ca055a77ea1 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1,8 +1,8 @@ use crate::{ capabilities::EngineCapabilities, metrics::EngineApiMetrics, EngineApiError, EngineApiResult, }; -use alloy_eips::eip4844::BlobAndProofV1; -use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256, U64}; +use alloy_eips::{eip4844::BlobAndProofV1, eip7685::Requests}; +use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, @@ -189,7 +189,7 @@ where parent_beacon_block_root: B256, // TODO(onbjerg): Figure out why we even get these here, since we'll check the requests // from execution against the requests root in the header. 
- execution_requests: Vec, + execution_requests: Requests, ) -> EngineApiResult { let payload = ExecutionPayload::from(payload); let payload_or_attrs = @@ -677,7 +677,7 @@ where payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, - execution_requests: Vec, + execution_requests: Requests, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV4"); let start = Instant::now(); From a78de201b349c10afef2403ea4e6e57fe075f7bf Mon Sep 17 00:00:00 2001 From: Gerson <71728860+Gerson2102@users.noreply.github.com> Date: Sat, 19 Oct 2024 10:01:26 -0600 Subject: [PATCH 047/970] Refactor of state_change functionality (#11878) Co-authored-by: Matthias Seitz --- Cargo.lock | 6 +++--- crates/engine/invalid-block-hooks/src/witness.rs | 5 +++-- crates/engine/util/src/reorg.rs | 6 ++++-- crates/ethereum/evm/src/execute.rs | 2 +- crates/ethereum/evm/src/strategy.rs | 2 +- crates/evm/Cargo.toml | 2 ++ crates/evm/src/lib.rs | 1 + crates/{revm => evm}/src/state_change.rs | 2 ++ crates/optimism/evm/src/execute.rs | 6 ++---- crates/optimism/evm/src/strategy.rs | 2 +- crates/payload/basic/Cargo.toml | 2 +- crates/payload/basic/src/lib.rs | 2 +- crates/revm/Cargo.toml | 2 -- crates/revm/src/lib.rs | 3 --- crates/rpc/rpc-eth-api/src/helpers/pending_block.rs | 11 ++++++----- 15 files changed, 28 insertions(+), 26 deletions(-) rename crates/{revm => evm}/src/state_change.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index b3a39cb5874..57dc9176b81 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6400,12 +6400,12 @@ dependencies = [ "futures-util", "metrics", "reth-chainspec", + "reth-evm", "reth-metrics", "reth-payload-builder", "reth-payload-primitives", "reth-primitives", "reth-provider", - "reth-revm", "reth-tasks", "reth-transaction-pool", "revm", @@ -7427,6 +7427,8 @@ dependencies = [ "parking_lot 0.12.3", "reth-chainspec", "reth-consensus", + "reth-consensus-common", + "reth-ethereum-forks", "reth-execution-errors", "reth-execution-types", 
"reth-metrics", @@ -8585,8 +8587,6 @@ version = "1.1.0" dependencies = [ "alloy-eips", "alloy-primitives", - "reth-chainspec", - "reth-consensus-common", "reth-ethereum-forks", "reth-execution-errors", "reth-primitives", diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index ab73a81904d..416c4adb40f 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -6,14 +6,15 @@ use eyre::OptionExt; use pretty_assertions::Comparison; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_engine_primitives::InvalidBlockHook; -use reth_evm::{system_calls::SystemCaller, ConfigureEvm}; +use reth_evm::{ + state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, +}; use reth_primitives::{Header, Receipt, SealedBlockWithSenders, SealedHeader}; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, db::states::bundle_state::BundleRetention, primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg}, - state_change::post_block_balance_increments, DatabaseCommit, StateBuilder, }; use reth_rpc_api::DebugApiClient; diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index e07b18b9250..85216e32fad 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -12,14 +12,16 @@ use reth_beacon_consensus::{BeaconEngineMessage, BeaconOnNewPayloadError, OnFork use reth_engine_primitives::EngineTypes; use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult}; use reth_ethereum_forks::EthereumHardforks; -use reth_evm::{system_calls::SystemCaller, ConfigureEvm}; +use reth_evm::{ + state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller, + ConfigureEvm, +}; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{proofs, Block, 
BlockBody, Header, Receipt, Receipts}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, db::{states::bundle_state::BundleRetention, State}, - state_change::post_block_withdrawals_balance_increments, DatabaseCommit, }; use reth_rpc_types_compat::engine::payload::block_to_payload; diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 428458fcd04..b4a90d40990 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -16,6 +16,7 @@ use reth_evm::{ BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, + state_change::post_block_balance_increments, system_calls::{NoopHook, OnStateHook, SystemCaller}, ConfigureEvm, }; @@ -25,7 +26,6 @@ use reth_prune_types::PruneModes; use reth_revm::{ batch::BlockBatchRecord, db::{states::bundle_state::BundleRetention, State}, - state_change::post_block_balance_increments, Evm, }; use revm_primitives::{ diff --git a/crates/ethereum/evm/src/strategy.rs b/crates/ethereum/evm/src/strategy.rs index 714f673c858..55fbfffc8e9 100644 --- a/crates/ethereum/evm/src/strategy.rs +++ b/crates/ethereum/evm/src/strategy.rs @@ -16,13 +16,13 @@ use reth_evm::{ BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, BlockValidationError, ProviderError, }, + state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, ConfigureEvm, ConfigureEvmEnv, }; use reth_primitives::{BlockWithSenders, Header, Receipt}; use reth_revm::{ db::{states::bundle_state::BundleRetention, BundleState}, - state_change::post_block_balance_increments, Database, DatabaseCommit, State, }; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256}; diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 6081eae420c..6c16973b28b 100644 --- 
a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -14,6 +14,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-consensus.workspace = true +reth-consensus-common.workspace = true reth-execution-errors.workspace = true reth-execution-types.workspace = true reth-metrics = { workspace = true, optional = true } @@ -37,6 +38,7 @@ parking_lot = { workspace = true, optional = true } [dev-dependencies] parking_lot.workspace = true +reth-ethereum-forks.workspace = true [features] default = ["std"] diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 66026a07c94..b75feea83a1 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -31,6 +31,7 @@ pub mod execute; pub mod metrics; pub mod noop; pub mod provider; +pub mod state_change; pub mod system_calls; #[cfg(any(test, feature = "test-utils"))] diff --git a/crates/revm/src/state_change.rs b/crates/evm/src/state_change.rs similarity index 99% rename from crates/revm/src/state_change.rs rename to crates/evm/src/state_change.rs index afe92561bcd..2d520901527 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/evm/src/state_change.rs @@ -1,3 +1,5 @@ +//! State changes that are not related to transactions. 
+ use alloy_primitives::{map::HashMap, Address, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc; diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 3a86f5bbae4..d15cdee13d6 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -14,6 +14,7 @@ use reth_evm::{ BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, + state_change::post_block_balance_increments, system_calls::{NoopHook, OnStateHook, SystemCaller}, ConfigureEvm, }; @@ -22,10 +23,7 @@ use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OptimismHardfork; use reth_primitives::{BlockWithSenders, Header, Receipt, Receipts, TxType}; use reth_prune_types::PruneModes; -use reth_revm::{ - batch::BlockBatchRecord, db::states::bundle_state::BundleRetention, - state_change::post_block_balance_increments, Evm, State, -}; +use reth_revm::{batch::BlockBatchRecord, db::states::bundle_state::BundleRetention, Evm, State}; use revm_primitives::{ db::{Database, DatabaseCommit}, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, diff --git a/crates/optimism/evm/src/strategy.rs b/crates/optimism/evm/src/strategy.rs index 199a8a4d327..c626bb66587 100644 --- a/crates/optimism/evm/src/strategy.rs +++ b/crates/optimism/evm/src/strategy.rs @@ -12,6 +12,7 @@ use reth_evm::{ BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, BlockValidationError, ProviderError, }, + state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, ConfigureEvm, ConfigureEvmEnv, }; @@ -21,7 +22,6 @@ use reth_optimism_forks::OptimismHardfork; use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; use reth_revm::{ db::{states::bundle_state::BundleRetention, BundleState}, - state_change::post_block_balance_increments, Database, State, }; use 
revm_primitives::{ diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index f201df0c1bd..9047768892a 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -15,12 +15,12 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true -reth-revm.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true +reth-evm.workspace = true # ethereum alloy-rlp.workspace = true diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 70d22250ddb..fcc8be9a88e 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -14,6 +14,7 @@ use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_evm::state_change::post_block_withdrawals_balance_increments; use reth_payload_builder::{ database::CachedReads, KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator, }; @@ -27,7 +28,6 @@ use reth_primitives::{ use reth_provider::{ BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, }; -use reth_revm::state_change::post_block_withdrawals_balance_increments; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; use revm::{Database, State}; diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 5772af0dc79..668abb79e38 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -13,11 +13,9 @@ workspace = true [dependencies] # reth -reth-chainspec.workspace = true reth-primitives.workspace = true reth-storage-errors.workspace = true reth-execution-errors.workspace = true -reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-storage-api.workspace = true reth-trie = { workspace = true, optional = true } diff --git 
a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 5515357d0d2..02eb182ee11 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -16,9 +16,6 @@ pub mod database; pub mod batch; -/// State changes that are not related to transactions. -pub mod state_change; - /// Common test helpers #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 5e12e41e550..407ddf1874a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -11,7 +11,10 @@ use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rpc_types::BlockNumberOrTag; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; +use reth_evm::{ + state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller, + ConfigureEvm, ConfigureEvmEnv, +}; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, @@ -27,9 +30,7 @@ use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, ReceiptProvider, StateProviderFactory, }; -use reth_revm::{ - database::StateProviderDatabase, state_change::post_block_withdrawals_balance_increments, -}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; use reth_trie::HashedPostState; @@ -157,7 +158,7 @@ pub trait LoadPendingBlock: EthApiTypes { pending.origin.header().hash() == pending_block.block.parent_hash && now <= pending_block.expires_at { - return Ok(Some((pending_block.block.clone(), pending_block.receipts.clone()))) + return Ok(Some((pending_block.block.clone(), 
pending_block.receipts.clone()))); } } From f8969cbbc2afc9c77b02ac2711f2ba7fc83e2c95 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 19 Oct 2024 18:14:02 +0200 Subject: [PATCH 048/970] docs: add hardfork checklist (#11897) Co-authored-by: Oliver --- HARDFORK-CHECKLIST.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 HARDFORK-CHECKLIST.md diff --git a/HARDFORK-CHECKLIST.md b/HARDFORK-CHECKLIST.md new file mode 100644 index 00000000000..80ebfc20c98 --- /dev/null +++ b/HARDFORK-CHECKLIST.md @@ -0,0 +1,21 @@ +# Non-exhaustive checklist for integrating new changes for an upcoming hard fork/devnet + +## Introducing new EIP types or changes to primitive types + +- Make required changes to primitive data structures on [alloy](https://github.com/alloy-rs/alloy) +- All new EIP data structures/constants/helpers etc. go into the `alloy-eips` crate at first. +- New transaction types go into `alloy-consensus` +- If there are changes to existing data structures, such as `Header` or `Block`, apply them to the types in `alloy-consensus` (e.g. new `request_hashes` field in Prague) + +## Engine API + +- If there are changes to the engine API (e.g. a new `engine_newPayloadVx` and `engine_getPayloadVx` pair) add the new types to the `alloy-rpc-types-engine` crate. +- If there are new parameters to the `engine_newPayloadVx` endpoint, add them to the `ExecutionPayloadSidecar` container type. This type contains all additional parameters that are required to convert an `ExecutionPayload` to an EL block. + +## Reth changes + +### Updates to the engine API + +- Add new endpoints to the `EngineApi` trait and implement endpoints. +- Update the `ExecutionPayload` + `ExecutionPayloadSidecar` to `Block` conversion if there are any additional parameters. +- Update version specific validation checks in the `EngineValidator` trait.
\ No newline at end of file From e2ecb6224dae896653ac2f624abc46b328b7a88b Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Sat, 19 Oct 2024 23:54:07 +0700 Subject: [PATCH 049/970] chore: remove unused deps (#11898) --- Cargo.lock | 52 --------------------- bin/reth/Cargo.toml | 1 - book/sources/exex/remote/Cargo.toml | 10 ++-- crates/e2e-test-utils/Cargo.toml | 8 +--- crates/engine/tree/Cargo.toml | 3 +- crates/ethereum-forks/Cargo.toml | 1 - crates/ethereum/node/Cargo.toml | 2 - crates/exex/exex/Cargo.toml | 6 ++- crates/net/network-types/Cargo.toml | 2 +- crates/node/core/Cargo.toml | 1 - crates/node/metrics/Cargo.toml | 1 - crates/optimism/node/Cargo.toml | 13 ------ crates/optimism/primitives/Cargo.toml | 1 - crates/primitives/Cargo.toml | 1 - crates/rpc/rpc-api/Cargo.toml | 5 +- crates/rpc/rpc-builder/Cargo.toml | 2 - crates/stages/stages/Cargo.toml | 7 ++- crates/static-file/static-file/Cargo.toml | 3 -- crates/storage/codecs/Cargo.toml | 2 - crates/storage/db-models/Cargo.toml | 8 +--- crates/storage/db/Cargo.toml | 1 - crates/trie/db/Cargo.toml | 11 ----- crates/trie/parallel/Cargo.toml | 7 ++- crates/trie/sparse/Cargo.toml | 6 --- crates/trie/trie/Cargo.toml | 7 --- examples/custom-rlpx-subprotocol/Cargo.toml | 2 - examples/polygon-p2p/Cargo.toml | 1 - 27 files changed, 23 insertions(+), 141 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57dc9176b81..62ff16bb9d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2881,8 +2881,6 @@ dependencies = [ "reth-network", "reth-network-api", "reth-node-ethereum", - "reth-primitives", - "reth-provider", "tokio", "tokio-stream", "tracing", @@ -2972,7 +2970,6 @@ dependencies = [ "reth-discv4", "reth-network", "reth-primitives", - "reth-provider", "reth-tracing", "secp256k1", "serde_json", @@ -6312,7 +6309,6 @@ dependencies = [ "reth-consensus-common", "reth-db", "reth-db-api", - "reth-discv4", "reth-downloaders", "reth-engine-util", "reth-errors", @@ -6708,7 +6704,6 @@ 
dependencies = [ "alloy-eips", "alloy-genesis", "alloy-primitives", - "alloy-rlp", "alloy-trie", "arbitrary", "bytes", @@ -6716,7 +6711,6 @@ dependencies = [ "op-alloy-consensus", "proptest", "proptest-arbitrary-interop", - "rand 0.8.5", "reth-codecs-derive", "serde", "serde_json", @@ -6817,7 +6811,6 @@ dependencies = [ "paste", "pprof", "proptest", - "rand 0.8.5", "reth-db-api", "reth-fs-util", "reth-libmdbx", @@ -7041,22 +7034,16 @@ dependencies = [ "eyre", "futures-util", "jsonrpsee", - "jsonrpsee-types", "op-alloy-rpc-types-engine", "reth", "reth-chainspec", "reth-db", - "reth-engine-local", "reth-network-peers", "reth-node-builder", - "reth-node-ethereum", "reth-payload-builder", "reth-payload-primitives", - "reth-primitives", "reth-provider", - "reth-rpc", "reth-rpc-layer", - "reth-rpc-types-compat", "reth-stages-types", "reth-tokio-util", "reth-tracing", @@ -7177,7 +7164,6 @@ dependencies = [ "assert_matches", "futures", "metrics", - "rand 0.8.5", "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", @@ -7515,7 +7501,6 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-config", - "reth-db-api", "reth-db-common", "reth-evm", "reth-evm-ethereum", @@ -7984,7 +7969,6 @@ dependencies = [ "serde", "shellexpand", "strum", - "tempfile", "thiserror", "tokio", "toml", @@ -8001,7 +7985,6 @@ dependencies = [ "alloy-primitives", "eyre", "futures", - "futures-util", "reth", "reth-auto-seal-consensus", "reth-basic-payload-builder", @@ -8017,7 +8000,6 @@ dependencies = [ "reth-network", "reth-node-api", "reth-node-builder", - "reth-node-core", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -8066,7 +8048,6 @@ dependencies = [ "metrics-util", "procfs 0.16.0", "reqwest", - "reth-chainspec", "reth-db-api", "reth-metrics", "reth-provider", @@ -8215,15 +8196,11 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "alloy-rpc-types-engine", - "async-trait", "clap", "eyre", - "jsonrpsee", - "jsonrpsee-types", 
"op-alloy-consensus", "op-alloy-rpc-types-engine", "parking_lot 0.12.3", - "reqwest", "reth", "reth-auto-seal-consensus", "reth-basic-payload-builder", @@ -8231,7 +8208,6 @@ dependencies = [ "reth-chainspec", "reth-consensus", "reth-db", - "reth-discv5", "reth-e2e-test-utils", "reth-engine-local", "reth-evm", @@ -8248,18 +8224,12 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-revm", - "reth-rpc", - "reth-rpc-eth-api", - "reth-rpc-eth-types", - "reth-rpc-types-compat", "reth-tracing", "reth-transaction-pool", "revm", "serde", "serde_json", - "thiserror", "tokio", - "tracing", ] [[package]] @@ -8303,7 +8273,6 @@ dependencies = [ "alloy-consensus", "alloy-primitives", "reth-primitives", - "reth-primitives-traits", ] [[package]] @@ -8688,7 +8657,6 @@ dependencies = [ "reth-network-peers", "reth-primitives", "reth-rpc-eth-api", - "serde_json", ] [[package]] @@ -8736,7 +8704,6 @@ dependencies = [ "reth-metrics", "reth-network-api", "reth-network-peers", - "reth-node-api", "reth-node-core", "reth-payload-builder", "reth-primitives", @@ -8750,7 +8717,6 @@ dependencies = [ "reth-rpc-server-types", "reth-rpc-types-compat", "reth-tasks", - "reth-tokio-util", "reth-tracing", "reth-transaction-pool", "serde", @@ -8973,7 +8939,6 @@ dependencies = [ "reth-testing-utils", "reth-trie", "reth-trie-db", - "serde_json", "tempfile", "thiserror", "tokio", @@ -9033,11 +8998,8 @@ dependencies = [ "assert_matches", "parking_lot 0.12.3", "rayon", - "reth-chainspec", "reth-db", "reth-db-api", - "reth-nippy-jar", - "reth-node-types", "reth-provider", "reth-prune-types", "reth-stages", @@ -9200,13 +9162,11 @@ dependencies = [ "auto_impl", "bincode", "criterion", - "derive_more 1.0.0", "itertools 0.13.0", "metrics", "proptest", "proptest-arbitrary-interop", "rayon", - "reth-chainspec", "reth-execution-errors", "reth-metrics", "reth-primitives", @@ -9217,7 +9177,6 @@ dependencies = [ "serde", "serde_json", "serde_with", - "tokio", "tracing", "triehash", ] @@ -9253,22 +9212,17 @@ 
dependencies = [ "alloy-consensus", "alloy-primitives", "alloy-rlp", - "auto_impl", "derive_more 1.0.0", - "itertools 0.13.0", "metrics", "proptest", "proptest-arbitrary-interop", - "rayon", "reth-chainspec", "reth-db", "reth-db-api", "reth-execution-errors", "reth-metrics", - "reth-node-types", "reth-primitives", "reth-provider", - "reth-stages-types", "reth-storage-errors", "reth-trie", "reth-trie-common", @@ -9276,8 +9230,6 @@ dependencies = [ "serde", "serde_json", "similar-asserts", - "tokio", - "tokio-stream", "tracing", "triehash", ] @@ -9297,7 +9249,6 @@ dependencies = [ "rand 0.8.5", "rayon", "reth-db", - "reth-db-api", "reth-execution-errors", "reth-metrics", "reth-primitives", @@ -9321,15 +9272,12 @@ dependencies = [ "pretty_assertions", "proptest", "rand 0.8.5", - "rayon", - "reth-primitives", "reth-testing-utils", "reth-tracing", "reth-trie", "reth-trie-common", "smallvec", "thiserror", - "tracing", ] [[package]] diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 476f9cd5cec..8b2b77dd665 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -96,7 +96,6 @@ backon.workspace = true similar-asserts.workspace = true [dev-dependencies] -reth-discv4.workspace = true tempfile.workspace = true [features] diff --git a/book/sources/exex/remote/Cargo.toml b/book/sources/exex/remote/Cargo.toml index 6eeb848cacf..6cca3a841f0 100644 --- a/book/sources/exex/remote/Cargo.toml +++ b/book/sources/exex/remote/Cargo.toml @@ -6,9 +6,11 @@ edition = "2021" [dependencies] # reth reth = { git = "https://github.com/paradigmxyz/reth.git" } -reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = ["serde"] } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git"} -reth-node-api = { git = "https://github.com/paradigmxyz/reth.git"} +reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = [ + "serde", +] } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } +reth-node-api = { git = 
"https://github.com/paradigmxyz/reth.git" } reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # async @@ -49,4 +51,4 @@ path = "src/exex.rs" [[bin]] name = "consumer" -path = "src/consumer.rs" \ No newline at end of file +path = "src/consumer.rs" diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 2742d704054..9fa3e2b60ab 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -13,11 +13,8 @@ workspace = true [dependencies] reth.workspace = true reth-chainspec.workspace = true -reth-engine-local.workspace = true -reth-primitives.workspace = true reth-tracing.workspace = true reth-db = { workspace = true, features = ["test-utils"] } -reth-rpc.workspace = true reth-rpc-layer.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-payload-primitives.workspace = true @@ -26,11 +23,8 @@ reth-node-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-stages-types.workspace = true reth-network-peers.workspace = true -reth-node-ethereum.workspace = true -reth-rpc-types-compat.workspace = true # rpc -jsonrpsee-types.workspace = true jsonrpsee.workspace = true # ethereum @@ -48,4 +42,4 @@ alloy-signer-local = { workspace = true, features = ["mnemonic"] } alloy-rpc-types.workspace = true alloy-network.workspace = true alloy-consensus = { workspace = true, features = ["kzg"] } -tracing.workspace = true \ No newline at end of file +tracing.workspace = true diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 91c9cd5422d..3a618f4fd7a 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -76,7 +76,6 @@ reth-chainspec.workspace = true alloy-rlp.workspace = true assert_matches.workspace = true -rand.workspace = true [features] test-utils = [ @@ -86,5 +85,5 @@ test-utils = [ "reth-prune-types", "reth-stages/test-utils", "reth-static-file", - "reth-tracing" + 
"reth-tracing", ] diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index 62ea234cd5b..7b4b6c53c09 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -35,7 +35,6 @@ auto_impl.workspace = true [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } -proptest.workspace = true alloy-consensus.workspace = true [features] diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 213071cbfb5..29093adc8a3 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -43,14 +43,12 @@ reth-chainspec.workspace = true reth-db.workspace = true reth-exex.workspace = true reth-node-api.workspace = true -reth-node-core.workspace = true reth-e2e-test-utils.workspace = true reth-tasks.workspace = true futures.workspace = true alloy-primitives.workspace = true alloy-genesis.workspace = true tokio.workspace = true -futures-util.workspace = true serde_json.workspace = true alloy-consensus.workspace = true diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 6a3815e4045..27a9d1576c8 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -17,7 +17,10 @@ reth-chain-state.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-evm.workspace = true -reth-exex-types = { workspace = true, features = ["serde", "serde-bincode-compat"] } +reth-exex-types = { workspace = true, features = [ + "serde", + "serde-bincode-compat", +] } reth-fs-util.workspace = true reth-metrics.workspace = true reth-node-api.workspace = true @@ -51,7 +54,6 @@ tracing.workspace = true [dev-dependencies] reth-blockchain-tree.workspace = true -reth-db-api.workspace = true reth-db-common.workspace = true reth-evm-ethereum.workspace = true reth-node-api.workspace = true diff --git a/crates/net/network-types/Cargo.toml b/crates/net/network-types/Cargo.toml index 97c8e65cbbc..c9b8fdd5bf2 100644 --- 
a/crates/net/network-types/Cargo.toml +++ b/crates/net/network-types/Cargo.toml @@ -22,7 +22,7 @@ serde = { workspace = true, optional = true } humantime-serde = { workspace = true, optional = true } serde_json = { workspace = true } -# misc +# misc tracing.workspace = true [features] diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index d7d95751cc5..a6ae1db5e01 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -74,7 +74,6 @@ futures.workspace = true # test vectors generation proptest.workspace = true tokio.workspace = true -tempfile.workspace = true [features] optimism = ["reth-primitives/optimism"] diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml index 76a3a7f6632..9efdbd4959d 100644 --- a/crates/node/metrics/Cargo.toml +++ b/crates/node/metrics/Cargo.toml @@ -35,7 +35,6 @@ procfs = "0.16.0" [dev-dependencies] reqwest.workspace = true -reth-chainspec.workspace = true socket2 = { version = "0.5", default-features = false } reth-provider = { workspace = true, features = ["test-utils"] } diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 8e359e60265..fbe787d6e16 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -19,7 +19,6 @@ reth-payload-builder.workspace = true reth-auto-seal-consensus.workspace = true reth-basic-payload-builder.workspace = true reth-consensus.workspace = true -reth-rpc-types-compat.workspace = true reth-node-api.workspace = true reth-node-builder.workspace = true reth-tracing.workspace = true @@ -29,10 +28,6 @@ reth-network.workspace = true reth-evm.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-beacon-consensus.workspace = true -reth-discv5.workspace = true -reth-rpc-eth-types.workspace = true -reth-rpc-eth-api.workspace = true -reth-rpc.workspace = true # op-reth reth-optimism-payload-builder.workspace = true @@ -51,21 +46,13 @@ alloy-primitives.workspace = true 
op-alloy-rpc-types-engine.workspace = true alloy-rpc-types-engine.workspace = true -# async -async-trait.workspace = true -reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } -tracing.workspace = true - # misc clap.workspace = true serde.workspace = true eyre.workspace = true parking_lot.workspace = true -thiserror.workspace = true # rpc -jsonrpsee.workspace = true -jsonrpsee-types.workspace = true serde_json.workspace = true [dev-dependencies] diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 2054de7305b..a2d4c20a8b7 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -13,6 +13,5 @@ workspace = true [dependencies] reth-primitives.workspace = true -reth-primitives-traits.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 05ccd9081a2..5661fb8f846 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -75,7 +75,6 @@ alloy-genesis.workspace = true arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true bincode.workspace = true -modular-bitfield.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true rand.workspace = true diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 6e9e469ec44..363e2295530 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -37,12 +37,9 @@ alloy-rpc-types-engine.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } -[dev-dependencies] -serde_json.workspace = true - [features] client = [ "jsonrpsee/client", "jsonrpsee/async-client", - "reth-rpc-eth-api/client" + "reth-rpc-eth-api/client", ] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 817d2a3d76b..cc72c2ebf92 100644 --- 
a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -64,8 +64,6 @@ reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-engine-api.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } -reth-tokio-util.workspace = true -reth-node-api.workspace = true reth-rpc-types-compat.workspace = true alloy-primitives.workspace = true diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 3d4227d8a27..81f35a4b390 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -24,7 +24,9 @@ reth-evm.workspace = true reth-exex.workspace = true reth-network-p2p.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } -reth-primitives-traits = { workspace = true, features = ["serde-bincode-compat"] } +reth-primitives-traits = { workspace = true, features = [ + "serde-bincode-compat", +] } reth-provider.workspace = true reth-execution-types.workspace = true reth-prune.workspace = true @@ -82,9 +84,6 @@ tempfile.workspace = true # Stage benchmarks criterion = { workspace = true, features = ["async_tokio"] } -# io -serde_json.workspace = true - [target.'cfg(not(target_os = "windows"))'.dev-dependencies] pprof = { workspace = true, features = [ "flamegraph", diff --git a/crates/static-file/static-file/Cargo.toml b/crates/static-file/static-file/Cargo.toml index 8fa89e12e0f..d22b116cdc5 100644 --- a/crates/static-file/static-file/Cargo.toml +++ b/crates/static-file/static-file/Cargo.toml @@ -13,17 +13,14 @@ workspace = true [dependencies] # reth -reth-chainspec.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true -reth-nippy-jar.workspace = true reth-tokio-util.workspace = true reth-prune-types.workspace = true reth-static-file-types.workspace = true reth-stages-types.workspace = true -reth-node-types.workspace = true 
alloy-primitives.workspace = true diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 640ec8c9561..21a1897f1c7 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -39,8 +39,6 @@ alloy-primitives = { workspace = true, features = [ "rand", ] } alloy-consensus = { workspace = true, features = ["arbitrary"] } -alloy-rlp.workspace = true -rand.workspace = true test-fuzz.workspace = true serde_json.workspace = true diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 9bcd54f3860..492178775b6 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -35,15 +35,9 @@ proptest = { workspace = true, optional = true } reth-primitives = { workspace = true, features = ["arbitrary"] } reth-codecs.workspace = true -arbitrary = { workspace = true, features = ["derive"] } proptest-arbitrary-interop.workspace = true -proptest.workspace = true test-fuzz.workspace = true [features] test-utils = ["arbitrary"] -arbitrary = [ - "reth-primitives/arbitrary", - "dep:arbitrary", - "dep:proptest", -] +arbitrary = ["reth-primitives/arbitrary", "dep:arbitrary", "dep:proptest"] diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index a075f772463..356672f2548 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -58,7 +58,6 @@ strum = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] # reth libs with arbitrary reth-primitives = { workspace = true, features = ["arbitrary"] } -rand.workspace = true serde_json.workspace = true tempfile.workspace = true test-fuzz.workspace = true diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index a0e1acbce35..e75b0456eb9 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -17,7 +17,6 @@ reth-primitives.workspace = true reth-execution-errors.workspace = true reth-db.workspace = true reth-db-api.workspace 
= true -reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true reth-trie.workspace = true @@ -32,10 +31,7 @@ alloy-primitives.workspace = true tracing.workspace = true # misc -rayon.workspace = true derive_more.workspace = true -auto_impl.workspace = true -itertools.workspace = true # `metrics` feature reth-metrics = { workspace = true, optional = true } @@ -56,7 +52,6 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-storage-errors.workspace = true reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } reth-trie = { workspace = true, features = ["test-utils"] } -reth-node-types.workspace = true alloy-consensus.workspace = true @@ -66,12 +61,6 @@ triehash = "0.8" # misc proptest.workspace = true proptest-arbitrary-interop.workspace = true -tokio = { workspace = true, default-features = false, features = [ - "sync", - "rt", - "macros", -] } -tokio-stream.workspace = true serde_json.workspace = true similar-asserts.workspace = true diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index 64a4644bdce..cc35fe9f914 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -15,7 +15,6 @@ workspace = true # reth reth-primitives.workspace = true reth-db.workspace = true -reth-db-api.workspace = true reth-trie.workspace = true reth-trie-db.workspace = true reth-execution-errors.workspace = true @@ -46,7 +45,11 @@ reth-trie = { workspace = true, features = ["test-utils"] } # misc rand.workspace = true -tokio = { workspace = true, default-features = false, features = ["sync", "rt", "macros"] } +tokio = { workspace = true, default-features = false, features = [ + "sync", + "rt", + "macros", +] } rayon.workspace = true criterion = { workspace = true, features = ["async_tokio"] } proptest.workspace = true diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 4ba6ed0f2ec..26d036f57ff 100644 --- 
a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true reth-tracing.workspace = true reth-trie-common.workspace = true reth-trie.workspace = true @@ -23,16 +22,11 @@ reth-trie.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true -# tracing -tracing.workspace = true - # misc -rayon.workspace = true smallvec = { workspace = true, features = ["const_new"] } thiserror.workspace = true [dev-dependencies] -reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 31b5ac3e25c..77fc5739770 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -31,7 +31,6 @@ tracing.workspace = true # misc rayon.workspace = true -derive_more.workspace = true auto_impl.workspace = true itertools.workspace = true @@ -50,7 +49,6 @@ serde_with = { workspace = true, optional = true } [dev-dependencies] # reth -reth-chainspec.workspace = true reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } @@ -60,11 +58,6 @@ triehash = "0.8" # misc proptest.workspace = true proptest-arbitrary-interop.workspace = true -tokio = { workspace = true, default-features = false, features = [ - "sync", - "rt", - "macros", -] } serde_json.workspace = true criterion.workspace = true bincode.workspace = true diff --git a/examples/custom-rlpx-subprotocol/Cargo.toml b/examples/custom-rlpx-subprotocol/Cargo.toml index d59d16f35cf..18c136671c0 100644 --- a/examples/custom-rlpx-subprotocol/Cargo.toml +++ b/examples/custom-rlpx-subprotocol/Cargo.toml @@ -13,8 +13,6 @@ reth-eth-wire.workspace = true 
reth-network.workspace = true reth-network-api.workspace = true reth-node-ethereum.workspace = true -reth-provider = { workspace = true, features = ["test-utils"] } -reth-primitives.workspace = true reth.workspace = true tokio-stream.workspace = true eyre.workspace = true diff --git a/examples/polygon-p2p/Cargo.toml b/examples/polygon-p2p/Cargo.toml index bdf9a27ce56..e18f32a6473 100644 --- a/examples/polygon-p2p/Cargo.toml +++ b/examples/polygon-p2p/Cargo.toml @@ -20,6 +20,5 @@ reth-primitives.workspace = true serde_json.workspace = true reth-tracing.workspace = true tokio-stream.workspace = true -reth-provider = { workspace = true, features = ["test-utils"] } reth-discv4 = { workspace = true, features = ["test-utils"] } alloy-primitives.workspace = true From d0ac83394616e97ccc559c92e0f594b18cfa4440 Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Sat, 19 Oct 2024 18:56:48 +0200 Subject: [PATCH 050/970] perf: avoid cloning in payload builder (#11899) --- crates/ethereum/payload/src/lib.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index e09228302e4..7f94acf723c 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -339,11 +339,12 @@ where // and 4788 contract call db.merge_transitions(BundleRetention::Reverts); + let requests_hash = requests.as_ref().map(|requests| requests.requests_hash()); let execution_outcome = ExecutionOutcome::new( db.take_bundle(), - vec![receipts.clone()].into(), + vec![receipts].into(), block_number, - vec![requests.clone().unwrap_or_default()], + vec![requests.unwrap_or_default()], ); let receipts_root = execution_outcome.receipts_root_slow(block_number).expect("Number is in range"); @@ -411,7 +412,7 @@ where parent_beacon_block_root: attributes.parent_beacon_block_root, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: 
excess_blob_gas.map(Into::into), - requests_hash: requests.map(|r| r.requests_hash()), + requests_hash, }; // seal the block From cd828c06d9fa9638951060c6ca3772d9a7c619a6 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sat, 19 Oct 2024 19:59:32 +0200 Subject: [PATCH 051/970] feat: switch to composable executor for Ethereum (#11838) --- Cargo.lock | 2 +- crates/ethereum/evm/Cargo.toml | 3 +- crates/ethereum/evm/src/execute.rs | 643 ++++------ crates/ethereum/evm/src/lib.rs | 4 +- crates/ethereum/evm/src/strategy.rs | 1176 ------------------ crates/ethereum/node/Cargo.toml | 1 + crates/ethereum/node/src/evm.rs | 4 +- crates/ethereum/node/src/lib.rs | 4 +- crates/ethereum/node/src/node.rs | 8 +- crates/rpc/rpc-builder/tests/it/utils.rs | 9 +- crates/stages/stages/src/stages/execution.rs | 8 +- examples/custom-evm/src/main.rs | 9 +- examples/stateful-precompile/src/main.rs | 15 +- 13 files changed, 277 insertions(+), 1609 deletions(-) delete mode 100644 crates/ethereum/evm/src/strategy.rs diff --git a/Cargo.lock b/Cargo.lock index 62ff16bb9d6..25a62d0c043 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7443,7 +7443,6 @@ dependencies = [ "reth-evm", "reth-execution-types", "reth-primitives", - "reth-prune-types", "reth-revm", "reth-testing-utils", "revm-primitives", @@ -7995,6 +7994,7 @@ dependencies = [ "reth-e2e-test-utils", "reth-ethereum-engine-primitives", "reth-ethereum-payload-builder", + "reth-evm", "reth-evm-ethereum", "reth-exex", "reth-network", diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 7215efa68c6..8cbc92f90f3 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -18,8 +18,6 @@ reth-evm.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } reth-revm.workspace = true reth-ethereum-consensus.workspace = true -reth-prune-types.workspace = true -reth-execution-types.workspace = true reth-consensus.workspace = true # Ethereum @@ -36,6 +34,7 @@ 
reth-testing-utils.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-execution-types.workspace = true secp256k1.workspace = true serde_json.workspace = true alloy-genesis.workspace = true diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index b4a90d40990..185f351dd9f 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -1,158 +1,161 @@ -//! Ethereum block executor. +//! Ethereum block execution strategy. use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; -use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; use alloy_consensus::Transaction as _; use alloy_eips::eip7685::Requests; -use alloy_primitives::{BlockNumber, U256}; use core::fmt::Display; -use reth_chainspec::{ChainSpec, EthereumHardforks, MAINNET}; +use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; +use reth_consensus::ConsensusError; use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ - BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, BlockValidationError, Executor, ProviderError, + BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, + BlockExecutionStrategyFactory, BlockValidationError, ProviderError, }, state_change::post_block_balance_increments, - system_calls::{NoopHook, OnStateHook, SystemCaller}, + system_calls::{OnStateHook, SystemCaller}, ConfigureEvm, }; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockWithSenders, EthereumHardfork, Header, Receipt}; -use reth_prune_types::PruneModes; -use reth_revm::{ - batch::BlockBatchRecord, - db::{states::bundle_state::BundleRetention, State}, - Evm, -}; +use 
reth_primitives::{BlockWithSenders, Receipt}; +use reth_revm::db::{states::bundle_state::BundleRetention, BundleState, State}; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, }; -/// Provides executors to execute regular ethereum blocks +/// Factory for [`EthExecutionStrategy`]. #[derive(Debug, Clone)] -pub struct EthExecutorProvider { +pub struct EthExecutionStrategyFactory { + /// The chainspec chain_spec: Arc, + /// How to create an EVM. evm_config: EvmConfig, } -impl EthExecutorProvider { - /// Creates a new default ethereum executor provider. +impl EthExecutionStrategyFactory { + /// Creates a new default ethereum executor strategy factory. pub fn ethereum(chain_spec: Arc) -> Self { Self::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)) } - /// Returns a new provider for the mainnet. + /// Returns a new factory for the mainnet. pub fn mainnet() -> Self { Self::ethereum(MAINNET.clone()) } } -impl EthExecutorProvider { - /// Creates a new executor provider. +impl EthExecutionStrategyFactory { + /// Creates a new executor strategy factory. pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { Self { chain_spec, evm_config } } } -impl EthExecutorProvider -where - EvmConfig: ConfigureEvm
, -{ - fn eth_executor(&self, db: DB) -> EthBlockExecutor - where - DB: Database>, - { - EthBlockExecutor::new( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder().with_database(db).with_bundle_update().without_state_clear().build(), - ) - } -} - -impl BlockExecutorProvider for EthExecutorProvider +impl BlockExecutionStrategyFactory for EthExecutionStrategyFactory where - EvmConfig: ConfigureEvm
, + EvmConfig: + Clone + Unpin + Sync + Send + 'static + ConfigureEvm
, { - type Executor + Display>> = - EthBlockExecutor; - - type BatchExecutor + Display>> = - EthBatchExecutor; + type Strategy + Display>> = + EthExecutionStrategy; - fn executor(&self, db: DB) -> Self::Executor + fn create_strategy(&self, db: DB) -> Self::Strategy where DB: Database + Display>, { - self.eth_executor(db) + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + EthExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) } - - fn batch_executor(&self, db: DB) -> Self::BatchExecutor - where - DB: Database + Display>, - { - let executor = self.eth_executor(db); - EthBatchExecutor { executor, batch_record: BlockBatchRecord::default() } - } -} - -/// Helper type for the output of executing a block. -#[derive(Debug, Clone)] -struct EthExecuteOutput { - receipts: Vec, - requests: Requests, - gas_used: u64, } -/// Helper container type for EVM with chain spec. -#[derive(Debug, Clone)] -struct EthEvmExecutor { +/// Block execution strategy for Ethereum. +#[allow(missing_debug_implementations)] +pub struct EthExecutionStrategy +where + EvmConfig: Clone, +{ /// The chainspec chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Current state for block execution. + state: State, + /// Utility to call system smart contracts. + system_caller: SystemCaller, } -impl EthEvmExecutor +impl EthExecutionStrategy where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone, { - /// Executes the transactions in the block and returns the receipts of the transactions in the - /// block, the total gas used and the list of EIP-7685 [requests](Requests). - /// - /// This applies the pre-execution and post-execution changes that require an [EVM](Evm), and - /// executes the transactions. - /// - /// The optional `state_hook` will be executed with the state changes if present. + /// Creates a new [`EthExecutionStrategy`] + pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { + let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); + Self { state, chain_spec, evm_config, system_caller } + } +} + +impl EthExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + /// Configures a new evm configuration and block environment for the given block. /// - /// # Note + /// # Caution /// - /// It does __not__ apply post-execution changes that do not require an [EVM](Evm), for that see - /// [`EthBlockExecutor::post_execution`]. - fn execute_state_transitions( + /// This does not initialize the tx environment. + fn evm_env_for_block( &self, + header: &alloy_consensus::Header, + total_difficulty: U256, + ) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl BlockExecutionStrategy for EthExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, block: &BlockWithSenders, - mut evm: Evm<'_, Ext, &mut State>, - state_hook: Option, - ) -> Result - where - DB: Database, - DB::Error: Into + Display, - F: OnStateHook + 'static, - { - let mut system_caller = SystemCaller::new(self.evm_config.clone(), &self.chain_spec); - if let Some(hook) = state_hook { - system_caller.with_state_hook(Some(Box::new(hook) as Box)); - } + total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + self.system_caller.apply_pre_execution_changes(block, &mut evm)?; + + Ok(()) + } - system_caller.apply_pre_execution_changes(block, &mut evm)?; + fn execute_transactions( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(Vec, u64), Self::Error> { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - // execute transactions let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.transactions.len()); for (sender, transaction) in block.transactions_with_sender() { @@ -178,7 +181,7 @@ where error: Box::new(new_err), } })?; - system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state); let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); @@ -200,137 +203,36 @@ where }, ); } + Ok((receipts, cumulative_gas_used)) + } + + fn apply_post_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + receipts: &[Receipt], + ) -> Result { + let env = 
self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); let requests = if self.chain_spec.is_prague_active_at_timestamp(block.timestamp) { // Collect all EIP-6110 deposits let deposit_requests = - crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, &receipts)?; + crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, receipts)?; let mut requests = Requests::new(vec![deposit_requests]); - requests.extend(system_caller.apply_post_execution_changes(&mut evm)?); + requests.extend(self.system_caller.apply_post_execution_changes(&mut evm)?); requests } else { Requests::default() }; + drop(evm); - Ok(EthExecuteOutput { receipts, requests, gas_used: cumulative_gas_used }) - } -} - -/// A basic Ethereum block executor. -/// -/// Expected usage: -/// - Create a new instance of the executor. -/// - Execute the block. -#[derive(Debug)] -pub struct EthBlockExecutor { - /// Chain specific evm config that's used to execute a block. - executor: EthEvmExecutor, - /// The state to use for execution - state: State, -} - -impl EthBlockExecutor { - /// Creates a new Ethereum block executor. - pub const fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: EthEvmExecutor { chain_spec, evm_config }, state } - } - - #[inline] - fn chain_spec(&self) -> &ChainSpec { - &self.executor.chain_spec - } - - /// Returns mutable reference to the state that wraps the underlying database. - #[allow(unused)] - fn state_mut(&mut self) -> &mut State { - &mut self.state - } -} - -impl EthBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// # Caution - /// - /// This does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.executor.evm_config.fill_cfg_and_block_env( - &mut cfg, - &mut block_env, - header, - total_difficulty, - ); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } - - /// Convenience method to invoke `execute_without_verification_with_state_hook` setting the - /// state hook as `None`. - fn execute_without_verification( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result { - self.execute_without_verification_with_state_hook(block, total_difficulty, None::) - } - - /// Execute a single block and apply the state changes to the internal state. - /// - /// Returns the receipts of the transactions in the block, the total gas used and the list of - /// EIP-7685 [requests](Requests). - /// - /// Returns an error if execution fails. - fn execute_without_verification_with_state_hook( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - state_hook: Option, - ) -> Result - where - F: OnStateHook + 'static, - { - // 1. prepare state on new block - self.on_new_block(&block.header); - - // 2. configure the evm and execute - let env = self.evm_env_for_block(&block.header, total_difficulty); - let output = { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - self.executor.execute_state_transitions(block, evm, state_hook) - }?; - - // 3. apply post execution changes - self.post_execution(block, total_difficulty)?; - - Ok(output) - } - - /// Apply settings before a new block is executed. 
- pub(crate) fn on_new_block(&mut self, header: &Header) { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); - self.state.set_state_clear_flag(state_clear_flag); - } - - /// Apply post execution state changes that do not require an [EVM](Evm), such as: block - /// rewards, withdrawals, and irregular DAO hardfork state change - pub fn post_execution( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { let mut balance_increments = - post_block_balance_increments(self.chain_spec(), block, total_difficulty); + post_block_balance_increments(&self.chain_spec, block, total_difficulty); // Irregular state change at Ethereum DAO hardfork - if self.chain_spec().fork(EthereumHardfork::Dao).transitions_at_block(block.number) { + if self.chain_spec.fork(EthereumHardfork::Dao).transitions_at_block(block.number) { // drain balances from hardcoded addresses. let drained_balance: u128 = self .state @@ -347,155 +249,59 @@ where .increment_balances(balance_increments) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - Ok(()) + Ok(requests) } -} - -impl Executor for EthBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; - type Error = BlockExecutionError; - /// Executes the block and commits the changes to the internal state. - /// - /// Returns the receipts of the transactions in the block. - /// - /// Returns an error if the block could not be executed or failed verification. - fn execute(mut self, input: Self::Input<'_>) -> Result { - let BlockExecutionInput { block, total_difficulty } = input; - let EthExecuteOutput { receipts, requests, gas_used } = - self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, requests, gas_used }) + fn state_ref(&self) -> &State { + &self.state } - fn execute_with_state_closure( - mut self, - input: Self::Input<'_>, - mut witness: F, - ) -> Result - where - F: FnMut(&State), - { - let BlockExecutionInput { block, total_difficulty } = input; - let EthExecuteOutput { receipts, requests, gas_used } = - self.execute_without_verification(block, total_difficulty)?; + fn state_mut(&mut self) -> &mut State { + &mut self.state + } - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - witness(&self.state); - Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, requests, gas_used }) + fn with_state_hook(&mut self, hook: Option>) { + self.system_caller.with_state_hook(hook); } - fn execute_with_state_hook( - mut self, - input: Self::Input<'_>, - state_hook: F, - ) -> Result - where - F: OnStateHook + 'static, - { - let BlockExecutionInput { block, total_difficulty } = input; - let EthExecuteOutput { receipts, requests, gas_used } = self - .execute_without_verification_with_state_hook( - block, - total_difficulty, - Some(state_hook), 
- )?; - - // NOTE: we need to merge keep the reverts for the bundle retention + fn finish(&mut self) -> BundleState { self.state.merge_transitions(BundleRetention::Reverts); - Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, requests, gas_used }) + self.state.take_bundle() } -} -/// An executor for a batch of blocks. -/// -/// State changes are tracked until the executor is finalized. -#[derive(Debug)] -pub struct EthBatchExecutor { - /// The executor used to execute single blocks - /// - /// All state changes are committed to the [State]. - executor: EthBlockExecutor, - /// Keeps track of the batch and records receipts based on the configured prune mode - batch_record: BlockBatchRecord, -} -impl EthBatchExecutor { - /// Returns mutable reference to the state that wraps the underlying database. - #[allow(unused)] - fn state_mut(&mut self) -> &mut State { - self.executor.state_mut() + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + requests: &Requests, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec.clone(), receipts, requests) } } -impl BatchExecutor for EthBatchExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; - type Error = BlockExecutionError; - - fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { - let BlockExecutionInput { block, total_difficulty } = input; - - if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); - } - - let EthExecuteOutput { receipts, requests, gas_used: _ } = - self.executor.execute_without_verification(block, total_difficulty)?; - - validate_block_post_execution(block, self.executor.chain_spec(), &receipts, &requests)?; - - // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); - self.executor.state.merge_transitions(retention); - - // store receipts in the set - self.batch_record.save_receipts(receipts)?; - - // store requests in the set - self.batch_record.save_requests(requests); - - Ok(()) - } - - fn finalize(mut self) -> Self::Output { - ExecutionOutcome::new( - self.executor.state.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - self.batch_record.take_requests(), - ) - } - - fn set_tip(&mut self, tip: BlockNumber) { - self.batch_record.set_tip(tip); - } +/// Helper type with backwards compatible methods to obtain Ethereum executor +/// providers. +#[derive(Debug)] +pub struct EthExecutorProvider; - fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.batch_record.set_prune_modes(prune_modes); +impl EthExecutorProvider { + /// Creates a new default ethereum executor provider. + pub fn ethereum( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::ethereum(chain_spec)) } - fn size_hint(&self) -> Option { - Some(self.executor.state.bundle_state.size_hint()) + /// Returns a new provider for the mainnet. 
+ pub fn mainnet() -> BasicBlockExecutorProvider { + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::mainnet()) } } #[cfg(test)] mod tests { use super::*; - use alloy_consensus::TxLegacy; + use alloy_consensus::{Header, TxLegacy}; use alloy_eips::{ eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, @@ -504,6 +310,10 @@ mod tests { }; use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256}; use reth_chainspec::{ChainSpecBuilder, ForkCondition}; + use reth_evm::execute::{ + BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, + }; + use reth_execution_types::BlockExecutionOutput; use reth_primitives::{ constants::ETH_TO_WEI, public_key_to_address, Account, Block, BlockBody, Transaction, }; @@ -553,8 +363,13 @@ mod tests { db } - fn executor_provider(chain_spec: Arc) -> EthExecutorProvider { - EthExecutorProvider { evm_config: EthEvmConfig::new(chain_spec.clone()), chain_spec } + fn executor_provider( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + let strategy_factory = + EthExecutionStrategyFactory::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)); + + BasicBlockExecutorProvider::new(strategy_factory) } #[test] @@ -573,10 +388,11 @@ mod tests { let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + // attempt to execute a block without parent beacon block root, expect err - let err = provider - .executor(StateProviderDatabase::new(&db)) - .execute( + let err = executor + .execute_and_verify_one( ( &BlockWithSenders { block: Block { @@ -605,19 +421,24 @@ mod tests { // fix header, set a gas limit header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - let mut executor = provider.executor(StateProviderDatabase::new(&db)); - // Now execute a block with the fixed header, ensure that it does not fail executor - .execute_without_verification( - 
&BlockWithSenders { - block: Block { - header: header.clone(), - body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { + header: header.clone(), + body: BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: None, + }, + }, + senders: vec![], }, - senders: vec![], - }, - U256::ZERO, + U256::ZERO, + ) + .into(), ) .unwrap(); @@ -631,16 +452,17 @@ mod tests { let parent_beacon_block_root_index = timestamp_index % history_buffer_length + history_buffer_length; - // get timestamp storage and compare - let timestamp_storage = - executor.state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); + let timestamp_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() + }); assert_eq!(timestamp_storage, U256::from(header.timestamp)); // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .state - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .expect("storage value should exist"); + let parent_beacon_block_root_storage = executor.with_state_mut(|state| { + state + .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) + .expect("storage value should exist") + }); assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); } @@ -747,7 +569,8 @@ mod tests { ); // ensure that the nonce of the system address account has not changed - let nonce = executor.state_mut().basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce; + let nonce = + executor.with_state_mut(|state| state.basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce); assert_eq!(nonce, 0); } @@ -805,11 +628,12 @@ mod tests { // there is no system contract call so there should be NO STORAGE CHANGES // this means we'll check the transition state - let transition_state = executor - .state_mut() - .transition_state - .take() - .expect("the evm should 
be initialized with bundle updates"); + let transition_state = executor.with_state_mut(|state| { + state + .transition_state + .take() + .expect("the evm should be initialized with bundle updates") + }); // assert that it is the default (empty) transition state assert_eq!(transition_state, TransitionState::default()); @@ -867,17 +691,15 @@ mod tests { timestamp_index % history_buffer_length + history_buffer_length; // get timestamp storage and compare - let timestamp_storage = executor - .state_mut() - .storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)) - .unwrap(); + let timestamp_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() + }); assert_eq!(timestamp_storage, U256::from(header.timestamp)); // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .state_mut() - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .unwrap(); + let parent_beacon_block_root_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)).unwrap() + }); assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); } @@ -903,7 +725,6 @@ mod tests { db } - #[test] fn eip_2935_pre_fork() { let db = create_state_provider_with_block_hashes(1); @@ -942,12 +763,11 @@ mod tests { // // we load the account first, because revm expects it to be // loaded - executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap(); - assert!(executor - .state_mut() + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -986,12 +806,11 @@ mod tests { // // we load the account first, because revm expects it to be // loaded - executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap(); - assert!(executor - .state_mut() + 
executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -1033,21 +852,20 @@ mod tests { ); // the hash for the ancestor of the fork activation block should be present - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor - .state_mut() + executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block - 1)) - .unwrap(), + .unwrap()), U256::ZERO ); // the hash of the block itself should not be in storage - assert!(executor - .state_mut() + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block)) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -1090,15 +908,15 @@ mod tests { ); // the hash for the ancestor of the fork activation block should be present - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor - .state_mut() + executor.with_state_mut(|state| state .storage( HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block % BLOCKHASH_SERVE_WINDOW as u64 - 1) ) - .unwrap(), + .unwrap()), U256::ZERO ); } @@ -1141,12 +959,11 @@ mod tests { // // we load the account first, because revm expects it to be // loaded - executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap(); - assert!(executor - .state_mut() + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) .unwrap() - .is_zero()); + .is_zero())); // attempt to execute block 1, this should not fail let header = 
Header { @@ -1174,16 +991,18 @@ mod tests { ); // the block hash of genesis should now be in storage, but not block 1 - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor.state_mut().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(), + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap()), U256::ZERO ); - assert!(executor - .state_mut() + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) .unwrap() - .is_zero()); + .is_zero())); // attempt to execute block 2, this should not fail let header = Header { @@ -1210,20 +1029,24 @@ mod tests { ); // the block hash of genesis and block 1 should now be in storage, but not block 2 - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor.state_mut().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(), + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap()), U256::ZERO ); assert_ne!( - executor.state_mut().storage(HISTORY_STORAGE_ADDRESS, U256::from(1)).unwrap(), + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) + .unwrap()), U256::ZERO ); - assert!(executor - .state_mut() + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(2)) .unwrap() - .is_zero()); + .is_zero())); } #[test] diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index ac9bb5a0bbb..9abb1197636 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -17,7 +17,7 @@ extern crate alloc; -use alloc::vec::Vec; +use alloc::{sync::Arc, vec::Vec}; use alloy_primitives::{Address, Bytes, TxKind, 
U256}; use reth_chainspec::{ChainSpec, Head}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; @@ -25,7 +25,6 @@ use reth_primitives::{transaction::FillTxEnv, Header, TransactionSigned}; use revm_primitives::{ AnalysisKind, BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, Env, SpecId, TxEnv, }; -use std::sync::Arc; mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_merge}; @@ -33,7 +32,6 @@ use reth_ethereum_forks::EthereumHardfork; use reth_primitives::constants::EIP1559_INITIAL_BASE_FEE; pub mod execute; -pub mod strategy; /// Ethereum DAO hardfork state change data. pub mod dao_fork; diff --git a/crates/ethereum/evm/src/strategy.rs b/crates/ethereum/evm/src/strategy.rs deleted file mode 100644 index 55fbfffc8e9..00000000000 --- a/crates/ethereum/evm/src/strategy.rs +++ /dev/null @@ -1,1176 +0,0 @@ -//! Ethereum block execution strategy, - -use crate::{ - dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - EthEvmConfig, -}; -use alloc::sync::Arc; -use alloy_consensus::Transaction as _; -use alloy_eips::eip7685::Requests; -use core::fmt::Display; -use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; -use reth_consensus::ConsensusError; -use reth_ethereum_consensus::validate_block_post_execution; -use reth_evm::{ - execute::{ - BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, - BlockValidationError, ProviderError, - }, - state_change::post_block_balance_increments, - system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, ConfigureEvmEnv, -}; -use reth_primitives::{BlockWithSenders, Header, Receipt}; -use reth_revm::{ - db::{states::bundle_state::BundleRetention, BundleState}, - Database, DatabaseCommit, State, -}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256}; - -/// Factory for [`EthExecutionStrategy`]. 
-#[derive(Debug, Clone)] -pub struct EthExecutionStrategyFactory { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, -} - -impl EthExecutionStrategyFactory { - /// Creates a new default ethereum executor strategy factory. - pub fn ethereum(chain_spec: Arc) -> Self { - Self::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)) - } - - /// Returns a new factory for the mainnet. - pub fn mainnet() -> Self { - Self::ethereum(MAINNET.clone()) - } -} - -impl EthExecutionStrategyFactory { - /// Creates a new executor strategy factory. - pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config } - } -} - -impl BlockExecutionStrategyFactory for EthExecutionStrategyFactory { - type Strategy + Display>> = EthExecutionStrategy; - - fn create_strategy(&self, db: DB) -> Self::Strategy - where - DB: Database + Display>, - { - let state = - State::builder().with_database(db).with_bundle_update().without_state_clear().build(); - EthExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) - } -} - -/// Block execution strategy for Ethereum. -#[allow(missing_debug_implementations)] -pub struct EthExecutionStrategy { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, - /// Current state for block execution. - state: State, - /// Utility to call system smart contracts. - system_caller: SystemCaller, -} - -impl EthExecutionStrategy { - /// Creates a new [`EthExecutionStrategy`] - pub fn new(state: State, chain_spec: Arc, evm_config: EthEvmConfig) -> Self { - let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); - Self { state, chain_spec, evm_config, system_caller } - } -} - -impl EthExecutionStrategy -where - DB: Database + Display>, - EvmConfig: ConfigureEvm
, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// # Caution - /// - /// This does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } -} - -impl BlockExecutionStrategy for EthExecutionStrategy -where - DB: Database + Display>, -{ - type Error = BlockExecutionError; - - fn apply_pre_execution_changes( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), Self::Error> { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = - (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); - self.state.set_state_clear_flag(state_clear_flag); - - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - self.system_caller.apply_pre_execution_changes(block, &mut evm)?; - - Ok(()) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error> { - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.transactions.len()); - for (sender, transaction) in block.transactions_with_sender() { - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. 
- let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - - self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); - - // Execute transaction. - let result_and_state = evm.transact().map_err(move |err| { - let new_err = err.map_db_err(|e| e.into()); - // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { - hash: transaction.recalculate_hash(), - error: Box::new(new_err), - } - })?; - self.system_caller.on_state(&result_and_state); - let ResultAndState { result, state } = result_and_state; - evm.db_mut().commit(state); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push( - #[allow(clippy::needless_update)] // side-effect of optimism fields - Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. 
- success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs(), - ..Default::default() - }, - ); - } - Ok((receipts, cumulative_gas_used)) - } - - fn apply_post_execution_changes( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - receipts: &[Receipt], - ) -> Result { - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - let requests = if self.chain_spec.is_prague_active_at_timestamp(block.timestamp) { - // Collect all EIP-6110 deposits - let deposit_requests = - crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, receipts)?; - - let mut requests = Requests::new(vec![deposit_requests]); - requests.extend(self.system_caller.apply_post_execution_changes(&mut evm)?); - requests - } else { - Requests::default() - }; - drop(evm); - - let mut balance_increments = - post_block_balance_increments(&self.chain_spec, block, total_difficulty); - - // Irregular state change at Ethereum DAO hardfork - if self.chain_spec.fork(EthereumHardfork::Dao).transitions_at_block(block.number) { - // drain balances from hardcoded addresses. - let drained_balance: u128 = self - .state - .drain_balances(DAO_HARDKFORK_ACCOUNTS) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)? - .into_iter() - .sum(); - - // return balance to DAO beneficiary. 
- *balance_increments.entry(DAO_HARDFORK_BENEFICIARY).or_default() += drained_balance; - } - // increment balances - self.state - .increment_balances(balance_increments) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - - Ok(requests) - } - - fn state_ref(&self) -> &State { - &self.state - } - - fn state_mut(&mut self) -> &mut State { - &mut self.state - } - - fn with_state_hook(&mut self, hook: Option>) { - self.system_caller.with_state_hook(hook); - } - - fn finish(&mut self) -> BundleState { - self.state.merge_transitions(BundleRetention::Reverts); - self.state.take_bundle() - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - receipts: &[Receipt], - requests: &Requests, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec.clone(), receipts, requests) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::TxLegacy; - use alloy_eips::{ - eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, - eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, - eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, - eip7685::EMPTY_REQUESTS_HASH, - }; - use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256}; - use reth_chainspec::{ChainSpecBuilder, ForkCondition}; - use reth_evm::execute::{ - BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, - }; - use reth_execution_types::BlockExecutionOutput; - use reth_primitives::{ - constants::ETH_TO_WEI, public_key_to_address, Account, Block, BlockBody, Transaction, - }; - use reth_revm::{ - database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, - }; - use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; - use revm_primitives::BLOCKHASH_SERVE_WINDOW; - use secp256k1::{Keypair, Secp256k1}; - use std::collections::HashMap; - - fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { - let 
mut db = StateProviderTest::default(); - - let beacon_root_contract_account = Account { - balance: U256::ZERO, - bytecode_hash: Some(keccak256(BEACON_ROOTS_CODE.clone())), - nonce: 1, - }; - - db.insert_account( - BEACON_ROOTS_ADDRESS, - beacon_root_contract_account, - Some(BEACON_ROOTS_CODE.clone()), - HashMap::default(), - ); - - db - } - - fn create_state_provider_with_withdrawal_requests_contract() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let withdrawal_requests_contract_account = Account { - nonce: 1, - balance: U256::ZERO, - bytecode_hash: Some(keccak256(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone())), - }; - - db.insert_account( - WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, - withdrawal_requests_contract_account, - Some(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone()), - HashMap::default(), - ); - - db - } - - fn executor_provider( - chain_spec: Arc, - ) -> BasicBlockExecutorProvider { - let strategy_factory = - EthExecutionStrategyFactory::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)); - - BasicBlockExecutorProvider::new(strategy_factory) - } - - #[test] - fn eip_4788_non_genesis_call() { - let mut header = - Header { timestamp: 1, number: 1, excess_blob_gas: Some(0), ..Header::default() }; - - let db = create_state_provider_with_beacon_root_contract(); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = executor_provider(chain_spec); - - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute a block without parent beacon block root, expect err - let err = executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - }, - }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect_err( - "Executing cancun block 
without parent beacon block root field should fail", - ); - - assert_eq!( - err.as_validation().unwrap().clone(), - BlockValidationError::MissingParentBeaconBlockRoot - ); - - // fix header, set a gas limit - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - }, - }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - let timestamp_storage = executor.with_state_mut(|state| { - state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() - }); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor.with_state_mut(|state| { - state - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .expect("storage value should exist") - }); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - #[test] - fn eip_4788_no_code_cancun() { - // This test ensures that we "silently fail" when cancun is active and there is no code at - // // BEACON_ROOTS_ADDRESS - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - 
..Header::default() - }; - - let db = StateProviderTest::default(); - - // DON'T deploy the contract at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = executor_provider(chain_spec); - - // attempt to execute an empty block with parent beacon block root, this should not fail - provider - .batch_executor(StateProviderDatabase::new(&db)) - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header, - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - }, - }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - } - - #[test] - fn eip_4788_empty_account_call() { - // This test ensures that we do not increment the nonce of an empty SYSTEM_ADDRESS account - // // during the pre-block call - - let mut db = create_state_provider_with_beacon_root_contract(); - - // insert an empty SYSTEM_ADDRESS - db.insert_account(SYSTEM_ADDRESS, Account::default(), None, HashMap::default()); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = executor_provider(chain_spec); - - // construct the header for block one - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header, - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - }, - }, - senders: 
vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - - // ensure that the nonce of the system address account has not changed - let nonce = - executor.with_state_mut(|state| state.basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce); - assert_eq!(nonce, 0); - } - - #[test] - fn eip_4788_genesis_call() { - let db = create_state_provider_with_beacon_root_contract(); - - // activate cancun at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)) - .build(), - ); - - let mut header = chain_spec.genesis_header().clone(); - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute the genesis block with non-zero parent beacon block root, expect err - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - let _err = executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header: header.clone(), body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect_err( - "Executing genesis cancun block with non-zero parent beacon block root field - should fail", - ); - - // fix header - header.parent_beacon_block_root = Some(B256::ZERO); - - // now try to process the genesis block again, this time ensuring that a system contract - // call does not occur - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - // there is no system contract call so there should be NO STORAGE CHANGES - // this means we'll check the transition state - let transition_state = executor.with_state_mut(|state| { - state - .transition_state - .take() - .expect("the evm should be initialized with bundle updates") - }); 
- - // assert that it is the default (empty) transition state - assert_eq!(transition_state, TransitionState::default()); - } - - #[test] - fn eip_4788_high_base_fee() { - // This test ensures that if we have a base fee, then we don't return an error when the - // system contract is called, due to the gas price being less than the base fee. - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - base_fee_per_gas: Some(u64::MAX), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = create_state_provider_with_beacon_root_contract(); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = executor_provider(chain_spec); - - // execute header - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header: header.clone(), body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - // get timestamp storage and compare - let timestamp_storage = executor.with_state_mut(|state| { - state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() - }); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // get parent 
beacon block root storage and compare - let parent_beacon_block_root_storage = executor.with_state_mut(|state| { - state.storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)).unwrap() - }); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - /// Create a state provider with blockhashes and the EIP-2935 system contract. - fn create_state_provider_with_block_hashes(latest_block: u64) -> StateProviderTest { - let mut db = StateProviderTest::default(); - for block_number in 0..=latest_block { - db.insert_block_hash(block_number, keccak256(block_number.to_string())); - } - - let blockhashes_contract_account = Account { - balance: U256::ZERO, - bytecode_hash: Some(keccak256(HISTORY_STORAGE_CODE.clone())), - nonce: 1, - }; - - db.insert_account( - HISTORY_STORAGE_ADDRESS, - blockhashes_contract_account, - Some(HISTORY_STORAGE_CODE.clone()), - HashMap::default(), - ); - - db - } - #[test] - fn eip_2935_pre_fork() { - let db = create_state_provider_with_block_hashes(1); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Never) - .build(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // construct the header for block one - let header = Header { timestamp: 1, number: 1, ..Header::default() }; - - // attempt to execute an empty block, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // ensure that the block hash was *not* written to storage, since this is before the fork - // was activated - // - // we load the account first, because revm expects it to be - // loaded - executor.with_state_mut(|state| 
state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap() - .is_zero())); - } - - #[test] - fn eip_2935_fork_activation_genesis() { - let db = create_state_provider_with_block_hashes(0); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) - .build(), - ); - - let header = chain_spec.genesis_header().clone(); - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute genesis block, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // ensure that the block hash was *not* written to storage, since there are no blocks - // preceding genesis - // - // we load the account first, because revm expects it to be - // loaded - executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap() - .is_zero())); - } - - #[test] - fn eip_2935_fork_activation_within_window_bounds() { - let fork_activation_block = (BLOCKHASH_SERVE_WINDOW - 10) as u64; - let db = create_state_provider_with_block_hashes(fork_activation_block); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) - .build(), - ); - - let header = Header { - parent_hash: B256::random(), - timestamp: 1, - number: fork_activation_block, - requests_hash: Some(EMPTY_REQUESTS_HASH), - ..Header::default() - }; - let provider = executor_provider(chain_spec); - let mut 
executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute the fork activation block, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the hash for the ancestor of the fork activation block should be present - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block - 1)) - .unwrap()), - U256::ZERO - ); - - // the hash of the block itself should not be in storage - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block)) - .unwrap() - .is_zero())); - } - - #[test] - fn eip_2935_fork_activation_outside_window_bounds() { - let fork_activation_block = (BLOCKHASH_SERVE_WINDOW + 256) as u64; - let db = create_state_provider_with_block_hashes(fork_activation_block); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - let header = Header { - parent_hash: B256::random(), - timestamp: 1, - number: fork_activation_block, - requests_hash: Some(EMPTY_REQUESTS_HASH), - ..Header::default() - }; - - // attempt to execute the fork activation block, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should 
not fail", - ); - - // the hash for the ancestor of the fork activation block should be present - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage( - HISTORY_STORAGE_ADDRESS, - U256::from(fork_activation_block % BLOCKHASH_SERVE_WINDOW as u64 - 1) - ) - .unwrap()), - U256::ZERO - ); - } - - #[test] - fn eip_2935_state_transition_inside_fork() { - let db = create_state_provider_with_block_hashes(2); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) - .build(), - ); - - let mut header = chain_spec.genesis_header().clone(); - header.requests_hash = Some(EMPTY_REQUESTS_HASH); - let header_hash = header.hash_slow(); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute the genesis block, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // nothing should be written as the genesis has no ancestors - // - // we load the account first, because revm expects it to be - // loaded - executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap() - .is_zero())); - - // attempt to execute block 1, this should not fail - let header = Header { - parent_hash: header_hash, - timestamp: 1, - number: 1, - requests_hash: Some(EMPTY_REQUESTS_HASH), - ..Header::default() - }; - let header_hash = header.hash_slow(); - - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { 
header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the block hash of genesis should now be in storage, but not block 1 - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap()), - U256::ZERO - ); - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) - .unwrap() - .is_zero())); - - // attempt to execute block 2, this should not fail - let header = Header { - parent_hash: header_hash, - timestamp: 1, - number: 2, - requests_hash: Some(EMPTY_REQUESTS_HASH), - ..Header::default() - }; - - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the block hash of genesis and block 1 should now be in storage, but not block 2 - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap()), - U256::ZERO - ); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) - .unwrap()), - U256::ZERO - ); - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(2)) - .unwrap() - .is_zero())); - } - - #[test] - fn eip_7002() { - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) - .build(), - ); - - let mut db = create_state_provider_with_withdrawal_requests_contract(); - - let 
secp = Secp256k1::new(); - let sender_key_pair = Keypair::new(&secp, &mut generators::rng()); - let sender_address = public_key_to_address(sender_key_pair.public_key()); - - db.insert_account( - sender_address, - Account { nonce: 1, balance: U256::from(ETH_TO_WEI), bytecode_hash: None }, - None, - HashMap::default(), - ); - - // https://github.com/lightclient/sys-asm/blob/9282bdb9fd64e024e27f60f507486ffb2183cba2/test/Withdrawal.t.sol.in#L36 - let validator_public_key = fixed_bytes!("111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); - let withdrawal_amount = fixed_bytes!("0203040506070809"); - let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); - assert_eq!(input.len(), 56); - - let mut header = chain_spec.genesis_header().clone(); - header.gas_limit = 1_500_000; - // measured - header.gas_used = 135_856; - header.receipts_root = - b256!("b31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); - - let tx = sign_tx_with_key_pair( - sender_key_pair, - Transaction::Legacy(TxLegacy { - chain_id: Some(chain_spec.chain.id()), - nonce: 1, - gas_price: header.base_fee_per_gas.unwrap().into(), - gas_limit: header.gas_used, - to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), - // `MIN_WITHDRAWAL_REQUEST_FEE` - value: U256::from(2), - input, - }), - ); - - let provider = executor_provider(chain_spec); - - let executor = provider.executor(StateProviderDatabase::new(&db)); - - let BlockExecutionOutput { receipts, requests, .. 
} = executor - .execute( - ( - &Block { - header, - body: BlockBody { transactions: vec![tx], ..Default::default() }, - } - .with_recovered_senders() - .unwrap(), - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - let receipt = receipts.first().unwrap(); - assert!(receipt.success); - - assert!(requests[0].is_empty(), "there should be no deposits"); - assert!(!requests[1].is_empty(), "there should be a withdrawal"); - assert!(requests[2].is_empty(), "there should be no consolidations"); - } - - #[test] - fn block_gas_limit_error() { - // Create a chain specification with fork conditions set for Prague - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) - .build(), - ); - - // Create a state provider with the withdrawal requests contract pre-deployed - let mut db = create_state_provider_with_withdrawal_requests_contract(); - - // Initialize Secp256k1 for key pair generation - let secp = Secp256k1::new(); - // Generate a new key pair for the sender - let sender_key_pair = Keypair::new(&secp, &mut generators::rng()); - // Get the sender's address from the public key - let sender_address = public_key_to_address(sender_key_pair.public_key()); - - // Insert the sender account into the state with a nonce of 1 and a balance of 1 ETH in Wei - db.insert_account( - sender_address, - Account { nonce: 1, balance: U256::from(ETH_TO_WEI), bytecode_hash: None }, - None, - HashMap::default(), - ); - - // Define the validator public key and withdrawal amount as fixed bytes - let validator_public_key = fixed_bytes!("111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); - let withdrawal_amount = fixed_bytes!("2222222222222222"); - // Concatenate the validator public key and withdrawal amount into a single byte array - let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); - // Ensure the input length is 56 
bytes - assert_eq!(input.len(), 56); - - // Create a genesis block header with a specified gas limit and gas used - let mut header = chain_spec.genesis_header().clone(); - header.gas_limit = 1_500_000; - header.gas_used = 134_807; - header.receipts_root = - b256!("b31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); - - // Create a transaction with a gas limit higher than the block gas limit - let tx = sign_tx_with_key_pair( - sender_key_pair, - Transaction::Legacy(TxLegacy { - chain_id: Some(chain_spec.chain.id()), - nonce: 1, - gas_price: header.base_fee_per_gas.unwrap().into(), - gas_limit: 2_500_000, // higher than block gas limit - to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), - value: U256::from(1), - input, - }), - ); - - // Create an executor from the state provider - let executor = executor_provider(chain_spec).executor(StateProviderDatabase::new(&db)); - - // Execute the block and capture the result - let exec_result = executor.execute( - ( - &Block { header, body: BlockBody { transactions: vec![tx], ..Default::default() } } - .with_recovered_senders() - .unwrap(), - U256::ZERO, - ) - .into(), - ); - - // Check if the execution result is an error and assert the specific error type - match exec_result { - Ok(_) => panic!("Expected block gas limit error"), - Err(err) => assert_eq!( - *err.as_validation().unwrap(), - BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: 2_500_000, - block_available_gas: 1_500_000, - } - ), - } - } -} diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 29093adc8a3..a3fe5ed4503 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -21,6 +21,7 @@ reth-tracing.workspace = true reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network.workspace = true +reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-consensus.workspace = true reth-auto-seal-consensus.workspace 
= true diff --git a/crates/ethereum/node/src/evm.rs b/crates/ethereum/node/src/evm.rs index d710d8d8d45..bcdcaac6bfa 100644 --- a/crates/ethereum/node/src/evm.rs +++ b/crates/ethereum/node/src/evm.rs @@ -1,6 +1,8 @@ //! Ethereum EVM support #[doc(inline)] -pub use reth_evm_ethereum::execute::EthExecutorProvider; +pub use reth_evm::execute::BasicBlockExecutorProvider; +#[doc(inline)] +pub use reth_evm_ethereum::execute::{EthExecutionStrategyFactory, EthExecutorProvider}; #[doc(inline)] pub use reth_evm_ethereum::EthEvmConfig; diff --git a/crates/ethereum/node/src/lib.rs b/crates/ethereum/node/src/lib.rs index 37ebc33c22b..421cee37fb0 100644 --- a/crates/ethereum/node/src/lib.rs +++ b/crates/ethereum/node/src/lib.rs @@ -14,7 +14,9 @@ use revm as _; pub use reth_ethereum_engine_primitives::EthEngineTypes; pub mod evm; -pub use evm::{EthEvmConfig, EthExecutorProvider}; +pub use evm::{ + BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, EthExecutorProvider, +}; pub mod node; pub use node::EthereumNode; diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index d3301b2082e..3df46b4856f 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -9,7 +9,8 @@ use reth_chainspec::ChainSpec; use reth_ethereum_engine_primitives::{ EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, EthereumEngineValidator, }; -use reth_evm_ethereum::execute::EthExecutorProvider; +use reth_evm::execute::BasicBlockExecutorProvider; +use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_network::NetworkHandle; use reth_node_api::{ AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodePrimitives, @@ -136,7 +137,7 @@ where Node: FullNodeTypes, { type EVM = EthEvmConfig; - type Executor = EthExecutorProvider; + type Executor = BasicBlockExecutorProvider; async fn build_evm( self, @@ -144,7 +145,8 @@ where ) -> eyre::Result<(Self::EVM, Self::Executor)> { let chain_spec = 
ctx.chain_spec(); let evm_config = EthEvmConfig::new(ctx.chain_spec()); - let executor = EthExecutorProvider::new(chain_spec, evm_config.clone()); + let strategy_factory = EthExecutionStrategyFactory::new(chain_spec, evm_config.clone()); + let executor = BasicBlockExecutorProvider::new(strategy_factory); Ok((evm_config, executor)) } diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 847de99564e..44614ea49a8 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -4,7 +4,8 @@ use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::MAINNET; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; -use reth_evm_ethereum::{execute::EthExecutorProvider, EthEvmConfig}; +use reth_evm::execute::BasicBlockExecutorProvider; +use reth_evm_ethereum::{execute::EthExecutionStrategyFactory, EthEvmConfig}; use reth_network_api::noop::NoopNetwork; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; @@ -124,7 +125,7 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< TokioTaskExecutor, TestCanonStateSubscriptions, EthEvmConfig, - EthExecutorProvider, + BasicBlockExecutorProvider, > { RpcModuleBuilder::default() .with_provider(NoopProvider::default()) @@ -133,5 +134,7 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< .with_executor(TokioTaskExecutor::default()) .with_events(TestCanonStateSubscriptions::default()) .with_evm_config(EthEvmConfig::new(MAINNET.clone())) - .with_block_executor(EthExecutorProvider::ethereum(MAINNET.clone())) + .with_block_executor( + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::mainnet()), + ) } diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 7bb6ebc59e0..47cd9d0445a 100644 --- 
a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -667,7 +667,8 @@ mod tests { use assert_matches::assert_matches; use reth_chainspec::ChainSpecBuilder; use reth_db_api::{models::AccountBeforeTx, transaction::DbTxMut}; - use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_evm::execute::BasicBlockExecutorProvider; + use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_execution_errors::BlockValidationError; use reth_primitives::{Account, Bytecode, SealedBlock, StorageEntry}; use reth_provider::{ @@ -678,10 +679,11 @@ mod tests { use reth_stages_api::StageUnitCheckpoint; use std::collections::BTreeMap; - fn stage() -> ExecutionStage { - let executor_provider = EthExecutorProvider::ethereum(Arc::new( + fn stage() -> ExecutionStage> { + let strategy_factory = EthExecutionStrategyFactory::ethereum(Arc::new( ChainSpecBuilder::mainnet().berlin_activated().build(), )); + let executor_provider = BasicBlockExecutorProvider::new(strategy_factory); ExecutionStage::new( executor_provider, ExecutionStageThresholds { diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 9c421f9c6a5..55063fc9bbc 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -31,7 +31,7 @@ use reth_node_api::{ use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{ node::{EthereumAddOns, EthereumPayloadBuilder}, - EthExecutorProvider, EthereumNode, + BasicBlockExecutorProvider, EthExecutionStrategyFactory, EthereumNode, }; use reth_primitives::{ revm_primitives::{CfgEnvWithHandlerCfg, TxEnv}, @@ -158,7 +158,7 @@ where Node: FullNodeTypes>, { type EVM = MyEvmConfig; - type Executor = EthExecutorProvider; + type Executor = BasicBlockExecutorProvider>; async fn build_evm( self, @@ -166,7 +166,10 @@ where ) -> eyre::Result<(Self::EVM, Self::Executor)> { Ok(( MyEvmConfig::new(ctx.chain_spec()), - 
EthExecutorProvider::new(ctx.chain_spec(), MyEvmConfig::new(ctx.chain_spec())), + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::new( + ctx.chain_spec(), + MyEvmConfig::new(ctx.chain_spec()), + )), )) } } diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index 26ebdfe4124..b0165e4de26 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -20,7 +20,10 @@ use reth::{ use reth_chainspec::{Chain, ChainSpec}; use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes, NodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{node::EthereumAddOns, EthEvmConfig, EthExecutorProvider, EthereumNode}; +use reth_node_ethereum::{ + node::EthereumAddOns, BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, + EthereumNode, +}; use reth_primitives::{ revm_primitives::{SpecId, StatefulPrecompileMut}, Header, TransactionSigned, @@ -224,7 +227,7 @@ where Node: FullNodeTypes>, { type EVM = MyEvmConfig; - type Executor = EthExecutorProvider; + type Executor = BasicBlockExecutorProvider>; async fn build_evm( self, @@ -234,7 +237,13 @@ where inner: EthEvmConfig::new(ctx.chain_spec()), precompile_cache: self.precompile_cache.clone(), }; - Ok((evm_config.clone(), EthExecutorProvider::new(ctx.chain_spec(), evm_config))) + Ok(( + evm_config.clone(), + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::new( + ctx.chain_spec(), + evm_config, + )), + )) } } From 6c026daf920b28cd7e97e63ef20fea6c58af0f45 Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Sat, 19 Oct 2024 22:20:23 +0200 Subject: [PATCH 052/970] docs: explain how to add metrics to grafana (#11875) --- etc/README.md | 79 ++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 75 insertions(+), 4 deletions(-) diff --git a/etc/README.md b/etc/README.md index f80b5b774b6..28c71b04688 100644 --- 
a/etc/README.md +++ b/etc/README.md @@ -2,7 +2,8 @@ This directory contains miscellaneous files, such as example Grafana dashboards and Prometheus configuration. -The files in this directory may undergo a lot of changes while reth is unstable, so do not expect them to necessarily be up to date. +The files in this directory may undergo a lot of changes while reth is unstable, so do not expect them to necessarily be +up to date. ### Overview @@ -11,8 +12,78 @@ The files in this directory may undergo a lot of changes while reth is unstable, ### Docker Compose -To run Reth, Grafana or Prometheus with Docker Compose, refer to the [docker docs](/book/installation/docker.md#using-docker-compose). +To run Reth, Grafana or Prometheus with Docker Compose, refer to +the [docker docs](/book/installation/docker.md#using-docker-compose). -### Import Grafana dashboards +### Grafana -Running Grafana in Docker makes it possible to import existing dashboards, refer to [docs on how to run only Grafana in Docker](/book/installation/docker.md#using-docker-compose#run-only-grafana-in-docker). \ No newline at end of file +#### Adding a new metric to Grafana + +To set up a new metric in Reth and its Grafana dashboard: + +1. Add the metric to the codebase following the [metrics section](../docs/design/metrics.md#creating-metrics) + documentation. + +2. Build the Reth image: + + ```bash + docker build . -t reth:local + ``` + + Modify the [docker-compose](./docker-compose.yml) file to use your locally built image for the Reth service. + +3. Run Docker Compose: + + ```bash + docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml up -d + ``` + +4. Access Grafana: + + - Open `http://localhost:3000/` in a browser + - Log in with username and password `admin` + - Navigate to the `Dashboards` tab + +5. Create or modify a dashboard: + + - Select an existing dashboard or create a new one + - Click `Add` > `Visualization` to create a new panel + +6. 
Configure your metric panel: + + - Set a panel title and description + - Select metric(s) from the `Metrics browser` or use the `PromQL` terminal + - Document your metric(s) by setting units, legends, etc. + - When adding multiple metrics, use field overwrites if needed + +7. Save and arrange: + + - Click `Apply` to save the panel + - Drag the panel to desired position on the dashboard + +8. Export the dashboard: + + - Click `Share` > `Export` + - Toggle `Export for sharing externally` + - Click `Save to file` + +9. Update dashboard file: + - Replace the content of the corresponding file in the [dashboards folder](./grafana/dashboards) with the exported + JSON + +Your new metric is now integrated into the Reth Grafana dashboard. + +#### Import Grafana dashboards + +In order to import new Grafana dashboards or update a dashboard: + +1. Go to `Home` > `Dashboards` + +2. Click `New` > `Import` + +3. Drag the JSON dashboard file to import it + +4. If updating an existing dashboard, you will need to change the name and UID of the imported dashboard in order to + avoid conflict + +5. 
Delete the old dashboard From cf4a4454ecec03bab4be9cf3d8bccfbd0bb215df Mon Sep 17 00:00:00 2001 From: liamaharon Date: Sun, 20 Oct 2024 07:36:11 +1100 Subject: [PATCH 053/970] fix: feature propagation (#11888) Co-authored-by: Matthias Seitz Co-authored-by: Oliver --- .config/zepter.yaml | 40 +++++++++++++++++ .github/workflows/lint.yml | 17 ++++++++ Cargo.lock | 27 +++++++++++- Cargo.toml | 2 +- bin/reth-bench/Cargo.toml | 13 ++++-- bin/reth/Cargo.toml | 11 ++++- crates/blockchain-tree/Cargo.toml | 23 +++++++++- crates/chain-state/Cargo.toml | 14 +++--- crates/chainspec/Cargo.toml | 29 ++++++++++--- crates/consensus/auto-seal/Cargo.toml | 10 ++++- crates/consensus/beacon/Cargo.toml | 10 +++-- crates/consensus/consensus/Cargo.toml | 10 ++++- crates/engine/local/Cargo.toml | 6 ++- crates/engine/tree/Cargo.toml | 24 ++++++++--- crates/engine/util/Cargo.toml | 7 ++- crates/ethereum-forks/Cargo.toml | 27 +++++++++--- crates/ethereum/evm/Cargo.toml | 12 +++++- crates/ethereum/node/Cargo.toml | 15 ++++++- crates/evm/Cargo.toml | 23 +++++++++- crates/evm/execution-errors/Cargo.toml | 7 ++- crates/evm/execution-types/Cargo.toml | 24 ++++++++--- crates/exex/exex/Cargo.toml | 12 +++++- crates/exex/types/Cargo.toml | 15 ++++++- crates/net/discv4/Cargo.toml | 11 ++++- crates/net/dns/Cargo.toml | 13 +++++- crates/net/downloaders/Cargo.toml | 16 ++++--- crates/net/eth-wire-types/Cargo.toml | 24 ++++++++--- crates/net/eth-wire/Cargo.toml | 19 +++++++-- crates/net/network-api/Cargo.toml | 8 +++- crates/net/network/Cargo.toml | 32 +++++++++++++- crates/net/p2p/Cargo.toml | 14 +++++- crates/node/builder/Cargo.toml | 19 ++++++++- crates/node/core/Cargo.toml | 14 ++++-- crates/optimism/bin/Cargo.toml | 10 ++++- crates/optimism/chainspec/Cargo.toml | 18 ++++---- crates/optimism/cli/Cargo.toml | 13 +++--- crates/optimism/evm/Cargo.toml | 21 ++++++--- crates/optimism/hardforks/Cargo.toml | 12 +++++- crates/optimism/node/Cargo.toml | 42 +++++++++++++----- crates/optimism/payload/Cargo.toml 
| 11 +++-- crates/optimism/rpc/Cargo.toml | 9 ++-- crates/optimism/storage/Cargo.toml | 6 ++- crates/payload/builder/Cargo.toml | 8 +++- crates/primitives-traits/Cargo.toml | 34 +++++++++++---- crates/primitives/Cargo.toml | 59 +++++++++++++++++++------- crates/revm/Cargo.toml | 21 +++++++-- crates/stages/api/Cargo.toml | 7 ++- crates/stages/stages/Cargo.toml | 25 ++++++++--- crates/storage/codecs/Cargo.toml | 10 ++++- crates/storage/db-api/Cargo.toml | 26 +++++++++--- crates/storage/db-models/Cargo.toml | 13 +++++- crates/storage/db/Cargo.toml | 24 +++++++++-- crates/storage/errors/Cargo.toml | 6 ++- crates/storage/provider/Cargo.toml | 45 +++++++++++++++----- crates/transaction-pool/Cargo.toml | 40 ++++++++++++++--- crates/trie/common/Cargo.toml | 17 ++++++-- crates/trie/db/Cargo.toml | 22 +++++++++- crates/trie/trie/Cargo.toml | 20 +++++++-- testing/ef-tests/Cargo.toml | 6 ++- 59 files changed, 876 insertions(+), 197 deletions(-) create mode 100644 .config/zepter.yaml diff --git a/.config/zepter.yaml b/.config/zepter.yaml new file mode 100644 index 00000000000..8c6425f4ff0 --- /dev/null +++ b/.config/zepter.yaml @@ -0,0 +1,40 @@ +version: + format: 1 + # Minimum zepter version that is expected to work. This is just for printing a nice error + # message when someone tries to use an older version. + binary: 0.13.2 + +# The examples in the following comments assume crate `A` to have a dependency on crate `B`. +workflows: + check: + - [ + "lint", + # Check that `A` activates the features of `B`. + "propagate-feature", + # These are the features to check: + "--features=std,optimism,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench", + # Do not try to add a new section into `[features]` of `A` only because `B` expose that feature. There are edge-cases where this is still needed, but we can add them manually. + "--left-side-feature-missing=ignore", + # Ignore the case that `A` it outside of the workspace. 
Otherwise it will report errors in external dependencies that we have no influence on. + "--left-side-outside-workspace=ignore", + # Auxillary flags: + "--offline", + "--locked", + "--show-path", + "--quiet", + ] + default: + # Running `zepter` with no subcommand will check & fix. + - [$check.0, "--fix"] + +# Will be displayed when any workflow fails: +help: + text: | + Reth uses the Zepter CLI to detect abnormalities in Cargo features, e.g. missing propagation. + + It looks like one more more checks failed; please check the console output. + + You can try to automatically address them by installing zepter (`cargo install zepter --locked`) and simply running `zepter` in the workspace root. + links: + - "https://github.com/paradigmxyz/reth/pull/11888" + - "https://github.com/ggwpez/zepter" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 65c01c3a491..1921859c272 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -219,6 +219,22 @@ jobs: env: RUSTFLAGS: -D warnings + # Check crates correctly propagate features + feature-propagation: + runs-on: ubuntu-latest + timeout-minutes: 20 + steps: + - uses: actions/checkout@v4 + - name: fetch deps + run: | + # Eagerly pull dependencies + time cargo metadata --format-version=1 --locked > /dev/null + - name: run zepter + run: | + cargo install zepter -f --locked + zepter --version + time zepter run check + lint-success: name: lint success runs-on: ubuntu-latest @@ -236,6 +252,7 @@ jobs: - grafana - no-test-deps - features + - feature-propagation timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/Cargo.lock b/Cargo.lock index 25a62d0c043..4def3dcabed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -516,6 +516,7 @@ dependencies = [ "alloy-rlp", "alloy-serde", "alloy-sol-types", + "arbitrary", "derive_more 1.0.0", "itertools 0.13.0", "jsonrpsee-types", @@ -1497,6 +1498,17 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "bstr" 
+version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" +dependencies = [ + "memchr", + "regex-automata 0.4.8", + "serde", +] + [[package]] name = "bumpalo" version = "3.16.0" @@ -2258,6 +2270,7 @@ dependencies = [ "lock_api", "once_cell", "parking_lot_core 0.9.10", + "serde", ] [[package]] @@ -3318,6 +3331,7 @@ version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ + "serde", "typenum", "version_check", "zeroize", @@ -4512,7 +4526,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -4623,6 +4637,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" dependencies = [ "linked-hash-map", + "serde", ] [[package]] @@ -4645,6 +4660,7 @@ checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", + "serde", ] [[package]] @@ -5009,6 +5025,7 @@ dependencies = [ "libc", "log", "mio 0.8.11", + "serde", "walkdir", "windows-sys 0.48.0", ] @@ -5277,6 +5294,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", + "arbitrary", "op-alloy-consensus", "serde", "serde_json", @@ -5370,6 +5388,7 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ + "arbitrary", "arrayvec", "bitvec", "byte-slice-cast", @@ -10098,6 +10117,10 @@ name = "similar" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" +dependencies = [ + "bstr", + "unicode-segmentation", +] [[package]] name = "similar-asserts" @@ -10106,6 +10129,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe85670573cd6f0fa97940f26e7e6601213c3b0555246c24234131f88c5709e" dependencies = [ "console", + "serde", "similar", ] @@ -11016,6 +11040,7 @@ dependencies = [ "parking_lot 0.12.3", "rand 0.8.5", "resolv-conf", + "serde", "smallvec", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 6c66e501ef4..5b6912c33f2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -585,7 +585,7 @@ pprof = "0.13" proptest = "1.4" proptest-derive = "0.5" serial_test = { default-features = false, version = "3" } -similar-asserts = { default-features = false, version = "1.5.0" } +similar-asserts = { version = "1.5.0", features = ["serde"] } tempfile = "3.8" test-fuzz = "6" diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index e4e40daeca9..03844633a92 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -76,9 +76,16 @@ reth-tracing.workspace = true [features] default = ["jemalloc"] -asm-keccak = ["reth-primitives/asm-keccak"] - -jemalloc = ["reth-cli-util/jemalloc"] +asm-keccak = [ + "reth-primitives/asm-keccak", + "reth-node-core/asm-keccak", + "alloy-primitives/asm-keccak" +] + +jemalloc = [ + "reth-cli-util/jemalloc", + "reth-node-core/jemalloc" +] jemalloc-prof = ["reth-cli-util/jemalloc-prof"] tracy-allocator = ["reth-cli-util/tracy-allocator"] diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 8b2b77dd665..8380915d463 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -103,14 +103,21 @@ default = ["jemalloc"] dev = ["reth-cli-commands/dev"] -asm-keccak = ["reth-node-core/asm-keccak", "reth-primitives/asm-keccak"] +asm-keccak = [ + "reth-node-core/asm-keccak", + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak" +] jemalloc = [ "reth-cli-util/jemalloc", 
"reth-node-core/jemalloc", "reth-node-metrics/jemalloc", ] -jemalloc-prof = ["reth-cli-util/jemalloc"] +jemalloc-prof = [ + "reth-cli-util/jemalloc", + "reth-cli-util/jemalloc-prof" +] tracy-allocator = ["reth-cli-util/tracy-allocator"] min-error-logs = ["tracing/release_max_level_error"] diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index cff117c92b0..aa8fab16fa5 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -63,5 +63,24 @@ alloy-genesis.workspace = true alloy-consensus.workspace = true [features] -test-utils = [] -optimism = ["reth-primitives/optimism", "reth-provider/optimism"] +test-utils = [ + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-network/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-stages-api/test-utils", + "reth-db/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-trie-db/test-utils", + "reth-trie/test-utils" +] +optimism = [ + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-execution-types/optimism", + "reth-db/optimism", + "reth-db-api/optimism" +] diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index c9691bec411..9a88a3c54bc 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -56,9 +56,13 @@ revm.workspace = true [features] test-utils = [ - "alloy-signer", - "alloy-signer-local", - "alloy-consensus", - "rand", - "revm" + "alloy-signer", + "alloy-signer-local", + "alloy-consensus", + "rand", + "revm", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-trie/test-utils", + "revm?/test-utils" ] diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index 87df28322a6..5bac582cd8b 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -40,11 +40,26 @@ alloy-genesis.workspace = true [features] default = ["std"] std = [ - 
"alloy-chains/std", - "alloy-eips/std", - "alloy-genesis/std", - "alloy-primitives/std", - "alloy-trie/std", + "alloy-chains/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-trie/std", + "reth-primitives-traits/std", + "alloy-consensus/std", + "once_cell/std" +] +arbitrary = [ + "alloy-chains/arbitrary", + "reth-ethereum-forks/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "alloy-trie/arbitrary" +] +test-utils = [ + "reth-primitives-traits/test-utils", + "reth-trie-common/test-utils" ] -arbitrary = ["alloy-chains/arbitrary"] -test-utils = [] diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 24985871141..f2bfb43bcce 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -46,4 +46,12 @@ tokio-stream.workspace = true tracing.workspace = true [features] -optimism = ["reth-provider/optimism", "reth-optimism-consensus"] +optimism = [ + "reth-provider/optimism", + "reth-optimism-consensus", + "reth-beacon-consensus/optimism", + "reth-execution-types/optimism", + "reth-optimism-consensus?/optimism", + "reth-primitives/optimism", + "revm-primitives/optimism" +] diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 192ae2b93df..dd1e339319b 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -78,8 +78,10 @@ assert_matches.workspace = true [features] optimism = [ - "reth-chainspec", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-blockchain-tree/optimism", + "reth-chainspec", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-blockchain-tree/optimism", + "reth-db/optimism", + "reth-db-api/optimism" ] diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 1736caab543..2faf3f2ac71 
100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -24,5 +24,11 @@ derive_more.workspace = true [features] default = ["std"] -std = [] -test-utils = [] +std = [ + "reth-primitives/std", + "alloy-primitives/std", + "alloy-eips/std" +] +test-utils = [ + "reth-primitives/test-utils" +] diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index f22ab1f8d56..d9dc6325339 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -46,4 +46,8 @@ op-alloy-rpc-types-engine = { workspace = true, optional = true } workspace = true [features] -optimism = ["op-alloy-rpc-types-engine"] +optimism = [ + "op-alloy-rpc-types-engine", + "reth-beacon-consensus/optimism", + "reth-provider/optimism" +] diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 3a618f4fd7a..6fe741db883 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -79,11 +79,21 @@ assert_matches.workspace = true [features] test-utils = [ - "reth-db/test-utils", - "reth-chain-state/test-utils", - "reth-network-p2p/test-utils", - "reth-prune-types", - "reth-stages/test-utils", - "reth-static-file", - "reth-tracing", + "reth-db/test-utils", + "reth-chain-state/test-utils", + "reth-network-p2p/test-utils", + "reth-prune-types", + "reth-stages/test-utils", + "reth-static-file", + "reth-tracing", + "reth-blockchain-tree/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-stages-api/test-utils", + "reth-provider/test-utils", + "reth-trie/test-utils" ] diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 35a7e74bb21..07aa40165e2 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -50,4 +50,9 @@ itertools.workspace = true tracing.workspace = true [features] -optimism = 
["reth-beacon-consensus/optimism"] +optimism = [ + "reth-beacon-consensus/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "revm-primitives/optimism" +] diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index 7b4b6c53c09..9f7ce7ee8f3 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -39,12 +39,27 @@ alloy-consensus.workspace = true [features] default = ["std", "serde", "rustc-hash"] -arbitrary = ["dep:arbitrary", "dep:proptest", "dep:proptest-derive"] -serde = ["dep:serde"] +arbitrary = [ + "dep:arbitrary", + "dep:proptest", + "dep:proptest-derive", + "alloy-chains/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary" +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-primitives/serde" +] std = [ - "alloy-chains/std", - "alloy-primitives/std", - "thiserror-no-std/std", - "rustc-hash/std", + "alloy-chains/std", + "alloy-primitives/std", + "thiserror-no-std/std", + "rustc-hash/std", + "alloy-consensus/std", + "once_cell/std", + "serde?/std" ] rustc-hash = ["dep:rustc-hash"] diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 8cbc92f90f3..17e870e6111 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -41,4 +41,14 @@ alloy-genesis.workspace = true [features] default = ["std"] -std = [] +std = [ + "reth-consensus/std", + "reth-primitives/std", + "reth-revm/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "revm-primitives/std", + "secp256k1/std" +] diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index a3fe5ed4503..11555cdc4a5 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -55,4 +55,17 @@ alloy-consensus.workspace = true [features] default = [] -test-utils = ["reth-node-builder/test-utils"] +test-utils = [ + "reth-node-builder/test-utils", + 
"reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "revm/test-utils", + "reth-evm/test-utils" +] diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 6c16973b28b..6a1e1fe0d72 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -42,5 +42,24 @@ reth-ethereum-forks.workspace = true [features] default = ["std"] -std = ["dep:metrics", "dep:reth-metrics"] -test-utils = ["dep:parking_lot"] +std = [ + "dep:metrics", + "dep:reth-metrics", + "reth-consensus/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "reth-revm/std", + "alloy-eips/std", + "alloy-primitives/std", + "revm-primitives/std", + "revm/std" +] +test-utils = [ + "dep:parking_lot", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "revm/test-utils" +] diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index d4f8534e752..721c8055110 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -26,4 +26,9 @@ derive_more.workspace = true [features] default = ["std"] -std = ["reth-consensus/std"] +std = [ + "reth-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "revm-primitives/std" +] diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 49e9623021e..b6af3dee9af 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -33,10 +33,24 @@ reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } [features] default = ["std"] optimism = ["reth-primitives/optimism", "revm/optimism"] -serde = ["dep:serde", "reth-trie/serde", "revm/serde"] +serde = [ + 
"dep:serde", + "reth-trie/serde", + "revm/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "rand/serde" +] serde-bincode-compat = [ - "reth-primitives/serde-bincode-compat", - "reth-trie/serde-bincode-compat", - "serde_with", + "reth-primitives/serde-bincode-compat", + "reth-trie/serde-bincode-compat", + "serde_with", + "alloy-eips/serde-bincode-compat" +] +std = [ + "reth-primitives/std", + "alloy-eips/std", + "alloy-primitives/std", + "revm/std", + "serde?/std" ] -std = [] diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 27a9d1576c8..903e11e784e 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -70,4 +70,14 @@ tempfile.workspace = true [features] default = [] -serde = ["reth-provider/serde", "reth-exex-types/serde"] +serde = [ + "reth-provider/serde", + "reth-exex-types/serde", + "reth-revm/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "parking_lot/serde", + "rand/serde", + "secp256k1/serde" +] diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index a146cbc2273..51097d6109c 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -33,5 +33,16 @@ rand.workspace = true [features] default = [] -serde = ["dep:serde", "reth-execution-types/serde"] -serde-bincode-compat = ["reth-execution-types/serde-bincode-compat", "serde_with"] +serde = [ + "dep:serde", + "reth-execution-types/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "rand/serde" +] +serde-bincode-compat = [ + "reth-execution-types/serde-bincode-compat", + "serde_with", + "reth-primitives/serde-bincode-compat", + "alloy-eips/serde-bincode-compat" +] diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index fde652ef397..f008d03b56f 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -51,5 +51,14 @@ reth-tracing.workspace = true [features] default = ["serde"] -serde = ["dep:serde"] +serde = [ + 
"dep:serde", + "alloy-primitives/serde", + "discv5/serde", + "enr/serde", + "generic-array/serde", + "parking_lot/serde", + "rand?/serde", + "secp256k1/serde" +] test-utils = ["dep:rand"] diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 2af72afcef6..a52f6505744 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -48,4 +48,15 @@ reth-tracing.workspace = true rand.workspace = true [features] -serde = ["dep:serde", "dep:serde_with"] +serde = [ + "dep:serde", + "dep:serde_with", + "alloy-chains/serde", + "alloy-primitives/serde", + "enr/serde", + "linked_hash_set/serde", + "parking_lot/serde", + "rand/serde", + "secp256k1/serde", + "trust-dns-resolver/serde" +] diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 5e7f4dd47a2..272db6fc6d1 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -71,10 +71,14 @@ tempfile.workspace = true [features] test-utils = [ - "dep:tempfile", - "dep:reth-db-api", - "reth-db/test-utils", - "reth-consensus/test-utils", - "reth-network-p2p/test-utils", - "reth-testing-utils", + "dep:tempfile", + "dep:reth-db-api", + "reth-db/test-utils", + "reth-consensus/test-utils", + "reth-network-p2p/test-utils", + "reth-testing-utils", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-db-api?/test-utils", + "reth-provider/test-utils" ] diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 82c9fe37a44..1d2b5487245 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -45,10 +45,22 @@ alloy-consensus.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", - "alloy-chains/arbitrary", - "dep:arbitrary", - "dep:proptest", - "dep:proptest-arbitrary-interop", + "reth-primitives/arbitrary", + "alloy-chains/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + 
"alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary" +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde" ] -serde = ["dep:serde"] diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 6eea4bc4ac6..b0e256fdf63 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -66,11 +66,22 @@ alloy-eips.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", - "reth-eth-wire-types/arbitrary", - "dep:arbitrary", + "reth-primitives/arbitrary", + "reth-eth-wire-types/arbitrary", + "dep:arbitrary", + "reth-chainspec/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary" +] +serde = [ + "dep:serde", + "reth-eth-wire-types/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "secp256k1/serde" ] -serde = ["dep:serde", "reth-eth-wire-types/serde"] [[test]] name = "fuzz_roundtrip" diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 650d749048c..6d410e9db23 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -40,4 +40,10 @@ derive_more.workspace = true [features] default = ["serde"] -serde = ["dep:serde"] +serde = [ + "dep:serde", + "reth-eth-wire-types/serde", + "reth-network-types/serde", + "alloy-primitives/serde", + "enr/serde" +] diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 1d3af517af3..f444aa7fe27 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -101,8 +101,36 @@ criterion = { workspace = true, features = ["async_tokio", "html_reports"] } [features] default = ["serde"] geth-tests = [] -serde = ["dep:serde", "secp256k1/serde", "enr/serde", "reth-network-types/serde"] -test-utils = ["dep:reth-provider", "reth-provider?/test-utils", "dep:tempfile", 
"reth-transaction-pool/test-utils", "reth-network-types/test-utils"] +serde = [ + "dep:serde", + "secp256k1/serde", + "enr/serde", + "reth-network-types/serde", + "reth-dns-discovery/serde", + "reth-eth-wire/serde", + "reth-provider?/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "discv5/serde", + "parking_lot/serde", + "rand/serde", + "smallvec/serde", + "url/serde" +] +test-utils = [ + "dep:reth-provider", + "reth-provider?/test-utils", + "dep:tempfile", + "reth-transaction-pool/test-utils", + "reth-network-types/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-discv4/test-utils", + "reth-network/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives/test-utils" +] [[bench]] name = "bench" diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index c43f7f5b347..3b6d74c9dbe 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -43,5 +43,15 @@ tokio = { workspace = true, features = ["full"] } [features] default = ["std"] -test-utils = ["reth-consensus/test-utils", "parking_lot"] -std = ["reth-consensus/std"] +test-utils = [ + "reth-consensus/test-utils", + "parking_lot", + "reth-network-types/test-utils", + "reth-primitives/test-utils" +] +std = [ + "reth-consensus/std", + "reth-primitives/std", + "alloy-eips/std", + "alloy-primitives/std" +] diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 53e53cd2b85..86f755cb920 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -96,4 +96,21 @@ tempfile.workspace = true [features] default = [] -test-utils = ["reth-db/test-utils"] +test-utils = [ + "reth-db/test-utils", + "reth-blockchain-tree/test-utils", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-engine-tree/test-utils", + "reth-evm/test-utils", + "reth-downloaders/test-utils", + "reth-network/test-utils", + "reth-network-p2p/test-utils", 
+ "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-stages/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils" +] diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index a6ae1db5e01..73c552f4d7a 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -76,10 +76,18 @@ proptest.workspace = true tokio.workspace = true [features] -optimism = ["reth-primitives/optimism"] +optimism = [ + "reth-primitives/optimism", + "reth-db/optimism" +] # Features for vergen to generate correct env vars -jemalloc = [] -asm-keccak = [] +jemalloc = [ + "reth-cli-util/jemalloc" +] +asm-keccak = [ + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak" +] [build-dependencies] vergen = { version = "8.0.0", features = ["build", "cargo", "git", "gitcl"] } diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 2de0bb6ee18..f60ef36a466 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -37,7 +37,15 @@ tracy-allocator = ["reth-cli-util/tracy-allocator"] asm-keccak = ["reth-optimism-cli/asm-keccak", "reth-optimism-node/asm-keccak"] -optimism = ["reth-optimism-cli/optimism", "reth-optimism-node/optimism"] +optimism = [ + "reth-optimism-cli/optimism", + "reth-optimism-node/optimism", + "reth-optimism-consensus/optimism", + "reth-optimism-evm/optimism", + "reth-optimism-payload-builder/optimism", + "reth-optimism-rpc/optimism", + "reth-provider/optimism" +] min-error-logs = ["tracing/release_max_level_error"] min-warn-logs = ["tracing/release_max_level_warn"] diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index efc9bf0b012..6b068dabbf0 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -45,12 +45,14 @@ op-alloy-rpc-types.workspace = true [features] default = ["std"] std = [ - "alloy-chains/std", - "alloy-genesis/std", - 
"alloy-primitives/std", - "op-alloy-rpc-types/std", - "reth-chainspec/std", - "reth-ethereum-forks/std", - "reth-primitives-traits/std", - "reth-optimism-forks/std", + "alloy-chains/std", + "alloy-genesis/std", + "alloy-primitives/std", + "op-alloy-rpc-types/std", + "reth-chainspec/std", + "reth-ethereum-forks/std", + "reth-primitives-traits/std", + "reth-optimism-forks/std", + "alloy-consensus/std", + "once_cell/std" ] diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index d53270cd62f..7db41ccbe84 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -73,11 +73,14 @@ reth-cli-commands.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-optimism-evm/optimism", - "reth-provider/optimism", - "reth-node-core/optimism", - "reth-optimism-node/optimism", + "reth-primitives/optimism", + "reth-optimism-evm/optimism", + "reth-provider/optimism", + "reth-node-core/optimism", + "reth-optimism-node/optimism", + "reth-execution-types/optimism", + "reth-db/optimism", + "reth-db-api/optimism" ] asm-keccak = [ "alloy-primitives/asm-keccak", diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index f251347c58b..f6b22ad14c8 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -51,10 +51,21 @@ alloy-consensus.workspace = true [features] default = ["std"] -std = [] +std = [ + "reth-consensus/std", + "reth-primitives/std", + "reth-revm/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "revm-primitives/std", + "revm/std" +] optimism = [ - "reth-primitives/optimism", - "reth-execution-types/optimism", - "reth-optimism-consensus/optimism", - "revm/optimism", + "reth-primitives/optimism", + "reth-execution-types/optimism", + "reth-optimism-consensus/optimism", + "revm/optimism", + "revm-primitives/optimism" ] diff --git a/crates/optimism/hardforks/Cargo.toml b/crates/optimism/hardforks/Cargo.toml 
index 815d50c6bcc..c30566a54eb 100644 --- a/crates/optimism/hardforks/Cargo.toml +++ b/crates/optimism/hardforks/Cargo.toml @@ -27,5 +27,13 @@ once_cell.workspace = true [features] default = ["std"] -std = [] -serde = ["dep:serde"] \ No newline at end of file +std = [ + "alloy-primitives/std", + "once_cell/std", + "serde?/std" +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-primitives/serde" +] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index fbe787d6e16..37cf4a328ea 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -69,15 +69,35 @@ op-alloy-consensus.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-optimism-evm/optimism", - "reth-optimism-payload-builder/optimism", - "reth-beacon-consensus/optimism", - "revm/optimism", - "reth-auto-seal-consensus/optimism", - "reth-optimism-rpc/optimism", - "reth-engine-local/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-optimism-evm/optimism", + "reth-optimism-payload-builder/optimism", + "reth-beacon-consensus/optimism", + "revm/optimism", + "reth-auto-seal-consensus/optimism", + "reth-optimism-rpc/optimism", + "reth-engine-local/optimism", + "reth-optimism-consensus/optimism", + "reth-db/optimism" +] +asm-keccak = [ + "reth-primitives/asm-keccak", + "reth/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak" +] +test-utils = [ + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "revm/test-utils" ] -asm-keccak = ["reth-primitives/asm-keccak"] -test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/optimism/payload/Cargo.toml 
b/crates/optimism/payload/Cargo.toml index 46cc82edb6c..de61def8350 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -50,8 +50,11 @@ sha2.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-optimism-evm/optimism", - "revm/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-optimism-evm/optimism", + "revm/optimism", + "reth-execution-types/optimism", + "reth-optimism-consensus/optimism", + "revm-primitives/optimism" ] diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 90984998ac7..dc0f96c4012 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -63,8 +63,9 @@ reth-optimism-chainspec.workspace = true [features] optimism = [ - "reth-optimism-evm/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "revm/optimism", + "reth-optimism-evm/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "revm/optimism", + "reth-optimism-consensus/optimism" ] diff --git a/crates/optimism/storage/Cargo.toml b/crates/optimism/storage/Cargo.toml index 107b64db3de..2b18897d94a 100644 --- a/crates/optimism/storage/Cargo.toml +++ b/crates/optimism/storage/Cargo.toml @@ -20,4 +20,8 @@ reth-prune-types.workspace = true reth-stages-types.workspace = true [features] -optimism = ["reth-primitives/optimism"] \ No newline at end of file +optimism = [ + "reth-primitives/optimism", + "reth-codecs/optimism", + "reth-db-api/optimism" +] diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 71f63ce34c2..3b71011e02e 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -40,4 +40,10 @@ tracing.workspace = true revm.workspace = true [features] -test-utils = ["reth-chain-state"] +test-utils = [ + "reth-chain-state", + "reth-chain-state?/test-utils", + "reth-primitives/test-utils", + "reth-provider/test-utils", + 
"revm/test-utils" +] diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 2fec7566656..9634da40f47 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -54,14 +54,30 @@ test-fuzz.workspace = true [features] default = ["std"] -std = [] -test-utils = ["arbitrary"] +std = [ + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "revm-primitives/std", + "serde/std" +] +test-utils = [ + "arbitrary", + "reth-codecs/test-utils" +] arbitrary = [ - "std", - "alloy-consensus/arbitrary", - "alloy-primitives/arbitrary", - "dep:arbitrary", - "dep:proptest", - "dep:proptest-arbitrary-interop", + "std", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "alloy-eips/arbitrary", + "revm-primitives/arbitrary" +] +serde-bincode-compat = [ + "serde_with", + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat" ] -serde-bincode-compat = ["serde_with", "alloy-consensus/serde-bincode-compat"] diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 5661fb8f846..566a114bebf 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -90,18 +90,41 @@ pprof = { workspace = true, features = [ [features] default = ["c-kzg", "alloy-compat", "std", "reth-codec", "secp256k1"] -std = ["reth-primitives-traits/std"] +std = [ + "reth-primitives-traits/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-serde?/std", + "k256/std", + "once_cell/std", + "revm-primitives/std", + "secp256k1?/std", + "serde/std" +] reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] -asm-keccak = ["alloy-primitives/asm-keccak"] +asm-keccak = [ + "alloy-primitives/asm-keccak", + "revm-primitives/asm-keccak" +] arbitrary = [ - "dep:arbitrary", - "alloy-eips/arbitrary", - 
"rand", - "reth-codec", - "reth-ethereum-forks/arbitrary", - "reth-primitives-traits/arbitrary", - "revm-primitives/arbitrary", - "secp256k1", + "dep:arbitrary", + "alloy-eips/arbitrary", + "rand", + "reth-codec", + "reth-ethereum-forks/arbitrary", + "reth-primitives-traits/arbitrary", + "revm-primitives/arbitrary", + "secp256k1", + "reth-chainspec/arbitrary", + "reth-trie-common/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "alloy-rpc-types?/arbitrary", + "alloy-serde?/arbitrary", + "op-alloy-consensus?/arbitrary", + "op-alloy-rpc-types?/arbitrary" ] secp256k1 = ["dep:secp256k1"] c-kzg = [ @@ -121,12 +144,18 @@ alloy-compat = [ "dep:alloy-serde", "dep:op-alloy-rpc-types", ] -test-utils = ["reth-primitives-traits/test-utils"] +test-utils = [ + "reth-primitives-traits/test-utils", + "reth-chainspec/test-utils", + "reth-codecs?/test-utils", + "reth-trie-common/test-utils" +] serde-bincode-compat = [ - "alloy-consensus/serde-bincode-compat", - "op-alloy-consensus?/serde-bincode-compat", - "reth-primitives-traits/serde-bincode-compat", - "serde_with", + "alloy-consensus/serde-bincode-compat", + "op-alloy-consensus?/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", + "serde_with", + "alloy-eips/serde-bincode-compat" ] [[bench]] diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 668abb79e38..8f670d364b8 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -34,6 +34,21 @@ alloy-primitives.workspace = true [features] default = ["std"] -std = [] -test-utils = ["dep:reth-trie"] -serde = ["revm/serde"] +std = [ + "reth-primitives/std", + "alloy-primitives/std", + "revm/std", + "alloy-eips/std" +] +test-utils = [ + "dep:reth-trie", + "reth-primitives/test-utils", + "reth-trie?/test-utils", + "revm/test-utils" +] +serde = [ + "revm/serde", + "reth-trie?/serde", + "alloy-eips/serde", + "alloy-primitives/serde" +] diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index 
352d3e02476..cba569a2a43 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -46,4 +46,9 @@ tokio-stream.workspace = true reth-testing-utils.workspace = true [features] -test-utils = [] +test-utils = [ + "reth-consensus/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives-traits/test-utils", + "reth-provider/test-utils" +] diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 81f35a4b390..0b26cb6a1e7 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -93,13 +93,24 @@ pprof = { workspace = true, features = [ [features] test-utils = [ - "dep:reth-chainspec", - "reth-network-p2p/test-utils", - "reth-db/test-utils", - "reth-provider/test-utils", - "reth-stages-api/test-utils", - "dep:reth-testing-utils", - "dep:tempfile", + "dep:reth-chainspec", + "reth-network-p2p/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-stages-api/test-utils", + "dep:reth-testing-utils", + "dep:tempfile", + "reth-chainspec?/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-downloaders/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "reth-trie/test-utils" ] [[bench]] diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 21a1897f1c7..2525b4e8d7f 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -49,7 +49,15 @@ serde.workspace = true [features] default = ["std", "alloy"] -std = ["alloy-primitives/std", "bytes/std"] +std = [ + "alloy-primitives/std", + "bytes/std", + "alloy-consensus?/std", + "alloy-eips?/std", + "alloy-genesis?/std", + "alloy-trie?/std", + "serde/std" +] alloy = [ "dep:alloy-consensus", "dep:alloy-eips", diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 
d674f9d7b68..932a94b98eb 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -56,11 +56,25 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true [features] -test-utils = ["arbitrary"] +test-utils = [ + "arbitrary", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", + "reth-db-models/test-utils", + "reth-trie-common/test-utils" +] arbitrary = [ - "reth-primitives/arbitrary", - "reth-db-models/arbitrary", - "dep:arbitrary", - "dep:proptest", + "reth-primitives/arbitrary", + "reth-db-models/arbitrary", + "dep:arbitrary", + "dep:proptest", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "parity-scale-codec/arbitrary" +] +optimism = [ + "reth-primitives/optimism", + "reth-codecs/optimism" ] -optimism = ["reth-primitives/optimism"] diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 492178775b6..31741207cad 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -39,5 +39,14 @@ proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true [features] -test-utils = ["arbitrary"] -arbitrary = ["reth-primitives/arbitrary", "dep:arbitrary", "dep:proptest"] +test-utils = [ + "arbitrary", + "reth-primitives/test-utils", + "reth-codecs/test-utils" +] +arbitrary = [ + "reth-primitives/arbitrary", + "dep:arbitrary", + "dep:proptest", + "alloy-primitives/arbitrary" +] diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 356672f2548..2f437e63109 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -89,10 +89,28 @@ mdbx = [ "dep:strum", "dep:rustc-hash", ] -test-utils = ["dep:tempfile", "arbitrary", "parking_lot"] +test-utils = [ + "dep:tempfile", + "arbitrary", + "parking_lot", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-db-api/test-utils", + 
"reth-nippy-jar/test-utils", + "reth-trie-common/test-utils" +] bench = [] -arbitrary = ["reth-primitives/arbitrary", "reth-db-api/arbitrary"] -optimism = [] +arbitrary = [ + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary" +] +optimism = [ + "reth-primitives/optimism", + "reth-db-api/optimism" +] disable-lock = [] [[bench]] diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index 52c93ae4ef0..ecefa5f6aca 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -25,4 +25,8 @@ derive_more.workspace = true [features] default = ["std"] -std = [] +std = [ + "reth-primitives/std", + "alloy-eips/std", + "alloy-primitives/std" +] diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 00e1c9f098d..b93c22cdf67 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -88,16 +88,41 @@ alloy-consensus.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-execution-types/optimism", - "reth-optimism-primitives", + "reth-primitives/optimism", + "reth-execution-types/optimism", + "reth-optimism-primitives", + "reth-codecs/optimism", + "reth-db/optimism", + "reth-db-api/optimism", + "revm/optimism" +] +serde = [ + "reth-execution-types/serde", + "reth-trie-db/serde", + "reth-trie/serde", + "alloy-consensus?/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "alloy-rpc-types-engine/serde", + "dashmap/serde", + "notify/serde", + "parking_lot/serde", + "rand/serde", + "revm/serde" ] -serde = ["reth-execution-types/serde"] test-utils = [ - "reth-db/test-utils", - "reth-nippy-jar/test-utils", - "reth-trie/test-utils", - "reth-chain-state/test-utils", - "reth-ethereum-engine-primitives", - "alloy-consensus", + "reth-db/test-utils", + "reth-nippy-jar/test-utils", + "reth-trie/test-utils", + 
"reth-chain-state/test-utils", + "reth-ethereum-engine-primitives", + "alloy-consensus", + "reth-chainspec/test-utils", + "reth-evm/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils" ] diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index cdac6a1aae6..1bfb10d86d7 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -72,12 +72,42 @@ serde_json.workspace = true [features] default = ["serde"] -serde = ["dep:serde"] -test-utils = ["rand", "paste", "serde"] +serde = [ + "dep:serde", + "reth-execution-types/serde", + "reth-eth-wire-types/serde", + "reth-provider/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bitflags/serde", + "parking_lot/serde", + "rand?/serde", + "revm/serde", + "smallvec/serde" +] +test-utils = [ + "rand", + "paste", + "serde", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-provider/test-utils", + "revm/test-utils" +] arbitrary = [ - "proptest", - "reth-primitives/arbitrary", - "proptest-arbitrary-interop", + "proptest", + "reth-primitives/arbitrary", + "proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + "reth-eth-wire-types/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "bitflags/arbitrary", + "revm/arbitrary", + "smallvec/arbitrary" ] [[bench]] diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 0bd28140f44..2c6ccbfe689 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -41,8 +41,19 @@ hash-db = "=0.15.2" plain_hasher = "0.2" [features] -test-utils = ["dep:plain_hasher", "dep:hash-db", "arbitrary"] +test-utils = [ + "dep:plain_hasher", + "dep:hash-db", + "arbitrary", + "reth-primitives-traits/test-utils", + 
"reth-codecs/test-utils" +] arbitrary = [ - "alloy-trie/arbitrary", - "dep:arbitrary", + "alloy-trie/arbitrary", + "dep:arbitrary", + "reth-primitives-traits/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "nybbles/arbitrary", + "revm-primitives/arbitrary" ] diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index e75b0456eb9..55fa9a851b1 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -66,5 +66,23 @@ similar-asserts.workspace = true [features] metrics = ["reth-metrics", "reth-trie/metrics", "dep:metrics"] -serde = ["dep:serde"] -test-utils = ["triehash", "reth-trie-common/test-utils"] +serde = [ + "dep:serde", + "reth-provider/serde", + "reth-trie/serde", + "alloy-consensus/serde", + "alloy-primitives/serde", + "revm/serde", + "similar-asserts/serde" +] +test-utils = [ + "triehash", + "reth-trie-common/test-utils", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-db/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-trie/test-utils", + "revm/test-utils" +] diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 77fc5739770..112e661c027 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -64,9 +64,23 @@ bincode.workspace = true [features] metrics = ["reth-metrics", "dep:metrics"] -serde = ["dep:serde"] -serde-bincode-compat = ["serde_with"] -test-utils = ["triehash", "reth-trie-common/test-utils"] +serde = [ + "dep:serde", + "alloy-consensus/serde", + "alloy-primitives/serde", + "revm/serde" +] +serde-bincode-compat = [ + "serde_with", + "reth-primitives/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat" +] +test-utils = [ + "triehash", + "reth-trie-common/test-utils", + "reth-primitives/test-utils", + "revm/test-utils" +] [[bench]] name = "prefix_set" diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index df68f5154fc..a56c44ec3db 100644 --- 
a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -13,7 +13,11 @@ workspace = true [features] ef-tests = [] -asm-keccak = ["reth-primitives/asm-keccak"] +asm-keccak = [ + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak" +] [dependencies] reth-chainspec.workspace = true From 422ab1735407c8e9de8ffa24adb416132d41f351 Mon Sep 17 00:00:00 2001 From: caglarkaya Date: Sun, 20 Oct 2024 01:14:52 +0300 Subject: [PATCH 054/970] feat: use next free nonce in eth_sendTransaction (#11873) --- crates/rpc/rpc-eth-api/src/helpers/state.rs | 36 +++++++++++++++++++ .../rpc-eth-api/src/helpers/transaction.rs | 5 ++- 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index f2fc13f5d03..080d90dc3b0 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -272,6 +272,42 @@ pub trait LoadState: EthApiTypes { } } + /// Returns the next available nonce without gaps for the given address + /// Next available nonce is either the on chain nonce of the account or the highest consecutive + /// nonce in the pool + 1 + fn next_available_nonce( + &self, + address: Address, + ) -> impl Future> + Send + where + Self: SpawnBlocking, + { + self.spawn_blocking_io(move |this| { + // first fetch the on chain nonce of the account + let on_chain_account_nonce = this + .latest_state()? + .account_nonce(address) + .map_err(Self::Error::from_eth_err)? 
+ .unwrap_or_default(); + + let mut next_nonce = on_chain_account_nonce; + // Retrieve the highest consecutive transaction for the sender from the transaction pool + if let Some(highest_tx) = this + .pool() + .get_highest_consecutive_transaction_by_sender(address, on_chain_account_nonce) + { + // Return the nonce of the highest consecutive transaction + 1 + next_nonce = highest_tx.nonce().checked_add(1).ok_or_else(|| { + Self::Error::from(EthApiError::InvalidTransaction( + RpcInvalidTransactionError::NonceMaxValue, + )) + })?; + } + + Ok(next_nonce) + }) + } + /// Returns the number of transactions sent from an address at the given block identifier. /// /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 54d60cb7abd..d29787d7a23 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -365,9 +365,8 @@ pub trait EthTransactions: LoadTransaction { // set nonce if not already set before if request.nonce.is_none() { - let nonce = self.transaction_count(from, Some(BlockId::pending())).await?; - // note: `.to()` can't panic because the nonce is constructed from a `u64` - request.nonce = Some(nonce.to()); + let nonce = self.next_available_nonce(from).await?; + request.nonce = Some(nonce); } let chain_id = self.chain_id(); From 453ba2d9accc2b85d6495c51306843899755810f Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sun, 20 Oct 2024 10:46:18 +0200 Subject: [PATCH 055/970] feat: switch to composable executor for Optimism (#11846) --- crates/optimism/evm/src/execute.rs | 497 ++++++++-------------------- crates/optimism/evm/src/lib.rs | 2 - crates/optimism/evm/src/strategy.rs | 494 --------------------------- crates/optimism/node/src/node.rs | 10 +- 4 files changed, 150 insertions(+), 853 deletions(-) delete mode 100644 
crates/optimism/evm/src/strategy.rs diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index d15cdee13d6..77f67066851 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,146 +1,134 @@ -//! Optimism block executor. +//! Optimism block execution strategy. -use crate::{ - l1::ensure_create2_deployer, OpChainSpec, OptimismBlockExecutionError, OptimismEvmConfig, -}; +use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; use alloy_eips::eip7685::Requests; -use alloy_primitives::{BlockNumber, U256}; use core::fmt::Display; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_chainspec::EthereumHardforks; +use reth_consensus::ConsensusError; use reth_evm::{ execute::{ - BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, BlockValidationError, Executor, ProviderError, + BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, + BlockExecutionStrategyFactory, BlockValidationError, ProviderError, }, state_change::post_block_balance_increments, - system_calls::{NoopHook, OnStateHook, SystemCaller}, - ConfigureEvm, + system_calls::{OnStateHook, SystemCaller}, + ConfigureEvm, ConfigureEvmEnv, }; -use reth_execution_types::ExecutionOutcome; +use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OptimismHardfork; -use reth_primitives::{BlockWithSenders, Header, Receipt, Receipts, TxType}; -use reth_prune_types::PruneModes; -use reth_revm::{batch::BlockBatchRecord, db::states::bundle_state::BundleRetention, Evm, State}; +use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; +use reth_revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + Database, State, +}; use revm_primitives::{ - db::{Database, 
DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, + db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, }; use tracing::trace; -/// Provides executors to execute regular optimism blocks +/// Factory for [`OpExecutionStrategy`]. #[derive(Debug, Clone)] -pub struct OpExecutorProvider { +pub struct OpExecutionStrategyFactory { + /// The chainspec chain_spec: Arc, + /// How to create an EVM. evm_config: EvmConfig, } -impl OpExecutorProvider { - /// Creates a new default optimism executor provider. +impl OpExecutionStrategyFactory { + /// Creates a new default optimism executor strategy factory. pub fn optimism(chain_spec: Arc) -> Self { Self::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)) } } -impl OpExecutorProvider { - /// Creates a new executor provider. +impl OpExecutionStrategyFactory { + /// Creates a new executor strategy factory. pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { Self { chain_spec, evm_config } } } -impl OpExecutorProvider -where - EvmConfig: ConfigureEvm
, -{ - fn op_executor(&self, db: DB) -> OpBlockExecutor - where - DB: Database + Display>, - { - OpBlockExecutor::new( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder().with_database(db).with_bundle_update().without_state_clear().build(), - ) - } -} +impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory { + type Strategy + Display>> = OpExecutionStrategy; -impl BlockExecutorProvider for OpExecutorProvider -where - EvmConfig: ConfigureEvm
, -{ - type Executor + Display>> = - OpBlockExecutor; - - type BatchExecutor + Display>> = - OpBatchExecutor; - fn executor(&self, db: DB) -> Self::Executor + fn create_strategy(&self, db: DB) -> Self::Strategy where DB: Database + Display>, { - self.op_executor(db) - } - - fn batch_executor(&self, db: DB) -> Self::BatchExecutor - where - DB: Database + Display>, - { - let executor = self.op_executor(db); - OpBatchExecutor { executor, batch_record: BlockBatchRecord::default() } + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + OpExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) } } -/// Helper container type for EVM with chain spec. -#[derive(Debug, Clone)] -pub struct OpEvmExecutor { +/// Block execution strategy for Optimism. +#[allow(missing_debug_implementations)] +pub struct OpExecutionStrategy { /// The chainspec chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Current state for block execution. + state: State, + /// Utility to call system smart contracts. + system_caller: SystemCaller, } -impl OpEvmExecutor +impl OpExecutionStrategy { + /// Creates a new [`OpExecutionStrategy`] + pub fn new( + state: State, + chain_spec: Arc, + evm_config: OptimismEvmConfig, + ) -> Self { + let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); + Self { state, chain_spec, evm_config, system_caller } + } +} + +impl OpExecutionStrategy { + /// Configures a new evm configuration and block environment for the given block. + /// + /// Caution: this does not initialize the tx environment. 
+ fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl BlockExecutionStrategy for OpExecutionStrategy where - EvmConfig: ConfigureEvm
, + DB: Database + Display>, { - /// Executes the transactions in the block and returns the receipts. - /// - /// This applies the pre-execution changes, and executes the transactions. - /// - /// The optional `state_hook` will be executed with the state changes if present. - /// - /// # Note - /// - /// It does __not__ apply post-execution changes. - fn execute_pre_and_transactions( - &self, + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, block: &BlockWithSenders, - mut evm: Evm<'_, Ext, &mut State>, - state_hook: Option, - ) -> Result<(Vec, u64), BlockExecutionError> - where - DB: Database + Display>, - F: OnStateHook + 'static, - { - let mut system_caller = SystemCaller::new(self.evm_config.clone(), &self.chain_spec); - if let Some(hook) = state_hook { - system_caller.with_state_hook(Some(Box::new(hook) as Box)); - } + total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - // apply pre execution changes - system_caller.apply_beacon_root_contract_call( + self.system_caller.apply_beacon_root_contract_call( block.timestamp, block.number, block.parent_beacon_block_root, &mut evm, )?; - // execute transactions - let is_regolith = - self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); - // Ensure that the create2deployer is force-deployed at the canyon transition. 
Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that @@ -148,6 +136,20 @@ where ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; + Ok(()) + } + + fn execute_transactions( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(Vec, u64), Self::Error> { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + let is_regolith = + self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); + let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.transactions.len()); for (sender, transaction) in block.transactions_with_sender() { @@ -200,7 +202,7 @@ where ?transaction, "Executed transaction" ); - system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state); let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); @@ -225,288 +227,63 @@ where .then_some(1), }); } - drop(evm); Ok((receipts, cumulative_gas_used)) } -} - -/// A basic Optimism block executor. -/// -/// Expected usage: -/// - Create a new instance of the executor. -/// - Execute the block. -#[derive(Debug)] -pub struct OpBlockExecutor { - /// Chain specific evm config that's used to execute a block. - executor: OpEvmExecutor, - /// The state to use for execution - state: State, -} - -impl OpBlockExecutor { - /// Creates a new Optimism block executor. - pub const fn new( - chain_spec: Arc, - evm_config: EvmConfig, - state: State, - ) -> Self { - Self { executor: OpEvmExecutor { chain_spec, evm_config }, state } - } - - /// Returns the chain spec. 
- #[inline] - pub fn chain_spec(&self) -> &ChainSpec { - &self.executor.chain_spec - } - - /// Returns mutable reference to the state that wraps the underlying database. - pub fn state_mut(&mut self) -> &mut State { - &mut self.state - } -} - -impl OpBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// Caution: this does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.executor.evm_config.fill_cfg_and_block_env( - &mut cfg, - &mut block_env, - header, - total_difficulty, - ); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } - /// Convenience method to invoke `execute_without_verification_with_state_hook` setting the - /// state hook as `None`. - fn execute_without_verification( + fn apply_post_execution_changes( &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - self.execute_without_verification_with_state_hook(block, total_difficulty, None::) - } - - /// Execute a single block and apply the state changes to the internal state. - /// - /// Returns the receipts of the transactions in the block and the total gas used. - /// - /// Returns an error if execution fails. - fn execute_without_verification_with_state_hook( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - state_hook: Option, - ) -> Result<(Vec, u64), BlockExecutionError> - where - F: OnStateHook + 'static, - { - // 1. prepare state on new block - self.on_new_block(&block.header); - - // 2. configure the evm and execute - let env = self.evm_env_for_block(&block.header, total_difficulty); - - let (receipts, gas_used) = { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - self.executor.execute_pre_and_transactions(block, evm, state_hook) - }?; - - // 3. apply post execution changes - self.post_execution(block, total_difficulty)?; - - Ok((receipts, gas_used)) - } - - /// Apply settings before a new block is executed. 
- pub(crate) fn on_new_block(&mut self, header: &Header) { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); - self.state.set_state_clear_flag(state_clear_flag); - } - - /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO - /// hardfork state change. - pub fn post_execution( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { + _receipts: &[Receipt], + ) -> Result { let balance_increments = - post_block_balance_increments(self.chain_spec(), block, total_difficulty); + post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); // increment balances self.state .increment_balances(balance_increments) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - Ok(()) + Ok(Requests::default()) } -} - -impl Executor for OpBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; - type Error = BlockExecutionError; - - /// Executes the block and commits the state changes. - /// - /// Returns the receipts of the transactions in the block. - /// - /// Returns an error if the block could not be executed or failed verification. - /// - /// State changes are committed to the database. - fn execute(mut self, input: Self::Input<'_>) -> Result { - let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: Requests::default(), - gas_used, - }) + fn state_ref(&self) -> &State { + &self.state } - fn execute_with_state_closure( - mut self, - input: Self::Input<'_>, - mut witness: F, - ) -> Result - where - F: FnMut(&State), - { - let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - witness(&self.state); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: Requests::default(), - gas_used, - }) + fn state_mut(&mut self) -> &mut State { + &mut self.state } - fn execute_with_state_hook( - mut self, - input: Self::Input<'_>, - state_hook: F, - ) -> Result - where - F: OnStateHook + 'static, - { - let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_without_verification_with_state_hook( - block, - total_difficulty, - Some(state_hook), - )?; - - // NOTE: we need to merge keep the reverts for the bundle retention - 
self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: Requests::default(), - gas_used, - }) + fn with_state_hook(&mut self, hook: Option>) { + self.system_caller.with_state_hook(hook); } -} - -/// An executor for a batch of blocks. -/// -/// State changes are tracked until the executor is finalized. -#[derive(Debug)] -pub struct OpBatchExecutor { - /// The executor used to execute blocks. - executor: OpBlockExecutor, - /// Keeps track of the batch and record receipts based on the configured prune mode - batch_record: BlockBatchRecord, -} -impl OpBatchExecutor { - /// Returns the receipts of the executed blocks. - pub const fn receipts(&self) -> &Receipts { - self.batch_record.receipts() + fn finish(&mut self) -> BundleState { + self.state.merge_transitions(BundleRetention::Reverts); + self.state.take_bundle() } - /// Returns mutable reference to the state that wraps the underlying database. - pub fn state_mut(&mut self) -> &mut State { - self.executor.state_mut() + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + _requests: &Requests, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec.clone(), receipts) } } -impl BatchExecutor for OpBatchExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; - type Error = BlockExecutionError; - - fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { - let BlockExecutionInput { block, total_difficulty } = input; - - if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); - } - - let (receipts, _gas_used) = - self.executor.execute_without_verification(block, total_difficulty)?; - - validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?; - - // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); - self.executor.state.merge_transitions(retention); - - // store receipts in the set - self.batch_record.save_receipts(receipts)?; - - Ok(()) - } - - fn finalize(mut self) -> Self::Output { - ExecutionOutcome::new( - self.executor.state.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - self.batch_record.take_requests(), - ) - } - - fn set_tip(&mut self, tip: BlockNumber) { - self.batch_record.set_tip(tip); - } - - fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.batch_record.set_prune_modes(prune_modes); - } +/// Helper type with backwards compatible methods to obtain executor providers. +#[derive(Debug)] +pub struct OpExecutorProvider; - fn size_hint(&self) -> Option { - Some(self.executor.state.bundle_state.size_hint()) +impl OpExecutorProvider { + /// Creates a new default optimism executor strategy factory. 
+ pub fn optimism( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + BasicBlockExecutorProvider::new(OpExecutionStrategyFactory::optimism(chain_spec)) } } @@ -517,6 +294,7 @@ mod tests { use alloy_consensus::TxEip1559; use alloy_primitives::{b256, Address, StorageKey, StorageValue}; use reth_chainspec::MIN_TRANSACTION_GAS; + use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; use reth_optimism_chainspec::{optimism_deposit_tx_signature, OpChainSpecBuilder}; use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; use reth_revm::{ @@ -551,8 +329,13 @@ mod tests { db } - fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { evm_config: OptimismEvmConfig::new(chain_spec.clone()), chain_spec } + fn executor_provider( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + let strategy_factory = + OpExecutionStrategyFactory::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)); + + BasicBlockExecutorProvider::new(strategy_factory) } #[test] @@ -600,7 +383,10 @@ mod tests { let provider = executor_provider(chain_spec); let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + // make sure the L1 block contract state is preloaded. 
+ executor.with_state_mut(|state| { + state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + }); // Attempt to execute a block with one deposit and one non-deposit transaction executor @@ -622,8 +408,9 @@ mod tests { ) .unwrap(); - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + let receipts = executor.receipts(); + let tx_receipt = receipts[0][0].as_ref().unwrap(); + let deposit_receipt = receipts[0][1].as_ref().unwrap(); // deposit_receipt_version is not present in pre canyon transactions assert!(deposit_receipt.deposit_receipt_version.is_none()); @@ -680,7 +467,10 @@ mod tests { let provider = executor_provider(chain_spec); let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + // make sure the L1 block contract state is preloaded. + executor.with_state_mut(|state| { + state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + }); // attempt to execute an empty block with parent beacon block root, this should not fail executor @@ -702,8 +492,9 @@ mod tests { ) .expect("Executing a block while canyon is active should not fail"); - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + let receipts = executor.receipts(); + let tx_receipt = receipts[0][0].as_ref().unwrap(); + let deposit_receipt = receipts[0][1].as_ref().unwrap(); // deposit_receipt_version is set to 1 for post canyon deposit transactions assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index ffc82fde43c..60aa9f7db08 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -36,8 +36,6 @@ use revm_primitives::{ BlobExcessGasAndPrice, BlockEnv, Bytes, CfgEnv, Env, HandlerCfg, OptimismFields, SpecId, TxKind, }; -pub mod strategy; - /// 
Optimism-related EVM configuration. #[derive(Debug, Clone)] pub struct OptimismEvmConfig { diff --git a/crates/optimism/evm/src/strategy.rs b/crates/optimism/evm/src/strategy.rs deleted file mode 100644 index c626bb66587..00000000000 --- a/crates/optimism/evm/src/strategy.rs +++ /dev/null @@ -1,494 +0,0 @@ -//! Optimism block execution strategy, - -use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; -use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use alloy_consensus::Transaction as _; -use alloy_eips::eip7685::Requests; -use core::fmt::Display; -use reth_chainspec::EthereumHardforks; -use reth_consensus::ConsensusError; -use reth_evm::{ - execute::{ - BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, - BlockValidationError, ProviderError, - }, - state_change::post_block_balance_increments, - system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, ConfigureEvmEnv, -}; -use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::validate_block_post_execution; -use reth_optimism_forks::OptimismHardfork; -use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; -use reth_revm::{ - db::{states::bundle_state::BundleRetention, BundleState}, - Database, State, -}; -use revm_primitives::{ - db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, -}; -use tracing::trace; - -/// Factory for [`OpExecutionStrategy`]. -#[derive(Debug, Clone)] -pub struct OpExecutionStrategyFactory { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, -} - -impl OpExecutionStrategyFactory { - /// Creates a new default optimism executor strategy factory. - pub fn optimism(chain_spec: Arc) -> Self { - Self::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)) - } -} - -impl OpExecutionStrategyFactory { - /// Creates a new executor strategy factory. 
- pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config } - } -} - -impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory { - type Strategy + Display>> = OpExecutionStrategy; - - fn create_strategy(&self, db: DB) -> Self::Strategy - where - DB: Database + Display>, - { - let state = - State::builder().with_database(db).with_bundle_update().without_state_clear().build(); - OpExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) - } -} - -/// Block execution strategy for Optimism. -#[allow(missing_debug_implementations)] -pub struct OpExecutionStrategy { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, - /// Current state for block execution. - state: State, - /// Utility to call system smart contracts. - system_caller: SystemCaller, -} - -impl OpExecutionStrategy { - /// Creates a new [`OpExecutionStrategy`] - pub fn new( - state: State, - chain_spec: Arc, - evm_config: OptimismEvmConfig, - ) -> Self { - let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); - Self { state, chain_spec, evm_config, system_caller } - } -} - -impl OpExecutionStrategy { - /// Configures a new evm configuration and block environment for the given block. - /// - /// Caution: this does not initialize the tx environment. 
- fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } -} - -impl BlockExecutionStrategy for OpExecutionStrategy -where - DB: Database + Display>, -{ - type Error = BlockExecutionError; - - fn apply_pre_execution_changes( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), Self::Error> { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = - (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); - self.state.set_state_clear_flag(state_clear_flag); - - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - self.system_caller.apply_beacon_root_contract_call( - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut evm, - )?; - - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. 
- ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) - .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; - - Ok(()) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error> { - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - let is_regolith = - self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.transactions.len()); - for (sender, transaction) in block.transactions_with_sender() { - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas && - (is_regolith || !transaction.is_system_transaction()) - { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - - // An optimism block should never contain blob transactions. - if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()) - } - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. 
- let depositor = (is_regolith && transaction.is_deposit()) - .then(|| { - evm.db_mut() - .load_cache_account(*sender) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; - - self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); - - // Execute transaction. - let result_and_state = evm.transact().map_err(move |err| { - let new_err = err.map_db_err(|e| e.into()); - // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { - hash: transaction.recalculate_hash(), - error: Box::new(new_err), - } - })?; - - trace!( - target: "evm", - ?transaction, - "Executed transaction" - ); - self.system_caller.on_state(&result_and_state); - let ResultAndState { result, state } = result_and_state; - evm.db_mut().commit(state); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs(), - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. The state transition process ensures - // this is only set for post-Canyon deposit transactions. 
- deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec - .is_fork_active_at_timestamp(OptimismHardfork::Canyon, block.timestamp)) - .then_some(1), - }); - } - - Ok((receipts, cumulative_gas_used)) - } - - fn apply_post_execution_changes( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - _receipts: &[Receipt], - ) -> Result { - let balance_increments = - post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); - // increment balances - self.state - .increment_balances(balance_increments) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - - Ok(Requests::default()) - } - - fn state_ref(&self) -> &State { - &self.state - } - - fn state_mut(&mut self) -> &mut State { - &mut self.state - } - - fn with_state_hook(&mut self, hook: Option>) { - self.system_caller.with_state_hook(hook); - } - - fn finish(&mut self) -> BundleState { - self.state.merge_transitions(BundleRetention::Reverts); - self.state.take_bundle() - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - receipts: &[Receipt], - _requests: &Requests, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec.clone(), receipts) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::OpChainSpec; - use alloy_consensus::TxEip1559; - use alloy_primitives::{b256, Address, StorageKey, StorageValue}; - use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; - use reth_optimism_chainspec::{optimism_deposit_tx_signature, OpChainSpecBuilder}; - use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; - use reth_revm::{ - database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, - }; - use std::{collections::HashMap, str::FromStr}; - - fn create_op_state_provider() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let 
l1_block_contract_account = - Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; - - let mut l1_block_storage = HashMap::default(); - // base fee - l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); - // l1 fee overhead - l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); - // l1 fee scalar - l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); - // l1 free scalars post ecotone - l1_block_storage.insert( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .unwrap(), - ); - - db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); - - db - } - - fn executor_provider( - chain_spec: Arc, - ) -> BasicBlockExecutorProvider { - let strategy_factory = - OpExecutionStrategyFactory::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)); - - BasicBlockExecutorProvider::new(strategy_factory) - } - - #[test] - fn op_deposit_fields_pre_canyon() { - let header = Header { - timestamp: 1, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - db.insert_account(addr, account, None, HashMap::default()); - - let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().regolith_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: MIN_TRANSACTION_GAS, - to: addr.into(), - ..Default::default() - }), - Signature::test_signature(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(op_alloy_consensus::TxDeposit { - 
from: addr, - to: addr.into(), - gas_limit: MIN_TRANSACTION_GAS, - ..Default::default() - }), - Signature::test_signature(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // make sure the L1 block contract state is preloaded. - executor.with_state_mut(|state| { - state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - }); - - // Attempt to execute a block with one deposit and one non-deposit transaction - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header, - body: BlockBody { - transactions: vec![tx, tx_deposit], - ..Default::default() - }, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - let receipts = executor.receipts(); - let tx_receipt = receipts[0][0].as_ref().unwrap(); - let deposit_receipt = receipts[0][1].as_ref().unwrap(); - - // deposit_receipt_version is not present in pre canyon transactions - assert!(deposit_receipt.deposit_receipt_version.is_none()); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } - - #[test] - fn op_deposit_fields_post_canyon() { - // ensure_create2_deployer will fail if timestamp is set to less then 2 - let header = Header { - timestamp: 2, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - - db.insert_account(addr, account, None, HashMap::default()); - - let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().canyon_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - 
Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: MIN_TRANSACTION_GAS, - to: addr.into(), - ..Default::default() - }), - Signature::test_signature(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(op_alloy_consensus::TxDeposit { - from: addr, - to: addr.into(), - gas_limit: MIN_TRANSACTION_GAS, - ..Default::default() - }), - optimism_deposit_tx_signature(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // make sure the L1 block contract state is preloaded. - executor.with_state_mut(|state| { - state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - }); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header, - body: BlockBody { - transactions: vec![tx, tx_deposit], - ..Default::default() - }, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .into(), - ) - .expect("Executing a block while canyon is active should not fail"); - - let receipts = executor.receipts(); - let tx_receipt = receipts[0][0].as_ref().unwrap(); - let deposit_receipt = receipts[0][1].as_ref().unwrap(); - - // deposit_receipt_version is set to 1 for post canyon deposit transactions - assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } -} diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 175b2d4bf41..22fc1a88ff7 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use reth_basic_payload_builder::{BasicPayloadJobGenerator, 
BasicPayloadJobGeneratorConfig}; use reth_chainspec::{EthChainSpec, Hardforks}; -use reth_evm::ConfigureEvm; +use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager}; use reth_node_api::{ AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, @@ -20,7 +20,7 @@ use reth_node_builder::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OptimismBeaconConsensus; -use reth_optimism_evm::{OpExecutorProvider, OptimismEvmConfig}; +use reth_optimism_evm::{OpExecutionStrategyFactory, OptimismEvmConfig}; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{Block, Header}; @@ -184,14 +184,16 @@ where Node: FullNodeTypes>, { type EVM = OptimismEvmConfig; - type Executor = OpExecutorProvider; + type Executor = BasicBlockExecutorProvider; async fn build_evm( self, ctx: &BuilderContext, ) -> eyre::Result<(Self::EVM, Self::Executor)> { let evm_config = OptimismEvmConfig::new(ctx.chain_spec()); - let executor = OpExecutorProvider::new(ctx.chain_spec(), evm_config.clone()); + let strategy_factory = + OpExecutionStrategyFactory::new(ctx.chain_spec(), evm_config.clone()); + let executor = BasicBlockExecutorProvider::new(strategy_factory); Ok((evm_config, executor)) } From de07436f0e5d534873cd4670f75df8918abf2bec Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 20 Oct 2024 12:43:31 +0000 Subject: [PATCH 056/970] chore(deps): weekly `cargo update` (#11902) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 213 ++++++++++++++++++++++++++--------------------------- 1 file changed, 105 insertions(+), 108 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4def3dcabed..19287e6669a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,9 +97,9 @@ checksum = 
"5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.38" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "156bfc5dcd52ef9a5f33381701fa03310317e14c65093a9430d3e3557b08dcd3" +checksum = "d4932d790c723181807738cf1ac68198ab581cd699545b155601332541ee47bd" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -394,7 +394,7 @@ checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -618,7 +618,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -634,7 +634,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "syn-solidity", "tiny-keccak", ] @@ -650,7 +650,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "syn-solidity", ] @@ -841,9 +841,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95" [[package]] name = "aquamarine" @@ -856,7 +856,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -1032,9 +1032,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.15" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e26a9844c659a2a293d239c7910b752f8487fe122c6c8bd1659bf85a6507c302" +checksum = "103db485efc3e41214fe4fda9f3dbeae2eb9082f48fd236e6095627a9422066e" dependencies = [ "brotli", "flate2", @@ -1079,7 +1079,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", 
- "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -1090,7 +1090,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -1128,7 +1128,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -1234,7 +1234,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -1416,7 +1416,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "synstructure", ] @@ -1538,7 +1538,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -1626,9 +1626,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.30" +version = "1.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945" +checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" dependencies = [ "jobserver", "libc", @@ -1750,7 +1750,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2038,9 +2038,9 @@ dependencies = [ [[package]] name = "critical-section" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64009896348fc5af4222e9cf7d7d82a95a256c634ebcf61c53e4ea461422242" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" [[package]] name = "crossbeam-channel" @@ -2207,7 +2207,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2231,7 +2231,7 @@ 
dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2242,7 +2242,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2363,7 +2363,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2374,7 +2374,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2395,7 +2395,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "unicode-xid", ] @@ -2509,7 +2509,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2657,7 +2657,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2668,7 +2668,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -2725,7 +2725,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -3263,7 +3263,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -3707,9 +3707,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -3789,7 +3789,7 @@ dependencies = [ 
"quote", "serde", "serde_json", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -3939,7 +3939,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -4107,7 +4107,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" dependencies = [ "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -4252,9 +4252,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f01f48e04e0d7da72280ab787c9943695699c9b32b99158ece105e8ad0afea" +checksum = "c5c71d8c1a731cc4227c2f698d377e7848ca12c8a48866fc5e6951c43a4db843" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4270,9 +4270,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d80eccbd47a7b9f1e67663fd846928e941cb49c65236e297dd11c9ea3c5e3387" +checksum = "548125b159ba1314104f5bb5f38519e03a41862786aa3925cf349aae9cdd546e" dependencies = [ "base64 0.22.1", "futures-channel", @@ -4295,9 +4295,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2709a32915d816a6e8f625bf72cf74523ebe5d8829f895d6b041b1d3137818" +checksum = "f2882f6f8acb9fdaec7cefc4fd607119a9bd709831df7d7672a1d3b644628280" dependencies = [ "async-trait", "bytes", @@ -4322,9 +4322,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc54db939002b030e794fbfc9d5a925aa2854889c5a2f0352b0bffa54681707e" +checksum = 
"b3638bc4617f96675973253b3a45006933bde93c2fd8a6170b33c777cc389e5b" dependencies = [ "async-trait", "base64 0.22.1", @@ -4347,22 +4347,22 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9a4b2eaba8cc928f49c4ccf4fcfa65b690a73997682da99ed08f3393b51f07" +checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" dependencies = [ "heck", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] name = "jsonrpsee-server" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30110d0f2d7866c8cc6c86483bdab2eb9f4d2f0e20db55518b2bca84651ba8e" +checksum = "82ad8ddc14be1d4290cd68046e7d1d37acd408efed6d3ca08aefcc3ad6da069c" dependencies = [ "futures-util", "http", @@ -4387,9 +4387,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca331cd7b3fe95b33432825c2d4c9f5a43963e207fdc01ae67f9fd80ab0930f" +checksum = "a178c60086f24cc35bb82f57c651d0d25d99c4742b4d335de04e97fa1f08a8a1" dependencies = [ "http", "serde", @@ -4399,9 +4399,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c603d97578071dc44d79d3cfaf0775437638fd5adc33c6b622dfe4fa2ec812d" +checksum = "1a01cd500915d24ab28ca17527e23901ef1be6d659a2322451e1045532516c25" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4410,9 +4410,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755ca3da1c67671f1fae01cd1a47f41dfb2233a8f19a643e587ab0a663942044" +checksum = 
"0fe322e0896d0955a3ebdd5bf813571c53fea29edd713bc315b76620b327e86d" dependencies = [ "http", "jsonrpsee-client-transport", @@ -4515,9 +4515,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.159" +version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "libloading" @@ -4526,7 +4526,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -4773,7 +4773,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -4920,7 +4920,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5168,7 +5168,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5547,7 +5547,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5576,7 +5576,7 @@ checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5743,12 +5743,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +checksum = "904afd36257cdb6ce0bee88b7981847bd7b955e5e216bb32f466b302923ad446" dependencies = [ "proc-macro2", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -5823,14 +5823,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] name = "proc-macro2" 
-version = "1.0.87" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" +checksum = "7c3a7fc5db1e57d5a779a352c8cdb57b29aa4c40cc69c3a68a7fedc815fbf2f9" dependencies = [ "unicode-ident", ] @@ -5921,7 +5921,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -6744,7 +6744,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -9589,9 +9589,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.14" +version = "0.23.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" +checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" dependencies = [ "log", "once_cell", @@ -9881,14 +9881,14 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "indexmap 2.6.0", "itoa", @@ -9916,7 +9916,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -9967,7 +9967,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -9990,7 +9990,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ 
-10286,7 +10286,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10344,9 +10344,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "e6e185e337f816bc8da115b8afcb3324006ccc82eeaddf35113888d3bd8e44ac" dependencies = [ "proc-macro2", "quote", @@ -10362,7 +10362,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10388,7 +10388,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10465,7 +10465,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10504,7 +10504,7 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10681,7 +10681,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -10882,7 +10882,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11106,12 +11106,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] name = "unicode-bidi" @@ -11229,9 +11226,9 @@ checksum = 
"06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "getrandom 0.2.15", ] @@ -11330,7 +11327,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "wasm-bindgen-shared", ] @@ -11364,7 +11361,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11520,7 +11517,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11531,7 +11528,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11542,7 +11539,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11553,7 +11550,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11828,7 +11825,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "synstructure", ] @@ -11850,7 +11847,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11870,7 +11867,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", "synstructure", ] @@ -11891,7 +11888,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] @@ -11913,7 +11910,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.80", ] [[package]] From e9c09723edf7b75a8dcdc5f3d458307120713356 Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Sun, 20 Oct 2024 22:39:42 +0800 Subject: [PATCH 057/970] docs(blockchain-tree): rm comment (#11903) --- crates/blockchain-tree/src/blockchain_tree.rs | 3 --- crates/blockchain-tree/src/externals.rs | 1 - 2 files changed, 4 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 1e2ed2a4a2e..95c0361f31f 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -113,9 +113,6 @@ where /// is crucial for the correct execution of transactions. /// - `tree_config`: Configuration for the blockchain tree, including any parameters that affect /// its structure or performance. - /// - `prune_modes`: Configuration for pruning old blockchain data. This helps in managing the - /// storage space efficiently. It's important to validate this configuration to ensure it does - /// not lead to unintended data loss. 
pub fn new( externals: TreeExternals, config: BlockchainTreeConfig, diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 719852c12ac..4e22fcb78b6 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -21,7 +21,6 @@ use std::{collections::BTreeMap, sync::Arc}; /// - A handle to the database /// - A handle to the consensus engine /// - The executor factory to execute blocks with -/// - The chain spec #[derive(Debug)] pub struct TreeExternals { /// The provider factory, used to commit the canonical chain, or unwind it. From 5fca07ca8752163aecfe1334fa8474565db20b66 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sun, 20 Oct 2024 21:23:31 +0200 Subject: [PATCH 058/970] fix: impl BlockExecutionStrategy for OpExecutionStrategy generic over EvmConfig (#11910) --- crates/optimism/evm/src/execute.rs | 36 ++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 77f67066851..10ac5c5250a 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -14,7 +14,7 @@ use reth_evm::{ }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, ConfigureEvmEnv, + ConfigureEvm, }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; @@ -52,8 +52,13 @@ impl OpExecutionStrategyFactory { } } -impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory { - type Strategy + Display>> = OpExecutionStrategy; +impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory +where + EvmConfig: + Clone + Unpin + Sync + Send + 'static + ConfigureEvm
, +{ + type Strategy + Display>> = + OpExecutionStrategy; fn create_strategy(&self, db: DB) -> Self::Strategy where @@ -67,7 +72,10 @@ impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory { /// Block execution strategy for Optimism. #[allow(missing_debug_implementations)] -pub struct OpExecutionStrategy { +pub struct OpExecutionStrategy +where + EvmConfig: Clone, +{ /// The chainspec chain_spec: Arc, /// How to create an EVM. @@ -78,19 +86,22 @@ pub struct OpExecutionStrategy { system_caller: SystemCaller, } -impl OpExecutionStrategy { +impl OpExecutionStrategy +where + EvmConfig: Clone, +{ /// Creates a new [`OpExecutionStrategy`] - pub fn new( - state: State, - chain_spec: Arc, - evm_config: OptimismEvmConfig, - ) -> Self { + pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); Self { state, chain_spec, evm_config, system_caller } } } -impl OpExecutionStrategy { +impl OpExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ /// Configures a new evm configuration and block environment for the given block. /// /// Caution: this does not initialize the tx environment. @@ -103,9 +114,10 @@ impl OpExecutionStrategy { } } -impl BlockExecutionStrategy for OpExecutionStrategy +impl BlockExecutionStrategy for OpExecutionStrategy where DB: Database + Display>, + EvmConfig: ConfigureEvm
, { type Error = BlockExecutionError; From a188597a3c6a9590ab54cfc9e02c18e7e0760be3 Mon Sep 17 00:00:00 2001 From: Oliver Date: Sun, 20 Oct 2024 22:55:14 +0200 Subject: [PATCH 059/970] ci: merge sync jobs (#11909) --- .github/workflows/eth-sync.yml | 53 ---------------------------- .github/workflows/op-sync.yml | 55 ----------------------------- .github/workflows/sync.yml | 63 ++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 108 deletions(-) delete mode 100644 .github/workflows/eth-sync.yml delete mode 100644 .github/workflows/op-sync.yml create mode 100644 .github/workflows/sync.yml diff --git a/.github/workflows/eth-sync.yml b/.github/workflows/eth-sync.yml deleted file mode 100644 index f473e29a57c..00000000000 --- a/.github/workflows/eth-sync.yml +++ /dev/null @@ -1,53 +0,0 @@ -# Runs an ethereum mainnet sync test. - -name: eth-sync-test - -on: - pull_request: - merge_group: - push: - branches: [ main ] - -env: - CARGO_TERM_COLOR: always - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - sync: - name: sync / 100k blocks - # Only run sync tests in merge groups - if: github.event_name == 'merge_group' - runs-on: - group: Reth - env: - RUST_LOG: info,sync=error - RUST_BACKTRACE: 1 - timeout-minutes: 60 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: Build reth - run: | - cargo install --features asm-keccak,jemalloc --path bin/reth - - name: Run sync - run: | - reth node \ - --debug.tip 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 \ - --debug.max-block 100000 \ - --debug.terminate - - name: Verify the target block hash - run: | - reth db get static-file headers 100000 \ - | grep 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 - - name: Run stage unwind for 100 blocks - run: | - reth stage unwind num-blocks 100 - - name: Run 
stage unwind to block hash - run: | - reth stage unwind to-block 0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a diff --git a/.github/workflows/op-sync.yml b/.github/workflows/op-sync.yml deleted file mode 100644 index 2a223391d71..00000000000 --- a/.github/workflows/op-sync.yml +++ /dev/null @@ -1,55 +0,0 @@ -# Runs a base mainnet sync test. - -name: op-sync-test - -on: - pull_request: - merge_group: - push: - branches: [ main ] - -env: - CARGO_TERM_COLOR: always - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - sync: - name: op sync / 10k blocks - # Only run sync tests in merge groups - if: github.event_name == 'merge_group' - runs-on: - group: Reth - env: - RUST_LOG: info,sync=error - RUST_BACKTRACE: 1 - timeout-minutes: 60 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: Build op-reth - run: make install-op - - name: Run sync - # https://basescan.org/block/10000 - run: | - op-reth node \ - --chain base \ - --debug.tip 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 \ - --debug.max-block 10000 \ - --debug.terminate - - name: Verify the target block hash - run: | - op-reth db --chain base get static-file headers 10000 \ - | grep 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 - - name: Run stage unwind for 100 blocks - run: | - op-reth stage --chain base unwind num-blocks 100 - - name: Run stage unwind to block hash - run: | - op-reth stage --chain base unwind to-block 0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de - diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml new file mode 100644 index 00000000000..531d04b2e48 --- /dev/null +++ b/.github/workflows/sync.yml @@ -0,0 +1,63 @@ +# Runs sync tests. 
+ +name: sync test + +on: + merge_group: + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + sync: + name: sync (${{ matrix.chain.bin }}) + runs-on: + group: Reth + env: + RUST_LOG: info,sync=error + RUST_BACKTRACE: 1 + timeout-minutes: 60 + strategy: + matrix: + chain: + - build: install + bin: reth + chain: mainnet + tip: "0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4" + block: 100000 + unwind-target: "0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a" + - build: install-op + bin: op-reth + chain: base + tip: "0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7" + block: 10000 + unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de" + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build ${{ matrix.chain.bin }} + run: make ${{ matrix.chain.build }} + - name: Run sync + run: | + ${{ matrix.chain.bin }} node \ + --chain ${{ matrix.chain.chain }} \ + --debug.tip ${{ matrix.chain.tip }} \ + --debug.max-block ${{ matrix.chain.block }} \ + --debug.terminate + - name: Verify the target block hash + run: | + ${{ matrix.chain.bin }} db --chain ${{ matrix.chain.chain }} get static-file headers ${{ matrix.chain.block }} \ + | grep ${{ matrix.chain.tip }} + - name: Run stage unwind for 100 blocks + run: | + ${{ matrix.chain.bin }} stage --chain ${{ matrix.chain.chain }} unwind num-blocks 100 + - name: Run stage unwind to block hash + run: | + ${{ matrix.chain.bin }} stage --chain ${{ matrix.chain.chain }} unwind to-block ${{ matrix.chain.unwind-target }} From 0270128d4f7a9f6fad27dff69273095abdfa7452 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 20 Oct 2024 23:42:45 +0200 Subject: [PATCH 060/970] refactor(txpool): small refactor 
in `DiskFileBlobStoreInner` `get_exact` (#11911) --- crates/transaction-pool/src/blobstore/disk.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 96119a0f817..e168a1c1111 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -409,13 +409,9 @@ impl DiskFileBlobStoreInner { /// Returns an error if there are any missing blobs. #[inline] fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { - let mut res = Vec::with_capacity(txs.len()); - for tx in txs { - let blob = self.get_one(tx)?.ok_or_else(|| BlobStoreError::MissingSidecar(tx))?; - res.push(blob) - } - - Ok(res) + txs.into_iter() + .map(|tx| self.get_one(tx)?.ok_or(BlobStoreError::MissingSidecar(tx))) + .collect() } } From 2e8a8fe6f6147123ecb2892692ed09ef7e95d0ff Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 21 Oct 2024 01:10:36 +0200 Subject: [PATCH 061/970] doc(storage): add missing documentation for `nippy-jar` (#11913) --- .../storage/nippy-jar/src/compression/mod.rs | 2 + .../storage/nippy-jar/src/compression/zstd.rs | 4 ++ crates/storage/nippy-jar/src/consistency.rs | 5 +++ crates/storage/nippy-jar/src/cursor.rs | 7 +++- crates/storage/nippy-jar/src/error.rs | 39 +++++++++++++++++++ crates/storage/nippy-jar/src/lib.rs | 7 +++- crates/storage/nippy-jar/src/writer.rs | 12 ++++++ 7 files changed, 72 insertions(+), 4 deletions(-) diff --git a/crates/storage/nippy-jar/src/compression/mod.rs b/crates/storage/nippy-jar/src/compression/mod.rs index 28a92fe909f..f9bf8110eeb 100644 --- a/crates/storage/nippy-jar/src/compression/mod.rs +++ b/crates/storage/nippy-jar/src/compression/mod.rs @@ -44,7 +44,9 @@ pub trait Compression: Serialize + for<'a> Deserialize<'a> { #[derive(Debug, Serialize, Deserialize)] #[cfg_attr(test, derive(PartialEq))] pub enum Compressors { + 
/// Zstandard compression algorithm with custom settings. Zstd(Zstd), + /// LZ4 compression algorithm with custom settings. Lz4(Lz4), } diff --git a/crates/storage/nippy-jar/src/compression/zstd.rs b/crates/storage/nippy-jar/src/compression/zstd.rs index 500247d1767..7685941dfbe 100644 --- a/crates/storage/nippy-jar/src/compression/zstd.rs +++ b/crates/storage/nippy-jar/src/compression/zstd.rs @@ -12,10 +12,13 @@ pub use zstd::{bulk::Decompressor, dict::DecoderDictionary}; type RawDictionary = Vec; +/// Represents the state of a Zstandard compression operation. #[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)] pub enum ZstdState { + /// The compressor is pending a dictionary. #[default] PendingDictionary, + /// The compressor is ready to perform compression. Ready, } @@ -51,6 +54,7 @@ impl Zstd { } } + /// Sets the compression level for the Zstd compression instance. pub const fn with_level(mut self, level: i32) -> Self { self.level = level; self diff --git a/crates/storage/nippy-jar/src/consistency.rs b/crates/storage/nippy-jar/src/consistency.rs index 1093fb5546a..952980ef6ef 100644 --- a/crates/storage/nippy-jar/src/consistency.rs +++ b/crates/storage/nippy-jar/src/consistency.rs @@ -28,6 +28,11 @@ pub struct NippyJarChecker { } impl NippyJarChecker { + /// Creates a new instance of [`NippyJarChecker`] with the provided [`NippyJar`]. + /// + /// This method initializes the checker without any associated file handles for + /// the data or offsets files. The [`NippyJar`] passed in contains all necessary + /// configurations for handling data. 
pub const fn new(jar: NippyJar) -> Self { Self { jar, data_file: None, offsets_file: None } } diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index 26776482729..376411ac265 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -25,9 +25,10 @@ impl std::fmt::Debug for NippyJarCursor<'_, H> { } impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { + /// Creates a new instance of [`NippyJarCursor`] for the given [`NippyJar`]. pub fn new(jar: &'a NippyJar) -> Result { let max_row_size = jar.max_row_size; - Ok(NippyJarCursor { + Ok(Self { jar, reader: Arc::new(jar.open_data_reader()?), // Makes sure that we have enough buffer capacity to decompress any row of data. @@ -36,12 +37,14 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { }) } + /// Creates a new instance of [`NippyJarCursor`] with the specified [`NippyJar`] and data + /// reader. pub fn with_reader( jar: &'a NippyJar, reader: Arc, ) -> Result { let max_row_size = jar.max_row_size; - Ok(NippyJarCursor { + Ok(Self { jar, reader, // Makes sure that we have enough buffer capacity to decompress any row of data. diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index fc096cf848c..f69bb44a068 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -4,53 +4,92 @@ use thiserror::Error; /// Errors associated with [`crate::NippyJar`]. #[derive(Error, Debug)] pub enum NippyJarError { + /// An internal error occurred, wrapping any type of error. #[error(transparent)] Internal(#[from] Box), + + /// An error occurred while disconnecting, wrapping a standard I/O error. #[error(transparent)] Disconnect(#[from] std::io::Error), + + /// An error related to the file system occurred, wrapping a file system path error. #[error(transparent)] FileSystem(#[from] reth_fs_util::FsPathError), + + /// A custom error message provided by the user. 
#[error("{0}")] Custom(String), + + /// An error occurred during serialization/deserialization with Bincode. #[error(transparent)] Bincode(#[from] Box), + + /// An error occurred with the Elias-Fano encoding/decoding process. #[error(transparent)] EliasFano(#[from] anyhow::Error), + + /// Compression was enabled, but the compressor is not ready yet. #[error("compression was enabled, but it's not ready yet")] CompressorNotReady, + + /// Decompression was enabled, but the decompressor is not ready yet. #[error("decompression was enabled, but it's not ready yet")] DecompressorNotReady, + + /// The number of columns does not match the expected length. #[error("number of columns does not match: {0} != {1}")] ColumnLenMismatch(usize, usize), + + /// An unexpected missing value was encountered at a specific row and column. #[error("unexpected missing value: row:col {0}:{1}")] UnexpectedMissingValue(u64, u64), + + /// The size of an offset exceeds the maximum allowed size of 8 bytes. #[error("the size of an offset must be at most 8 bytes, got {offset_size}")] OffsetSizeTooBig { /// The read offset size in number of bytes. offset_size: u8, }, + + /// The size of an offset is less than the minimum allowed size of 1 byte. #[error("the size of an offset must be at least 1 byte, got {offset_size}")] OffsetSizeTooSmall { /// The read offset size in number of bytes. offset_size: u8, }, + + /// An attempt was made to read an offset that is out of bounds. #[error("attempted to read an out of bounds offset: {index}")] OffsetOutOfBounds { /// The index of the offset that was being read. index: usize, }, + + /// The output buffer is too small for the compression or decompression operation. #[error("compression or decompression requires a bigger destination output")] OutputTooSmall, + + /// A dictionary is not loaded when it is required for operations. 
#[error("dictionary is not loaded.")] DictionaryNotLoaded, + + /// It's not possible to generate a compressor after loading a dictionary. #[error("it's not possible to generate a compressor after loading a dictionary.")] CompressorNotAllowed, + + /// The number of offsets is smaller than the requested prune size. #[error("number of offsets ({0}) is smaller than prune request ({1}).")] InvalidPruning(u64, u64), + + /// The jar has been frozen and cannot be modified. #[error("jar has been frozen and cannot be modified.")] FrozenJar, + + /// The file is in an inconsistent state. #[error("File is in an inconsistent state.")] InconsistentState, + + /// A specified file is missing. #[error("Missing file: {0}.")] MissingFile(PathBuf), } diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index bdc950aa38a..b1d174feb2c 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -10,7 +10,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![allow(missing_docs)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use memmap2::Mmap; @@ -28,6 +27,7 @@ use std::os::windows::prelude::OpenOptionsExt; use tracing::*; +/// Compression algorithms supported by `NippyJar`. pub mod compression; #[cfg(test)] use compression::Compression; @@ -55,10 +55,13 @@ pub use writer::NippyJarWriter; mod consistency; pub use consistency::NippyJarChecker; +/// The version number of the Nippy Jar format. const NIPPY_JAR_VERSION: usize = 1; - +/// The file extension used for index files. const INDEX_FILE_EXTENSION: &str = "idx"; +/// The file extension used for offsets files. const OFFSETS_FILE_EXTENSION: &str = "off"; +/// The file extension used for configuration files. 
pub const CONFIG_FILE_EXTENSION: &str = "conf"; /// A [`RefRow`] is a list of column value slices pointing to either an internal buffer or a diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index 9bf9bf52644..3a1003bee76 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -354,6 +354,10 @@ impl NippyJarWriter { Ok(()) } + /// Commits changes to the data file and offsets without synchronizing all data to disk. + /// + /// This function flushes the buffered data to the data file and commits the offsets, + /// but it does not guarantee that all data is synchronized to persistent storage. #[cfg(feature = "test-utils")] pub fn commit_without_sync_all(&mut self) -> Result<(), NippyJarError> { self.data_file.flush()?; @@ -412,41 +416,49 @@ impl NippyJarWriter { Ok(()) } + /// Returns the maximum row size for the associated [`NippyJar`]. #[cfg(test)] pub const fn max_row_size(&self) -> usize { self.jar.max_row_size } + /// Returns the column index of the current checker instance. #[cfg(test)] pub const fn column(&self) -> usize { self.column } + /// Returns a reference to the offsets vector. #[cfg(test)] pub fn offsets(&self) -> &[u64] { &self.offsets } + /// Returns a mutable reference to the offsets vector. #[cfg(test)] pub fn offsets_mut(&mut self) -> &mut Vec { &mut self.offsets } + /// Returns the path to the offsets file for the associated [`NippyJar`]. #[cfg(test)] pub fn offsets_path(&self) -> std::path::PathBuf { self.jar.offsets_path() } + /// Returns the path to the data file for the associated [`NippyJar`]. #[cfg(test)] pub fn data_path(&self) -> &Path { self.jar.data_path() } + /// Returns a mutable reference to the buffered writer for the data file. #[cfg(any(test, feature = "test-utils"))] pub fn data_file(&mut self) -> &mut BufWriter { &mut self.data_file } + /// Returns a reference to the associated [`NippyJar`] instance. 
#[cfg(any(test, feature = "test-utils"))] pub const fn jar(&self) -> &NippyJar { &self.jar From ddc82e2645b584719996f3824596e4721174aeba Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 21 Oct 2024 08:31:29 +0200 Subject: [PATCH 062/970] test(static-file): add unit tests for `HighestStaticFiles` implementation (#11912) --- crates/static-file/types/src/lib.rs | 78 +++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 38093113886..6e954a781b7 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -71,3 +71,81 @@ pub const fn find_fixed_range( let start = (block / blocks_per_static_file) * blocks_per_static_file; SegmentRangeInclusive::new(start, start + blocks_per_static_file - 1) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_highest_static_files_highest() { + let files = + HighestStaticFiles { headers: Some(100), receipts: Some(200), transactions: None }; + + // Test for headers segment + assert_eq!(files.highest(StaticFileSegment::Headers), Some(100)); + + // Test for receipts segment + assert_eq!(files.highest(StaticFileSegment::Receipts), Some(200)); + + // Test for transactions segment + assert_eq!(files.highest(StaticFileSegment::Transactions), None); + } + + #[test] + fn test_highest_static_files_as_mut() { + let mut files = HighestStaticFiles::default(); + + // Modify headers value + *files.as_mut(StaticFileSegment::Headers) = Some(150); + assert_eq!(files.headers, Some(150)); + + // Modify receipts value + *files.as_mut(StaticFileSegment::Receipts) = Some(250); + assert_eq!(files.receipts, Some(250)); + + // Modify transactions value + *files.as_mut(StaticFileSegment::Transactions) = Some(350); + assert_eq!(files.transactions, Some(350)); + } + + #[test] + fn test_highest_static_files_min() { + let files = + HighestStaticFiles { headers: 
Some(300), receipts: Some(100), transactions: None }; + + // Minimum value among the available segments + assert_eq!(files.min(), Some(100)); + + let empty_files = HighestStaticFiles::default(); + // No values, should return None + assert_eq!(empty_files.min(), None); + } + + #[test] + fn test_highest_static_files_max() { + let files = + HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) }; + + // Maximum value among the available segments + assert_eq!(files.max(), Some(500)); + + let empty_files = HighestStaticFiles::default(); + // No values, should return None + assert_eq!(empty_files.max(), None); + } + + #[test] + fn test_find_fixed_range() { + // Test with default block size + let block: BlockNumber = 600_000; + let range = find_fixed_range(block, DEFAULT_BLOCKS_PER_STATIC_FILE); + assert_eq!(range.start(), 500_000); + assert_eq!(range.end(), 999_999); + + // Test with a custom block size + let block: BlockNumber = 1_200_000; + let range = find_fixed_range(block, 1_000_000); + assert_eq!(range.start(), 1_000_000); + assert_eq!(range.end(), 1_999_999); + } +} From fbb27ebdad01780a2068399a01e7337bc0bb514f Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 21 Oct 2024 10:24:36 +0200 Subject: [PATCH 063/970] chore(ci): update excluded crates in wasm checker (#11915) --- .github/assets/check_wasm.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 52d7009412c..3b2f2d6b7e6 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -36,7 +36,6 @@ exclude_crates=( reth-ethereum-engine-primitives reth-ethereum-payload-builder reth-etl - reth-evm-ethereum reth-exex reth-exex-test-utils reth-ipc From bdad91b7002913a8bb6ec08982752685b2c5b604 Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Mon, 21 Oct 2024 11:42:46 +0200 Subject: [PATCH 064/970] docs: update grafana docs (#11908) --- etc/README.md | 41 
+++++++++++++++-------------------------- 1 file changed, 15 insertions(+), 26 deletions(-) diff --git a/etc/README.md b/etc/README.md index 28c71b04688..4f4ce7f20e4 100644 --- a/etc/README.md +++ b/etc/README.md @@ -19,55 +19,41 @@ the [docker docs](/book/installation/docker.md#using-docker-compose). #### Adding a new metric to Grafana -To set up a new metric in Reth and its Grafana dashboard: +To set up a new metric in Reth and its Grafana dashboard (this assumes running Reth and Grafana instances): 1. Add the metric to the codebase following the [metrics section](../docs/design/metrics.md#creating-metrics) documentation. -2. Build the Reth image: - - ```bash - docker build . -t reth:local - ``` - - Modify the [docker-compose](./docker-compose.yml) file to use your locally built image for the Reth service. - -3. Run Docker Compose: - - ```bash - docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml up -d - ``` - -4. Access Grafana: +1. Access Grafana: - Open `http://localhost:3000/` in a browser - Log in with username and password `admin` - Navigate to the `Dashboards` tab -5. Create or modify a dashboard: +1. Create or modify a dashboard: - Select an existing dashboard or create a new one - Click `Add` > `Visualization` to create a new panel -6. Configure your metric panel: +1. Configure your metric panel: - Set a panel title and description - Select metric(s) from the `Metrics browser` or use the `PromQL` terminal - Document your metric(s) by setting units, legends, etc. - When adding multiple metrics, use field overwrites if needed -7. Save and arrange: +1. Save and arrange: - Click `Apply` to save the panel - Drag the panel to desired position on the dashboard -8. Export the dashboard: +1. Export the dashboard: - Click `Share` > `Export` - Toggle `Export for sharing externally` - Click `Save to file` -9. Update dashboard file: +1. 
Update dashboard file: - Replace the content of the corresponding file in the [dashboards folder](./grafana/dashboards) with the exported JSON @@ -75,15 +61,18 @@ Your new metric is now integrated into the Reth Grafana dashboard. #### Import Grafana dashboards -In order to import new Grafana dashboards or update a dashboard: +If you are running Reth and Grafana outside of docker, and wish to import new Grafana dashboards or update a dashboard: 1. Go to `Home` > `Dashboards` -2. Click `New` > `Import` +1. Click `New` > `Import` -3. Drag the JSON dashboard file to import it +1. Drag the JSON dashboard file to import it -4. If updating an existing dashboard, you will need to change the name and UID of the imported dashboard in order to +1. If updating an existing dashboard, you will need to change the name and UID of the imported dashboard in order to avoid conflict -5. Delete the old dashboard +1. Delete the old dashboard + +If you are running Reth and Grafana using docker, after having pulled the updated dashboards from `main`, restart the +Grafana service. This will update all dashboards. \ No newline at end of file From aba4991d0acb514190399c607fbcf56657789718 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Oct 2024 12:31:29 +0200 Subject: [PATCH 065/970] docs: note about type changes (#11925) --- crates/storage/codecs/src/lib.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 54ca046cb71..58a7db8c10c 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -48,6 +48,12 @@ pub mod test_utils; /// Regarding the `specialized_to/from_compact` methods: Mainly used as a workaround for not being /// able to specialize an impl over certain types like `Vec`/`Option` where `T` is a fixed /// size array like `Vec`. +/// +/// ## Caution +/// +/// Due to the bitfields, every type change on the rust type (e.g. 
`U256` to `u64`) is a breaking +/// change and will lead to a new, incompatible [`Compact`] implementation. Implementers must take +/// special care when changing or rearranging fields. pub trait Compact: Sized { /// Takes a buffer which can be written to. *Ideally*, it returns the length written to. fn to_compact(&self, buf: &mut B) -> usize From f25cceb9f93fcceaae59a030f81f088e29a5f0c1 Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Mon, 21 Oct 2024 19:59:09 +0800 Subject: [PATCH 066/970] perf: use Vec::with_capacity and reserve_exact (#11904) --- crates/evm/execution-types/src/chain.rs | 4 ++-- crates/net/discv4/src/lib.rs | 4 ++-- crates/rpc/rpc-eth-types/src/fee_history.rs | 2 +- crates/rpc/rpc-eth-types/src/simulate.rs | 2 +- crates/rpc/rpc/src/eth/core.rs | 9 +++++---- crates/rpc/rpc/src/eth/helpers/signer.rs | 2 +- crates/stages/api/src/pipeline/builder.rs | 4 +++- crates/stages/stages/src/stages/bodies.rs | 3 ++- crates/storage/codecs/src/lib.rs | 2 +- crates/storage/nippy-jar/src/compression/zstd.rs | 2 +- .../provider/src/providers/static_file/manager.rs | 2 +- crates/transaction-pool/benches/truncate.rs | 2 +- examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs | 1 + 13 files changed, 22 insertions(+), 17 deletions(-) diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index d3ed2913ea3..65f96ff5638 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -228,11 +228,11 @@ impl Chain { /// /// Attachment includes block number, block hash, transaction hash and transaction index. 
pub fn receipts_with_attachment(&self) -> Vec { - let mut receipt_attach = Vec::new(); + let mut receipt_attach = Vec::with_capacity(self.blocks().len()); for ((block_num, block), receipts) in self.blocks().iter().zip(self.execution_outcome.receipts().iter()) { - let mut tx_receipts = Vec::new(); + let mut tx_receipts = Vec::with_capacity(receipts.len()); for (tx, receipt) in block.body.transactions().zip(receipts.iter()) { tx_receipts.push(( tx.hash(), diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 7c14eac9b65..7963c6e6fd6 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -2324,9 +2324,9 @@ mod tests { let original = EnrForkIdEntry { fork_id: ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 0 }, }; - let mut encoded = Vec::new(); - original.encode(&mut encoded); let expected: [u8; 8] = [0xc7, 0xc6, 0x84, 0xdc, 0xe9, 0x6c, 0x2d, 0x80]; + let mut encoded = Vec::with_capacity(expected.len()); + original.encode(&mut encoded); assert_eq!(&expected[..], encoded.as_slice()); } diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 57dd276e5cf..c845d968387 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -305,7 +305,7 @@ pub fn calculate_reward_percentiles_for_block( // the percentiles are monotonically increasing. 
let mut tx_index = 0; let mut cumulative_gas_used = transactions.first().map(|tx| tx.gas_used).unwrap_or_default(); - let mut rewards_in_block = Vec::new(); + let mut rewards_in_block = Vec::with_capacity(percentiles.len()); for percentile in percentiles { // Empty blocks should return in a zero row if transactions.is_empty() { diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index a673da96720..77db511e625 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -183,7 +183,7 @@ pub fn build_block( ) -> Result>, EthApiError> { let mut calls: Vec = Vec::with_capacity(results.len()); let mut senders = Vec::with_capacity(results.len()); - let mut receipts = Vec::new(); + let mut receipts = Vec::with_capacity(results.len()); let mut log_index = 0; for (transaction_index, ((sender, result), tx)) in diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 6da46804005..5c7fbbd0023 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -421,8 +421,8 @@ mod tests { let mut rng = generators::rng(); // Build mock data - let mut gas_used_ratios = Vec::new(); - let mut base_fees_per_gas = Vec::new(); + let mut gas_used_ratios = Vec::with_capacity(block_count as usize); + let mut base_fees_per_gas = Vec::with_capacity(block_count as usize); let mut last_header = None; let mut parent_hash = B256::default(); @@ -444,8 +444,9 @@ mod tests { last_header = Some(header.clone()); parent_hash = hash; - let mut transactions = vec![]; - for _ in 0..100 { + const TOTAL_TRANSACTIONS: usize = 100; + let mut transactions = Vec::with_capacity(TOTAL_TRANSACTIONS); + for _ in 0..TOTAL_TRANSACTIONS { let random_fee: u128 = rng.gen(); if let Some(base_fee_per_gas) = header.base_fee_per_gas { diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index a5818aa494f..e59be0ac283 100644 --- 
a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -40,7 +40,7 @@ impl DevSigner { /// Generates provided number of random dev signers /// which satisfy [`EthSigner`] trait pub fn random_signers(num: u32) -> Vec> { - let mut signers = Vec::new(); + let mut signers = Vec::with_capacity(num as usize); for _ in 0..num { let sk = PrivateKeySigner::random_with(&mut rand::thread_rng()); diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 68ca887fe79..79a4c477ee6 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -34,7 +34,9 @@ impl PipelineBuilder { /// [`builder`][StageSet::builder] on the set which will convert it to a /// [`StageSetBuilder`][crate::StageSetBuilder]. pub fn add_stages>(mut self, set: Set) -> Self { - for stage in set.builder().build() { + let states = set.builder().build(); + self.stages.reserve_exact(states.len()); + for stage in states { self.stages.push(stage); } self diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 93d8a122992..06a5250913e 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -917,7 +917,8 @@ mod tests { return Poll::Ready(None) } - let mut response = Vec::default(); + let mut response = + Vec::with_capacity(std::cmp::min(this.headers.len(), this.batch_size as usize)); while let Some(header) = this.headers.pop_front() { if header.is_empty() { response.push(BlockResponse::Empty(header)) diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 58a7db8c10c..c432400a576 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -505,7 +505,7 @@ mod tests { #[test] fn compact_address() { - let mut buf = vec![]; + let mut buf = Vec::with_capacity(21); assert_eq!(Address::ZERO.to_compact(&mut buf), 20); assert_eq!(buf, vec![0; 
20]); diff --git a/crates/storage/nippy-jar/src/compression/zstd.rs b/crates/storage/nippy-jar/src/compression/zstd.rs index 7685941dfbe..896a65bd708 100644 --- a/crates/storage/nippy-jar/src/compression/zstd.rs +++ b/crates/storage/nippy-jar/src/compression/zstd.rs @@ -213,7 +213,7 @@ impl Compression for Zstd { return Err(NippyJarError::ColumnLenMismatch(self.columns, columns.len())) } - let mut dictionaries = vec![]; + let mut dictionaries = Vec::with_capacity(columns.len()); for column in columns { // ZSTD requires all training data to be continuous in memory, alongside the size of // each entry diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 20d6a1b184b..e81dc01f722 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1364,13 +1364,13 @@ impl TransactionsProviderExt for StaticFileProvider { // chunks are too big, there will be idle threads waiting for work. Choosing an // arbitrary smaller value to make sure it doesn't happen. 
let chunk_size = 100; - let mut channels = Vec::new(); // iterator over the chunks let chunks = tx_range .clone() .step_by(chunk_size) .map(|start| start..std::cmp::min(start + chunk_size as u64, tx_range.end)); + let mut channels = Vec::with_capacity(tx_range_size.div_ceil(chunk_size)); for chunk_range in chunks { let (channel_tx, channel_rx) = mpsc::channel(); diff --git a/crates/transaction-pool/benches/truncate.rs b/crates/transaction-pool/benches/truncate.rs index 22e45763054..1ca6f98499c 100644 --- a/crates/transaction-pool/benches/truncate.rs +++ b/crates/transaction-pool/benches/truncate.rs @@ -66,7 +66,7 @@ fn generate_many_transactions(senders: usize, max_depth: usize) -> Vec().new_tree(&mut runner).unwrap().current() % max_depth + 1; diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index cc761aa98a6..1c53e4f4105 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -109,6 +109,7 @@ where match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| tx.hash()).collect()) { Ok(blobs) => { + actions_to_queue.reserve_exact(txs.len()); for ((tx, _), sidecar) in txs.iter().zip(blobs.iter()) { let transaction = BlobTransaction::try_from_signed(tx.clone(), sidecar.clone()) .expect("should not fail to convert blob tx if it is already eip4844"); From 7119bb1fe000dbd7cb58e0fcd5a66c549538e9a2 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 21 Oct 2024 14:34:23 +0200 Subject: [PATCH 067/970] chore: introduce ExecuteOutput (#11929) --- crates/ethereum/evm/src/execute.rs | 6 +++--- crates/evm/src/execute.rs | 32 +++++++++++++++++++++--------- crates/optimism/evm/src/execute.rs | 6 +++--- 3 files changed, 29 insertions(+), 15 deletions(-) diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 185f351dd9f..e8b238c6a7c 100644 --- a/crates/ethereum/evm/src/execute.rs 
+++ b/crates/ethereum/evm/src/execute.rs @@ -14,7 +14,7 @@ use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, - BlockExecutionStrategyFactory, BlockValidationError, ProviderError, + BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, ProviderError, }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, @@ -152,7 +152,7 @@ where &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error> { + ) -> Result { let env = self.evm_env_for_block(&block.header, total_difficulty); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); @@ -203,7 +203,7 @@ where }, ); } - Ok((receipts, cumulative_gas_used)) + Ok(ExecuteOutput { receipts, gas_used: cumulative_gas_used }) } fn apply_post_execution_changes( diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index d7c8590eea8..2b3ce85e9b9 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -166,6 +166,15 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { DB: Database + Display>; } +/// Helper type for the output of executing a block. +#[derive(Debug, Clone)] +pub struct ExecuteOutput { + /// Receipts obtained after executing a block. + pub receipts: Vec, + /// Cumulative gas used in the block execution. + pub gas_used: u64, +} + /// Defines the strategy for executing a single block. pub trait BlockExecutionStrategy { /// The error type returned by this strategy's methods. @@ -183,7 +192,7 @@ pub trait BlockExecutionStrategy { &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error>; + ) -> Result; /// Applies any necessary changes after executing the block's transactions. 
fn apply_post_execution_changes( @@ -313,7 +322,8 @@ where let BlockExecutionInput { block, total_difficulty } = input; self.strategy.apply_pre_execution_changes(block, total_difficulty)?; - let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let ExecuteOutput { receipts, gas_used } = + self.strategy.execute_transactions(block, total_difficulty)?; let requests = self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; let state = self.strategy.finish(); @@ -332,7 +342,8 @@ where let BlockExecutionInput { block, total_difficulty } = input; self.strategy.apply_pre_execution_changes(block, total_difficulty)?; - let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let ExecuteOutput { receipts, gas_used } = + self.strategy.execute_transactions(block, total_difficulty)?; let requests = self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; @@ -356,7 +367,8 @@ where self.strategy.with_state_hook(Some(Box::new(state_hook))); self.strategy.apply_pre_execution_changes(block, total_difficulty)?; - let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let ExecuteOutput { receipts, gas_used } = + self.strategy.execute_transactions(block, total_difficulty)?; let requests = self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; @@ -407,7 +419,8 @@ where } self.strategy.apply_pre_execution_changes(block, total_difficulty)?; - let (receipts, _gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let ExecuteOutput { receipts, .. 
} = + self.strategy.execute_transactions(block, total_difficulty)?; let requests = self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; @@ -545,14 +558,14 @@ mod tests { _chain_spec: Arc, _evm_config: EvmConfig, state: State, - execute_transactions_result: (Vec, u64), + execute_transactions_result: ExecuteOutput, apply_post_execution_changes_result: Requests, finish_result: BundleState, } #[derive(Clone)] struct TestExecutorStrategyFactory { - execute_transactions_result: (Vec, u64), + execute_transactions_result: ExecuteOutput, apply_post_execution_changes_result: Requests, finish_result: BundleState, } @@ -599,7 +612,7 @@ mod tests { &mut self, _block: &BlockWithSenders, _total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error> { + ) -> Result { Ok(self.execute_transactions_result.clone()) } @@ -651,7 +664,8 @@ mod tests { fn test_strategy() { let expected_gas_used = 10; let expected_receipts = vec![Receipt::default()]; - let expected_execute_transactions_result = (expected_receipts.clone(), expected_gas_used); + let expected_execute_transactions_result = + ExecuteOutput { receipts: expected_receipts.clone(), gas_used: expected_gas_used }; let expected_apply_post_execution_changes_result = Requests::new(vec![bytes!("deadbeef")]); let expected_finish_result = BundleState::default(); diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 10ac5c5250a..20ef64457bb 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -10,7 +10,7 @@ use reth_consensus::ConsensusError; use reth_evm::{ execute::{ BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, - BlockExecutionStrategyFactory, BlockValidationError, ProviderError, + BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, ProviderError, }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, @@ -155,7 +155,7 @@ where &mut self, block: 
&BlockWithSenders, total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error> { + ) -> Result { let env = self.evm_env_for_block(&block.header, total_difficulty); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); @@ -240,7 +240,7 @@ where }); } - Ok((receipts, cumulative_gas_used)) + Ok(ExecuteOutput { receipts, gas_used: cumulative_gas_used }) } fn apply_post_execution_changes( From 20dc0c7da0a12e31ab0c896ccf4208585658ad07 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 21 Oct 2024 19:25:33 +0200 Subject: [PATCH 068/970] some fmt (#11933) --- crates/storage/provider/src/providers/blockchain_provider.rs | 2 +- crates/storage/provider/src/providers/mod.rs | 2 +- crates/transaction-pool/src/traits.rs | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 1866610e3f2..13215e11a8e 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -71,7 +71,7 @@ impl BlockchainProvider2 { /// the database to initialize the provider. pub fn new(database: ProviderFactory) -> ProviderResult { let provider = database.provider()?; - let best: ChainInfo = provider.chain_info()?; + let best = provider.chain_info()?; match provider.header_by_number(best.best_number)? { Some(header) => { drop(provider); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index b98bbf5be47..a67ebf89ba6 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -118,7 +118,7 @@ impl BlockchainProvider { /// the database to initialize the provider. 
pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { let provider = database.provider()?; - let best: ChainInfo = provider.chain_info()?; + let best = provider.chain_info()?; let latest_header = provider .header_by_number(best.best_number)? .ok_or_else(|| ProviderError::HeaderNotFound(best.best_number.into()))?; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index fcfdae4ed1b..56da11fe696 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -72,7 +72,6 @@ pub trait TransactionPool: Send + Sync + Clone { /// Imports all _external_ transactions /// - /// /// Consumer: Utility fn add_external_transactions( &self, @@ -83,7 +82,7 @@ pub trait TransactionPool: Send + Sync + Clone { /// Adds an _unvalidated_ transaction into the pool and subscribe to state changes. /// - /// This is the same as [TransactionPool::add_transaction] but returns an event stream for the + /// This is the same as [`TransactionPool::add_transaction`] but returns an event stream for the /// given transaction. 
/// /// Consumer: Custom From 3f2a41bd3f1d44c9a794a20876ca46a09499bb00 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Oct 2024 20:05:06 +0200 Subject: [PATCH 069/970] chore: rm redundant trait bound (#11940) --- crates/rpc/rpc/src/debug.rs | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 66465ef474a..2d9d6f7822e 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -21,8 +21,8 @@ use reth_evm::{ }; use reth_primitives::{Block, BlockId, BlockNumberOrTag, TransactionSignedEcRecovered}; use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProofProvider, - StateProviderFactory, TransactionVariant, + BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, + TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; @@ -81,7 +81,6 @@ where + HeaderProvider + ChainSpecProvider + StateProviderFactory - + EvmEnvProvider + 'static, Eth: EthApiTypes + TraceExt + 'static, BlockExecutor: BlockExecutorProvider, @@ -842,7 +841,6 @@ where + HeaderProvider + ChainSpecProvider + StateProviderFactory - + EvmEnvProvider + 'static, Eth: EthApiSpec + EthTransactions + TraceExt + 'static, BlockExecutor: BlockExecutorProvider, @@ -979,15 +977,6 @@ where .map_err(Into::into) } - /// Handler for `debug_executionWitness` - async fn debug_execution_witness( - &self, - block: BlockNumberOrTag, - ) -> RpcResult { - let _permit = self.acquire_trace_permit().await; - Self::debug_execution_witness(self, block).await.map_err(Into::into) - } - /// Handler for `debug_traceCall` async fn debug_trace_call( &self, @@ -1011,6 +1000,15 @@ where Self::debug_trace_call_many(self, bundles, state_context, opts).await.map_err(Into::into) } + /// Handler for `debug_executionWitness` + async fn debug_execution_witness( + &self, + block: 
BlockNumberOrTag, + ) -> RpcResult { + let _permit = self.acquire_trace_permit().await; + Self::debug_execution_witness(self, block).await.map_err(Into::into) + } + async fn debug_backtrace_at(&self, _location: &str) -> RpcResult<()> { Ok(()) } From d9d184d4984aec0099e3a832cf9dc9c6072af0df Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 21 Oct 2024 20:47:50 +0200 Subject: [PATCH 070/970] feat: default impl for some BlockExecutionStrategy methods (#11941) --- crates/ethereum/evm/src/execute.rs | 7 +------ crates/evm/src/execute.rs | 32 ++++++++++++++++++++++-------- crates/evm/src/test_utils.rs | 2 ++ crates/optimism/evm/src/execute.rs | 10 +--------- 4 files changed, 28 insertions(+), 23 deletions(-) diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index e8b238c6a7c..c62949902ae 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -21,7 +21,7 @@ use reth_evm::{ ConfigureEvm, }; use reth_primitives::{BlockWithSenders, Receipt}; -use reth_revm::db::{states::bundle_state::BundleRetention, BundleState, State}; +use reth_revm::db::State; use revm_primitives::{ db::{Database, DatabaseCommit}, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, @@ -264,11 +264,6 @@ where self.system_caller.with_state_hook(hook); } - fn finish(&mut self) -> BundleState { - self.state.merge_transitions(BundleRetention::Reverts); - self.state.take_bundle() - } - fn validate_block_post_execution( &self, block: &BlockWithSenders, diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 2b3ce85e9b9..677a15dfa1b 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -6,6 +6,7 @@ pub use reth_execution_errors::{ }; pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; pub use reth_storage_errors::provider::ProviderError; +use revm::db::states::bundle_state::BundleRetention; use crate::system_calls::OnStateHook; use 
alloc::{boxed::Box, vec::Vec}; @@ -176,7 +177,10 @@ pub struct ExecuteOutput { } /// Defines the strategy for executing a single block. -pub trait BlockExecutionStrategy { +pub trait BlockExecutionStrategy +where + DB: Database, +{ /// The error type returned by this strategy's methods. type Error: From + core::error::Error; @@ -209,18 +213,23 @@ pub trait BlockExecutionStrategy { fn state_mut(&mut self) -> &mut State; /// Sets a hook to be called after each state change during execution. - fn with_state_hook(&mut self, hook: Option>); + fn with_state_hook(&mut self, _hook: Option>) {} /// Returns the final bundle state. - fn finish(&mut self) -> BundleState; + fn finish(&mut self) -> BundleState { + self.state_mut().merge_transitions(BundleRetention::Reverts); + self.state_mut().take_bundle() + } /// Validate a block with regard to execution results. fn validate_block_post_execution( &self, - block: &BlockWithSenders, - receipts: &[Receipt], - requests: &Requests, - ) -> Result<(), ConsensusError>; + _block: &BlockWithSenders, + _receipts: &[Receipt], + _requests: &Requests, + ) -> Result<(), ConsensusError> { + Ok(()) + } } /// A strategy factory that can create block execution strategies. @@ -293,6 +302,7 @@ where pub struct BasicBlockExecutor where S: BlockExecutionStrategy, + DB: Database, { /// Block execution strategy. pub(crate) strategy: S, @@ -302,6 +312,7 @@ where impl BasicBlockExecutor where S: BlockExecutionStrategy, + DB: Database, { /// Creates a new `BasicBlockExecutor` with the given strategy. pub const fn new(strategy: S) -> Self { @@ -384,6 +395,7 @@ where pub struct BasicBatchExecutor where S: BlockExecutionStrategy, + DB: Database, { /// Batch execution strategy. pub(crate) strategy: S, @@ -395,6 +407,7 @@ where impl BasicBatchExecutor where S: BlockExecutionStrategy, + DB: Database, { /// Creates a new `BasicBatchExecutor` with the given strategy. 
pub const fn new(strategy: S, batch_record: BlockBatchRecord) -> Self { @@ -597,7 +610,10 @@ mod tests { } } - impl BlockExecutionStrategy for TestExecutorStrategy { + impl BlockExecutionStrategy for TestExecutorStrategy + where + DB: Database, + { type Error = BlockExecutionError; fn apply_pre_execution_changes( diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index c20f43dca9d..a4dc906494c 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -119,6 +119,7 @@ impl BatchExecutor for MockExecutorProvider { impl BasicBlockExecutor where S: BlockExecutionStrategy, + DB: Database, { /// Provides safe read access to the state pub fn with_state(&self, f: F) -> R @@ -140,6 +141,7 @@ where impl BasicBatchExecutor where S: BlockExecutionStrategy, + DB: Database, { /// Provides safe read access to the state pub fn with_state(&self, f: F) -> R diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 20ef64457bb..748e57e6b33 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -20,10 +20,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OptimismHardfork; use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; -use reth_revm::{ - db::{states::bundle_state::BundleRetention, BundleState}, - Database, State, -}; +use reth_revm::{Database, State}; use revm_primitives::{ db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, }; @@ -271,11 +268,6 @@ where self.system_caller.with_state_hook(hook); } - fn finish(&mut self) -> BundleState { - self.state.merge_transitions(BundleRetention::Reverts); - self.state.take_bundle() - } - fn validate_block_post_execution( &self, block: &BlockWithSenders, From bddd3202e4603251d76ecceb6bfacf40b26ec46e Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 21 Oct 2024 20:04:09 +0100 Subject: 
[PATCH 071/970] test(trie): narrow the range of keys for sparse trie fuzz (#11937) --- crates/trie/sparse/src/trie.rs | 73 ++++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 29 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 0b9ffb5c0ed..6680d7e9b65 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -265,15 +265,15 @@ impl RevealedSparseTrie { } /// Remove leaf node from the trie. - pub fn remove_leaf(&mut self, path: Nibbles) -> SparseTrieResult<()> { + pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { self.prefix_set.insert(path.clone()); - let existing = self.values.remove(&path); + let existing = self.values.remove(path); if existing.is_none() { // trie structure unchanged, return immediately return Ok(()) } - let mut removed_nodes = self.take_nodes_for_path(&path)?; + let mut removed_nodes = self.take_nodes_for_path(path)?; debug!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); // Pop the first node from the stack which is the leaf node we want to remove. let mut child = removed_nodes.pop().expect("leaf exists"); @@ -282,7 +282,7 @@ impl RevealedSparseTrie { let mut child_path = child.path.clone(); let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") }; child_path.extend_from_slice_unchecked(key); - assert_eq!(child_path, path); + assert_eq!(&child_path, path); } // If we don't have any other removed nodes, insert an empty node at the root. 
@@ -727,6 +727,7 @@ mod tests { use super::*; use alloy_primitives::U256; use itertools::Itertools; + use prop::sample::SizeRange; use proptest::prelude::*; use rand::seq::IteratorRandom; use reth_testing_utils::generators; @@ -998,7 +999,7 @@ mod tests { ]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3])).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -1049,7 +1050,7 @@ mod tests { ]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1])).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -1085,7 +1086,7 @@ mod tests { ]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2])).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -1118,7 +1119,7 @@ mod tests { ]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0])).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -1140,7 +1141,7 @@ mod tests { ]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3])).unwrap(); // Leaf (Key = 53302) pretty_assertions::assert_eq!( @@ -1151,7 +1152,7 @@ mod tests { ),]) ); - sparse.remove_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])).unwrap(); // Empty pretty_assertions::assert_eq!( @@ -1162,34 +1163,31 @@ mod tests { #[test] fn sparse_trie_fuzz() { - proptest!(ProptestConfig::with_cases(10), |(updates: Vec>)| { + // Having only the first 3 nibbles set, we narrow down the range of keys + // to 4096 different hashes. 
It allows us to generate collisions more likely + // to test the sparse trie updates. + const KEY_NIBBLES_LEN: usize = 3; + + fn test(updates: Vec>>) { let mut rng = generators::rng(); let mut state = BTreeMap::default(); - let mut unpacked_state = BTreeMap::default(); let mut sparse = RevealedSparseTrie::default(); for update in updates { let keys_to_delete_len = update.len() / 2; - let unpacked_update = update.iter().map(|(key, value)| ( - Nibbles::unpack(key), - alloy_rlp::encode_fixed_size(value).to_vec() - )); - // Insert state updates into the sparse trie and calculate the root - for (key, value) in unpacked_update.clone() { + for (key, value) in update.clone() { sparse.update_leaf(key, value).unwrap(); } let sparse_root = sparse.root(); // Insert state updates into the hash builder and calculate the root - unpacked_state.extend(unpacked_update); state.extend(update); - let keys = state.keys().map(Nibbles::unpack).collect::>(); let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - unpacked_state.clone(), - keys, + state.clone(), + state.keys().cloned().collect::>(), ); // Assert that the sparse trie root matches the hash builder root @@ -1204,20 +1202,18 @@ mod tests { .keys() .choose_multiple(&mut rng, keys_to_delete_len) .into_iter() - .copied() + .cloned() .collect::>(); for key in keys_to_delete { state.remove(&key).unwrap(); - unpacked_state.remove(&Nibbles::unpack(key)).unwrap(); - sparse.remove_leaf(Nibbles::unpack(key)).unwrap(); + sparse.remove_leaf(&key).unwrap(); } let sparse_root = sparse.root(); - let keys = state.keys().map(Nibbles::unpack).collect::>(); let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - unpacked_state.clone(), - keys, + state.clone(), + state.keys().cloned().collect::>(), ); // Assert that the sparse trie root matches the hash builder root @@ -1225,6 +1221,25 @@ mod tests { // Assert that the sparse trie nodes match the hash builder proof nodes 
assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } - }); + } + + /// Pad nibbles of length [`KEY_NIBBLES_LEN`] with zeros to the length of a B256 hash. + fn pad_nibbles(nibbles: Nibbles) -> Nibbles { + let mut base = + Nibbles::from_nibbles_unchecked([0; { B256::len_bytes() / 2 - KEY_NIBBLES_LEN }]); + base.extend_from_slice_unchecked(&nibbles); + base + } + + proptest!(ProptestConfig::with_cases(10), |( + updates in proptest::collection::vec( + proptest::collection::hash_map( + any_with::(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles), + any::>(), + 1..100, + ), + 1..100, + ) + )| { test(updates) }); } } From 7f47ef0fd3e540232f24f07d31f3b12e8be4e8cd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Oct 2024 21:18:24 +0200 Subject: [PATCH 072/970] chore: spawn eth_calls as blocking tasks (#11944) --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index b43b34305bd..64017f9f8fc 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -544,6 +544,16 @@ pub trait Call: LoadState + SpawnBlocking { /// /// This returns the configured [`EnvWithHandlerCfg`] for the given [`TransactionRequest`] at /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. + /// + /// This is primarily used by `eth_call`. + /// + /// # Blocking behaviour + /// + /// This assumes executing the call is relatively more expensive on IO than CPU because it + /// transacts a single transaction on an empty in memory database. Because `eth_call`s are + /// usually allowed to consume a lot of gas, this also allows a lot of memory operations so + /// we assume this is not primarily CPU bound and instead spawn the call on a regular tokio task + /// instead, where blocking IO is less problematic. 
fn spawn_with_call_at( &self, request: TransactionRequest, @@ -561,7 +571,7 @@ pub trait Call: LoadState + SpawnBlocking { async move { let (cfg, block_env, at) = self.evm_env_at(at).await?; let this = self.clone(); - self.spawn_tracing(move |_| { + self.spawn_blocking_io(move |_| { let state = this.state_at_block_id(at)?; let mut db = CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); From 1f4ca32e35a7cc4bfccf7cc5a974429e3df4a846 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 21 Oct 2024 21:18:30 +0200 Subject: [PATCH 073/970] refactor(primitives-traits): rm useless trait bounds for `Receipt` (#11942) --- crates/primitives-traits/src/receipt.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index e2d19e4d4ff..5c317dc49a2 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,7 +1,5 @@ //! Receipt abstraction -use alloc::fmt; - use alloy_consensus::TxReceipt; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; @@ -14,10 +12,6 @@ impl FullReceipt for T where T: Receipt + Compact {} /// Abstraction of a receipt. 
pub trait Receipt: TxReceipt - + Clone - + fmt::Debug - + PartialEq - + Eq + Default + alloy_rlp::Encodable + alloy_rlp::Decodable From 4d3b35dbd24c3a5c6b1a4f7bd86b1451e8efafcc Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 21 Oct 2024 21:31:40 +0200 Subject: [PATCH 074/970] test(tx-pool): add unit test for `remove_sender_count` (#11939) --- crates/transaction-pool/src/pool/parked.rs | 64 +++++++++++++++++++++- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index b591fdb539a..407f04fd5be 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -35,8 +35,8 @@ pub struct ParkedPool { best: BTreeSet>, /// Keeps track of last submission id for each sender. /// - /// This are sorted in Reverse order, so the last (highest) submission id is first, and the - /// lowest(oldest) is the last. + /// This are sorted in reverse order, so the last (highest) submission id is first, and the + /// lowest (oldest) is the last. last_sender_submission: BTreeSet, /// Keeps track of the number of transactions in the pool by the sender and the last submission /// id. 
@@ -856,4 +856,64 @@ mod tests { assert_eq!(submission_info2.sender_id, sender2); assert_eq!(submission_info2.submission_id, 2); } + + #[test] + fn test_remove_sender_count() { + // Initialize a mock transaction factory + let mut f = MockTransactionFactory::default(); + // Create an empty transaction pool + let mut pool = ParkedPool::>::default(); + // Generate two validated transactions and add them to the pool + let tx1 = f.validated_arc(MockTransaction::eip1559().inc_price()); + let tx2 = f.validated_arc(MockTransaction::eip1559().inc_price()); + pool.add_transaction(tx1); + pool.add_transaction(tx2); + + // Define two different sender IDs and their corresponding submission IDs + let sender1: SenderId = 11.into(); + let sender2: SenderId = 22.into(); + + // Add the sender counts to the pool + pool.add_sender_count(sender1, 1); + + // We add sender 2 multiple times to test the removal of sender counts + pool.add_sender_count(sender2, 2); + pool.add_sender_count(sender2, 3); + + // Before removing the sender count we should have 4 sender transaction counts + assert_eq!(pool.sender_transaction_count.len(), 4); + assert!(pool.sender_transaction_count.contains_key(&sender1)); + + // We should have 1 sender transaction count for sender 1 before removing the sender count + assert_eq!(pool.sender_transaction_count.get(&sender1).unwrap().count, 1); + + // Remove the sender count for sender 1 + pool.remove_sender_count(sender1); + + // After removing the sender count we should have 3 sender transaction counts remaining + assert_eq!(pool.sender_transaction_count.len(), 3); + assert!(!pool.sender_transaction_count.contains_key(&sender1)); + + // Check the sender transaction count for sender 2 before removing the sender count + assert_eq!( + *pool.sender_transaction_count.get(&sender2).unwrap(), + SenderTransactionCount { count: 2, last_submission_id: 3 } + ); + + // Remove the sender count for sender 2 + pool.remove_sender_count(sender2); + + // After removing the sender 
count for sender 2, we still have 3 sender transaction counts + // remaining. + // + // This is because we added sender 2 multiple times and we only removed the last submission. + assert_eq!(pool.sender_transaction_count.len(), 3); + assert!(pool.sender_transaction_count.contains_key(&sender2)); + + // Sender transaction count for sender 2 should be updated correctly + assert_eq!( + *pool.sender_transaction_count.get(&sender2).unwrap(), + SenderTransactionCount { count: 1, last_submission_id: 3 } + ); + } } From 387b0f8b361f0c42cee92cc598232fdf344487b9 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 21 Oct 2024 22:14:18 +0200 Subject: [PATCH 075/970] refactor(tx-pool): small refactor for `contains_conflicting_transaction` (#11935) --- crates/transaction-pool/src/pool/txpool.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index a85a9a1856b..e5857d56457 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1405,12 +1405,9 @@ impl AllTransactions { /// Caution: This assumes that mutually exclusive invariant is always true for the same sender. #[inline] fn contains_conflicting_transaction(&self, tx: &ValidPoolTransaction) -> bool { - let mut iter = self.txs_iter(tx.transaction_id.sender); - if let Some((_, existing)) = iter.next() { - return tx.tx_type_conflicts_with(&existing.transaction) - } - // no existing transaction for this sender - false + self.txs_iter(tx.transaction_id.sender) + .next() + .map_or(false, |(_, existing)| tx.tx_type_conflicts_with(&existing.transaction)) } /// Additional checks for a new transaction. 
From cc895e705249197778c2b467af603b6f8e0f7641 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 22 Oct 2024 00:14:48 +0400 Subject: [PATCH 076/970] fix: always accept transactions with current nonce (#11931) --- crates/transaction-pool/src/pool/txpool.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index e5857d56457..4fbec1105aa 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1421,11 +1421,15 @@ impl AllTransactions { fn ensure_valid( &self, transaction: ValidPoolTransaction, + on_chain_nonce: u64, ) -> Result, InsertErr> { if !self.local_transactions_config.is_local(transaction.origin, transaction.sender()) { let current_txs = self.tx_counter.get(&transaction.sender_id()).copied().unwrap_or_default(); - if current_txs >= self.max_account_slots { + + // Reject transactions if sender's capacity is exceeded. 
+ // If transaction's nonce matches on-chain nonce always let it through + if current_txs >= self.max_account_slots && transaction.nonce() > on_chain_nonce { return Err(InsertErr::ExceededSenderTransactionsCapacity { transaction: Arc::new(transaction), }) @@ -1592,7 +1596,7 @@ impl AllTransactions { ) -> InsertResult { assert!(on_chain_nonce <= transaction.nonce(), "Invalid transaction"); - let mut transaction = self.ensure_valid(transaction)?; + let mut transaction = self.ensure_valid(transaction, on_chain_nonce)?; let inserted_tx_id = *transaction.id(); let mut state = TxState::default(); @@ -2631,6 +2635,7 @@ mod tests { let mut pool = AllTransactions::default(); let mut tx = MockTransaction::eip1559(); + let unblocked_tx = tx.clone(); for _ in 0..pool.max_account_slots { tx = tx.next(); pool.insert_tx(f.validated(tx.clone()), on_chain_balance, on_chain_nonce).unwrap(); @@ -2644,6 +2649,10 @@ mod tests { let err = pool.insert_tx(f.validated(tx.next()), on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::ExceededSenderTransactionsCapacity { .. 
})); + + assert!(pool + .insert_tx(f.validated(unblocked_tx), on_chain_balance, on_chain_nonce) + .is_ok()); } #[test] From 88a38de4d74cb8f01de405b67a65b098580434f0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Oct 2024 22:46:16 +0200 Subject: [PATCH 077/970] chore(rpc): relax some types (#11946) --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 11 ++--- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 2 +- crates/rpc/rpc-eth-types/src/cache/db.rs | 50 ++++++++++----------- 3 files changed, 32 insertions(+), 31 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 64017f9f8fc..0acf6646294 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -655,14 +655,14 @@ pub trait Call: LoadState + SpawnBlocking { /// Returns the index of the target transaction in the given iterator. fn replay_transactions_until<'a, DB, I>( &self, - db: &mut CacheDB, + db: &mut DB, cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, transactions: I, target_tx_hash: B256, ) -> Result where - DB: DatabaseRef, + DB: Database + DatabaseCommit, EthApiError: From, I: IntoIterator, { @@ -929,14 +929,15 @@ pub trait Call: LoadState + SpawnBlocking { /// Executes the requests again after an out of gas error to check if the error is gas related /// or not #[inline] - fn map_out_of_gas_err( + fn map_out_of_gas_err( &self, env_gas_limit: U256, mut env: EnvWithHandlerCfg, - db: &mut CacheDB>, + db: &mut DB, ) -> Self::Error where - S: StateProvider, + DB: Database, + EthApiError: From, { let req_gas_limit = env.tx.gas_limit; env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 981de8fa6c4..64056148cd3 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -117,7 +117,7 @@ pub trait Trace: LoadState { 
self.spawn_with_state_at_block(at, move |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _) = this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?; + let (res, _) = this.inspect(&mut db, env, &mut inspector)?; f(inspector, res, db) }) } diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 7422dcfb8a7..627fd2b2df7 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -114,6 +114,13 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { self.0.block_hash(block_number) } + fn convert_block_hash( + &self, + hash_or_number: alloy_rpc_types::BlockHashOrNumber, + ) -> reth_errors::ProviderResult> { + self.0.convert_block_hash(hash_or_number) + } + fn canonical_hashes_range( &self, start: alloy_primitives::BlockNumber, @@ -121,21 +128,22 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { ) -> reth_errors::ProviderResult> { self.0.canonical_hashes_range(start, end) } +} - fn convert_block_hash( +impl StateProvider for StateProviderTraitObjWrapper<'_> { + fn storage( &self, - hash_or_number: alloy_rpc_types::BlockHashOrNumber, - ) -> reth_errors::ProviderResult> { - self.0.convert_block_hash(hash_or_number) + account: revm_primitives::Address, + storage_key: alloy_primitives::StorageKey, + ) -> reth_errors::ProviderResult> { + self.0.storage(account, storage_key) } -} -impl StateProvider for StateProviderTraitObjWrapper<'_> { - fn account_balance( + fn bytecode_by_hash( &self, - addr: revm_primitives::Address, - ) -> reth_errors::ProviderResult> { - self.0.account_balance(addr) + code_hash: B256, + ) -> reth_errors::ProviderResult> { + self.0.bytecode_by_hash(code_hash) } fn account_code( @@ -145,26 +153,18 @@ impl StateProvider for StateProviderTraitObjWrapper<'_> { self.0.account_code(addr) } - fn account_nonce( + fn 
account_balance( &self, addr: revm_primitives::Address, - ) -> reth_errors::ProviderResult> { - self.0.account_nonce(addr) - } - - fn bytecode_by_hash( - &self, - code_hash: B256, - ) -> reth_errors::ProviderResult> { - self.0.bytecode_by_hash(code_hash) + ) -> reth_errors::ProviderResult> { + self.0.account_balance(addr) } - fn storage( + fn account_nonce( &self, - account: revm_primitives::Address, - storage_key: alloy_primitives::StorageKey, - ) -> reth_errors::ProviderResult> { - self.0.storage(account, storage_key) + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.account_nonce(addr) } } From f2ac547666b3b79b20597571530768e7a4a2dc09 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 22 Oct 2024 00:02:44 +0300 Subject: [PATCH 078/970] Add custom beacon withdrawals example (#9497) Co-authored-by: Federico Gimenez --- Cargo.lock | 33 +- Cargo.toml | 1 + examples/custom-beacon-withdrawals/Cargo.toml | 26 ++ .../custom-beacon-withdrawals/src/main.rs | 286 ++++++++++++++++++ 4 files changed, 338 insertions(+), 8 deletions(-) create mode 100644 examples/custom-beacon-withdrawals/Cargo.toml create mode 100644 examples/custom-beacon-withdrawals/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 19287e6669a..9082aaeb274 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -609,9 +609,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2395336745358cc47207442127c47c63801a7065ecc0aa928da844f8bb5576" +checksum = "b0900b83f4ee1f45c640ceee596afbc118051921b9438fdb5a3175c1a7e05f8b" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -623,9 +623,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9ed5047c9a241df94327879c2b0729155b58b941eae7805a7ada2e19436e6b39" +checksum = "a41b1e78dde06b5e12e6702fa8c1d30621bf07728ba75b801fb801c9c6a0ba10" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -641,9 +641,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dee02a81f529c415082235129f0df8b8e60aa1601b9c9298ffe54d75f57210b" +checksum = "91dc311a561a306664393407b88d3e53ae58581624128afd8a15faa5de3627dc" dependencies = [ "const-hex", "dunce", @@ -2783,6 +2783,23 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "example-custom-beacon-withdrawals" +version = "0.0.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-sol-macro", + "alloy-sol-types", + "eyre", + "reth", + "reth-chainspec", + "reth-evm", + "reth-evm-ethereum", + "reth-node-ethereum", + "reth-primitives", +] + [[package]] name = "example-custom-dev-node" version = "0.0.0" @@ -10355,9 +10372,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebfc1bfd06acc78f16d8fd3ef846bc222ee7002468d10a7dce8d703d6eab89a3" +checksum = "9d5e0c2ea8db64b2898b62ea2fbd60204ca95e0b2c6bdf53ff768bbe916fbe4d" dependencies = [ "paste", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 5b6912c33f2..b32e2d0dfbe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -145,6 +145,7 @@ members = [ "examples/rpc-db/", "examples/stateful-precompile/", "examples/txpool-tracing/", + "examples/custom-beacon-withdrawals", "testing/ef-tests/", "testing/testing-utils", ] diff --git a/examples/custom-beacon-withdrawals/Cargo.toml b/examples/custom-beacon-withdrawals/Cargo.toml new file mode 100644 index 00000000000..c396ca11df8 --- /dev/null +++ b/examples/custom-beacon-withdrawals/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "example-custom-beacon-withdrawals" +version = 
"0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true +reth-chainspec.workspace = true +reth-evm.workspace = true +reth-primitives.workspace = true + +alloy-sol-macro = "0.8.9" +alloy-sol-types.workspace = true +alloy-eips.workspace = true +alloy-consensus.workspace = true + +eyre.workspace = true + +[features] +optimism = [ + "reth-primitives/optimism" +] \ No newline at end of file diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs new file mode 100644 index 00000000000..09dad2f7007 --- /dev/null +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -0,0 +1,286 @@ +//! Example for how to modify a block post-execution step. It credits beacon withdrawals with a +//! custom mechanism instead of minting native tokens + +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use alloy_eips::eip7685::Requests; +use alloy_sol_macro::sol; +use alloy_sol_types::SolCall; +#[cfg(feature = "optimism")] +use reth::revm::primitives::OptimismFields; +use reth::{ + api::{ConfigureEvm, ConfigureEvmEnv, NodeTypesWithEngine}, + builder::{components::ExecutorBuilder, BuilderContext, FullNodeTypes}, + cli::Cli, + providers::ProviderError, + revm::{ + interpreter::Host, + primitives::{Env, TransactTo, TxEnv}, + Database, DatabaseCommit, Evm, State, + }, +}; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_evm::execute::{ + BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, ExecuteOutput, + InternalBlockExecutionError, +}; +use reth_evm_ethereum::EthEvmConfig; +use reth_node_ethereum::{node::EthereumAddOns, BasicBlockExecutorProvider, EthereumNode}; +use reth_primitives::{ + revm_primitives::{ + address, Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, U256, + }, + BlockWithSenders, Receipt, Withdrawal, +}; +use std::{fmt::Display, 
sync::Arc}; + +pub const SYSTEM_ADDRESS: Address = address!("fffffffffffffffffffffffffffffffffffffffe"); +pub const WITHDRAWALS_ADDRESS: Address = address!("4200000000000000000000000000000000000000"); + +fn main() { + Cli::parse_args() + .run(|builder, _| async move { + let handle = builder + // use the default ethereum node types + .with_types::() + // Configure the components of the node + // use default ethereum components but use our custom pool + .with_components( + EthereumNode::components().executor(CustomExecutorBuilder::default()), + ) + .with_add_ons(EthereumAddOns::default()) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) + .unwrap(); +} + +/// A custom executor builder +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct CustomExecutorBuilder; + +impl ExecutorBuilder for CustomExecutorBuilder +where + Types: NodeTypesWithEngine, + Node: FullNodeTypes, +{ + type EVM = EthEvmConfig; + type Executor = BasicBlockExecutorProvider; + + async fn build_evm( + self, + ctx: &BuilderContext, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let chain_spec = ctx.chain_spec(); + let evm_config = EthEvmConfig::new(ctx.chain_spec()); + let strategy_factory = + CustomExecutorStrategyFactory { chain_spec, evm_config: evm_config.clone() }; + let executor = BasicBlockExecutorProvider::new(strategy_factory); + + Ok((evm_config, executor)) + } +} + +#[derive(Clone)] +pub struct CustomExecutorStrategyFactory { + /// The chainspec + chain_spec: Arc, + /// How to create an EVM. 
+ evm_config: EthEvmConfig, +} + +impl BlockExecutionStrategyFactory for CustomExecutorStrategyFactory { + type Strategy + Display>> = CustomExecutorStrategy; + + fn create_strategy(&self, db: DB) -> Self::Strategy + where + DB: Database + Display>, + { + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + CustomExecutorStrategy { + state, + chain_spec: self.chain_spec.clone(), + evm_config: self.evm_config.clone(), + } + } +} + +pub struct CustomExecutorStrategy +where + DB: Database + Display>, +{ + /// The chainspec + chain_spec: Arc, + /// How to create an EVM. + evm_config: EthEvmConfig, + /// Current state for block execution. + state: State, +} + +impl CustomExecutorStrategy +where + DB: Database + Display>, +{ + /// Configures a new evm configuration and block environment for the given block. + /// + /// # Caution + /// + /// This does not initialize the tx environment. + fn evm_env_for_block( + &self, + header: &alloy_consensus::Header, + total_difficulty: U256, + ) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl BlockExecutionStrategy for CustomExecutorStrategy +where + DB: Database + Display>, +{ + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, + block: &BlockWithSenders, + _total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. 
+ let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + Ok(()) + } + + fn execute_transactions( + &mut self, + _block: &BlockWithSenders, + _total_difficulty: U256, + ) -> Result { + Ok(ExecuteOutput { receipts: vec![], gas_used: 0 }) + } + + fn apply_post_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + _receipts: &[Receipt], + ) -> Result { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + if let Some(withdrawals) = block.body.withdrawals.as_ref() { + apply_withdrawals_contract_call(withdrawals, &mut evm)?; + } + + Ok(Requests::default()) + } + + fn state_ref(&self) -> &State { + &self.state + } + + fn state_mut(&mut self) -> &mut State { + &mut self.state + } +} + +sol!( + function withdrawals( + uint64[] calldata amounts, + address[] calldata addresses + ); +); + +/// Applies the post-block call to the withdrawal / deposit contract, using the given block, +/// [`ChainSpec`], EVM. 
+pub fn apply_withdrawals_contract_call( + withdrawals: &[Withdrawal], + evm: &mut Evm<'_, EXT, DB>, +) -> Result<(), BlockExecutionError> +where + DB::Error: std::fmt::Display, +{ + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // modify env for pre block call + fill_tx_env_with_system_contract_call( + &mut evm.context.evm.env, + SYSTEM_ADDRESS, + WITHDRAWALS_ADDRESS, + withdrawalsCall { + amounts: withdrawals.iter().map(|w| w.amount).collect::>(), + addresses: withdrawals.iter().map(|w| w.address).collect::>(), + } + .abi_encode() + .into(), + ); + + let mut state = match evm.transact() { + Ok(res) => res.state, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockExecutionError::Internal(InternalBlockExecutionError::Other( + format!("withdrawal contract system call revert: {}", e).into(), + ))) + } + }; + + // Clean-up post system tx context + state.remove(&SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + evm.context.evm.db.commit(state); + // re-set the previous env + evm.context.evm.env = previous_env; + + Ok(()) +} + +fn fill_tx_env_with_system_contract_call( + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, +) { + env.tx = TxEnv { + caller, + transact_to: TransactTo::Call(contract), + // Explicitly set nonce to None so revm does not do any nonce checks + nonce: None, + gas_limit: 30_000_000, + value: U256::ZERO, + data, + // Setting the gas price to zero enforces that no value is transferred as part of the call, + // and that the call will not count against the block's gas limit + gas_price: U256::ZERO, + // The chain ID check is not relevant here and is disabled if set to None + chain_id: None, + // Setting the gas priority fee to None ensures the effective gas price is derived from the + // `gas_price` field, which we need to be zero + gas_priority_fee: None, + access_list: Vec::new(), + // blob fields can be None for this tx + blob_hashes: Vec::new(), + 
max_fee_per_blob_gas: None, + authorization_list: None, + #[cfg(feature = "optimism")] + optimism: OptimismFields::default(), + }; + + // ensure the block gas limit is >= the tx + env.block.gas_limit = U256::from(env.tx.gas_limit); + + // disable the base fee check for this call by setting the base fee to zero + env.block.basefee = U256::ZERO; +} From 51594c9a68e437075f0aa0bfc13491dd82176d7c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Oct 2024 23:36:44 +0200 Subject: [PATCH 079/970] chore: relax payload traits (#11947) --- crates/e2e-test-utils/src/payload.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 946d9af5753..a5e4f56ac42 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -1,20 +1,20 @@ use futures_util::StreamExt; -use reth::api::{BuiltPayload, EngineTypes, PayloadBuilderAttributes}; +use reth::api::{BuiltPayload, PayloadBuilderAttributes}; use reth_payload_builder::{PayloadBuilderHandle, PayloadId}; -use reth_payload_primitives::{Events, PayloadBuilder}; +use reth_payload_primitives::{Events, PayloadBuilder, PayloadTypes}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations #[derive(Debug)] -pub struct PayloadTestContext { - pub payload_event_stream: BroadcastStream>, - payload_builder: PayloadBuilderHandle, +pub struct PayloadTestContext { + pub payload_event_stream: BroadcastStream>, + payload_builder: PayloadBuilderHandle, pub timestamp: u64, } -impl PayloadTestContext { +impl PayloadTestContext { /// Creates a new payload helper - pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { + pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { let payload_events = payload_builder.subscribe().await?; let payload_event_stream = payload_events.into_stream(); // Cancun timestamp @@ -24,10 +24,10 @@ impl 
PayloadTestContext { /// Creates a new payload job from static attributes pub async fn new_payload( &mut self, - attributes_generator: impl Fn(u64) -> E::PayloadBuilderAttributes, - ) -> eyre::Result { + attributes_generator: impl Fn(u64) -> T::PayloadBuilderAttributes, + ) -> eyre::Result { self.timestamp += 1; - let attributes: E::PayloadBuilderAttributes = attributes_generator(self.timestamp); + let attributes = attributes_generator(self.timestamp); self.payload_builder.send_new_payload(attributes.clone()).await.unwrap()?; Ok(attributes) } @@ -35,10 +35,10 @@ impl PayloadTestContext { /// Asserts that the next event is a payload attributes event pub async fn expect_attr_event( &mut self, - attrs: E::PayloadBuilderAttributes, + attrs: T::PayloadBuilderAttributes, ) -> eyre::Result<()> { let first_event = self.payload_event_stream.next().await.unwrap()?; - if let reth::payload::Events::Attributes(attr) = first_event { + if let Events::Attributes(attr) = first_event { assert_eq!(attrs.timestamp(), attr.timestamp()); } else { panic!("Expect first event as payload attributes.") @@ -59,9 +59,9 @@ impl PayloadTestContext { } /// Expects the next event to be a built payload event or panics - pub async fn expect_built_payload(&mut self) -> eyre::Result { + pub async fn expect_built_payload(&mut self) -> eyre::Result { let second_event = self.payload_event_stream.next().await.unwrap()?; - if let reth::payload::Events::BuiltPayload(payload) = second_event { + if let Events::BuiltPayload(payload) = second_event { Ok(payload) } else { panic!("Expect a built payload event."); From 2973f0c3e80705d0303a62874ec4fffadf3472aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 12:33:47 +0200 Subject: [PATCH 080/970] chore(deps): bump dawidd6/action-homebrew-bump-formula from 3 to 4 (#11951) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- 
.github/workflows/release-dist.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-dist.yml b/.github/workflows/release-dist.yml index 2142360e039..f7df80e81f9 100644 --- a/.github/workflows/release-dist.yml +++ b/.github/workflows/release-dist.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Update Homebrew formula - uses: dawidd6/action-homebrew-bump-formula@v3 + uses: dawidd6/action-homebrew-bump-formula@v4 with: token: ${{ secrets.HOMEBREW }} no_fork: true From e52f647644cb10fe6d310428d75ac3045140f311 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 22 Oct 2024 12:34:53 +0200 Subject: [PATCH 081/970] chore(primitives): use alloy eth conversion constants (#11892) --- Cargo.lock | 3 ++ crates/consensus/common/src/calc.rs | 4 +-- crates/ethereum/evm/src/execute.rs | 6 ++-- crates/evm/Cargo.toml | 36 ++++++++++--------- crates/evm/src/state_change.rs | 2 +- crates/exex/exex/src/backfill/test_utils.rs | 5 ++- crates/node/events/Cargo.toml | 1 + crates/node/events/src/node.rs | 3 +- crates/primitives-traits/src/constants/mod.rs | 9 ----- crates/revm/Cargo.toml | 27 +++++++------- crates/rpc/rpc-eth-types/src/gas_oracle.rs | 3 +- crates/rpc/rpc-eth-types/src/revm_utils.rs | 2 +- 12 files changed, 50 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9082aaeb274..37135dfd545 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7441,6 +7441,7 @@ dependencies = [ name = "reth-evm" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", @@ -8053,6 +8054,7 @@ dependencies = [ name = "reth-node-events" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-engine", "futures", @@ -8590,6 +8592,7 @@ dependencies = [ name = "reth-revm" version = "1.1.0" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "reth-ethereum-forks", diff --git 
a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs index 3f519332fec..e30c5b715f5 100644 --- a/crates/consensus/common/src/calc.rs +++ b/crates/consensus/common/src/calc.rs @@ -1,6 +1,6 @@ +use alloy_consensus::constants::ETH_TO_WEI; use alloy_primitives::{BlockNumber, U256}; use reth_chainspec::{EthereumHardfork, Hardforks}; -use reth_primitives::constants::ETH_TO_WEI; /// Calculates the base block reward. /// @@ -57,7 +57,7 @@ pub fn base_block_reward_pre_merge(chain_spec: impl Hardforks, block_number: Blo /// ``` /// # use reth_chainspec::MAINNET; /// # use reth_consensus_common::calc::{base_block_reward, block_reward}; -/// # use reth_primitives::constants::ETH_TO_WEI; +/// # use alloy_consensus::constants::ETH_TO_WEI; /// # use alloy_primitives::U256; /// # /// // This is block 126 on mainnet. diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index c62949902ae..f082a3a707e 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -296,7 +296,7 @@ impl EthExecutorProvider { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{Header, TxLegacy}; + use alloy_consensus::{constants::ETH_TO_WEI, Header, TxLegacy}; use alloy_eips::{ eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, @@ -309,9 +309,7 @@ mod tests { BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, }; use reth_execution_types::BlockExecutionOutput; - use reth_primitives::{ - constants::ETH_TO_WEI, public_key_to_address, Account, Block, BlockBody, Transaction, - }; + use reth_primitives::{public_key_to_address, Account, Block, BlockBody, Transaction}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 6a1e1fe0d72..d97a5786419 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml 
@@ -39,27 +39,29 @@ parking_lot = { workspace = true, optional = true } [dev-dependencies] parking_lot.workspace = true reth-ethereum-forks.workspace = true +alloy-consensus.workspace = true [features] default = ["std"] std = [ - "dep:metrics", - "dep:reth-metrics", - "reth-consensus/std", - "reth-primitives/std", - "reth-primitives-traits/std", - "reth-revm/std", - "alloy-eips/std", - "alloy-primitives/std", - "revm-primitives/std", - "revm/std" + "dep:metrics", + "dep:reth-metrics", + "reth-consensus/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "reth-revm/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-consensus/std", + "revm-primitives/std", + "revm/std", ] test-utils = [ - "dep:parking_lot", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-revm/test-utils", - "revm/test-utils" + "dep:parking_lot", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "revm/test-utils", ] diff --git a/crates/evm/src/state_change.rs b/crates/evm/src/state_change.rs index 2d520901527..2a3d93f94d9 100644 --- a/crates/evm/src/state_change.rs +++ b/crates/evm/src/state_change.rs @@ -91,9 +91,9 @@ pub fn insert_post_block_withdrawals_balance_increments Date: Tue, 22 Oct 2024 12:35:09 +0200 Subject: [PATCH 082/970] fix: spawn network manager on test exex ctx (#11907) --- crates/exex/test-utils/src/lib.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 9b86da7c77a..1f6ea75ce6d 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -268,15 +268,16 @@ pub async fn test_exex_context_with_chain_spec( let network_manager = NetworkManager::new( NetworkConfigBuilder::new(SecretKey::new(&mut rand::thread_rng())) 
.with_unused_discovery_port() + .with_unused_listener_port() .build(provider_factory.clone()), ) .await?; let network = network_manager.handle().clone(); - - let (_, payload_builder) = NoopPayloadBuilderService::::new(); - let tasks = TaskManager::current(); let task_executor = tasks.executor(); + tasks.executor().spawn(network_manager); + + let (_, payload_builder) = NoopPayloadBuilderService::::new(); let components = NodeAdapter::, _>, _> { components: Components { From 4a68c5e2d49a4789fcccbce2ed645a40f86e4844 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 22 Oct 2024 12:35:27 +0200 Subject: [PATCH 083/970] refactor(rpc): small refactor in `block_with_senders` (#11950) --- crates/rpc/rpc-eth-api/src/helpers/block.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 9993b477a66..9bf35d850af 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -219,17 +219,17 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - let maybe_pending = LoadPendingBlock::provider(self) + if let Some(pending_block) = LoadPendingBlock::provider(self) .pending_block_with_senders() - .map_err(Self::Error::from_eth_err)?; - return if maybe_pending.is_some() { - Ok(maybe_pending.map(Arc::new)) - } else { - // If no pending block from provider, try to get local pending block - return match self.local_pending_block().await? { - Some((block, _)) => Ok(Some(Arc::new(block))), - None => Ok(None), - }; + .map_err(Self::Error::from_eth_err)? + { + return Ok(Some(Arc::new(pending_block))); + } + + // If no pending block from provider, try to get local pending block + return match self.local_pending_block().await? 
{ + Some((block, _)) => Ok(Some(Arc::new(block))), + None => Ok(None), }; } From 0df7f65f3d0e9ab5af616c8d5913d9d660113775 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Oct 2024 13:09:44 +0200 Subject: [PATCH 084/970] chore: serde 1.0.210 (#11963) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index b32e2d0dfbe..91e3ac6e501 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -503,7 +503,7 @@ rand = "0.8.5" rayon = "1.7" rustc-hash = { version = "2.0", default-features = false } schnellru = "0.2" -serde = { version = "1.0", default-features = false } +serde = { version = "=1.0.210", default-features = false } serde_json = "1.0.94" serde_with = "3.3.0" sha2 = { version = "0.10", default-features = false } From 75b39bc2647b28a62dd583a56090305338264a4c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Oct 2024 13:28:27 +0200 Subject: [PATCH 085/970] chore: run clippy locked (#11964) --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 1921859c272..4723d8a4d57 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -18,10 +18,10 @@ jobs: matrix: include: - type: ethereum - args: --bin reth --workspace + args: --bin reth --workspace --locked features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" - type: optimism - args: --bin op-reth --workspace + args: --bin op-reth --workspace --locked features: "optimism asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" - type: book args: --manifest-path book/sources/Cargo.toml --workspace --bins From e8205936daea4419191d68f0a8aa82517a025033 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 22 Oct 2024 13:38:45 +0200 Subject: [PATCH 086/970] 
primitive-traits: rm unused `SELECTOR_LEN` (#11959) --- crates/primitives-traits/src/constants/mod.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 12ef300f667..2874c596a8a 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -10,9 +10,6 @@ pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; /// The client version: `reth/v{major}.{minor}.{patch}` pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); -/// The first four bytes of the call data for a function call specifies the function to be called. -pub const SELECTOR_LEN: usize = 4; - /// An EPOCH is a series of 32 slots. pub const EPOCH_SLOTS: u64 = 32; From cab76f2083be680afa4ddcff44420334dd0e71a5 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 22 Oct 2024 20:39:08 +0900 Subject: [PATCH 087/970] fix(engine): run pruner after saving blocks (#11927) --- crates/engine/tree/src/persistence.rs | 38 +++++++++------------------ 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 25c1f0ed703..f4650a047b4 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -77,20 +77,22 @@ impl PersistenceService { } PersistenceAction::SaveBlocks(blocks, sender) => { let result = self.on_save_blocks(blocks)?; - if let Some(ref num_hash) = result { + let result_number = result.map(|r| r.number); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(result); + + if let Some(block_number) = result_number { // send new sync metrics based on saved blocks let _ = self .sync_metrics_tx - .send(MetricEvent::SyncHeight { height: num_hash.number }); - } - // we ignore the error because the caller may or may not care about 
the result - let _ = sender.send(result); - } - PersistenceAction::PruneBefore(block_num, sender) => { - let res = self.prune_before(block_num)?; + .send(MetricEvent::SyncHeight { height: block_number }); - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(res); + if self.pruner.is_pruning_needed(block_number) { + // We log `PrunerOutput` inside the `Pruner` + let _ = self.prune_before(block_number)?; + } + } } PersistenceAction::SaveFinalizedBlock(finalized_block) => { let provider = self.provider.database_provider_rw()?; @@ -175,10 +177,6 @@ pub enum PersistenceAction { /// static files. RemoveBlocksAbove(u64, oneshot::Sender>), - /// Prune associated block data before the given block number, according to already-configured - /// prune modes. - PruneBefore(u64, oneshot::Sender), - /// Update the persisted finalized block on disk SaveFinalizedBlock(u64), @@ -279,18 +277,6 @@ impl PersistenceHandle { ) -> Result<(), SendError> { self.send_action(PersistenceAction::RemoveBlocksAbove(block_num, tx)) } - - /// Tells the persistence service to remove block data before the given hash, according to the - /// configured prune config. - /// - /// The resulting [`PrunerOutput`] is returned in the receiver end of the sender argument. 
- pub fn prune_before( - &self, - block_num: u64, - tx: oneshot::Sender, - ) -> Result<(), SendError> { - self.send_action(PersistenceAction::PruneBefore(block_num, tx)) - } } #[cfg(test)] From 90aaad8285ea549734e978f17fcb7622fb137800 Mon Sep 17 00:00:00 2001 From: James Prestwich Date: Tue, 22 Oct 2024 07:53:39 -0400 Subject: [PATCH 088/970] opt: reduce allocs for `parse_deposits_from_receipts` (#11949) --- crates/ethereum/evm/src/eip6110.rs | 114 ++++++++++++++++++----------- 1 file changed, 73 insertions(+), 41 deletions(-) diff --git a/crates/ethereum/evm/src/eip6110.rs b/crates/ethereum/evm/src/eip6110.rs index 4cf1c6ae9da..d5700208195 100644 --- a/crates/ethereum/evm/src/eip6110.rs +++ b/crates/ethereum/evm/src/eip6110.rs @@ -1,12 +1,18 @@ //! EIP-6110 deposit requests parsing use alloc::{string::ToString, vec::Vec}; use alloy_eips::eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS; -use alloy_primitives::{Bytes, Log}; +use alloy_primitives::{Address, Bytes, Log}; use alloy_sol_types::{sol, SolEvent}; -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthChainSpec}; use reth_evm::execute::BlockValidationError; use reth_primitives::Receipt; +/// The size of a deposit request in bytes. While the event fields emit +/// bytestrings, those bytestrings are fixed size. The fields are: 48-byte +/// pubkey, 32-byte withdrawal credentials, 8-byte amount, 96-byte signature, +/// and 8-byte index. +const DEPOSIT_BYTES_SIZE: usize = 48 + 32 + 8 + 96 + 8; + sol! { #[allow(missing_docs)] event DepositEvent( @@ -18,53 +24,79 @@ sol! { ); } -/// Parse [deposit contract](https://etherscan.io/address/0x00000000219ab540356cbb839cbe05303d7705fa) -/// (address is from the passed [`ChainSpec`]) deposits from receipts, and return them as a -/// [vector](Vec) of (requests)[`alloy_eips::eip7685::Requests`]. 
-pub fn parse_deposits_from_receipts<'a, I>( - chain_spec: &ChainSpec, - receipts: I, -) -> Result -where - I: IntoIterator, -{ - let mut requests = Vec::new(); - let deposit_contract_address = chain_spec - .deposit_contract - .as_ref() - .map_or(MAINNET_DEPOSIT_CONTRACT_ADDRESS, |contract| contract.address); - let logs: Vec<_> = receipts - .into_iter() - .flat_map(|receipt| &receipt.logs) - // No need to filter for topic because there's only one event and that's the Deposit - // event in the deposit contract. - .filter(|log| log.address == deposit_contract_address) - .collect(); +/// Accumulate a deposit request from a log. containing a [`DepositEvent`]. +pub fn accumulate_deposit_from_log(log: &Log, out: &mut Vec) { + out.reserve(DEPOSIT_BYTES_SIZE); + out.extend_from_slice(log.pubkey.as_ref()); + out.extend_from_slice(log.withdrawal_credentials.as_ref()); + out.extend_from_slice(log.amount.as_ref()); + out.extend_from_slice(log.signature.as_ref()); + out.extend_from_slice(log.index.as_ref()); +} - for log in &logs { +/// Accumulate deposits from an iterator of logs. +pub fn accumulate_deposits_from_logs<'a>( + address: Address, + logs: impl IntoIterator, + out: &mut Vec, +) -> Result<(), BlockValidationError> { + logs.into_iter().filter(|log| log.address == address).try_for_each(|log| { + // We assume that the log is valid because it was emitted by the + // deposit contract. let decoded_log = DepositEvent::decode_log(log, false).map_err(|err: alloy_sol_types::Error| { BlockValidationError::DepositRequestDecode(err.to_string()) })?; - requests.extend(parse_deposit_from_log(&decoded_log).as_ref()) - } + accumulate_deposit_from_log(&decoded_log, out); + Ok(()) + }) +} + +/// Accumulate deposits from a receipt. Iterates over the logs in the receipt +/// and accumulates the deposit request bytestrings. 
+pub fn accumulate_deposits_from_receipt( + address: Address, + receipt: &Receipt, + out: &mut Vec, +) -> Result<(), BlockValidationError> { + accumulate_deposits_from_logs(address, &receipt.logs, out) +} - Ok(requests.into()) +/// Accumulate deposits from a list of receipts. Iterates over the logs in the +/// receipts and accumulates the deposit request bytestrings. +pub fn accumulate_deposits_from_receipts<'a, I>( + address: Address, + receipts: I, + out: &mut Vec, +) -> Result<(), BlockValidationError> +where + I: IntoIterator, +{ + receipts + .into_iter() + .try_for_each(|receipt| accumulate_deposits_from_receipt(address, receipt, out)) } -fn parse_deposit_from_log(log: &Log) -> Bytes { - // SAFETY: These `expect` https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/solidity_deposit_contract/deposit_contract.sol#L107-L110 - // are safe because the `DepositEvent` is the only event in the deposit contract and the length - // checks are done there. - [ - log.pubkey.as_ref(), - log.withdrawal_credentials.as_ref(), - log.amount.as_ref(), - log.signature.as_ref(), - log.index.as_ref(), - ] - .concat() - .into() +/// Find deposit logs in a list of receipts, and return the concatenated +/// deposit request bytestring. +/// +/// The address of the deposit contract is taken from the chain spec, and +/// defaults to [`MAINNET_DEPOSIT_CONTRACT_ADDRESS`] if not specified in +/// the chain spec. 
+pub fn parse_deposits_from_receipts<'a, I>( + chainspec: &ChainSpec, + receipts: I, +) -> Result +where + I: IntoIterator, +{ + let mut out = Vec::new(); + accumulate_deposits_from_receipts( + chainspec.deposit_contract().map(|c| c.address).unwrap_or(MAINNET_DEPOSIT_CONTRACT_ADDRESS), + receipts, + &mut out, + )?; + Ok(out.into()) } #[cfg(test)] From df57aedba64af2cdc1b006792fb147454529bbe3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Oct 2024 14:34:03 +0200 Subject: [PATCH 089/970] chore: bump discv5 (#11966) --- Cargo.lock | 153 +++++++++++++++++++++-------------------------------- Cargo.toml | 2 +- 2 files changed, 62 insertions(+), 93 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 37135dfd545..59c582fec3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -343,7 +343,7 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "reqwest", "schnellru", @@ -2085,7 +2085,7 @@ dependencies = [ "bitflags 2.6.0", "crossterm_winapi", "libc", - "parking_lot 0.12.3", + "parking_lot", "winapi", ] @@ -2098,7 +2098,7 @@ dependencies = [ "bitflags 2.6.0", "crossterm_winapi", "mio 1.0.2", - "parking_lot 0.12.3", + "parking_lot", "rustix", "signal-hook", "signal-hook-mio", @@ -2255,7 +2255,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core", ] [[package]] @@ -2269,7 +2269,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core", "serde", ] @@ -2316,11 +2316,12 @@ dependencies = [ [[package]] name = "delay_map" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" +checksum = "df941644b671f05f59433e481ba0d31ac10e3667de725236a4c0d587c496fba1" dependencies = [ "futures", + "tokio", "tokio-util", ] @@ -2470,9 +2471,9 @@ dependencies = [ [[package]] name = 
"discv5" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f569b8c367554666c8652305621e8bae3634a2ff5c6378081d5bd8c399c99f23" +checksum = "23e6b70634e26c909d1edbb3142b3eaf3b89da0e52f284f00ca7c80d9901ad9e" dependencies = [ "aes", "aes-gcm", @@ -2491,13 +2492,13 @@ dependencies = [ "lru", "more-asserts", "multiaddr", - "parking_lot 0.11.2", + "parking_lot", "rand 0.8.5", "smallvec", - "socket2 0.4.10", + "socket2", "tokio", "tracing", - "uint", + "uint 0.10.0", "zeroize", ] @@ -3029,7 +3030,7 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "eyre", - "parking_lot 0.12.3", + "parking_lot", "reth", "reth-chainspec", "reth-node-api", @@ -3528,9 +3529,9 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.4" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ "hashbrown 0.14.5", ] @@ -3776,7 +3777,7 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -4166,7 +4167,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2", "widestring", "windows-sys 0.48.0", "winreg", @@ -4324,7 +4325,7 @@ dependencies = [ "http-body", "http-body-util", "jsonrpsee-types", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "rustc-hash 2.0.0", @@ -4590,7 +4591,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.6.0", "libc", - "redox_syscall 0.5.7", + "redox_syscall", ] [[package]] @@ -5433,17 +5434,6 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - [[package]] name = "parking_lot" version = "0.12.3" @@ -5451,21 +5441,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] @@ -5476,7 +5452,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall", "smallvec", "windows-targets 0.52.6", ] @@ -5706,7 +5682,7 @@ dependencies = [ "log", "nix", "once_cell", - "parking_lot 0.12.3", + "parking_lot", "smallvec", "symbolic-demangle", "tempfile", @@ -5785,7 +5761,7 @@ checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", - "uint", + "uint 0.9.5", ] [[package]] @@ -5992,7 +5968,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.0.0", "rustls", - "socket2 0.5.7", + "socket2", "thiserror", "tokio", "tracing", @@ -6023,7 +5999,7 @@ checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", "windows-sys 0.59.0", ] @@ -6180,15 +6156,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.5.7" @@ -6544,7 +6511,7 @@ dependencies = [ "assert_matches", "linked_hash_set", "metrics", - "parking_lot 0.12.3", + "parking_lot", "reth-blockchain-tree-api", "reth-chainspec", "reth-consensus", @@ -6594,7 +6561,7 @@ dependencies = [ "auto_impl", "derive_more 1.0.0", "metrics", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "reth-chainspec", @@ -6843,7 +6810,7 @@ dependencies = [ "iai-callgrind", "metrics", "page_size", - "parking_lot 0.12.3", + "parking_lot", "paste", "pprof", "proptest", @@ -6951,7 +6918,7 @@ dependencies = [ "discv5", "enr", "generic-array", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-ethereum-forks", "reth-net-banlist", @@ -7001,7 +6968,7 @@ dependencies = [ "data-encoding", "enr", "linked_hash_set", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-chainspec", "reth-ethereum-forks", @@ -7447,7 +7414,7 @@ dependencies = [ "auto_impl", "futures-util", "metrics", - "parking_lot 0.12.3", + "parking_lot", "reth-chainspec", "reth-consensus", "reth-consensus-common", @@ -7531,7 +7498,7 @@ dependencies = [ "futures", "itertools 0.13.0", "metrics", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-blockchain-tree", "reth-chain-state", @@ -7675,7 +7642,7 @@ dependencies = [ "dashmap 6.1.0", "derive_more 1.0.0", "indexmap 2.6.0", - "parking_lot 0.12.3", + "parking_lot", "pprof", "rand 0.8.5", "rand_xorshift", @@ -7745,7 +7712,7 @@ dependencies = [ "futures", "itertools 0.13.0", "metrics", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "pprof", "rand 0.8.5", @@ -7817,7 +7784,7 @@ dependencies = [ "auto_impl", 
"derive_more 1.0.0", "futures", - "parking_lot 0.12.3", + "parking_lot", "reth-consensus", "reth-eth-wire-types", "reth-network-peers", @@ -8090,7 +8057,7 @@ dependencies = [ "reth-metrics", "reth-provider", "reth-tasks", - "socket2 0.5.7", + "socket2", "tikv-jemalloc-ctl", "tokio", "tower 0.4.13", @@ -8238,7 +8205,7 @@ dependencies = [ "eyre", "op-alloy-consensus", "op-alloy-rpc-types-engine", - "parking_lot 0.12.3", + "parking_lot", "reth", "reth-auto-seal-consensus", "reth-basic-payload-builder", @@ -8327,7 +8294,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-network", "op-alloy-rpc-types", - "parking_lot 0.12.3", + "parking_lot", "reqwest", "reth-chainspec", "reth-evm", @@ -8505,7 +8472,7 @@ dependencies = [ "itertools 0.13.0", "metrics", "notify", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "rayon", "reth-blockchain-tree-api", @@ -8635,7 +8602,7 @@ dependencies = [ "jsonrpsee", "jsonrpsee-types", "jsonwebtoken", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "reth-chainspec", @@ -8822,7 +8789,7 @@ dependencies = [ "futures", "jsonrpsee", "jsonrpsee-types", - "parking_lot 0.12.3", + "parking_lot", "reth-chainspec", "reth-errors", "reth-evm", @@ -9035,7 +9002,7 @@ version = "1.1.0" dependencies = [ "alloy-primitives", "assert_matches", - "parking_lot 0.12.3", + "parking_lot", "rayon", "reth-db", "reth-db-api", @@ -9161,7 +9128,7 @@ dependencies = [ "criterion", "futures-util", "metrics", - "parking_lot 0.12.3", + "parking_lot", "paste", "pprof", "proptest", @@ -9997,7 +9964,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" dependencies = [ "once_cell", - "parking_lot 0.12.3", + "parking_lot", "scc", "serial_test_derive", ] @@ -10202,16 +10169,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" -[[package]] -name = 
"socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -10685,10 +10642,10 @@ dependencies = [ "bytes", "libc", "mio 1.0.2", - "parking_lot 0.12.3", + "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", "windows-sys 0.52.0", ] @@ -11057,7 +11014,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "resolv-conf", "serde", @@ -11118,6 +11075,18 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + [[package]] name = "unarray" version = "0.1.4" @@ -11413,7 +11382,7 @@ checksum = "c7ed9d8b15c7fb594d72bfb4b5a276f3d2029333cd93a932f376f5937f6f80ee" dependencies = [ "futures", "js-sys", - "parking_lot 0.12.3", + "parking_lot", "pin-utils", "slab", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 91e3ac6e501..48a6525d5ef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -548,7 +548,7 @@ tower = "0.4" tower-http = "0.5" # p2p -discv5 = "0.7.0" +discv5 = "0.8.0" if-addrs = "0.13" # rpc From 60337d9614b25d20347599d590f83646b1de3e7f Mon Sep 17 00:00:00 2001 From: Francis Li Date: Tue, 22 Oct 2024 05:43:25 -0700 Subject: [PATCH 090/970] feat(rpc): Add flags to disable read tx timeout (#11856) --- book/cli/reth/db.md | 3 +++ book/cli/reth/db/diff.md | 3 +++ book/cli/reth/debug/build-block.md | 3 +++ book/cli/reth/debug/execution.md | 3 +++ book/cli/reth/debug/in-memory-merkle.md | 3 +++ book/cli/reth/debug/merkle.md | 3 +++ book/cli/reth/debug/replay-engine.md | 3 +++ 
book/cli/reth/import.md | 3 +++ book/cli/reth/init-state.md | 3 +++ book/cli/reth/init.md | 3 +++ book/cli/reth/node.md | 3 +++ book/cli/reth/p2p.md | 3 +++ book/cli/reth/prune.md | 3 +++ book/cli/reth/recover/storage-tries.md | 3 +++ book/cli/reth/stage/drop.md | 3 +++ book/cli/reth/stage/dump.md | 3 +++ book/cli/reth/stage/run.md | 3 +++ book/cli/reth/stage/unwind.md | 3 +++ crates/node/core/src/args/database.rs | 14 +++++++++++++- 19 files changed, 67 insertions(+), 1 deletion(-) diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index 9e3b32cc0b3..f9a8a158adc 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -81,6 +81,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index ea4c29612ff..f57c6ac364f 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -45,6 +45,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --table The table name to diff. If not specified, all tables are diffed. 
diff --git a/book/cli/reth/debug/build-block.md b/book/cli/reth/debug/build-block.md index 76ddac306ce..2e6d637d52c 100644 --- a/book/cli/reth/debug/build-block.md +++ b/book/cli/reth/debug/build-block.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --trusted-setup-file Overrides the KZG trusted setup by reading from the supplied file diff --git a/book/cli/reth/debug/execution.md b/book/cli/reth/debug/execution.md index 202e1452a8a..9ca74897c5e 100644 --- a/book/cli/reth/debug/execution.md +++ b/book/cli/reth/debug/execution.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/debug/in-memory-merkle.md b/book/cli/reth/debug/in-memory-merkle.md index 534e6d46c69..3e322a6913d 100644 --- a/book/cli/reth/debug/in-memory-merkle.md +++ b/book/cli/reth/debug/in-memory-merkle.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/debug/merkle.md b/book/cli/reth/debug/merkle.md index 19bc38acceb..d701803b81c 100644 --- a/book/cli/reth/debug/merkle.md +++ b/book/cli/reth/debug/merkle.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md index 7a14b9cf09d..dd587620a86 100644 --- a/book/cli/reth/debug/replay-engine.md +++ b/book/cli/reth/debug/replay-engine.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + 
--db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 7bd8a0079ec..28e085bda71 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --no-state Disables stages that require state. diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index cb221634c40..ddcd3cece37 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + JSONL file with state dump. diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index cc889e5e35a..cd01accc047 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 34d32209ada..4cd55db1fe0 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -590,6 +590,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Dev testnet: --dev Start the node in dev mode diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 01253705b23..603b451d940 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -247,6 +247,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git 
a/book/cli/reth/prune.md b/book/cli/reth/prune.md index e0641256f1c..ed16197a76c 100644 --- a/book/cli/reth/prune.md +++ b/book/cli/reth/prune.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index 1f639cb095a..ecdaabe7781 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index ae21a891830..399b3818c28 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Possible values: - headers: The headers stage within the pipeline diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index 291d896902d..4b3de3fb1cb 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -76,6 +76,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index bfe5ff9d6c6..9da3ce0deb6 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -69,6 +69,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --metrics Enable Prometheus metrics. 
diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index d181b3bcade..700ab3d7e7c 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -74,6 +74,9 @@ Database: [possible values: true, false] + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index da96deb70c1..0eec6639a11 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -1,12 +1,14 @@ //! clap [Args](clap::Args) for database configuration +use std::time::Duration; + use crate::version::default_client_version; use clap::{ builder::{PossibleValue, TypedValueParser}, error::ErrorKind, Arg, Args, Command, Error, }; -use reth_db::ClientVersion; +use reth_db::{mdbx::MaxReadTransactionDuration, ClientVersion}; use reth_storage_errors::db::LogLevel; /// Parameters for database configuration @@ -20,6 +22,9 @@ pub struct DatabaseArgs { /// NFS volume. #[arg(long = "db.exclusive")] pub exclusive: Option, + /// Read transaction timeout in seconds, 0 means no timeout. 
+ #[arg(long = "db.read-transaction-timeout")] + pub read_transaction_timeout: Option, } impl DatabaseArgs { @@ -33,9 +38,16 @@ impl DatabaseArgs { &self, client_version: ClientVersion, ) -> reth_db::mdbx::DatabaseArguments { + let max_read_transaction_duration = match self.read_transaction_timeout { + None => None, // if not specified, use default value + Some(0) => Some(MaxReadTransactionDuration::Unbounded), // if 0, disable timeout + Some(secs) => Some(MaxReadTransactionDuration::Set(Duration::from_secs(secs))), + }; + reth_db::mdbx::DatabaseArguments::new(client_version) .with_log_level(self.log_level) .with_exclusive(self.exclusive) + .with_max_read_transaction_duration(max_read_transaction_duration) } } From b3015c75b1f56fdfbbcce8cdb97a3cde1c642985 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 22 Oct 2024 14:31:37 +0100 Subject: [PATCH 091/970] fix(trie): removing a blinded leaf should result in an error (#11869) Co-authored-by: Roman Krasiuk --- crates/trie/sparse/src/trie.rs | 68 ++++++++++++++++++++++++++-------- 1 file changed, 52 insertions(+), 16 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 6680d7e9b65..42f90d34c8a 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -267,11 +267,11 @@ impl RevealedSparseTrie { /// Remove leaf node from the trie. pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { self.prefix_set.insert(path.clone()); - let existing = self.values.remove(path); - if existing.is_none() { - // trie structure unchanged, return immediately - return Ok(()) - } + self.values.remove(path); + + // If the path wasn't present in `values`, we still need to walk the trie and ensure that + // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry + // in `nodes`, but not in the `values`. 
let mut removed_nodes = self.take_nodes_for_path(path)?; debug!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); @@ -726,6 +726,7 @@ mod tests { use super::*; use alloy_primitives::U256; + use assert_matches::assert_matches; use itertools::Itertools; use prop::sample::SizeRange; use proptest::prelude::*; @@ -960,7 +961,7 @@ mod tests { pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([ - (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1101.into())), ( Nibbles::from_nibbles([0x5, 0x0]), @@ -972,11 +973,11 @@ mod tests { ), ( Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), - SparseNode::new_leaf(Nibbles::new()) + SparseNode::new_leaf(Nibbles::default()) ), ( Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), - SparseNode::new_leaf(Nibbles::new()) + SparseNode::new_leaf(Nibbles::default()) ), ( Nibbles::from_nibbles([0x5, 0x2]), @@ -1015,7 +1016,7 @@ mod tests { pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([ - (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), ( Nibbles::from_nibbles([0x5, 0x0]), @@ -1027,11 +1028,11 @@ mod tests { ), ( Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), - SparseNode::new_leaf(Nibbles::new()) + SparseNode::new_leaf(Nibbles::default()) ), ( Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), - SparseNode::new_leaf(Nibbles::new()) + SparseNode::new_leaf(Nibbles::default()) ), (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), ( @@ -1063,7 +1064,7 @@ mod tests { pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([ - (Nibbles::new(), 
SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), ( Nibbles::from_nibbles([0x5, 0x0]), @@ -1097,7 +1098,7 @@ mod tests { pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([ - (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), ( Nibbles::from_nibbles([0x5, 0x0]), @@ -1128,7 +1129,7 @@ mod tests { pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([ - (Nibbles::new(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), ( Nibbles::from_nibbles([0x5, 0x0]), @@ -1147,7 +1148,7 @@ mod tests { pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), BTreeMap::from_iter([( - Nibbles::new(), + Nibbles::default(), SparseNode::new_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])) ),]) ); @@ -1157,7 +1158,42 @@ mod tests { // Empty pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), - BTreeMap::from_iter([(Nibbles::new(), SparseNode::Empty),]) + BTreeMap::from_iter([(Nibbles::default(), SparseNode::Empty),]) + ); + } + + #[test] + fn sparse_trie_remove_leaf_blinded() { + let mut sparse = RevealedSparseTrie::default(); + + let leaf = LeafNode::new( + Nibbles::default(), + alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), + ); + + // Reveal a branch node and one of its children + // + // Branch (Mask = 11) + // ├── 0 -> Hash (Path = 0) + // └── 1 -> Leaf (Path = 1) + sparse + .reveal_node( + Nibbles::default(), + TrieNode::Branch(BranchNode::new( + vec![ + 
RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )), + ) + .unwrap(); + sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf)).unwrap(); + + // Removing a blinded leaf should result in an error + assert_matches!( + sparse.remove_leaf(&Nibbles::from_nibbles([0x0])), + Err(SparseTrieError::BlindedNode { path, hash }) if path == Nibbles::from_nibbles([0x0]) && hash == B256::repeat_byte(1) ); } From b20a2715514a320c8633356fb7430d7f37a847e9 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 22 Oct 2024 16:56:04 +0200 Subject: [PATCH 092/970] chore(tree): improved debug logging for block insertion (#11958) --- crates/engine/tree/src/tree/mod.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index a2abd3f531d..021b3149ad5 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -259,6 +259,7 @@ impl TreeState { } } } + debug!(target: "engine::tree", ?upper_bound, ?last_persisted_hash, "Removed canonical blocks from the tree"); } /// Removes all blocks that are below the finalized block, as well as removing non-canonical @@ -1593,7 +1594,7 @@ where /// Returns an error if we failed to fetch the state from the database. 
fn state_provider(&self, hash: B256) -> ProviderResult> { if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(hash) { - trace!(target: "engine::tree", %hash, "found canonical state for block in memory"); + debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory"); // the block leads back to the canonical chain let historical = self.provider.state_by_block_hash(historical)?; return Ok(Some(Box::new(MemoryOverlayStateProvider::new(historical, blocks)))) @@ -1601,13 +1602,13 @@ where // the hash could belong to an unknown block or a persisted block if let Some(header) = self.provider.header(&hash)? { - trace!(target: "engine::tree", %hash, number = %header.number, "found canonical state for block in database"); + debug!(target: "engine::tree", %hash, number = %header.number, "found canonical state for block in database"); // the block is known and persisted let historical = self.provider.state_by_block_hash(hash)?; return Ok(Some(historical)) } - trace!(target: "engine::tree", %hash, "no canonical state found for block"); + debug!(target: "engine::tree", %hash, "no canonical state found for block"); Ok(None) } @@ -2137,7 +2138,8 @@ where &mut self, block: SealedBlockWithSenders, ) -> Result { - debug!(target: "engine::tree", block=?block.num_hash(), "Inserting new block into tree"); + debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash, state_root = ?block.state_root, "Inserting new block into tree"); + if self.block_by_hash(block.hash())?.is_some() { return Ok(InsertPayloadOk2::AlreadySeen(BlockStatus2::Valid)) } @@ -2206,7 +2208,7 @@ where let hashed_state = HashedPostState::from_bundle_state(&output.state.state); - trace!(target: "engine::tree", block=?BlockNumHash::new(block_number, block_hash), "Calculating block state root"); + trace!(target: "engine::tree", block=?sealed_block.num_hash(), "Calculating block state root"); let root_time = Instant::now(); let mut 
state_root_result = None; @@ -2232,7 +2234,7 @@ where let (state_root, trie_output) = if let Some(result) = state_root_result { result } else { - debug!(target: "engine::tree", persistence_in_progress, "Failed to compute state root in parallel"); + debug!(target: "engine::tree", block=?sealed_block.num_hash(), persistence_in_progress, "Failed to compute state root in parallel"); state_provider.state_root_with_updates(hashed_state.clone())? }; @@ -2252,7 +2254,7 @@ where let root_elapsed = root_time.elapsed(); self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); - debug!(target: "engine::tree", ?root_elapsed, ?block_number, "Calculated state root"); + debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root"); let executed = ExecutedBlock { block: sealed_block.clone(), @@ -2301,6 +2303,7 @@ where let mut input = TrieInput::default(); if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(parent_hash) { + debug!(target: "engine::tree", %parent_hash, %historical, "Calculating state root in parallel, parent found in memory"); // Retrieve revert state for historical block. let revert_state = consistent_view.revert_state(historical)?; input.append(revert_state); @@ -2311,6 +2314,7 @@ where } } else { // The block attaches to canonical persisted parent. 
+ debug!(target: "engine::tree", %parent_hash, "Calculating state root in parallel, parent found in disk"); let revert_state = consistent_view.revert_state(parent_hash)?; input.append(revert_state); } From 8b1dfcca3fd1edcbf574e969ce30493f997e6d19 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 22 Oct 2024 15:56:17 +0100 Subject: [PATCH 093/970] test(trie): proptest <-> alloy maps integration (#11962) --- book/sources/Cargo.toml | 1 - crates/trie/sparse/benches/root.rs | 2 ++ crates/trie/sparse/src/trie.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/book/sources/Cargo.toml b/book/sources/Cargo.toml index 1529af952b9..32fb13990b9 100644 --- a/book/sources/Cargo.toml +++ b/book/sources/Cargo.toml @@ -8,4 +8,3 @@ members = [ # Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021 # https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html resolver = "2" - diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index 248e3caeeee..bc221a8f831 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -198,6 +198,8 @@ fn generate_test_data(size: usize) -> HashMap { .new_tree(&mut runner) .unwrap() .current() + .into_iter() + .collect() } criterion_group!(root, calculate_root_from_leaves, calculate_root_from_leaves_repeated); diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 42f90d34c8a..a0f7a0c3051 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1276,6 +1276,6 @@ mod tests { ), 1..100, ) - )| { test(updates) }); + )| { test(updates.into_iter().collect()) }); } } From 468ac0d43b5ff9538d89061bdb15604041948562 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Oct 2024 19:00:10 +0200 Subject: [PATCH 094/970] chore: log enode (#11974) --- crates/ethereum/node/src/node.rs | 4 ++-- crates/optimism/node/src/node.rs | 3 ++- 2 files changed, 4 
insertions(+), 3 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 3df46b4856f..dbd6ce0a134 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -11,7 +11,7 @@ use reth_ethereum_engine_primitives::{ }; use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; -use reth_network::NetworkHandle; +use reth_network::{NetworkHandle, PeersInfo}; use reth_node_api::{ AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodePrimitives, NodeTypesWithDB, @@ -314,7 +314,7 @@ where ) -> eyre::Result { let network = ctx.network_builder().await?; let handle = ctx.start_network(network, pool); - + info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized"); Ok(handle) } } diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 22fc1a88ff7..9492bb8c429 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::{EthChainSpec, Hardforks}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; -use reth_network::{NetworkConfig, NetworkHandle, NetworkManager}; +use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; use reth_node_api::{ AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, }; @@ -432,6 +432,7 @@ where let network_config = self.network_config(ctx)?; let network = NetworkManager::builder(network_config).await?; let handle = ctx.start_network(network, pool); + info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized"); Ok(handle) } From e70b112420b1b06fc0d451ec79d2c01a561dd8af Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 22 Oct 2024 19:04:58 +0100 Subject: [PATCH 095/970] feat(trie): 
update sparse trie hashes below level (#11969) --- crates/trie/sparse/src/trie.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index a0f7a0c3051..39deb50e7a6 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -518,34 +518,35 @@ impl RevealedSparseTrie { } } - /// Update node hashes only if their path exceeds the provided level. - pub fn update_rlp_node_level(&mut self, min_len: usize) { - let mut paths = Vec::from([Nibbles::default()]); + /// Update hashes of the nodes that are located at a level deeper than or equal to the provided + /// depth. Root node has a level of 0. + pub fn update_rlp_node_level(&mut self, depth: usize) { + let mut paths = Vec::from([(Nibbles::default(), 0)]); let mut targets = HashSet::::default(); - while let Some(mut path) = paths.pop() { + while let Some((mut path, level)) = paths.pop() { match self.nodes.get(&path).unwrap() { SparseNode::Empty | SparseNode::Hash(_) => {} SparseNode::Leaf { .. } => { targets.insert(path); } SparseNode::Extension { key, .. } => { - if path.len() >= min_len { + if level >= depth { targets.insert(path); } else { path.extend_from_slice_unchecked(key); - paths.push(path); + paths.push((path, level + 1)); } } SparseNode::Branch { state_mask, .. 
} => { - if path.len() >= min_len { + if level >= depth { targets.insert(path); } else { for bit in CHILD_INDEX_RANGE { if state_mask.is_bit_set(bit) { let mut child_path = path.clone(); child_path.push_unchecked(bit); - paths.push(child_path); + paths.push((child_path, level + 1)); } } } From 3174bd5c913a76803403d83ded0a9623ac1d9493 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Oct 2024 20:12:15 +0200 Subject: [PATCH 096/970] chore: bump aquamarine (#11965) --- Cargo.lock | 30 +++--------------------------- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 59c582fec3f..e1cead60911 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -847,13 +847,13 @@ checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95" [[package]] name = "aquamarine" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21cc1548309245035eb18aa7f0967da6bc65587005170c56e6ef2788a4cf3f4e" +checksum = "0f50776554130342de4836ba542aa85a4ddb361690d7e8df13774d7284c3d5c2" dependencies = [ "include_dir", "itertools 0.10.5", - "proc-macro-error", + "proc-macro-error2", "proc-macro2", "quote", "syn 2.0.80", @@ -5773,30 +5773,6 @@ dependencies = [ "toml_edit", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro-error-attr2" version = "2.0.0" diff --git a/Cargo.toml b/Cargo.toml index 
48a6525d5ef..274af4cd79f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -466,7 +466,7 @@ op-alloy-network = "0.5" op-alloy-consensus = "0.5" # misc -aquamarine = "0.5" +aquamarine = "0.6" auto_impl = "1" backon = { version = "1.2", default-features = false, features = [ "std-blocking-sleep", From 28c61c15b42305edca436e980e29c2cf5087a7c5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Oct 2024 21:46:00 +0200 Subject: [PATCH 097/970] fix: invoke prometheus recorder on op-reth Cli::run (#11982) --- bin/reth/src/cli/mod.rs | 3 +-- crates/optimism/cli/src/lib.rs | 4 ++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 01f8f73e7b1..01662eb4dcb 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -146,8 +146,7 @@ impl, Ext: clap::Args + fmt::Debug> Cl let _guard = self.init_tracing()?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); - // Install the prometheus recorder to be sure to record task - // executor's metrics + // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); let runner = CliRunner::default(); diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index e6eed86bf7f..235b4455969 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -54,6 +54,7 @@ use tracing::info; // This allows us to manually enable node metrics features, required for proper jemalloc metric // reporting use reth_node_metrics as _; +use reth_node_metrics::recorder::install_prometheus_recorder; /// The main op-reth cli interface. 
/// @@ -135,6 +136,9 @@ where let _guard = self.init_tracing()?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); + // Install the prometheus recorder to be sure to record all metrics + let _ = install_prometheus_recorder(); + let runner = CliRunner::default(); match self.command { Commands::Node(command) => { From 22171d27bf575a45308063072ea7984846dbd8e1 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 22 Oct 2024 22:24:25 +0200 Subject: [PATCH 098/970] chore: unpin serde (#11977) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 274af4cd79f..f1506366dfd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -503,7 +503,7 @@ rand = "0.8.5" rayon = "1.7" rustc-hash = { version = "2.0", default-features = false } schnellru = "0.2" -serde = { version = "=1.0.210", default-features = false } +serde = { version = "1.0", default-features = false } serde_json = "1.0.94" serde_with = "3.3.0" sha2 = { version = "0.10", default-features = false } From 8bfbd977952cbcd42a66f46df235ee9694476a52 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Oct 2024 23:19:34 +0200 Subject: [PATCH 099/970] feat(discv4): add soft_remove_node (#11970) --- crates/net/discv4/Cargo.toml | 2 +- crates/net/discv4/src/lib.rs | 26 +++++++++++++++++++------- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index f008d03b56f..6fb66938885 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -46,7 +46,7 @@ serde = { workspace = true, optional = true } [dev-dependencies] assert_matches.workspace = true rand.workspace = true -tokio = { workspace = true, features = ["macros"] } +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } reth-tracing.workspace = true [features] diff --git a/crates/net/discv4/src/lib.rs 
b/crates/net/discv4/src/lib.rs index 7963c6e6fd6..c9b14d38b9d 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -829,6 +829,24 @@ impl Discv4Service { /// table. Returns `true` if the node was in the table and `false` otherwise. pub fn remove_node(&mut self, node_id: PeerId) -> bool { let key = kad_key(node_id); + self.remove_key(node_id, key) + } + + /// Removes a `node_id` from the routing table but only if there are enough other nodes in the + /// bucket (bucket must be at least half full) + /// + /// Returns `true` if the node was removed + pub fn soft_remove_node(&mut self, node_id: PeerId) -> bool { + let key = kad_key(node_id); + let Some(bucket) = self.kbuckets.get_bucket(&key) else { return false }; + if bucket.num_entries() < MAX_NODES_PER_BUCKET / 2 { + // skip half empty bucket + return false; + } + self.remove_key(node_id, key) + } + + fn remove_key(&mut self, node_id: PeerId, key: discv5::Key) -> bool { let removed = self.kbuckets.remove(&key); if removed { trace!(target: "discv4", ?node_id, "removed node"); @@ -1491,13 +1509,7 @@ impl Discv4Service { // the table, but only if there are enough other nodes in the bucket (bucket must be at // least half full) if failures > (self.config.max_find_node_failures as usize) { - if let Some(bucket) = self.kbuckets.get_bucket(&key) { - if bucket.num_entries() < MAX_NODES_PER_BUCKET / 2 { - // skip half empty bucket - continue - } - } - self.remove_node(node_id); + self.soft_remove_node(node_id); } } } From 527d344ddab7397ede9f06874b9a00746fa2fa41 Mon Sep 17 00:00:00 2001 From: Moe Mahhouk Date: Tue, 22 Oct 2024 23:52:49 +0200 Subject: [PATCH 100/970] feat: Add reproducible build profile (#10459) Co-authored-by: Roman Krasiuk Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- Cargo.toml | 7 +++++++ Dockerfile.reproducible | 37 +++++++++++++++++++++++++++++++++++++ Makefile | 10 ++++++++++ 3 files changed, 54 insertions(+) create mode 100644 
Dockerfile.reproducible diff --git a/Cargo.toml b/Cargo.toml index f1506366dfd..ea90cd7fb64 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -288,6 +288,13 @@ codegen-units = 1 inherits = "release" lto = "fat" +[profile.reproducible] +inherits = "release" +debug = false +panic = "abort" +codegen-units = 1 +overflow-checks = true + [workspace.dependencies] # reth op-reth = { path = "crates/optimism/bin" } diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible new file mode 100644 index 00000000000..12c12dd7c7d --- /dev/null +++ b/Dockerfile.reproducible @@ -0,0 +1,37 @@ +# Use the Rust 1.82 image based on Debian Bullseye +FROM rust:1.82-bullseye@sha256:c42c8ca762560c182ba30edda0e0d71a8604040af2672370559d7e854653c66d AS builder + +# Install specific version of libclang-dev +RUN apt-get update && apt-get install -y libclang-dev=1:11.0-51+nmu5 + +# Clone the repository at the specific branch +RUN git clone https://github.com/paradigmxyz/reth /app +WORKDIR /app + +# Checkout the reproducible-build branch +RUN git checkout reproducible-build + +# Get the latest commit timestamp and set SOURCE_DATE_EPOCH +RUN SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) && \ + echo "SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH" >> /etc/environment + +# Set environment variables for reproducibility +ARG RUSTFLAGS="-C target-feature=+crt-static -C link-arg=-Wl,--build-id=none -Clink-arg=-static-libgcc -C metadata='' --remap-path-prefix $(pwd)=." +ENV SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH \ + CARGO_INCREMENTAL=0 \ + LC_ALL=C \ + TZ=UTC \ + RUSTFLAGS="${RUSTFLAGS}" + +# Set the default features if not provided +ARG FEATURES="jemalloc asm-keccak" + +# Build the project with the reproducible settings +RUN . 
/etc/environment && \ + cargo build --bin reth --features "${FEATURES}" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + +# Create a minimal final image with just the binary +FROM scratch AS binaries + +# Copy the compiled binary from the builder stage +COPY --from=builder /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth diff --git a/Makefile b/Makefile index 908f1ef24da..5ad7abac675 100644 --- a/Makefile +++ b/Makefile @@ -62,6 +62,16 @@ install-op: ## Build and install the op-reth binary under `~/.cargo/bin`. build: ## Build the reth binary into `target` directory. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" +SOURCE_DATE_EPOCH := $(shell git log -1 --pretty=%ct) +.PHONY: reproducible +reproducible: ## Build the reth binary into `target` directory with reproducible builds. Only works for x86_64-unknown-linux-gnu currently + SOURCE_DATE_EPOCH=$(SOURCE_DATE_EPOCH) \ + CARGO_INCREMENTAL=0 \ + LC_ALL=C \ + TZ=UTC \ + RUSTFLAGS="-C target-feature=+crt-static -C link-arg=-Wl,--build-id=none -Clink-arg=-static-libgcc -C metadata='' --remap-path-prefix $$(pwd)=." \ + cargo build --bin reth --features "$(FEATURES)" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + .PHONY: build-debug build-debug: ## Build the reth binary into `target/debug` directory. 
cargo build --bin reth --features "$(FEATURES)" From d68dca1a26b69e4784b7b4671b05641025e657fc Mon Sep 17 00:00:00 2001 From: alpharush <0xalpharush@protonmail.com> Date: Tue, 22 Oct 2024 17:34:09 -0500 Subject: [PATCH 101/970] chore: fix cargo feature warning (#11900) Co-authored-by: Matthias Seitz Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- bin/reth/Cargo.toml | 2 +- crates/cli/commands/Cargo.toml | 7 ++++++- crates/cli/commands/src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 8380915d463..ffd1998b24e 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -101,7 +101,7 @@ tempfile.workspace = true [features] default = ["jemalloc"] -dev = ["reth-cli-commands/dev"] +dev = ["reth-cli-commands/arbitrary"] asm-keccak = [ "reth-node-core/asm-keccak", diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index e307859dfd8..6f4b1008f29 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -88,10 +88,15 @@ reth-discv4.workspace = true [features] default = [] -dev = [ +arbitrary = [ "dep:proptest", "dep:arbitrary", "dep:proptest-arbitrary-interop", "reth-primitives/arbitrary", "reth-db-api/arbitrary", + "reth-eth-wire/arbitrary", + "reth-db/arbitrary", + "reth-chainspec/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", ] diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs index 33a38ddbc01..166ea438fb9 100644 --- a/crates/cli/commands/src/lib.rs +++ b/crates/cli/commands/src/lib.rs @@ -20,7 +20,7 @@ pub mod p2p; pub mod prune; pub mod recover; pub mod stage; -#[cfg(feature = "dev")] +#[cfg(feature = "arbitrary")] pub mod test_vectors; pub use node::NodeCommand; From 74eb37523a8ad74b7748701703ea3f3542e5fec7 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 23 Oct 2024 02:38:44 +0400 Subject: [PATCH 102/970] fix: correctly poll `BasicBlockDownloader` (#11981) 
--- crates/engine/tree/src/engine.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index c1571ed8217..914121adce5 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -113,9 +113,11 @@ where } // advance the downloader - if let Poll::Ready(DownloadOutcome::Blocks(blocks)) = self.downloader.poll(cx) { - // delegate the downloaded blocks to the handler - self.handler.on_event(FromEngine::DownloadedBlocks(blocks)); + if let Poll::Ready(outcome) = self.downloader.poll(cx) { + if let DownloadOutcome::Blocks(blocks) = outcome { + // delegate the downloaded blocks to the handler + self.handler.on_event(FromEngine::DownloadedBlocks(blocks)); + } continue } From 6e5176221ff8b5cacea12c86dc52feba0de1593a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 23 Oct 2024 01:20:19 +0200 Subject: [PATCH 103/970] fix: ping oldest more often (#11988) --- crates/net/discv4/src/config.rs | 8 +++----- crates/net/discv4/src/lib.rs | 6 ++++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index 4fae31f585a..c934f3361a1 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -8,8 +8,6 @@ use alloy_rlp::Encodable; use reth_net_banlist::BanList; use reth_net_nat::{NatResolver, ResolveNatInterval}; use reth_network_peers::NodeRecord; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; use std::{ collections::{HashMap, HashSet}, time::Duration, @@ -17,7 +15,7 @@ use std::{ /// Configuration parameters that define the performance of the discovery network. #[derive(Clone, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Discv4Config { /// Whether to enable the incoming packet filter. Default: false. 
pub enable_packet_filter: bool, @@ -118,7 +116,7 @@ impl Default for Discv4Config { // Every outgoing request will eventually lead to an incoming response udp_ingress_message_buffer: 1024, max_find_node_failures: 5, - ping_interval: Duration::from_secs(60 * 10), + ping_interval: Duration::from_secs(10), // Unified expiration and timeout durations, mirrors geth's `expiration` duration ping_expiration: Duration::from_secs(20), bond_expiration: Duration::from_secs(60 * 60), @@ -144,7 +142,7 @@ impl Default for Discv4Config { /// Builder type for [`Discv4Config`] #[derive(Clone, Debug, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Discv4ConfigBuilder { config: Discv4Config, } diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index c9b14d38b9d..77f64ff8297 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -870,7 +870,9 @@ impl Discv4Service { false } - /// Update the entry on RE-ping + /// Update the entry on RE-ping. + /// + /// Invoked when we received the Pong to our [`PingReason::RePing`] ping. /// /// On re-ping we check for a changed `enr_seq` if eip868 is enabled and when it changed we sent /// a followup request to retrieve the updated ENR @@ -2259,7 +2261,7 @@ impl NodeEntry { impl NodeEntry { /// Returns true if the node should be re-pinged. 
fn is_expired(&self) -> bool { - self.last_seen.elapsed() > ENDPOINT_PROOF_EXPIRATION + self.last_seen.elapsed() > (ENDPOINT_PROOF_EXPIRATION / 2) } } From ee1260a1de29325c52a756cc84ef4fd49f7deb19 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 23 Oct 2024 11:38:45 +0200 Subject: [PATCH 104/970] refactor(tx-pool): move `is_underpriced` to `ValidPoolTransaction` impl (#11938) --- crates/transaction-pool/src/pool/txpool.rs | 49 +------------------ crates/transaction-pool/src/validate/mod.rs | 53 +++++++++++++++++++++ 2 files changed, 54 insertions(+), 48 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 4fbec1105aa..03e69c39067 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1511,52 +1511,6 @@ impl AllTransactions { Ok(new_blob_tx) } - /// Returns true if the replacement candidate is underpriced and can't replace the existing - /// transaction. 
- #[inline] - fn is_underpriced( - existing_transaction: &ValidPoolTransaction, - maybe_replacement: &ValidPoolTransaction, - price_bumps: &PriceBumpConfig, - ) -> bool { - let price_bump = price_bumps.price_bump(existing_transaction.tx_type()); - - if maybe_replacement.max_fee_per_gas() <= - existing_transaction.max_fee_per_gas() * (100 + price_bump) / 100 - { - return true - } - - let existing_max_priority_fee_per_gas = - existing_transaction.transaction.max_priority_fee_per_gas().unwrap_or(0); - let replacement_max_priority_fee_per_gas = - maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or(0); - - if replacement_max_priority_fee_per_gas <= - existing_max_priority_fee_per_gas * (100 + price_bump) / 100 && - existing_max_priority_fee_per_gas != 0 && - replacement_max_priority_fee_per_gas != 0 - { - return true - } - - // check max blob fee per gas - if let Some(existing_max_blob_fee_per_gas) = - existing_transaction.transaction.max_fee_per_blob_gas() - { - // this enforces that blob txs can only be replaced by blob txs - let replacement_max_blob_fee_per_gas = - maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or(0); - if replacement_max_blob_fee_per_gas <= - existing_max_blob_fee_per_gas * (100 + price_bump) / 100 - { - return true - } - } - - false - } - /// Inserts a new _valid_ transaction into the pool. /// /// If the transaction already exists, it will be replaced if not underpriced. 
@@ -1671,8 +1625,7 @@ impl AllTransactions { let maybe_replacement = transaction.as_ref(); // Ensure the new transaction is not underpriced - if Self::is_underpriced(existing_transaction, maybe_replacement, &self.price_bumps) - { + if existing_transaction.is_underpriced(maybe_replacement, &self.price_bumps) { return Err(InsertErr::Underpriced { transaction: pool_tx.transaction, existing: *entry.get().transaction.hash(), diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 4395cc97908..4a82a1a148f 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -4,6 +4,7 @@ use crate::{ error::InvalidPoolTransactionError, identifier::{SenderId, TransactionId}, traits::{PoolTransaction, TransactionOrigin}, + PriceBumpConfig, }; use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; @@ -372,6 +373,58 @@ impl ValidPoolTransaction { pub(crate) fn tx_type_conflicts_with(&self, other: &Self) -> bool { self.is_eip4844() != other.is_eip4844() } + + /// Determines whether a candidate transaction (`maybe_replacement`) is underpriced compared to + /// an existing transaction in the pool. + /// + /// A transaction is considered underpriced if it doesn't meet the required fee bump threshold. + /// This applies to both standard gas fees and, for blob-carrying transactions (EIP-4844), + /// the blob-specific fees. + #[inline] + pub(crate) fn is_underpriced( + &self, + maybe_replacement: &Self, + price_bumps: &PriceBumpConfig, + ) -> bool { + // Retrieve the required price bump percentage for this type of transaction. + // + // The bump is different for EIP-4844 and other transactions. See `PriceBumpConfig`. + let price_bump = price_bumps.price_bump(self.tx_type()); + + // Check if the max fee per gas is underpriced. 
+ if maybe_replacement.max_fee_per_gas() <= self.max_fee_per_gas() * (100 + price_bump) / 100 + { + return true + } + + let existing_max_priority_fee_per_gas = + self.transaction.max_priority_fee_per_gas().unwrap_or_default(); + let replacement_max_priority_fee_per_gas = + maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or_default(); + + // Check max priority fee per gas (relevant for EIP-1559 transactions only) + if existing_max_priority_fee_per_gas != 0 && + replacement_max_priority_fee_per_gas != 0 && + replacement_max_priority_fee_per_gas <= + existing_max_priority_fee_per_gas * (100 + price_bump) / 100 + { + return true + } + + // Check max blob fee per gas + if let Some(existing_max_blob_fee_per_gas) = self.transaction.max_fee_per_blob_gas() { + // This enforces that blob txs can only be replaced by blob txs + let replacement_max_blob_fee_per_gas = + maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or_default(); + if replacement_max_blob_fee_per_gas <= + existing_max_blob_fee_per_gas * (100 + price_bump) / 100 + { + return true + } + } + + false + } } impl>> ValidPoolTransaction { From 7fd28df2b4a01cc88a85ccb46cebbbff547123c4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 23 Oct 2024 11:39:12 +0200 Subject: [PATCH 105/970] fix: re-establish bond on ping (#11989) --- crates/net/discv4/src/lib.rs | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 77f64ff8297..e955e45df61 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -1048,11 +1048,23 @@ impl Discv4Service { let old_enr = match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, _) => { - is_proven = entry.value().has_endpoint_proof; + if entry.value().is_expired() { + // If no communication with the sender has occurred within the last 12h, a ping + // should be sent in addition to pong in order to receive an endpoint proof. 
+ needs_bond = true; + } else { + is_proven = entry.value().has_endpoint_proof; + } entry.value_mut().update_with_enr(ping.enr_sq) } kbucket::Entry::Pending(mut entry, _) => { - is_proven = entry.value().has_endpoint_proof; + if entry.value().is_expired() { + // If no communication with the sender has occurred within the last 12h, a ping + // should be sent in addition to pong in order to receive an endpoint proof. + needs_bond = true; + } else { + is_proven = entry.value().has_endpoint_proof; + } entry.value().update_with_enr(ping.enr_sq) } kbucket::Entry::Absent(entry) => { @@ -1225,7 +1237,8 @@ impl Discv4Service { self.update_on_pong(node, pong.enr_sq); } PingReason::EstablishBond => { - // nothing to do here + // same as `InitialInsert` which renews the bond if the peer is in the table + self.update_on_pong(node, pong.enr_sq); } PingReason::RePing => { self.update_on_reping(node, pong.enr_sq); @@ -2270,8 +2283,7 @@ impl NodeEntry { enum PingReason { /// Initial ping to a previously unknown peer that was inserted into the table. InitialInsert, - /// Initial ping to a previously unknown peer that didn't fit into the table. But we still want - /// to establish a bond. + /// A ping to a peer to establish a bond (endpoint proof). EstablishBond, /// Re-ping a peer. 
RePing, From ff04de380dc9f107d0aec4a4f0b1bc5027c891c4 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 23 Oct 2024 16:53:29 +0400 Subject: [PATCH 106/970] chore: bump alloy-eip7702 (#11986) Co-authored-by: Oliver Nordbjerg --- Cargo.lock | 133 +++++++++--------- Cargo.toml | 60 ++++---- book/sources/Cargo.toml | 13 +- book/sources/exex/hello-world/Cargo.toml | 8 +- book/sources/exex/tracking-state/Cargo.toml | 10 +- crates/primitives/src/transaction/sidecar.rs | 90 ++---------- .../codecs/src/alloy/authorization_list.rs | 14 +- crates/transaction-pool/src/blobstore/disk.rs | 13 +- 8 files changed, 141 insertions(+), 200 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e1cead60911..7e10bdb0a67 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42642aed67f938363d9c7543e5ca4163cfb4205d9ec15fe933dc4e865d2932dd" +checksum = "cdf02dfacfc815214f9b54ff50d54900ba527a68fd73e2c5637ced3460005045" dependencies = [ "alloy-eips", "alloy-primitives", @@ -161,13 +161,14 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.2.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeffd2590ce780ddfaa9d0ae340eb2b4e08627650c4676eef537cef0b4bf535d" +checksum = "64ffc577390ce50234e02d841214b3dc0bea6aaaae8e04bbf3cb82e9a45da9eb" dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", + "derive_more 1.0.0", "k256", "rand 0.8.5", "serde", @@ -176,9 +177,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbc52a30df46f9831ed74557dfad0d94b12420393662a8b9ef90e2d6c8cb4b0" +checksum = "769da342b6bcd945013925ef4c40763cc82f11e002c60702dba8b444bb60e5a7" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -197,9 +198,9 @@ 
dependencies = [ [[package]] name = "alloy-genesis" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0787d1688b9806290313cc335d416cc7ee39b11e3245f3d218544c62572d92ba" +checksum = "c698ce0ada980b17f0323e1a28c7da8a2e9abc6dff5be9ee33d1525b28ac46b6" dependencies = [ "alloy-primitives", "alloy-serde", @@ -220,9 +221,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d55a16a5f9ca498a217c060414bcd1c43e934235dc8058b31b87dcd69ff4f105" +checksum = "c1050e1d65524c030b17442b6546b564da51fdab7f71bd534b001ba65f2ebb16" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -234,9 +235,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d236a8c3e1d5adc09b1b63c81815fc9b757d9a4ba9482cc899f9679b55dd437" +checksum = "da34a18446a27734473af3d77eb21c5ebbdf97ea8eb65c39c0b50916bc659023" dependencies = [ "alloy-consensus", "alloy-eips", @@ -255,9 +256,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15a0990fa8a56d85a42d6a689719aa4eebf5e2f1a5c5354658c0bfc52cac9a" +checksum = "9a968c063fcfcb937736665c865a71fc2242b68916156f5ffa41fee7b44bb695" dependencies = [ "alloy-consensus", "alloy-eips", @@ -268,9 +269,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2249f3c3ce446cf4063fe3d1aa7530823643c2706a1cc63045e0683ebc497a0a" +checksum = "439fc6a933b9f8e8b272a8cac35dbeabaf2b2eaf9590482bebedb5782153118e" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -317,9 +318,9 @@ dependencies = [ [[package]] name = "alloy-provider" 
-version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "316f522bb6f9ac3805132112197957013b570e20cfdad058e8339dae6030c849" +checksum = "c45dbc0e3630becef9e988b69d43339f68d67e32a854e3c855bc28bd5031895b" dependencies = [ "alloy-chains", "alloy-consensus", @@ -358,9 +359,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222cd9b17b1c5ad48de51a88ffbdb17f17145170288f22662f80ac88739125e6" +checksum = "1e3961a56e10f44bfd69dd3f4b0854b90b84c612b0c43708e738933e8b47f93a" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -399,9 +400,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b2ab59712c594c9624aaa69e38e4d38f180cb569f1fa46cdaf8c21fd50793e5" +checksum = "917e5504e4f8f7e39bdc322ff81589ed54c1e462240adaeb58162c2d986a5a2b" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -424,9 +425,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba21284319e12d053baa204d438db6c1577aedd94c1298e4becefdac1f9cec87" +checksum = "07c7eb2dc6db1dd41e5e7bd2b98a38813854efc30e034afd90d1e420e7f3de2b" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -437,9 +438,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416cc9f391d0b876c4c8da85f7131e771a88a55b917cc9a35e1724d9409e3b1c" +checksum = "bd468a4e3eddcd9d612cad657852de4b7475ac2080e7af9224fbf1df20ddffe0" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -449,9 +450,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.5.2" +version = 
"0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba40bea86c3102b9ed9b3be579e32e0b3e54e766248d873de5fc0437238c8df2" +checksum = "2640928d9b1d43bb1cec7a0d615e10c2b407c5bd8ff1fcbe49e6318a2b62d731" dependencies = [ "alloy-primitives", "alloy-serde", @@ -460,9 +461,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b535781fe224c101c3d957b514cb9f438d165ff0280e5c0b2f87a0d9a2950593" +checksum = "64f731ad2ef8d7dd75a4d28214f4922a5b683feee1e6df35bd7b427315f94366" dependencies = [ "alloy-eips", "alloy-primitives", @@ -474,9 +475,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4303deacf4cbf12ed4431a5a1bbc3284f0defb4b8b72d9aa2b888656cc5ae657" +checksum = "06bd0757bfb3eccde06ee3f4e378f5839fe923d40956cff586018d4427a15bb5" dependencies = [ "alloy-primitives", "serde", @@ -484,9 +485,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44848fced3b42260b9cb61f22102246636dfe5a2d0132f8d10a617df3cb1a74b" +checksum = "7d3d95c3bf03efbb7bdc1d097e2931f520aac47438b709ccd8f065a7793dd371" dependencies = [ "alloy-consensus", "alloy-eips", @@ -505,9 +506,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35894711990019fafff0012b82b9176cbb744516eb2a9bbe6b8e5cae522163ee" +checksum = "e855b0daccf2320ba415753c3fed422abe9d3ad5d77b2d6cafcc9bcf32fe387f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -526,9 +527,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.5.2" +version = "0.5.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac6250cad380a005ecb5ffc6d2facf03df0e72628d819a63dd8c3ade7a766ff" +checksum = "eca3753b9894235f915437f908644e737d8714c686ce4e8d03afbf585b23f074" dependencies = [ "alloy-eips", "alloy-primitives", @@ -539,9 +540,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f568c5624881896d8a25e19acbdcbabadd8df339427ea2f10b2ee447d57c4509" +checksum = "ae58a997afde032cd021547c960a53eef6245f47969dd71886e9f63fb45a6048" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -553,9 +554,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a37d2e1ed9b7daf20ad0b3e0092613cbae46737e0e988b23caa556c7067ce6" +checksum = "667e45c882fda207d4cc94c4bb35e24a23347955113dcb236a5e4e0eaddef826" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -565,9 +566,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2843c195675f06b29c09a4315cccdc233ab5bdc7c0a3775909f9f0cab5e9ae0f" +checksum = "35c2661ca6785add8fc37aff8005439c806ffad58254c19939c6f59ac0d6596e" dependencies = [ "alloy-primitives", "arbitrary", @@ -577,9 +578,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88b2a00d9803dfef99963303ffe41a7bf2221f3342f0a503d6741a9f4a18e5e5" +checksum = "67eca011160d18a7dc6d8cdc1e8dc13e2e86c908f8e41b02aa76e429d6fe7085" dependencies = [ "alloy-primitives", "async-trait", @@ -591,9 +592,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.5.2" +version = "0.5.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a2505d4f8c98dcae86152d58d549cb4bcf953f8352fca903410e0a0ef535571" +checksum = "1c54b195a6ee5a83f32e7c697b4e6b565966737ed5a2ef9176bbbb39f720d023" dependencies = [ "alloy-consensus", "alloy-network", @@ -679,9 +680,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dc2c8f6b8c227ef0398f702d954c4ab572c2ead3c1ed4a5157aa1cbaf959747" +checksum = "3e4a136e733f55fef0870b81e1f8f1db28e78973d1b1ae5a5df642ba39538a07" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -699,9 +700,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd328e990d57f4c4e63899fb2c26877597d6503f8e0022a3d71b2d753ecbfc0c" +checksum = "1a6b358a89b6d107b92d09b61a61fbc04243942182709752c796f4b29402cead" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -714,9 +715,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89aea26aaf1d67904a7ff95ec4a24ddd5e7d419a6945f641b885962d7c2803e2" +checksum = "4a899c43b7f5e3bc83762dfe5128fccd9cfa99f1f03c5f26bbfb2495ae8dcd35" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -733,9 +734,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e222e950ecc4ea12fbfb524b9a2275cac2cd5f57c8ce25bcaf1bd3ff80dd8fc8" +checksum = "d27aac1246e13c9e6fa0c784fbb0c56872c6224f78dbde388bb2213ccdf8af02" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -9264,9 +9265,9 @@ dependencies = [ [[package]] name = "revm" -version = "16.0.0" +version = "17.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "34e44692d5736cc44c697a372e507890f8797f06d1541c5f4b9bec594d90fd8a" +checksum = "2eab16cb0a8cd5ac88b11230b20df588b7e8aae7dfab4b3f830e98aebeb4b365" dependencies = [ "auto_impl", "cfg-if", @@ -9279,9 +9280,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64e2246ad480167548724eb9c9c66945241b867c7d50894de3ca860c9823a45" +checksum = "1e29c662f7887f3b659d4b0fd234673419a8fcbeaa1ecc29bf7034c0a75cc8ea" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9298,9 +9299,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "12.0.0" +version = "13.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f89940d17d5d077570de1977f52f69049595322e237cb6c754c3d47f668f023" +checksum = "fac2034454f8bc69dc7d3c94cdb1b57559e27f5ef0518771f1787de543d7d6a1" dependencies = [ "revm-primitives", "serde", @@ -9308,9 +9309,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "13.0.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8f816aaea3245cbdbe7fdd84955df33597f9322c7912c3e3ba7bc855e03211f" +checksum = "7a88c8c7c5f9b988a9e65fc0990c6ce859cdb74114db705bd118a96d22d08027" dependencies = [ "aurora-engine-modexp", "blst", @@ -9328,9 +9329,9 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "12.0.0" +version = "13.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532411bbde45a46707c1d434dcdc29866cf261c1b748fb01b303ce3b4310b361" +checksum = "0d11fa1e195b0bebaf3fb18596f314a13ba3a4cb1fdd16d3465934d812fd921e" dependencies = [ "alloy-eip2930", "alloy-eip7702", diff --git a/Cargo.toml b/Cargo.toml index ea90cd7fb64..e8f10229ed5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -418,9 +418,9 @@ reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } # revm -revm 
= { version = "16.0.0", features = ["std"], default-features = false } -revm-inspectors = "0.9.0" -revm-primitives = { version = "12.0.0", features = [ +revm = { version = "17.0.0", features = ["std"], default-features = false } +revm-inspectors = "0.10.0" +revm-primitives = { version = "13.0.0", features = [ "std", ], default-features = false } @@ -432,39 +432,39 @@ alloy-rlp = "0.3.4" alloy-sol-types = "0.8.0" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.5.2", default-features = false } -alloy-eips = { version = "0.5.2", default-features = false } -alloy-genesis = { version = "0.5.2", default-features = false } -alloy-json-rpc = { version = "0.5.2", default-features = false } -alloy-network = { version = "0.5.2", default-features = false } -alloy-network-primitives = { version = "0.5.2", default-features = false } -alloy-node-bindings = { version = "0.5.2", default-features = false } -alloy-provider = { version = "0.5.2", features = [ +alloy-consensus = { version = "0.5.3", default-features = false } +alloy-eips = { version = "0.5.3", default-features = false } +alloy-genesis = { version = "0.5.3", default-features = false } +alloy-json-rpc = { version = "0.5.3", default-features = false } +alloy-network = { version = "0.5.3", default-features = false } +alloy-network-primitives = { version = "0.5.3", default-features = false } +alloy-node-bindings = { version = "0.5.3", default-features = false } +alloy-provider = { version = "0.5.3", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.5.2", default-features = false } -alloy-rpc-client = { version = "0.5.2", default-features = false } -alloy-rpc-types = { version = "0.5.2", features = [ +alloy-pubsub = { version = "0.5.3", default-features = false } +alloy-rpc-client = { version = "0.5.3", default-features = false } +alloy-rpc-types = { version = "0.5.3", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { 
version = "0.5.2", default-features = false } -alloy-rpc-types-anvil = { version = "0.5.2", default-features = false } -alloy-rpc-types-beacon = { version = "0.5.2", default-features = false } -alloy-rpc-types-debug = { version = "0.5.2", default-features = false } -alloy-rpc-types-engine = { version = "0.5.2", default-features = false } -alloy-rpc-types-eth = { version = "0.5.2", default-features = false } -alloy-rpc-types-mev = { version = "0.5.2", default-features = false } -alloy-rpc-types-trace = { version = "0.5.2", default-features = false } -alloy-rpc-types-txpool = { version = "0.5.2", default-features = false } -alloy-serde = { version = "0.5.2", default-features = false } -alloy-signer = { version = "0.5.2", default-features = false } -alloy-signer-local = { version = "0.5.2", default-features = false } -alloy-transport = { version = "0.5.2" } -alloy-transport-http = { version = "0.5.2", features = [ +alloy-rpc-types-admin = { version = "0.5.3", default-features = false } +alloy-rpc-types-anvil = { version = "0.5.3", default-features = false } +alloy-rpc-types-beacon = { version = "0.5.3", default-features = false } +alloy-rpc-types-debug = { version = "0.5.3", default-features = false } +alloy-rpc-types-engine = { version = "0.5.3", default-features = false } +alloy-rpc-types-eth = { version = "0.5.3", default-features = false } +alloy-rpc-types-mev = { version = "0.5.3", default-features = false } +alloy-rpc-types-trace = { version = "0.5.3", default-features = false } +alloy-rpc-types-txpool = { version = "0.5.3", default-features = false } +alloy-serde = { version = "0.5.3", default-features = false } +alloy-signer = { version = "0.5.3", default-features = false } +alloy-signer-local = { version = "0.5.3", default-features = false } +alloy-transport = { version = "0.5.3" } +alloy-transport-http = { version = "0.5.3", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.5.2", default-features = false } 
-alloy-transport-ws = { version = "0.5.2", default-features = false } +alloy-transport-ipc = { version = "0.5.3", default-features = false } +alloy-transport-ws = { version = "0.5.3", default-features = false } # op op-alloy-rpc-types = "0.5" diff --git a/book/sources/Cargo.toml b/book/sources/Cargo.toml index 32fb13990b9..b374ad798b5 100644 --- a/book/sources/Cargo.toml +++ b/book/sources/Cargo.toml @@ -1,10 +1,13 @@ [workspace] -members = [ - "exex/hello-world", - "exex/remote", - "exex/tracking-state", -] +members = ["exex/hello-world", "exex/remote", "exex/tracking-state"] # Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021 # https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html resolver = "2" + +[patch.'https://github.com/paradigmxyz/reth'] +reth = { path = "../../bin/reth" } +reth-exex = { path = "../../crates/exex/exex" } +reth-node-ethereum = { path = "../../crates/ethereum/node" } +reth-tracing = { path = "../../crates/tracing" } +reth-node-api = { path = "../../crates/node/api" } diff --git a/book/sources/exex/hello-world/Cargo.toml b/book/sources/exex/hello-world/Cargo.toml index e5d32a14054..c466018c667 100644 --- a/book/sources/exex/hello-world/Cargo.toml +++ b/book/sources/exex/hello-world/Cargo.toml @@ -4,10 +4,10 @@ version = "0.1.0" edition = "2021" [dependencies] -reth = { git = "https://github.com/paradigmxyz/reth.git" } # Reth -reth-exex = { git = "https://github.com/paradigmxyz/reth.git" } # Execution Extensions +reth = { git = "https://github.com/paradigmxyz/reth.git" } # Reth +reth-exex = { git = "https://github.com/paradigmxyz/reth.git" } # Execution Extensions reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } # Ethereum Node implementation -reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # Logging +reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # Logging -eyre = "0.6" # Easy error handling +eyre = "0.6" # Easy 
error handling futures-util = "0.3" # Stream utilities for consuming notifications diff --git a/book/sources/exex/tracking-state/Cargo.toml b/book/sources/exex/tracking-state/Cargo.toml index 3ce21b0c340..a8e862d0a73 100644 --- a/book/sources/exex/tracking-state/Cargo.toml +++ b/book/sources/exex/tracking-state/Cargo.toml @@ -5,10 +5,12 @@ edition = "2021" [dependencies] reth = { git = "https://github.com/paradigmxyz/reth.git" } -reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = ["serde"] } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git"} +reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = [ + "serde", +] } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } -eyre = "0.6" # Easy error handling -futures-util = "0.3" # Stream utilities for consuming notifications +eyre = "0.6" # Easy error handling +futures-util = "0.3" # Stream utilities for consuming notifications alloy-primitives = "0.8.7" diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index edc1427d1fe..e901cbfc08d 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,11 +1,9 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] use crate::{Signature, Transaction, TransactionSigned}; -use alloy_consensus::{ - constants::EIP4844_TX_TYPE_ID, transaction::TxEip4844, TxEip4844WithSidecar, -}; -use alloy_primitives::{keccak256, TxHash}; -use alloy_rlp::{Decodable, Error as RlpError, Header}; +use alloy_consensus::{constants::EIP4844_TX_TYPE_ID, TxEip4844WithSidecar}; +use alloy_primitives::TxHash; +use alloy_rlp::Header; use serde::{Deserialize, Serialize}; #[doc(inline)] @@ -14,8 +12,6 @@ pub use alloy_eips::eip4844::BlobTransactionSidecar; #[cfg(feature = "c-kzg")] pub use alloy_eips::eip4844::BlobTransactionValidationError; -use 
alloc::vec::Vec; - /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// corresponding proofs. /// @@ -36,7 +32,7 @@ impl BlobTransaction { /// Constructs a new [`BlobTransaction`] from a [`TransactionSigned`] and a /// [`BlobTransactionSidecar`]. /// - /// Returns an error if the signed transaction is not [`TxEip4844`] + /// Returns an error if the signed transaction is not [`Transaction::Eip4844`] pub fn try_from_signed( tx: TransactionSigned, sidecar: BlobTransactionSidecar, @@ -57,7 +53,7 @@ impl BlobTransaction { /// Verifies that the transaction's blob data, commitments, and proofs are all valid. /// - /// See also [`TxEip4844::validate_blob`] + /// See also [`alloy_consensus::TxEip4844::validate_blob`] #[cfg(feature = "c-kzg")] pub fn validate( &self, @@ -163,7 +159,7 @@ impl BlobTransaction { // The payload length is the length of the `tranascation_payload_body` list, plus the // length of the blobs, commitments, and proofs. - let payload_length = tx_length + self.transaction.sidecar.fields_len(); + let payload_length = tx_length + self.transaction.sidecar.rlp_encoded_fields_length(); // We use the calculated payload len to construct the first list header, which encompasses // everything in the tx - the length of the second, inner list header is part of @@ -188,74 +184,17 @@ impl BlobTransaction { /// Note: this should be used only when implementing other RLP decoding methods, and does not /// represent the full RLP decoding of the `PooledTransactionsElement` type. 
pub(crate) fn decode_inner(data: &mut &[u8]) -> alloy_rlp::Result { - // decode the _first_ list header for the rest of the transaction - let outer_header = Header::decode(data)?; - if !outer_header.list { - return Err(RlpError::Custom("PooledTransactions blob tx must be encoded as a list")) - } - - let outer_remaining_len = data.len(); - - // Now we need to decode the inner 4844 transaction and its signature: - // - // `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]` - let inner_header = Header::decode(data)?; - if !inner_header.list { - return Err(RlpError::Custom( - "PooledTransactions inner blob tx must be encoded as a list", - )) - } - - let inner_remaining_len = data.len(); - - // inner transaction - let transaction = TxEip4844::decode_fields(data)?; - - // signature - let signature = Signature::decode_rlp_vrs(data)?; - - // the inner header only decodes the transaction and signature, so we check the length here - let inner_consumed = inner_remaining_len - data.len(); - if inner_consumed != inner_header.payload_length { - return Err(RlpError::UnexpectedLength) - } - - // All that's left are the blobs, commitments, and proofs - let sidecar = BlobTransactionSidecar::decode(data)?; - - // # Calculating the hash - // - // The full encoding of the `PooledTransaction` response is: - // `tx_type (0x03) || rlp([tx_payload_body, blobs, commitments, proofs])` - // - // The transaction hash however, is: - // `keccak256(tx_type (0x03) || rlp(tx_payload_body))` - // - // Note that this is `tx_payload_body`, not `[tx_payload_body]`, which would be - // `[[chain_id, nonce, max_priority_fee_per_gas, ...]]`, i.e. a list within a list. - // - // Because the pooled transaction encoding is different than the hash encoding for - // EIP-4844 transactions, we do not use the original buffer to calculate the hash. - // - // Instead, we use `encode_with_signature`, which RLP encodes the transaction with a - // signature for hashing without a header. 
We then hash the result. - let mut buf = Vec::new(); - transaction.encode_with_signature(&signature, &mut buf, false); - let hash = keccak256(&buf); - - // the outer header is for the entire transaction, so we check the length here - let outer_consumed = outer_remaining_len - data.len(); - if outer_consumed != outer_header.payload_length { - return Err(RlpError::UnexpectedLength) - } + let (transaction, signature, hash) = + TxEip4844WithSidecar::decode_signed_fields(data)?.into_parts(); - Ok(Self { transaction: TxEip4844WithSidecar { tx: transaction, sidecar }, hash, signature }) + Ok(Self { transaction, hash, signature }) } } /// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs. #[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { + use alloc::vec::Vec; use alloy_eips::eip4844::env_settings::EnvKzgSettings; use c_kzg::{KzgCommitment, KzgProof}; @@ -285,12 +224,12 @@ pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar mod tests { use super::*; use crate::{kzg::Blob, PooledTransactionsElement}; + use alloc::vec::Vec; use alloy_eips::{ eip2718::{Decodable2718, Encodable2718}, eip4844::Bytes48, }; use alloy_primitives::hex; - use alloy_rlp::Encodable; use std::{fs, path::PathBuf, str::FromStr}; #[test] @@ -392,7 +331,7 @@ mod tests { let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode(&mut encoded_rlp); + sidecar.rlp_encode_fields(&mut encoded_rlp); // Assert the equality between the expected RLP from the JSON and the encoded RLP assert_eq!(json_value.get("rlp").unwrap().as_str().unwrap(), hex::encode(&encoded_rlp)); @@ -423,10 +362,11 @@ mod tests { let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode(&mut encoded_rlp); + sidecar.rlp_encode_fields(&mut encoded_rlp); // Decode the RLP-encoded data back into a 
BlobTransactionSidecar - let decoded_sidecar = BlobTransactionSidecar::decode(&mut encoded_rlp.as_slice()).unwrap(); + let decoded_sidecar = + BlobTransactionSidecar::rlp_decode_fields(&mut encoded_rlp.as_slice()).unwrap(); // Assert the equality between the original BlobTransactionSidecar and the decoded one assert_eq!(sidecar, decoded_sidecar); diff --git a/crates/storage/codecs/src/alloy/authorization_list.rs b/crates/storage/codecs/src/alloy/authorization_list.rs index 2b013c0d3c5..6dc36956d24 100644 --- a/crates/storage/codecs/src/alloy/authorization_list.rs +++ b/crates/storage/codecs/src/alloy/authorization_list.rs @@ -44,11 +44,9 @@ impl Compact for SignedAuthorization { where B: bytes::BufMut + AsMut<[u8]>, { - let signature = self.signature(); - let (v, r, s) = (signature.v(), signature.r(), signature.s()); - buf.put_u8(v.y_parity_byte()); - buf.put_slice(r.as_le_slice()); - buf.put_slice(s.as_le_slice()); + buf.put_u8(self.y_parity()); + buf.put_slice(self.r().as_le_slice()); + buf.put_slice(self.s().as_le_slice()); // to_compact doesn't write the len to buffer. // By placing it as last, we don't need to store it either. 
@@ -56,17 +54,15 @@ impl Compact for SignedAuthorization { } fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) { - let y = alloy_primitives::Parity::Parity(buf.get_u8() == 1); + let y_parity = buf.get_u8(); let r = U256::from_le_slice(&buf[0..32]); buf.advance(32); let s = U256::from_le_slice(&buf[0..32]); buf.advance(32); - let signature = alloy_primitives::Signature::from_rs_and_parity(r, s, y) - .expect("invalid authorization signature"); let (auth, buf) = AlloyAuthorization::from_compact(buf, len); - (auth.into_signed(signature), buf) + (Self::new_unchecked(auth, y_parity, r, s), buf) } } diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index e168a1c1111..787d4985ff1 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -3,7 +3,6 @@ use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; use alloy_eips::eip4844::BlobAndProofV1; use alloy_primitives::{TxHash, B256}; -use alloy_rlp::{Decodable, Encodable}; use parking_lot::{Mutex, RwLock}; use reth_primitives::BlobTransactionSidecar; use schnellru::{ByLength, LruMap}; @@ -204,8 +203,8 @@ impl DiskFileBlobStoreInner { /// Ensures blob is in the blob cache and written to the disk. 
fn insert_one(&self, tx: B256, data: BlobTransactionSidecar) -> Result<(), BlobStoreError> { - let mut buf = Vec::with_capacity(data.fields_len()); - data.encode(&mut buf); + let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length()); + data.rlp_encode_fields(&mut buf); self.blob_cache.lock().insert(tx, data); let size = self.write_one_encoded(tx, &buf)?; @@ -219,8 +218,8 @@ impl DiskFileBlobStoreInner { let raw = txs .iter() .map(|(tx, data)| { - let mut buf = Vec::with_capacity(data.fields_len()); - data.encode(&mut buf); + let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length()); + data.rlp_encode_fields(&mut buf); (self.blob_disk_file(*tx), buf) }) .collect::>(); @@ -312,7 +311,7 @@ impl DiskFileBlobStoreInner { } } }; - BlobTransactionSidecar::decode(&mut data.as_slice()) + BlobTransactionSidecar::rlp_decode_fields(&mut data.as_slice()) .map(Some) .map_err(BlobStoreError::DecodeError) } @@ -322,7 +321,7 @@ impl DiskFileBlobStoreInner { self.read_many_raw(txs) .into_iter() .filter_map(|(tx, data)| { - BlobTransactionSidecar::decode(&mut data.as_slice()) + BlobTransactionSidecar::rlp_decode_fields(&mut data.as_slice()) .map(|sidecar| (tx, sidecar)) .ok() }) From cf4e7745427c8e57e576acb2458f10e6a89d89c0 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Wed, 23 Oct 2024 20:12:44 +0700 Subject: [PATCH 107/970] chore: `Ethereum` -> `Optimism` in comment of `optimism_payload` (#11998) --- crates/optimism/payload/src/builder.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index b6ab9b87956..3ed00c49aec 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -146,12 +146,12 @@ where } } -/// Constructs an Ethereum transaction payload from the transactions sent through the +/// Constructs an Optimism transaction payload from the transactions sent through the /// 
Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in /// the payload attributes, the transaction pool will be ignored and the only transactions /// included in the payload will be those sent through the attributes. /// -/// Given build arguments including an Ethereum client, transaction pool, +/// Given build arguments including an Optimism client, transaction pool, /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. #[inline] From 386379efd55e491013d010b0a377771317a21e14 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 23 Oct 2024 14:58:56 +0200 Subject: [PATCH 108/970] test(tokio-util): add unit tests for `EventSender` (#11980) --- crates/tokio-util/src/event_sender.rs | 93 +++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/crates/tokio-util/src/event_sender.rs b/crates/tokio-util/src/event_sender.rs index a4e9815388c..16208ee19c0 100644 --- a/crates/tokio-util/src/event_sender.rs +++ b/crates/tokio-util/src/event_sender.rs @@ -40,3 +40,96 @@ impl EventSender { EventStream::new(self.sender.subscribe()) } } + +#[cfg(test)] +mod tests { + use super::*; + use tokio::{ + task, + time::{timeout, Duration}, + }; + use tokio_stream::StreamExt; + + #[tokio::test] + async fn test_event_broadcast_to_listener() { + let sender = EventSender::default(); + + // Create a listener for the events + let mut listener = sender.new_listener(); + + // Broadcast an event + sender.notify("event1"); + + // Check if the listener receives the event + let received_event = listener.next().await; + assert_eq!(received_event, Some("event1")); + } + + #[tokio::test] + async fn test_event_no_listener() { + let sender = EventSender::default(); + + // Broadcast an event with no listeners + sender.notify("event2"); + + // Ensure it doesn't panic or fail when no listeners are present + // (this 
test passes if it runs without errors). + } + + #[tokio::test] + async fn test_multiple_listeners_receive_event() { + let sender = EventSender::default(); + + // Create two listeners + let mut listener1 = sender.new_listener(); + let mut listener2 = sender.new_listener(); + + // Broadcast an event + sender.notify("event3"); + + // Both listeners should receive the same event + let event1 = listener1.next().await; + let event2 = listener2.next().await; + + assert_eq!(event1, Some("event3")); + assert_eq!(event2, Some("event3")); + } + + #[tokio::test] + async fn test_bounded_channel_size() { + // Create a channel with size 2 + let sender = EventSender::new(2); + + // Create a listener + let mut listener = sender.new_listener(); + + // Broadcast 3 events, which exceeds the channel size + sender.notify("event4"); + sender.notify("event5"); + sender.notify("event6"); + + // Only the last two should be received due to the size limit + let received_event1 = listener.next().await; + let received_event2 = listener.next().await; + + assert_eq!(received_event1, Some("event5")); + assert_eq!(received_event2, Some("event6")); + } + + #[tokio::test] + async fn test_event_listener_timeout() { + let sender = EventSender::default(); + let mut listener = sender.new_listener(); + + // Broadcast an event asynchronously + task::spawn(async move { + tokio::time::sleep(Duration::from_millis(50)).await; + sender.notify("delayed_event"); + }); + + // Use a timeout to ensure that the event is received within a certain time + let result = timeout(Duration::from_millis(100), listener.next()).await; + assert!(result.is_ok()); + assert_eq!(result.unwrap(), Some("delayed_event")); + } +} From 5e0ba4104d85dcf2be9a7df91347cb84996d245f Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 23 Oct 2024 15:11:59 +0200 Subject: [PATCH 109/970] tx-pool: migrate `ensure_max_init_code_size` to `PoolTransaction` trait (#11976) --- 
crates/transaction-pool/src/traits.rs | 22 ++++++++++++++++- crates/transaction-pool/src/validate/eth.rs | 26 ++++----------------- 2 files changed, 26 insertions(+), 22 deletions(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 56da11fe696..cedec56063b 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -2,7 +2,7 @@ use crate::{ blobstore::BlobStoreError, - error::PoolResult, + error::{InvalidPoolTransactionError, PoolResult}, pool::{state::SubPool, BestTransactionFilter, TransactionEvents}, validate::ValidPoolTransaction, AllTransactionsEvents, @@ -961,6 +961,26 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Returns `chain_id` fn chain_id(&self) -> Option; + + /// Ensures that the transaction's code size does not exceed the provided `max_init_code_size`. + /// + /// This is specifically relevant for contract creation transactions ([`TxKind::Create`]), + /// where the input data contains the initialization code. If the input code size exceeds + /// the configured limit, an [`InvalidPoolTransactionError::ExceedsMaxInitCodeSize`] error is + /// returned. 
+ fn ensure_max_init_code_size( + &self, + max_init_code_size: usize, + ) -> Result<(), InvalidPoolTransactionError> { + if self.kind().is_create() && self.input().len() > max_init_code_size { + Err(InvalidPoolTransactionError::ExceedsMaxInitCodeSize( + self.size(), + max_init_code_size, + )) + } else { + Ok(()) + } + } } /// Super trait for transactions that can be converted to and from Eth transactions diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 22744c58a79..bf7749fb85c 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -8,7 +8,7 @@ use crate::{ }, traits::TransactionOrigin, validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_BYTE_SIZE}, - EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, PoolTransaction, + EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, }; use alloy_consensus::constants::{ @@ -223,7 +223,7 @@ where // Check whether the init code size has been exceeded. if self.fork_tracker.is_shanghai_activated() { - if let Err(err) = ensure_max_init_code_size(&transaction, MAX_INIT_CODE_BYTE_SIZE) { + if let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE) { return TransactionValidationOutcome::Invalid(transaction, err) } } @@ -711,7 +711,7 @@ impl EthTransactionValidatorBuilder { EthTransactionValidator { inner: Arc::new(inner) } } - /// Builds a the [`EthTransactionValidator`] and spawns validation tasks via the + /// Builds a [`EthTransactionValidator`] and spawns validation tasks via the /// [`TransactionValidationTaskExecutor`] /// /// The validator will spawn `additional_tasks` additional tasks for validation. @@ -783,22 +783,6 @@ impl ForkTracker { } } -/// Ensure that the code size is not greater than `max_init_code_size`. 
-/// `max_init_code_size` should be configurable so this will take it as an argument. -pub fn ensure_max_init_code_size( - transaction: &T, - max_init_code_size: usize, -) -> Result<(), InvalidPoolTransactionError> { - if transaction.kind().is_create() && transaction.input().len() > max_init_code_size { - Err(InvalidPoolTransactionError::ExceedsMaxInitCodeSize( - transaction.size(), - max_init_code_size, - )) - } else { - Ok(()) - } -} - /// Ensures that gas limit of the transaction exceeds the intrinsic gas of the transaction. /// /// Caution: This only checks past the Merge hardfork. @@ -833,8 +817,8 @@ pub fn ensure_intrinsic_gas( mod tests { use super::*; use crate::{ - blobstore::InMemoryBlobStore, error::PoolErrorKind, CoinbaseTipOrdering, - EthPooledTransaction, Pool, TransactionPool, + blobstore::InMemoryBlobStore, error::PoolErrorKind, traits::PoolTransaction, + CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionPool, }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; From 252cdf7f3576f76a0ffe6b4086105655b11ba420 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 23 Oct 2024 15:35:24 +0200 Subject: [PATCH 110/970] storage: add unit tests for `StorageRevertsIter` (#11999) --- .../src/bundle_state/state_reverts.rs | 102 ++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 09b892562fb..3e1ba2a4b8f 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -76,3 +76,105 @@ where } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_storage_reverts_iter_empty() { + // Create empty sample data for reverts and wiped entries. 
+ let reverts: Vec<(B256, RevertToSlot)> = vec![]; + let wiped: Vec<(B256, U256)> = vec![]; + + // Create the iterator with the empty data. + let iter = StorageRevertsIter::new(reverts, wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify that the results are empty. + assert_eq!(results, vec![]); + } + + #[test] + fn test_storage_reverts_iter_reverts_only() { + // Create sample data for only reverts. + let reverts = vec![ + (B256::from_slice(&[4; 32]), RevertToSlot::Destroyed), + (B256::from_slice(&[5; 32]), RevertToSlot::Some(U256::from(40))), + ]; + + // Create the iterator with only reverts and no wiped entries. + let iter = StorageRevertsIter::new(reverts, vec![]); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[4; 32]), U256::ZERO), // Revert slot previous value + (B256::from_slice(&[5; 32]), U256::from(40)), // Only revert present. + ] + ); + } + + #[test] + fn test_storage_reverts_iter_wiped_only() { + // Create sample data for only wiped entries. + let wiped = vec![ + (B256::from_slice(&[6; 32]), U256::from(50)), + (B256::from_slice(&[7; 32]), U256::from(60)), + ]; + + // Create the iterator with only wiped entries and no reverts. + let iter = StorageRevertsIter::new(vec![], wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[6; 32]), U256::from(50)), // Only wiped present. + (B256::from_slice(&[7; 32]), U256::from(60)), // Only wiped present. + ] + ); + } + + #[test] + fn test_storage_reverts_iter_interleaved() { + // Create sample data for interleaved reverts and wiped entries. 
+ let reverts = vec![ + (B256::from_slice(&[8; 32]), RevertToSlot::Some(U256::from(70))), + (B256::from_slice(&[9; 32]), RevertToSlot::Some(U256::from(80))), + // Some higher key than wiped + (B256::from_slice(&[15; 32]), RevertToSlot::Some(U256::from(90))), + ]; + + let wiped = vec![ + (B256::from_slice(&[8; 32]), U256::from(75)), // Same key as revert + (B256::from_slice(&[10; 32]), U256::from(85)), // Wiped with new key + ]; + + // Create the iterator with the sample data. + let iter = StorageRevertsIter::new(reverts, wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[8; 32]), U256::from(70)), // Revert takes priority. + (B256::from_slice(&[9; 32]), U256::from(80)), // Only revert present. + (B256::from_slice(&[10; 32]), U256::from(85)), // Wiped entry. + (B256::from_slice(&[15; 32]), U256::from(90)), // Greater revert entry + ] + ); + } +} From 55d98bbc6effb0dde79b3210d0eea2a0424ebc64 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 23 Oct 2024 15:37:39 +0200 Subject: [PATCH 111/970] fix: check failed find nodes requests before sending new ones (#11997) --- crates/net/discv4/src/config.rs | 2 +- crates/net/discv4/src/lib.rs | 135 +++++++++++++++++++++++++++----- 2 files changed, 118 insertions(+), 19 deletions(-) diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index c934f3361a1..38467304db2 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -23,7 +23,7 @@ pub struct Discv4Config { pub udp_egress_message_buffer: usize, /// Size of the channel buffer for incoming messages. pub udp_ingress_message_buffer: usize, - /// The number of allowed failures for `FindNode` requests. Default: 5. + /// The number of allowed consecutive failures for `FindNode` requests. Default: 5.
pub max_find_node_failures: u8, /// The interval to use when checking for expired nodes that need to be re-pinged. Default: /// 10min. diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index e955e45df61..779c7ee637a 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -743,7 +743,8 @@ impl Discv4Service { trace!(target: "discv4", ?target, "Starting lookup"); let target_key = kad_key(target); - // Start a lookup context with the 16 (MAX_NODES_PER_BUCKET) closest nodes + // Start a lookup context with the 16 (MAX_NODES_PER_BUCKET) closest nodes to which we have + // a valid endpoint proof let ctx = LookupContext::new( target_key.clone(), self.kbuckets @@ -772,7 +773,10 @@ impl Discv4Service { trace!(target: "discv4", ?target, num = closest.len(), "Start lookup closest nodes"); for node in closest { - self.find_node(&node, ctx.clone()); + // here we still want to check against previous request failures and if necessary + // re-establish a new endpoint proof because it can be the case that the other node lost + // our entry and no longer has an endpoint proof on their end + self.find_node_checked(&node, ctx.clone()); } } @@ -788,6 +792,22 @@ impl Discv4Service { self.pending_find_nodes.insert(node.id, FindNodeRequest::new(ctx)); } + /// Sends a new `FindNode` packet to the node with `target` as the lookup target but checks + /// whether we should send a new ping first to renew the endpoint proof by checking the + /// previously failed findNode requests. It could be that the node is no longer reachable or + /// lost our entry.
+ fn find_node_checked(&mut self, node: &NodeRecord, ctx: LookupContext) { + let max_failures = self.config.max_find_node_failures; + let needs_ping = self + .on_entry(node.id, |entry| entry.exceeds_find_node_failures(max_failures)) + .unwrap_or(true); + if needs_ping { + self.try_ping(*node, PingReason::Lookup(*node, ctx)) + } else { + self.find_node(node, ctx) + } + } + /// Notifies all listeners. /// /// Removes all listeners that are closed. @@ -860,7 +880,7 @@ impl Discv4Service { self.kbuckets.buckets_iter().fold(0, |count, bucket| count + bucket.num_connected()) } - /// Check if the peer has a bond + /// Check if the peer has an active bond. fn has_bond(&self, remote_id: PeerId, remote_ip: IpAddr) -> bool { if let Some(timestamp) = self.received_pongs.last_pong(remote_id, remote_ip) { if timestamp.elapsed() < self.config.bond_expiration { @@ -870,6 +890,19 @@ impl Discv4Service { false } + /// Applies a closure on the pending or present [`NodeEntry`]. + fn on_entry(&mut self, peer_id: PeerId, f: F) -> Option + where + F: FnOnce(&NodeEntry) -> R, + { + let key = kad_key(peer_id); + match self.kbuckets.entry(&key) { + BucketEntry::Present(entry, _) => Some(f(entry.value())), + BucketEntry::Pending(mut entry, _) => Some(f(entry.value())), + _ => None, + } + } + /// Update the entry on RE-ping. /// /// Invoked when we received the Pong to our [`PingReason::RePing`] ping. 
@@ -929,7 +962,7 @@ impl Discv4Service { match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, old_status) => { // endpoint is now proven - entry.value_mut().has_endpoint_proof = true; + entry.value_mut().establish_proof(); entry.value_mut().update_with_enr(last_enr_seq); if !old_status.is_connected() { @@ -945,7 +978,7 @@ impl Discv4Service { } kbucket::Entry::Pending(mut entry, mut status) => { // endpoint is now proven - entry.value().has_endpoint_proof = true; + entry.value().establish_proof(); entry.value().update_with_enr(last_enr_seq); if !status.is_connected() { @@ -1129,6 +1162,8 @@ impl Discv4Service { // try to send it ctx.unmark_queried(record.id); } else { + // we just received a ping from that peer so we can send a find node request + // directly self.find_node(&record, ctx); } } @@ -1419,14 +1454,28 @@ impl Discv4Service { BucketEntry::SelfEntry => { // we received our own node entry } - BucketEntry::Present(mut entry, _) => { - if entry.value_mut().has_endpoint_proof { - self.find_node(&closest, ctx.clone()); + BucketEntry::Present(entry, _) => { + if entry.value().has_endpoint_proof { + if entry + .value() + .exceeds_find_node_failures(self.config.max_find_node_failures) + { + self.try_ping(closest, PingReason::Lookup(closest, ctx.clone())) + } else { + self.find_node(&closest, ctx.clone()); + } } } BucketEntry::Pending(mut entry, _) => { if entry.value().has_endpoint_proof { - self.find_node(&closest, ctx.clone()); + if entry + .value() + .exceeds_find_node_failures(self.config.max_find_node_failures) + { + self.try_ping(closest, PingReason::Lookup(closest, ctx.clone())) + } else { + self.find_node(&closest, ctx.clone()); + } } } } @@ -1486,27 +1535,27 @@ impl Discv4Service { self.remove_node(node_id); } - self.evict_failed_neighbours(now); + self.evict_failed_find_nodes(now); } /// Handles failed responses to `FindNode` - fn evict_failed_neighbours(&mut self, now: Instant) { - let mut failed_neighbours = Vec::new(); + fn 
evict_failed_find_nodes(&mut self, now: Instant) { + let mut failed_find_nodes = Vec::new(); self.pending_find_nodes.retain(|node_id, find_node_request| { if now.duration_since(find_node_request.sent_at) > self.config.neighbours_expiration { if !find_node_request.answered { // node actually responded but with fewer entries than expected, but we don't // treat this as an hard error since it responded. - failed_neighbours.push(*node_id); + failed_find_nodes.push(*node_id); } return false } true }); - trace!(target: "discv4", num=%failed_neighbours.len(), "processing failed neighbours"); + trace!(target: "discv4", num=%failed_find_nodes.len(), "processing failed find nodes"); - for node_id in failed_neighbours { + for node_id in failed_find_nodes { let key = kad_key(node_id); let failures = match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, _) => { @@ -1523,7 +1572,7 @@ impl Discv4Service { // if the node failed to respond anything useful multiple times, remove the node from // the table, but only if there are enough other nodes in the bucket (bucket must be at // least half full) - if failures > (self.config.max_find_node_failures as usize) { + if failures > self.config.max_find_node_failures { self.soft_remove_node(node_id); } } @@ -2216,8 +2265,8 @@ struct NodeEntry { last_enr_seq: Option, /// `ForkId` if retrieved via ENR requests. fork_id: Option, - /// Counter for failed findNode requests. - find_node_failures: usize, + /// Counter for failed _consecutive_ findNode requests. + find_node_failures: u8, /// Whether the endpoint of the peer is proven. has_endpoint_proof: bool, } @@ -2244,6 +2293,17 @@ impl NodeEntry { node } + /// Marks the entry with an established proof and resets the consecutive failure counter. 
+ fn establish_proof(&mut self) { + self.has_endpoint_proof = true; + self.find_node_failures = 0; + } + + /// Returns true if the tracked find node failures exceed the max amount + const fn exceeds_find_node_failures(&self, max_failures: u8) -> bool { + self.find_node_failures >= max_failures + } + /// Updates the last timestamp and sets the enr seq fn update_with_enr(&mut self, last_enr_seq: Option) -> Option { self.update_now(|s| std::mem::replace(&mut s.last_enr_seq, last_enr_seq)) @@ -2660,6 +2720,45 @@ mod tests { assert_eq!(ctx.inner.closest_nodes.borrow().len(), 1); } + #[tokio::test] + async fn test_reping_on_find_node_failures() { + reth_tracing::init_test_tracing(); + + let config = Discv4Config::builder().build(); + let (_discv4, mut service) = create_discv4_with_config(config).await; + + let target = PeerId::random(); + + let id = PeerId::random(); + let key = kad_key(id); + let record = NodeRecord::new("0.0.0.0:0".parse().unwrap(), id); + + let mut entry = NodeEntry::new_proven(record); + entry.find_node_failures = u8::MAX; + let _ = service.kbuckets.insert_or_update( + &key, + entry, + NodeStatus { + direction: ConnectionDirection::Incoming, + state: ConnectionState::Connected, + }, + ); + + service.lookup(target); + assert_eq!(service.pending_find_nodes.len(), 0); + assert_eq!(service.pending_pings.len(), 1); + + service.update_on_pong(record, None); + + service + .on_entry(record.id, |entry| { + // reset on pong + assert_eq!(entry.find_node_failures, 0); + assert!(entry.has_endpoint_proof); + }) + .unwrap(); + } + #[tokio::test] async fn test_service_commands() { reth_tracing::init_test_tracing(); From ab407e74447fc02d7d48fc2bbe0f6807787ec0a0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 23 Oct 2024 15:39:41 +0200 Subject: [PATCH 112/970] chore: bump alloy 054 (#12000) --- Cargo.lock | 104 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 54 ++++++++++++++-------------- 2 files changed, 79 insertions(+), 79 deletions(-) 
diff --git a/Cargo.lock b/Cargo.lock index 7e10bdb0a67..f2d134cb85f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf02dfacfc815214f9b54ff50d54900ba527a68fd73e2c5637ced3460005045" +checksum = "41ed961a48297c732a5d97ee321aa8bb5009ecadbcb077d8bec90cb54e651629" dependencies = [ "alloy-eips", "alloy-primitives", @@ -177,9 +177,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "769da342b6bcd945013925ef4c40763cc82f11e002c60702dba8b444bb60e5a7" +checksum = "b69e06cf9c37be824b9d26d6d101114fdde6af0c87de2828b414c05c4b3daa71" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -198,9 +198,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c698ce0ada980b17f0323e1a28c7da8a2e9abc6dff5be9ee33d1525b28ac46b6" +checksum = "dde15e14944a88bd6a57d325e9a49b75558746fe16aaccc79713ae50a6a9574c" dependencies = [ "alloy-primitives", "alloy-serde", @@ -221,9 +221,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1050e1d65524c030b17442b6546b564da51fdab7f71bd534b001ba65f2ebb16" +checksum = "af5979e0d5a7bf9c7eb79749121e8256e59021af611322aee56e77e20776b4b3" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -235,9 +235,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da34a18446a27734473af3d77eb21c5ebbdf97ea8eb65c39c0b50916bc659023" +checksum = "204237129086ce5dc17a58025e93739b01b45313841f98fa339eb1d780511e57" dependencies = 
[ "alloy-consensus", "alloy-eips", @@ -269,9 +269,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439fc6a933b9f8e8b272a8cac35dbeabaf2b2eaf9590482bebedb5782153118e" +checksum = "27444ea67d360508753022807cdd0b49a95c878924c9c5f8f32668b7d7768245" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -318,9 +318,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c45dbc0e3630becef9e988b69d43339f68d67e32a854e3c855bc28bd5031895b" +checksum = "4814d141ede360bb6cd1b4b064f1aab9de391e7c4d0d4d50ac89ea4bc1e25fbd" dependencies = [ "alloy-chains", "alloy-consensus", @@ -359,9 +359,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e3961a56e10f44bfd69dd3f4b0854b90b84c612b0c43708e738933e8b47f93a" +checksum = "96ba46eb69ddf7a9925b81f15229cb74658e6eebe5dd30a5b74e2cd040380573" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -400,9 +400,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917e5504e4f8f7e39bdc322ff81589ed54c1e462240adaeb58162c2d986a5a2b" +checksum = "7fc2bd1e7403463a5f2c61e955bcc9d3072b63aa177442b0f9aa6a6d22a941e3" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -425,9 +425,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07c7eb2dc6db1dd41e5e7bd2b98a38813854efc30e034afd90d1e420e7f3de2b" +checksum = "eea9bf1abdd506f985a53533f5ac01296bcd6102c5e139bbc5d40bc468d2c916" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -438,9 
+438,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd468a4e3eddcd9d612cad657852de4b7475ac2080e7af9224fbf1df20ddffe0" +checksum = "ea02c25541fb19eaac4278aa5c41d2d7e0245898887e54a74bfc0f3103e99415" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -450,9 +450,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2640928d9b1d43bb1cec7a0d615e10c2b407c5bd8ff1fcbe49e6318a2b62d731" +checksum = "2382fc63fb0cf3e02818d547b80cb66cc49a31f8803d0c328402b2008bc13650" dependencies = [ "alloy-primitives", "alloy-serde", @@ -461,9 +461,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64f731ad2ef8d7dd75a4d28214f4922a5b683feee1e6df35bd7b427315f94366" +checksum = "45357a642081c8ce235c0ad990c4e9279f5f18a723545076b38cfcc05cc25234" dependencies = [ "alloy-eips", "alloy-primitives", @@ -475,9 +475,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06bd0757bfb3eccde06ee3f4e378f5839fe923d40956cff586018d4427a15bb5" +checksum = "a5afe3ab1038f90faf56304aa0adf1e6a8c9844615d8f83967f932f3a70390b1" dependencies = [ "alloy-primitives", "serde", @@ -485,9 +485,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3d95c3bf03efbb7bdc1d097e2931f520aac47438b709ccd8f065a7793dd371" +checksum = "886d22d41992287a235af2f3af4299b5ced2bcafb81eb835572ad35747476946" dependencies = [ "alloy-consensus", "alloy-eips", @@ -506,9 +506,9 @@ dependencies = [ [[package]] 
name = "alloy-rpc-types-eth" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e855b0daccf2320ba415753c3fed422abe9d3ad5d77b2d6cafcc9bcf32fe387f" +checksum = "00b034779a4850b4b03f5be5ea674a1cf7d746b2da762b34d1860ab45e48ca27" dependencies = [ "alloy-consensus", "alloy-eips", @@ -527,9 +527,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca3753b9894235f915437f908644e737d8714c686ce4e8d03afbf585b23f074" +checksum = "3246948dfa5f5060a9abe04233d741ea656ef076b12958f3242416ce9f375058" dependencies = [ "alloy-eips", "alloy-primitives", @@ -540,9 +540,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae58a997afde032cd021547c960a53eef6245f47969dd71886e9f63fb45a6048" +checksum = "4e5fb6c5c401321f802f69dcdb95b932f30f8158f6798793f914baac5995628e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -554,9 +554,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "667e45c882fda207d4cc94c4bb35e24a23347955113dcb236a5e4e0eaddef826" +checksum = "9ad066b49c3b1b5f64cdd2399177a19926a6a15db2dbf11e2098de621f9e7480" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -566,9 +566,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c2661ca6785add8fc37aff8005439c806ffad58254c19939c6f59ac0d6596e" +checksum = "028e72eaa9703e4882344983cfe7636ce06d8cce104a78ea62fd19b46659efc4" dependencies = [ "alloy-primitives", "arbitrary", @@ -578,9 +578,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = 
"0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eca011160d18a7dc6d8cdc1e8dc13e2e86c908f8e41b02aa76e429d6fe7085" +checksum = "592c185d7100258c041afac51877660c7bf6213447999787197db4842f0e938e" dependencies = [ "alloy-primitives", "async-trait", @@ -592,9 +592,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c54b195a6ee5a83f32e7c697b4e6b565966737ed5a2ef9176bbbb39f720d023" +checksum = "6614f02fc1d5b079b2a4a5320018317b506fd0a6d67c1fd5542a71201724986c" dependencies = [ "alloy-consensus", "alloy-network", @@ -680,9 +680,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4a136e733f55fef0870b81e1f8f1db28e78973d1b1ae5a5df642ba39538a07" +checksum = "be77579633ebbc1266ae6fd7694f75c408beb1aeb6865d0b18f22893c265a061" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -700,9 +700,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a6b358a89b6d107b92d09b61a61fbc04243942182709752c796f4b29402cead" +checksum = "91fd1a5d0827939847983b46f2f79510361f901dc82f8e3c38ac7397af142c6e" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -715,9 +715,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a899c43b7f5e3bc83762dfe5128fccd9cfa99f1f03c5f26bbfb2495ae8dcd35" +checksum = "8073d1186bfeeb8fbdd1292b6f1a0731f3aed8e21e1463905abfae0b96a887a6" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -734,9 +734,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.5.3" +version = "0.5.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d27aac1246e13c9e6fa0c784fbb0c56872c6224f78dbde388bb2213ccdf8af02" +checksum = "61f27837bb4a1d6c83a28231c94493e814882f0e9058648a97e908a5f3fc9fcf" dependencies = [ "alloy-pubsub", "alloy-transport", diff --git a/Cargo.toml b/Cargo.toml index e8f10229ed5..22a78979dfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -432,39 +432,39 @@ alloy-rlp = "0.3.4" alloy-sol-types = "0.8.0" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.5.3", default-features = false } -alloy-eips = { version = "0.5.3", default-features = false } -alloy-genesis = { version = "0.5.3", default-features = false } -alloy-json-rpc = { version = "0.5.3", default-features = false } -alloy-network = { version = "0.5.3", default-features = false } -alloy-network-primitives = { version = "0.5.3", default-features = false } -alloy-node-bindings = { version = "0.5.3", default-features = false } -alloy-provider = { version = "0.5.3", features = [ +alloy-consensus = { version = "0.5.4", default-features = false } +alloy-eips = { version = "0.5.4", default-features = false } +alloy-genesis = { version = "0.5.4", default-features = false } +alloy-json-rpc = { version = "0.5.4", default-features = false } +alloy-network = { version = "0.5.4", default-features = false } +alloy-network-primitives = { version = "0.5.4", default-features = false } +alloy-node-bindings = { version = "0.5.4", default-features = false } +alloy-provider = { version = "0.5.4", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.5.3", default-features = false } -alloy-rpc-client = { version = "0.5.3", default-features = false } -alloy-rpc-types = { version = "0.5.3", features = [ +alloy-pubsub = { version = "0.5.4", default-features = false } +alloy-rpc-client = { version = "0.5.4", default-features = false } +alloy-rpc-types = { version = "0.5.4", features = [ "eth", ], default-features = false } 
-alloy-rpc-types-admin = { version = "0.5.3", default-features = false } -alloy-rpc-types-anvil = { version = "0.5.3", default-features = false } -alloy-rpc-types-beacon = { version = "0.5.3", default-features = false } -alloy-rpc-types-debug = { version = "0.5.3", default-features = false } -alloy-rpc-types-engine = { version = "0.5.3", default-features = false } -alloy-rpc-types-eth = { version = "0.5.3", default-features = false } -alloy-rpc-types-mev = { version = "0.5.3", default-features = false } -alloy-rpc-types-trace = { version = "0.5.3", default-features = false } -alloy-rpc-types-txpool = { version = "0.5.3", default-features = false } -alloy-serde = { version = "0.5.3", default-features = false } -alloy-signer = { version = "0.5.3", default-features = false } -alloy-signer-local = { version = "0.5.3", default-features = false } -alloy-transport = { version = "0.5.3" } -alloy-transport-http = { version = "0.5.3", features = [ +alloy-rpc-types-admin = { version = "0.5.4", default-features = false } +alloy-rpc-types-anvil = { version = "0.5.4", default-features = false } +alloy-rpc-types-beacon = { version = "0.5.4", default-features = false } +alloy-rpc-types-debug = { version = "0.5.4", default-features = false } +alloy-rpc-types-engine = { version = "0.5.4", default-features = false } +alloy-rpc-types-eth = { version = "0.5.4", default-features = false } +alloy-rpc-types-mev = { version = "0.5.4", default-features = false } +alloy-rpc-types-trace = { version = "0.5.4", default-features = false } +alloy-rpc-types-txpool = { version = "0.5.4", default-features = false } +alloy-serde = { version = "0.5.4", default-features = false } +alloy-signer = { version = "0.5.4", default-features = false } +alloy-signer-local = { version = "0.5.4", default-features = false } +alloy-transport = { version = "0.5.4" } +alloy-transport-http = { version = "0.5.4", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.5.3", 
default-features = false } -alloy-transport-ws = { version = "0.5.3", default-features = false } +alloy-transport-ipc = { version = "0.5.4", default-features = false } +alloy-transport-ws = { version = "0.5.4", default-features = false } # op op-alloy-rpc-types = "0.5" From 8a40d5c6aabd8f232b9aaacce0ca474a480a9b64 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 23 Oct 2024 22:40:14 +0900 Subject: [PATCH 113/970] feat(providers): add `AtomicBlockchainProvider` (#11705) Co-authored-by: Matthias Seitz --- crates/chain-state/src/in_memory.rs | 69 +- crates/chain-state/src/lib.rs | 2 +- crates/chain-state/src/memory_overlay.rs | 349 +-- crates/engine/service/src/service.rs | 4 +- .../src/providers/blockchain_provider.rs | 1137 +--------- .../provider/src/providers/consistent.rs | 1871 +++++++++++++++++ crates/storage/provider/src/providers/mod.rs | 3 + 7 files changed, 2220 insertions(+), 1215 deletions(-) create mode 100644 crates/storage/provider/src/providers/consistent.rs diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index be33e1fd79a..a850e66521a 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -4,7 +4,7 @@ use crate::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, ChainInfoTracker, MemoryOverlayStateProvider, }; -use alloy_eips::BlockNumHash; +use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, Address, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; @@ -514,7 +514,7 @@ impl CanonicalInMemoryState { historical: StateProviderBox, ) -> MemoryOverlayStateProvider { let in_memory = if let Some(state) = self.state_by_hash(hash) { - state.chain().into_iter().map(|block_state| block_state.block()).collect() + state.chain().map(|block_state| block_state.block()).collect() } else { Vec::new() }; @@ -692,10 +692,8 @@ impl BlockState { /// Returns a 
vector of `BlockStates` representing the entire in memory chain. /// The block state order in the output vector is newest to oldest (highest to lowest), /// including self as the first element. - pub fn chain(&self) -> Vec<&Self> { - let mut chain = vec![self]; - self.append_parent_chain(&mut chain); - chain + pub fn chain(&self) -> impl Iterator { + std::iter::successors(Some(self), |state| state.parent.as_deref()) } /// Appends the parent chain of this [`BlockState`] to the given vector. @@ -715,10 +713,59 @@ impl BlockState { /// This merges the state of all blocks that are part of the chain that the this block is /// the head of. This includes all blocks that connect back to the canonical block on disk. pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { - let in_memory = self.chain().into_iter().map(|block_state| block_state.block()).collect(); + let in_memory = self.chain().map(|block_state| block_state.block()).collect(); MemoryOverlayStateProvider::new(historical, in_memory) } + + /// Tries to find a block by [`BlockHashOrNumber`] in the chain ending at this block. + pub fn block_on_chain(&self, hash_or_num: BlockHashOrNumber) -> Option<&Self> { + self.chain().find(|block| match hash_or_num { + BlockHashOrNumber::Hash(hash) => block.hash() == hash, + BlockHashOrNumber::Number(number) => block.number() == number, + }) + } + + /// Tries to find a transaction by [`TxHash`] in the chain ending at this block. + pub fn transaction_on_chain(&self, hash: TxHash) -> Option { + self.chain().find_map(|block_state| { + block_state + .block_ref() + .block() + .body + .transactions() + .find(|tx| tx.hash() == hash) + .cloned() + }) + } + + /// Tries to find a transaction with meta by [`TxHash`] in the chain ending at this block. 
+ pub fn transaction_meta_on_chain( + &self, + tx_hash: TxHash, + ) -> Option<(TransactionSigned, TransactionMeta)> { + self.chain().find_map(|block_state| { + block_state + .block_ref() + .block() + .body + .transactions() + .enumerate() + .find(|(_, tx)| tx.hash() == tx_hash) + .map(|(index, tx)| { + let meta = TransactionMeta { + tx_hash, + index: index as u64, + block_hash: block_state.hash(), + block_number: block_state.block_ref().block.number, + base_fee: block_state.block_ref().block.header.base_fee_per_gas, + timestamp: block_state.block_ref().block.timestamp, + excess_blob_gas: block_state.block_ref().block.excess_blob_gas, + }; + (tx.clone(), meta) + }) + }) + } } /// Represents an executed block stored in-memory. @@ -1382,7 +1429,7 @@ mod tests { let parents = single_block.parent_state_chain(); assert_eq!(parents.len(), 0); - let block_state_chain = single_block.chain(); + let block_state_chain = single_block.chain().collect::>(); assert_eq!(block_state_chain.len(), 1); assert_eq!(block_state_chain[0].block().block.number, single_block_number); assert_eq!(block_state_chain[0].block().block.hash(), single_block_hash); @@ -1393,18 +1440,18 @@ mod tests { let mut test_block_builder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 3); - let block_state_chain = chain[2].chain(); + let block_state_chain = chain[2].chain().collect::>(); assert_eq!(block_state_chain.len(), 3); assert_eq!(block_state_chain[0].block().block.number, 3); assert_eq!(block_state_chain[1].block().block.number, 2); assert_eq!(block_state_chain[2].block().block.number, 1); - let block_state_chain = chain[1].chain(); + let block_state_chain = chain[1].chain().collect::>(); assert_eq!(block_state_chain.len(), 2); assert_eq!(block_state_chain[0].block().block.number, 2); assert_eq!(block_state_chain[1].block().block.number, 1); - let block_state_chain = chain[0].chain(); + let block_state_chain = chain[0].chain().collect::>(); 
assert_eq!(block_state_chain.len(), 1); assert_eq!(block_state_chain[0].block().block.number, 1); } diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs index 50a10311107..bd9b43a59ea 100644 --- a/crates/chain-state/src/lib.rs +++ b/crates/chain-state/src/lib.rs @@ -22,7 +22,7 @@ pub use notifications::{ }; mod memory_overlay; -pub use memory_overlay::MemoryOverlayStateProvider; +pub use memory_overlay::{MemoryOverlayStateProvider, MemoryOverlayStateProviderRef}; #[cfg(any(test, feature = "test-utils"))] /// Common test helpers diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index eb125dad115..ada0faee490 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -7,14 +7,26 @@ use alloy_primitives::{ use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateProviderBox, - StateRootProvider, StorageRootProvider, + AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, + StorageRootProvider, }; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; use std::sync::OnceLock; +/// A state provider that stores references to in-memory blocks along with their state as well as a +/// reference of the historical state provider for fallback lookups. +#[allow(missing_debug_implementations)] +pub struct MemoryOverlayStateProviderRef<'a> { + /// Historical state provider for state lookups that are not found in in-memory blocks. + pub(crate) historical: Box, + /// The collection of executed parent blocks. Expected order is newest to oldest. + pub(crate) in_memory: Vec, + /// Lazy-loaded in-memory trie data. 
+ pub(crate) trie_state: OnceLock, +} + /// A state provider that stores references to in-memory blocks along with their state as well as /// the historical state provider for fallback lookups. #[allow(missing_debug_implementations)] @@ -27,193 +39,200 @@ pub struct MemoryOverlayStateProvider { pub(crate) trie_state: OnceLock, } -impl MemoryOverlayStateProvider { - /// Create new memory overlay state provider. - /// - /// ## Arguments - /// - /// - `in_memory` - the collection of executed ancestor blocks in reverse. - /// - `historical` - a historical state provider for the latest ancestor block stored in the - /// database. - pub fn new(historical: Box, in_memory: Vec) -> Self { - Self { historical, in_memory, trie_state: OnceLock::new() } - } - - /// Turn this state provider into a [`StateProviderBox`] - pub fn boxed(self) -> StateProviderBox { - Box::new(self) - } - - /// Return lazy-loaded trie state aggregated from in-memory blocks. - fn trie_state(&self) -> &MemoryOverlayTrieState { - self.trie_state.get_or_init(|| { - let mut trie_state = MemoryOverlayTrieState::default(); - for block in self.in_memory.iter().rev() { - trie_state.state.extend_ref(block.hashed_state.as_ref()); - trie_state.nodes.extend_ref(block.trie.as_ref()); - } - trie_state - }) - } -} +macro_rules! impl_state_provider { + ([$($tokens:tt)*],$type:ty, $historical_type:ty) => { + impl $($tokens)* $type { + /// Create new memory overlay state provider. + /// + /// ## Arguments + /// + /// - `in_memory` - the collection of executed ancestor blocks in reverse. + /// - `historical` - a historical state provider for the latest ancestor block stored in the + /// database. 
+ pub fn new(historical: $historical_type, in_memory: Vec) -> Self { + Self { historical, in_memory, trie_state: OnceLock::new() } + } + + /// Turn this state provider into a state provider + pub fn boxed(self) -> $historical_type { + Box::new(self) + } -impl BlockHashReader for MemoryOverlayStateProvider { - fn block_hash(&self, number: BlockNumber) -> ProviderResult> { - for block in &self.in_memory { - if block.block.number == number { - return Ok(Some(block.block.hash())) + /// Return lazy-loaded trie state aggregated from in-memory blocks. + fn trie_state(&self) -> &MemoryOverlayTrieState { + self.trie_state.get_or_init(|| { + let mut trie_state = MemoryOverlayTrieState::default(); + for block in self.in_memory.iter().rev() { + trie_state.state.extend_ref(block.hashed_state.as_ref()); + trie_state.nodes.extend_ref(block.trie.as_ref()); + } + trie_state + }) } } - self.historical.block_hash(number) - } - - fn canonical_hashes_range( - &self, - start: BlockNumber, - end: BlockNumber, - ) -> ProviderResult> { - let range = start..end; - let mut earliest_block_number = None; - let mut in_memory_hashes = Vec::new(); - for block in &self.in_memory { - if range.contains(&block.block.number) { - in_memory_hashes.insert(0, block.block.hash()); - earliest_block_number = Some(block.block.number); + impl $($tokens)* BlockHashReader for $type { + fn block_hash(&self, number: BlockNumber) -> ProviderResult> { + for block in &self.in_memory { + if block.block.number == number { + return Ok(Some(block.block.hash())) + } + } + + self.historical.block_hash(number) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> ProviderResult> { + let range = start..end; + let mut earliest_block_number = None; + let mut in_memory_hashes = Vec::new(); + for block in &self.in_memory { + if range.contains(&block.block.number) { + in_memory_hashes.insert(0, block.block.hash()); + earliest_block_number = Some(block.block.number); + } + } + + let mut 
hashes = + self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?; + hashes.append(&mut in_memory_hashes); + Ok(hashes) } } - let mut hashes = - self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?; - hashes.append(&mut in_memory_hashes); - Ok(hashes) - } -} + impl $($tokens)* AccountReader for $type { + fn basic_account(&self, address: Address) -> ProviderResult> { + for block in &self.in_memory { + if let Some(account) = block.execution_output.account(&address) { + return Ok(account) + } + } -impl AccountReader for MemoryOverlayStateProvider { - fn basic_account(&self, address: Address) -> ProviderResult> { - for block in &self.in_memory { - if let Some(account) = block.execution_output.account(&address) { - return Ok(account) + self.historical.basic_account(address) } } - self.historical.basic_account(address) - } -} + impl $($tokens)* StateRootProvider for $type { + fn state_root(&self, state: HashedPostState) -> ProviderResult { + self.state_root_from_nodes(TrieInput::from_state(state)) + } -impl StateRootProvider for MemoryOverlayStateProvider { - fn state_root(&self, state: HashedPostState) -> ProviderResult { - self.state_root_from_nodes(TrieInput::from_state(state)) - } - - fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.state_root_from_nodes(input) - } - - fn state_root_with_updates( - &self, - state: HashedPostState, - ) -> ProviderResult<(B256, TrieUpdates)> { - self.state_root_from_nodes_with_updates(TrieInput::from_state(state)) - } - - fn state_root_from_nodes_with_updates( - &self, - mut input: TrieInput, - ) -> ProviderResult<(B256, TrieUpdates)> { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.state_root_from_nodes_with_updates(input) - } -} + fn 
state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.state_root_from_nodes(input) + } -impl StorageRootProvider for MemoryOverlayStateProvider { - // TODO: Currently this does not reuse available in-memory trie nodes. - fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult { - let state = &self.trie_state().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_root(address, hashed_storage) - } - - // TODO: Currently this does not reuse available in-memory trie nodes. - fn storage_proof( - &self, - address: Address, - slot: B256, - storage: HashedStorage, - ) -> ProviderResult { - let state = &self.trie_state().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_proof(address, slot, hashed_storage) - } -} + fn state_root_with_updates( + &self, + state: HashedPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_root_from_nodes_with_updates(TrieInput::from_state(state)) + } -impl StateProofProvider for MemoryOverlayStateProvider { - fn proof( - &self, - mut input: TrieInput, - address: Address, - slots: &[B256], - ) -> ProviderResult { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.proof(input, address, slots) - } - - fn multiproof( - &self, - mut input: TrieInput, - targets: HashMap>, - ) -> ProviderResult { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.multiproof(input, targets) - } - - fn witness( - &self, - mut input: TrieInput, - target: HashedPostState, - ) -> ProviderResult> { - 
let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.witness(input, target) - } -} + fn state_root_from_nodes_with_updates( + &self, + mut input: TrieInput, + ) -> ProviderResult<(B256, TrieUpdates)> { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.state_root_from_nodes_with_updates(input) + } + } + + impl $($tokens)* StorageRootProvider for $type { + // TODO: Currently this does not reuse available in-memory trie nodes. + fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_root(address, hashed_storage) + } -impl StateProvider for MemoryOverlayStateProvider { - fn storage( - &self, - address: Address, - storage_key: StorageKey, - ) -> ProviderResult> { - for block in &self.in_memory { - if let Some(value) = block.execution_output.storage(&address, storage_key.into()) { - return Ok(Some(value)) + // TODO: Currently this does not reuse available in-memory trie nodes. 
+ fn storage_proof( + &self, + address: Address, + slot: B256, + storage: HashedStorage, + ) -> ProviderResult { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_proof(address, slot, hashed_storage) } } - self.historical.storage(address, storage_key) - } + impl $($tokens)* StateProofProvider for $type { + fn proof( + &self, + mut input: TrieInput, + address: Address, + slots: &[B256], + ) -> ProviderResult { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.proof(input, address, slots) + } + + fn multiproof( + &self, + mut input: TrieInput, + targets: HashMap>, + ) -> ProviderResult { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.multiproof(input, targets) + } - fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { - for block in &self.in_memory { - if let Some(contract) = block.execution_output.bytecode(&code_hash) { - return Ok(Some(contract)) + fn witness( + &self, + mut input: TrieInput, + target: HashedPostState, + ) -> ProviderResult> { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.witness(input, target) } } - self.historical.bytecode_by_hash(code_hash) - } + impl $($tokens)* StateProvider for $type { + fn storage( + &self, + address: Address, + storage_key: StorageKey, + ) -> ProviderResult> { + for block in &self.in_memory { + if let Some(value) = block.execution_output.storage(&address, storage_key.into()) { + return Ok(Some(value)) + } + } + + self.historical.storage(address, storage_key) + } + + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { + for block in &self.in_memory { + if let Some(contract) = 
block.execution_output.bytecode(&code_hash) { + return Ok(Some(contract)) + } + } + + self.historical.bytecode_by_hash(code_hash) + } + } + }; } +impl_state_provider!([], MemoryOverlayStateProvider, Box); +impl_state_provider!([<'a>], MemoryOverlayStateProviderRef<'a>, Box); + /// The collection of data necessary for trie-related operations for [`MemoryOverlayStateProvider`]. #[derive(Clone, Default, Debug)] pub(crate) struct MemoryOverlayTrieState { diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 026476a8260..198438d457f 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -151,7 +151,9 @@ mod tests { use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::SealedHeader; - use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; + use reth_provider::{ + providers::BlockchainProvider2, test_utils::create_test_provider_factory_with_chain_spec, + }; use reth_prune::Pruner; use reth_tasks::TokioTaskExecutor; use std::sync::Arc; diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 13215e11a8e..64a8a204a32 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,13 +1,15 @@ +#![allow(unused)] use crate::{ - providers::StaticFileProvider, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, - BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, - CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, - DatabaseProviderFactory, DatabaseProviderRO, EvmEnvProvider, HeaderProvider, ProviderError, + providers::{ConsistentProvider, StaticFileProvider}, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + BlockSource, 
CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, + ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, + DatabaseProviderFactory, EvmEnvProvider, FullProvider, HeaderProvider, ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber}; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ @@ -15,10 +17,10 @@ use reth_chain_state::{ MemoryOverlayStateProvider, }; use reth_chainspec::{ChainInfo, EthereumHardforks}; -use reth_db::models::BlockNumberAddress; +use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; +use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, @@ -27,21 +29,17 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::StorageChangeSetReader; +use reth_storage_api::{DBProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; -use revm::{ - db::states::PlainStorageRevert, - primitives::{BlockEnv, CfgEnvWithHandlerCfg}, -}; +use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ - collections::{hash_map, HashMap}, - ops::{Add, Bound, RangeBounds, RangeInclusive, 
Sub}, + ops::{Add, RangeBounds, RangeInclusive, Sub}, sync::Arc, time::Instant, }; use tracing::trace; -use super::ProviderNodeTypes; +use crate::providers::ProviderNodeTypes; /// The main type for interacting with the blockchain. /// @@ -50,11 +48,11 @@ use super::ProviderNodeTypes; /// type that holds an instance of the database and the blockchain tree. #[derive(Debug)] pub struct BlockchainProvider2 { - /// Provider type used to access the database. - database: ProviderFactory, + /// Provider factory used to access the database. + pub(crate) database: ProviderFactory, /// Tracks the chain info wrt forkchoice updates and in memory canonical /// state. - pub(super) canonical_in_memory_state: CanonicalInMemoryState, + pub(crate) canonical_in_memory_state: CanonicalInMemoryState, } impl Clone for BlockchainProvider2 { @@ -67,15 +65,15 @@ impl Clone for BlockchainProvider2 { } impl BlockchainProvider2 { - /// Create a new provider using only the database, fetching the latest header from - /// the database to initialize the provider. - pub fn new(database: ProviderFactory) -> ProviderResult { - let provider = database.provider()?; + /// Create a new [`BlockchainProvider2`] using only the storage, fetching the latest + /// header from the database to initialize the provider. + pub fn new(storage: ProviderFactory) -> ProviderResult { + let provider = storage.provider()?; let best = provider.chain_info()?; match provider.header_by_number(best.best_number)? { Some(header) => { drop(provider); - Ok(Self::with_latest(database, SealedHeader::new(header, best.best_hash))?) + Ok(Self::with_latest(storage, SealedHeader::new(header, best.best_hash))?) } None => Err(ProviderError::HeaderNotFound(best.best_number.into())), } @@ -86,8 +84,8 @@ impl BlockchainProvider2 { /// /// This returns a `ProviderResult` since it tries the retrieve the last finalized header from /// `database`. 
- pub fn with_latest(database: ProviderFactory, latest: SealedHeader) -> ProviderResult { - let provider = database.provider()?; + pub fn with_latest(storage: ProviderFactory, latest: SealedHeader) -> ProviderResult { + let provider = storage.provider()?; let finalized_header = provider .last_finalized_block_number()? .map(|num| provider.sealed_header(num)) @@ -104,7 +102,7 @@ impl BlockchainProvider2 { .transpose()? .flatten(); Ok(Self { - database, + database: storage, canonical_in_memory_state: CanonicalInMemoryState::with_head( latest, finalized_header, @@ -118,281 +116,12 @@ impl BlockchainProvider2 { self.canonical_in_memory_state.clone() } - // Helper function to convert range bounds - fn convert_range_bounds( - &self, - range: impl RangeBounds, - end_unbounded: impl FnOnce() -> T, - ) -> (T, T) - where - T: Copy + Add + Sub + From, - { - let start = match range.start_bound() { - Bound::Included(&n) => n, - Bound::Excluded(&n) => n + T::from(1u8), - Bound::Unbounded => T::from(0u8), - }; - - let end = match range.end_bound() { - Bound::Included(&n) => n, - Bound::Excluded(&n) => n - T::from(1u8), - Bound::Unbounded => end_unbounded(), - }; - - (start, end) - } - - /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. - /// - /// If the range is empty, or there are no blocks for the given range, then this returns `None`. - pub fn get_state( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - if range.is_empty() { - return Ok(None) - } - let start_block_number = *range.start(); - let end_block_number = *range.end(); - - // We are not removing block meta as it is used to get block changesets. - let mut block_bodies = Vec::new(); - for block_num in range.clone() { - let block_body = self - .block_body_indices(block_num)? 
- .ok_or(ProviderError::BlockBodyIndicesNotFound(block_num))?; - block_bodies.push((block_num, block_body)) - } - - // get transaction receipts - let Some(from_transaction_num) = block_bodies.first().map(|body| body.1.first_tx_num()) - else { - return Ok(None) - }; - let Some(to_transaction_num) = block_bodies.last().map(|body| body.1.last_tx_num()) else { - return Ok(None) - }; - - let mut account_changeset = Vec::new(); - for block_num in range.clone() { - let changeset = - self.account_block_changeset(block_num)?.into_iter().map(|elem| (block_num, elem)); - account_changeset.extend(changeset); - } - - let mut storage_changeset = Vec::new(); - for block_num in range { - let changeset = self.storage_changeset(block_num)?; - storage_changeset.extend(changeset); - } - - let (state, reverts) = - self.populate_bundle_state(account_changeset, storage_changeset, end_block_number)?; - - let mut receipt_iter = - self.receipts_by_tx_range(from_transaction_num..=to_transaction_num)?.into_iter(); - - let mut receipts = Vec::with_capacity(block_bodies.len()); - // loop break if we are at the end of the blocks. - for (_, block_body) in block_bodies { - let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for tx_num in block_body.tx_num_range() { - let receipt = receipt_iter - .next() - .ok_or_else(|| ProviderError::ReceiptNotFound(tx_num.into()))?; - block_receipts.push(Some(receipt)); - } - receipts.push(block_receipts); - } - - Ok(Some(ExecutionOutcome::new_init( - state, - reverts, - // We skip new contracts since we never delete them from the database - Vec::new(), - receipts.into(), - start_block_number, - Vec::new(), - ))) - } - - /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the - /// [`reth_db::PlainAccountState`] and [`reth_db::PlainStorageState`] tables, based on the given - /// storage and account changesets. 
- fn populate_bundle_state( - &self, - account_changeset: Vec<(u64, AccountBeforeTx)>, - storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, - block_range_end: BlockNumber, - ) -> ProviderResult<(BundleStateInit, RevertsInit)> { - let mut state: BundleStateInit = HashMap::new(); - let mut reverts: RevertsInit = HashMap::new(); - let state_provider = self.state_by_block_number_or_tag(block_range_end.into())?; - - // add account changeset changes - for (block_number, account_before) in account_changeset.into_iter().rev() { - let AccountBeforeTx { info: old_info, address } = account_before; - match state.entry(address) { - hash_map::Entry::Vacant(entry) => { - let new_info = state_provider.basic_account(address)?; - entry.insert((old_info, new_info, HashMap::new())); - } - hash_map::Entry::Occupied(mut entry) => { - // overwrite old account state. - entry.get_mut().0 = old_info; - } - } - // insert old info into reverts. - reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); - } - - // add storage changeset changes - for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { - let BlockNumberAddress((block_number, address)) = block_and_address; - // get account state or insert from plain state. - let account_state = match state.entry(address) { - hash_map::Entry::Vacant(entry) => { - let present_info = state_provider.basic_account(address)?; - entry.insert((present_info, present_info, HashMap::new())) - } - hash_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - // match storage. 
- match account_state.2.entry(old_storage.key) { - hash_map::Entry::Vacant(entry) => { - let new_storage_value = - state_provider.storage(address, old_storage.key)?.unwrap_or_default(); - entry.insert((old_storage.value, new_storage_value)); - } - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().0 = old_storage.value; - } - }; - - reverts - .entry(block_number) - .or_default() - .entry(address) - .or_default() - .1 - .push(old_storage); - } - - Ok((state, reverts)) - } - - /// Fetches a range of data from both in-memory state and persistent storage while a predicate - /// is met. - /// - /// Creates a snapshot of the in-memory chain state and database provider to prevent - /// inconsistencies. Splits the range into in-memory and storage sections, prioritizing - /// recent in-memory blocks in case of overlaps. - /// - /// * `fetch_db_range` function (`F`) provides access to the database provider, allowing the - /// user to retrieve the required items from the database using [`RangeInclusive`]. - /// * `map_block_state_item` function (`G`) provides each block of the range in the in-memory - /// state, allowing for selection or filtering for the desired data. - fn get_in_memory_or_storage_by_block_range_while( - &self, - range: impl RangeBounds, - fetch_db_range: F, - map_block_state_item: G, - mut predicate: P, - ) -> ProviderResult> - where - F: FnOnce( - &DatabaseProviderRO, - RangeInclusive, - &mut P, - ) -> ProviderResult>, - G: Fn(Arc, &mut P) -> Option, - P: FnMut(&T) -> bool, - { - // Each one provides a snapshot at the time of instantiation, but its order matters. - // - // If we acquire first the database provider, it's possible that before the in-memory chain - // snapshot is instantiated, it will flush blocks to disk. This would - // mean that our database provider would not have access to the flushed blocks (since it's - // working under an older view), while the in-memory state may have deleted them - // entirely. 
Resulting in gaps on the range. - let mut in_memory_chain = - self.canonical_in_memory_state.canonical_chain().collect::>(); - let db_provider = self.database_provider_ro()?; - - let (start, end) = self.convert_range_bounds(range, || { - // the first block is the highest one. - in_memory_chain - .first() - .map(|b| b.number()) - .unwrap_or_else(|| db_provider.last_block_number().unwrap_or_default()) - }); - - if start > end { - return Ok(vec![]) - } - - // Split range into storage_range and in-memory range. If the in-memory range is not - // necessary drop it early. - // - // The last block of `in_memory_chain` is the lowest block number. - let (in_memory, storage_range) = match in_memory_chain.last().as_ref().map(|b| b.number()) { - Some(lowest_memory_block) if lowest_memory_block <= end => { - let highest_memory_block = - in_memory_chain.first().as_ref().map(|b| b.number()).expect("qed"); - - // Database will for a time overlap with in-memory-chain blocks. In - // case of a re-org, it can mean that the database blocks are of a forked chain, and - // so, we should prioritize the in-memory overlapped blocks. - let in_memory_range = - lowest_memory_block.max(start)..=end.min(highest_memory_block); - - // If requested range is in the middle of the in-memory range, remove the necessary - // lowest blocks - in_memory_chain.truncate( - in_memory_chain - .len() - .saturating_sub(start.saturating_sub(lowest_memory_block) as usize), - ); - - let storage_range = - (lowest_memory_block > start).then(|| start..=lowest_memory_block - 1); - - (Some((in_memory_chain, in_memory_range)), storage_range) - } - _ => { - // Drop the in-memory chain so we don't hold blocks in memory. 
- drop(in_memory_chain); - - (None, Some(start..=end)) - } - }; - - let mut items = Vec::with_capacity((end - start + 1) as usize); - - if let Some(storage_range) = storage_range { - let mut db_items = fetch_db_range(&db_provider, storage_range.clone(), &mut predicate)?; - items.append(&mut db_items); - - // The predicate was not met, if the number of items differs from the expected. So, we - // return what we have. - if items.len() as u64 != storage_range.end() - storage_range.start() + 1 { - return Ok(items) - } - } - - if let Some((in_memory_chain, in_memory_range)) = in_memory { - for (num, block) in in_memory_range.zip(in_memory_chain.into_iter().rev()) { - debug_assert!(num == block.number()); - if let Some(item) = map_block_state_item(block, &mut predicate) { - items.push(item); - } else { - break - } - } - } - - Ok(items) + /// Returns a provider with a created `DbTx` inside, which allows fetching data from the + /// database using different types of providers. Example: [`HeaderProvider`] + /// [`BlockHashReader`]. This may fail if the inner read database transaction fails to open. + #[track_caller] + pub fn consistent_provider(&self) -> ProviderResult> { + ConsistentProvider::new(self.database.clone(), self.canonical_in_memory_state()) } /// This uses a given [`BlockState`] to initialize a state provider for that block. @@ -405,222 +134,14 @@ impl BlockchainProvider2 { Ok(state.state_provider(latest_historical)) } - /// Fetches data from either in-memory state or persistent storage for a range of transactions. + /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. /// - /// * `fetch_from_db`: has a [`DatabaseProviderRO`] and the storage specific range. - /// * `fetch_from_block_state`: has a [`RangeInclusive`] of elements that should be fetched from - /// [`BlockState`]. [`RangeInclusive`] is necessary to handle partial look-ups of a block. 
- fn get_in_memory_or_storage_by_tx_range( - &self, - range: impl RangeBounds, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult> - where - S: FnOnce( - DatabaseProviderRO, - RangeInclusive, - ) -> ProviderResult>, - M: Fn(RangeInclusive, Arc) -> ProviderResult>, - { - let in_mem_chain = self.canonical_in_memory_state.canonical_chain().collect::>(); - let provider = self.database.provider()?; - - // Get the last block number stored in the storage which does NOT overlap with in-memory - // chain. - let last_database_block_number = in_mem_chain - .last() - .map(|b| Ok(b.anchor().number)) - .unwrap_or_else(|| provider.last_block_number())?; - - // Get the next tx number for the last block stored in the storage, which marks the start of - // the in-memory state. - let last_block_body_index = provider - .block_body_indices(last_database_block_number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; - let mut in_memory_tx_num = last_block_body_index.next_tx_num(); - - let (start, end) = self.convert_range_bounds(range, || { - in_mem_chain - .iter() - .map(|b| b.block_ref().block().body.transactions.len() as u64) - .sum::() + - last_block_body_index.last_tx_num() - }); - - if start > end { - return Ok(vec![]) - } - - let mut tx_range = start..=end; - - // If the range is entirely before the first in-memory transaction number, fetch from - // storage - if *tx_range.end() < in_memory_tx_num { - return fetch_from_db(provider, tx_range); - } - - let mut items = Vec::with_capacity((tx_range.end() - tx_range.start() + 1) as usize); - - // If the range spans storage and memory, get elements from storage first. - if *tx_range.start() < in_memory_tx_num { - // Determine the range that needs to be fetched from storage. 
- let db_range = *tx_range.start()..=in_memory_tx_num.saturating_sub(1); - - // Set the remaining transaction range for in-memory - tx_range = in_memory_tx_num..=*tx_range.end(); - - items.extend(fetch_from_db(provider, db_range)?); - } - - // Iterate from the lowest block to the highest in-memory chain - for block_state in in_mem_chain.into_iter().rev() { - let block_tx_count = block_state.block_ref().block().body.transactions.len(); - let remaining = (tx_range.end() - tx_range.start() + 1) as usize; - - // If the transaction range start is equal or higher than the next block first - // transaction, advance - if *tx_range.start() >= in_memory_tx_num + block_tx_count as u64 { - in_memory_tx_num += block_tx_count as u64; - continue - } - - // This should only be more than 0 once, in case of a partial range inside a block. - let skip = (tx_range.start() - in_memory_tx_num) as usize; - - items.extend(fetch_from_block_state( - skip..=skip + (remaining.min(block_tx_count - skip) - 1), - block_state, - )?); - - in_memory_tx_num += block_tx_count as u64; - - // Break if the range has been fully processed - if in_memory_tx_num > *tx_range.end() { - break - } - - // Set updated range - tx_range = in_memory_tx_num..=*tx_range.end(); - } - - Ok(items) - } - - /// Fetches data from either in-memory state or persistent storage by transaction - /// [`HashOrNumber`]. - fn get_in_memory_or_storage_by_tx( - &self, - id: HashOrNumber, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult> - where - S: FnOnce(DatabaseProviderRO) -> ProviderResult>, - M: Fn(usize, TxNumber, Arc) -> ProviderResult>, - { - // Order of instantiation matters. More information on: - // `get_in_memory_or_storage_by_block_range_while`. - let in_mem_chain = self.canonical_in_memory_state.canonical_chain().collect::>(); - let provider = self.database.provider()?; - - // Get the last block number stored in the database which does NOT overlap with in-memory - // chain. 
- let last_database_block_number = in_mem_chain - .last() - .map(|b| Ok(b.anchor().number)) - .unwrap_or_else(|| provider.last_block_number())?; - - // Get the next tx number for the last block stored in the database and consider it the - // first tx number of the in-memory state - let last_block_body_index = provider - .block_body_indices(last_database_block_number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; - let mut in_memory_tx_num = last_block_body_index.next_tx_num(); - - // If the transaction number is less than the first in-memory transaction number, make a - // database lookup - if let HashOrNumber::Number(id) = id { - if id < in_memory_tx_num { - return fetch_from_db(provider) - } - } - - // Iterate from the lowest block to the highest - for block_state in in_mem_chain.into_iter().rev() { - let executed_block = block_state.block_ref(); - let block = executed_block.block(); - - for tx_index in 0..block.body.transactions.len() { - match id { - HashOrNumber::Hash(tx_hash) => { - if tx_hash == block.body.transactions[tx_index].hash() { - return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) - } - } - HashOrNumber::Number(id) => { - if id == in_memory_tx_num { - return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) - } - } - } - - in_memory_tx_num += 1; - } - } - - // Not found in-memory, so check database. - if let HashOrNumber::Hash(_) = id { - return fetch_from_db(provider) - } - - Ok(None) - } - - /// Fetches data from either in-memory state or persistent storage by [`BlockHashOrNumber`]. - fn get_in_memory_or_storage_by_block( + /// If the range is empty, or there are no blocks for the given range, then this returns `None`. 
+ pub fn get_state( &self, - id: BlockHashOrNumber, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult - where - S: FnOnce(DatabaseProviderRO) -> ProviderResult, - M: Fn(Arc) -> ProviderResult, - { - let block_state = match id { - BlockHashOrNumber::Hash(block_hash) => { - self.canonical_in_memory_state.state_by_hash(block_hash) - } - BlockHashOrNumber::Number(block_number) => { - self.canonical_in_memory_state.state_by_number(block_number) - } - }; - - if let Some(block_state) = block_state { - return fetch_from_block_state(block_state) - } - fetch_from_db(self.database_provider_ro()?) - } -} - -impl BlockchainProvider2 { - /// Ensures that the given block number is canonical (synced) - /// - /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are - /// out of range and would lead to invalid results, mainly during initial sync. - /// - /// Verifying the `block_number` would be expensive since we need to lookup sync table - /// Instead, we ensure that the `block_number` is within the range of the - /// [`Self::best_block_number`] which is updated when a block is synced. 
- #[inline] - fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { - let latest = self.best_block_number()?; - if block_number > latest { - Err(ProviderError::HeaderNotFound(block_number.into())) - } else { - Ok(()) - } + range: RangeInclusive, + ) -> ProviderResult> { + self.consistent_provider()?.get_state(range) } } @@ -646,78 +167,34 @@ impl StaticFileProviderFactory for BlockchainProvider2 impl HeaderProvider for BlockchainProvider2 { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - (*block_hash).into(), - |db_provider| db_provider.header(block_hash), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), - ) + self.consistent_provider()?.header(block_hash) } fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - num.into(), - |db_provider| db_provider.header_by_number(num), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), - ) + self.consistent_provider()?.header_by_number(num) } fn header_td(&self, hash: &BlockHash) -> ProviderResult> { - if let Some(num) = self.block_number(*hash)? { - self.header_td_by_number(num) - } else { - Ok(None) - } + self.consistent_provider()?.header_td(hash) } fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - let number = if self.canonical_in_memory_state.hash_by_number(number).is_some() { - // If the block exists in memory, we should return a TD for it. - // - // The canonical in memory state should only store post-merge blocks. Post-merge blocks - // have zero difficulty. This means we can use the total difficulty for the last - // finalized block number if present (so that we are not affected by reorgs), if not the - // last number in the database will be used. 
- if let Some(last_finalized_num_hash) = - self.canonical_in_memory_state.get_finalized_num_hash() - { - last_finalized_num_hash.number - } else { - self.last_block_number()? - } - } else { - // Otherwise, return what we have on disk for the input block - number - }; - self.database.header_td_by_number(number) + self.consistent_provider()?.header_td_by_number(number) } fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.header().clone()), - |_| true, - ) + self.consistent_provider()?.headers_range(range) } fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.sealed_header(number), - |block_state| Ok(Some(block_state.block_ref().block().header.clone())), - ) + self.consistent_provider()?.sealed_header(number) } fn sealed_headers_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.sealed_headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.clone()), - |_| true, - ) + self.consistent_provider()?.sealed_headers_range(range) } fn sealed_headers_while( @@ -725,25 +202,13 @@ impl HeaderProvider for BlockchainProvider2 { range: impl RangeBounds, predicate: impl FnMut(&SealedHeader) -> bool, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), - |block_state, predicate| { - let header = &block_state.block_ref().block().header; - predicate(header).then(|| header.clone()) - }, - predicate, - ) + self.consistent_provider()?.sealed_headers_while(range, predicate) } } impl BlockHashReader for BlockchainProvider2 { fn 
block_hash(&self, number: u64) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.block_hash(number), - |block_state| Ok(Some(block_state.hash())), - ) + self.consistent_provider()?.block_hash(number) } fn canonical_hashes_range( @@ -751,15 +216,7 @@ impl BlockHashReader for BlockchainProvider2 { start: BlockNumber, end: BlockNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - start..end, - |db_provider, inclusive_range, _| { - db_provider - .canonical_hashes_range(*inclusive_range.start(), *inclusive_range.end() + 1) - }, - |block_state, _| Some(block_state.hash()), - |_| true, - ) + self.consistent_provider()?.canonical_hashes_range(start, end) } } @@ -777,11 +234,7 @@ impl BlockNumReader for BlockchainProvider2 { } fn block_number(&self, hash: B256) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - hash.into(), - |db_provider| db_provider.block_number(hash), - |block_state| Ok(Some(block_state.number())), - ) + self.consistent_provider()?.block_number(hash) } } @@ -801,28 +254,11 @@ impl BlockIdReader for BlockchainProvider2 { impl BlockReader for BlockchainProvider2 { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { - match source { - BlockSource::Any | BlockSource::Canonical => { - // Note: it's fine to return the unsealed block because the caller already has - // the hash - self.get_in_memory_or_storage_by_block( - hash.into(), - |db_provider| db_provider.find_block_by_hash(hash, source), - |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), - ) - } - BlockSource::Pending => { - Ok(self.canonical_in_memory_state.pending_block().map(|block| block.unseal())) - } - } + self.consistent_provider()?.find_block_by_hash(hash, source) } fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.block(id), - |block_state| 
Ok(Some(block_state.block_ref().block().clone().unseal())), - ) + self.consistent_provider()?.block(id) } fn pending_block(&self) -> ProviderResult> { @@ -838,51 +274,14 @@ impl BlockReader for BlockchainProvider2 { } fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.ommers(id), - |block_state| { - if self.chain_spec().final_paris_total_difficulty(block_state.number()).is_some() { - return Ok(Some(Vec::new())) - } - - Ok(Some(block_state.block_ref().block().body.ommers.clone())) - }, - ) + self.consistent_provider()?.ommers(id) } fn block_body_indices( &self, number: BlockNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.block_body_indices(number), - |block_state| { - // Find the last block indices on database - let last_storage_block_number = block_state.anchor().number; - let mut stored_indices = self - .database - .block_body_indices(last_storage_block_number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(last_storage_block_number))?; - - // Prepare our block indices - stored_indices.first_tx_num = stored_indices.next_tx_num(); - stored_indices.tx_count = 0; - - // Iterate from the lowest block in memory until our target block - for state in block_state.chain().into_iter().rev() { - let block_tx_count = state.block_ref().block.body.transactions.len() as u64; - if state.block_ref().block().number == number { - stored_indices.tx_count = block_tx_count; - } else { - stored_indices.first_tx_num += block_tx_count; - } - } - - Ok(Some(stored_indices)) - }, - ) + self.consistent_provider()?.block_body_indices(number) } /// Returns the block with senders with matching number or hash from database. 
@@ -896,11 +295,7 @@ impl BlockReader for BlockchainProvider2 { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.block_with_senders())), - ) + self.consistent_provider()?.block_with_senders(id, transaction_kind) } fn sealed_block_with_senders( @@ -908,259 +303,116 @@ impl BlockReader for BlockchainProvider2 { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.sealed_block_with_senders())), - ) + self.consistent_provider()?.sealed_block_with_senders(id, transaction_kind) } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.block_range(range), - |block_state, _| Some(block_state.block_ref().block().clone().unseal()), - |_| true, - ) + self.consistent_provider()?.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.block_with_senders_range(range), - |block_state, _| Some(block_state.block_with_senders()), - |_| true, - ) + self.consistent_provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), - |block_state, _| Some(block_state.sealed_block_with_senders()), - |_| true, - ) + self.consistent_provider()?.sealed_block_with_senders_range(range) } } impl TransactionsProvider for BlockchainProvider2 { fn 
transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - tx_hash.into(), - |db_provider| db_provider.transaction_id(tx_hash), - |_, tx_number, _| Ok(Some(tx_number)), - ) + self.consistent_provider()?.transaction_id(tx_hash) } fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_by_id(id), - |tx_index, _, block_state| { - Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) - }, - ) + self.consistent_provider()?.transaction_by_id(id) } fn transaction_by_id_no_hash( &self, id: TxNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_by_id_no_hash(id), - |tx_index, _, block_state| { - Ok(block_state - .block_ref() - .block() - .body - .transactions - .get(tx_index) - .cloned() - .map(Into::into)) - }, - ) + self.consistent_provider()?.transaction_by_id_no_hash(id) } fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - if let Some(tx) = self.canonical_in_memory_state.transaction_by_hash(hash) { - return Ok(Some(tx)) - } - - self.database.transaction_by_hash(hash) + self.consistent_provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, ) -> ProviderResult> { - if let Some((tx, meta)) = - self.canonical_in_memory_state.transaction_by_hash_with_meta(tx_hash) - { - return Ok(Some((tx, meta))) - } - - self.database.transaction_by_hash_with_meta(tx_hash) + self.consistent_provider()?.transaction_by_hash_with_meta(tx_hash) } fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_block(id), - |_, _, block_state| Ok(Some(block_state.block_ref().block().number)), - ) + self.consistent_provider()?.transaction_block(id) } fn transactions_by_block( &self, id: BlockHashOrNumber, ) -> ProviderResult>> { - 
self.get_in_memory_or_storage_by_block( - id, - |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), - ) + self.consistent_provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, ) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), - |_| true, - ) + self.consistent_provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), - |index_range, block_state| { - Ok(block_state.block_ref().block().body.transactions[index_range] - .iter() - .cloned() - .map(Into::into) - .collect()) - }, - ) + self.consistent_provider()?.transactions_by_tx_range(range) } fn senders_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.senders_by_tx_range(db_range), - |index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()), - ) + self.consistent_provider()?.senders_by_tx_range(range) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_sender(id), - |tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()), - ) + self.consistent_provider()?.transaction_sender(id) } } impl ReceiptProvider for BlockchainProvider2 { fn receipt(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.receipt(id), - |tx_index, _, block_state| { - 
Ok(block_state.executed_block_receipts().get(tx_index).cloned()) - }, - ) + self.consistent_provider()?.receipt(id) } fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - for block_state in self.canonical_in_memory_state.canonical_chain() { - let executed_block = block_state.block_ref(); - let block = executed_block.block(); - let receipts = block_state.executed_block_receipts(); - - // assuming 1:1 correspondence between transactions and receipts - debug_assert_eq!( - block.body.transactions.len(), - receipts.len(), - "Mismatch between transaction and receipt count" - ); - - if let Some(tx_index) = block.body.transactions.iter().position(|tx| tx.hash() == hash) - { - // safe to use tx_index for receipts due to 1:1 correspondence - return Ok(receipts.get(tx_index).cloned()); - } - } - - self.database.receipt_by_hash(hash) + self.consistent_provider()?.receipt_by_hash(hash) } fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - block, - |db_provider| db_provider.receipts_by_block(block), - |block_state| Ok(Some(block_state.executed_block_receipts())), - ) + self.consistent_provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), - |index_range, block_state| { - Ok(block_state.executed_block_receipts().drain(index_range).collect()) - }, - ) + self.consistent_provider()?.receipts_by_tx_range(range) } } impl ReceiptProviderIdExt for BlockchainProvider2 { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { - match block { - BlockId::Hash(rpc_block_hash) => { - let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; - if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { - let block_state = self - .canonical_in_memory_state - 
.state_by_hash(rpc_block_hash.block_hash) - .ok_or(ProviderError::StateForHashNotFound(rpc_block_hash.block_hash))?; - receipts = Some(block_state.executed_block_receipts()); - } - Ok(receipts) - } - BlockId::Number(num_tag) => match num_tag { - BlockNumberOrTag::Pending => Ok(self - .canonical_in_memory_state - .pending_state() - .map(|block_state| block_state.executed_block_receipts())), - _ => { - if let Some(num) = self.convert_block_number(num_tag)? { - self.receipts_by_block(num.into()) - } else { - Ok(None) - } - } - }, - } + self.consistent_provider()?.receipts_by_block_id(block) } } @@ -1170,47 +422,25 @@ impl WithdrawalsProvider for BlockchainProvider2 { id: BlockHashOrNumber, timestamp: u64, ) -> ProviderResult> { - if !self.chain_spec().is_shanghai_active_at_timestamp(timestamp) { - return Ok(None) - } - - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.withdrawals_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()), - ) + self.consistent_provider()?.withdrawals_by_block(id, timestamp) } fn latest_withdrawal(&self) -> ProviderResult> { - let best_block_num = self.best_block_number()?; - - self.get_in_memory_or_storage_by_block( - best_block_num.into(), - |db_provider| db_provider.latest_withdrawal(), - |block_state| { - Ok(block_state - .block_ref() - .block() - .body - .withdrawals - .clone() - .and_then(|mut w| w.pop())) - }, - ) + self.consistent_provider()?.latest_withdrawal() } } impl StageCheckpointReader for BlockchainProvider2 { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { - self.database.provider()?.get_stage_checkpoint(id) + self.consistent_provider()?.get_stage_checkpoint(id) } fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - self.database.provider()?.get_stage_checkpoint_progress(id) + self.consistent_provider()?.get_stage_checkpoint_progress(id) } fn get_all_checkpoints(&self) -> ProviderResult> { - 
self.database.provider()?.get_all_checkpoints() + self.consistent_provider()?.get_all_checkpoints() } } @@ -1225,9 +455,7 @@ impl EvmEnvProvider for BlockchainProvider2 { where EvmConfig: ConfigureEvmEnv
, { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - self.fill_env_with_header(cfg, block_env, &header, evm_config) + self.consistent_provider()?.fill_env_at(cfg, block_env, at, evm_config) } fn fill_env_with_header( @@ -1240,11 +468,7 @@ impl EvmEnvProvider for BlockchainProvider2 { where EvmConfig: ConfigureEvmEnv
, { - let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); - Ok(()) + self.consistent_provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } fn fill_cfg_env_at( @@ -1256,9 +480,7 @@ impl EvmEnvProvider for BlockchainProvider2 { where EvmConfig: ConfigureEvmEnv
, { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - self.fill_cfg_env_with_header(cfg, &header, evm_config) + self.consistent_provider()?.fill_cfg_env_at(cfg, at, evm_config) } fn fill_cfg_env_with_header( @@ -1270,11 +492,7 @@ impl EvmEnvProvider for BlockchainProvider2 { where EvmConfig: ConfigureEvmEnv
, { - let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - evm_config.fill_cfg_env(cfg, header, total_difficulty); - Ok(()) + self.consistent_provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } } @@ -1283,11 +501,11 @@ impl PruneCheckpointReader for BlockchainProvider2 { &self, segment: PruneSegment, ) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoint(segment) + self.consistent_provider()?.get_prune_checkpoint(segment) } fn get_prune_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoints() + self.consistent_provider()?.get_prune_checkpoints() } } @@ -1318,8 +536,9 @@ impl StateProviderFactory for BlockchainProvider2 { block_number: BlockNumber, ) -> ProviderResult { trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); - self.ensure_canonical_block(block_number)?; - let hash = self + let provider = self.consistent_provider()?; + provider.ensure_canonical_block(block_number)?; + let hash = provider .block_hash(block_number)? 
.ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; self.history_by_block_hash(hash) @@ -1328,14 +547,11 @@ impl StateProviderFactory for BlockchainProvider2 { fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - self.get_in_memory_or_storage_by_block( + self.consistent_provider()?.get_in_memory_or_storage_by_block( block_hash.into(), - |_| { - // TODO(joshie): port history_by_block_hash to DatabaseProvider and use db_provider - self.database.history_by_block_hash(block_hash) - }, + |_| self.database.history_by_block_hash(block_hash), |block_state| { - let state_provider = self.block_state_provider(&block_state)?; + let state_provider = self.block_state_provider(block_state)?; Ok(Box::new(state_provider)) }, ) @@ -1444,105 +660,35 @@ where } } -impl BlockReaderIdExt for BlockchainProvider2 +impl BlockReaderIdExt for BlockchainProvider2 where Self: BlockReader + ReceiptProviderIdExt, { fn block_by_id(&self, id: BlockId) -> ProviderResult> { - match id { - BlockId::Number(num) => self.block_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: should we only apply this for the RPCs that are listed in EIP-1898? - // so not at the provider level? 
- // if we decide to do this at a higher level, then we can make this an automatic - // trait impl - if Some(true) == hash.require_canonical { - // check the database, canonical blocks are only stored in the database - self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) - } else { - self.block_by_hash(hash.block_hash) - } - } - } + self.consistent_provider()?.block_by_id(id) } fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { - Ok(match id { - BlockNumberOrTag::Latest => { - Some(self.canonical_in_memory_state.get_canonical_head().unseal()) - } - BlockNumberOrTag::Finalized => { - self.canonical_in_memory_state.get_finalized_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Safe => { - self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Earliest => self.header_by_number(0)?, - BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(), - - BlockNumberOrTag::Number(num) => self.header_by_number(num)?, - }) + self.consistent_provider()?.header_by_number_or_tag(id) } fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, ) -> ProviderResult> { - match id { - BlockNumberOrTag::Latest => { - Ok(Some(self.canonical_in_memory_state.get_canonical_head())) - } - BlockNumberOrTag::Finalized => { - Ok(self.canonical_in_memory_state.get_finalized_header()) - } - BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), - BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - 
), - } + self.consistent_provider()?.sealed_header_by_number_or_tag(id) } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), - }) + self.consistent_provider()?.sealed_header_by_id(id) } fn header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?, - }) + self.consistent_provider()?.header_by_id(id) } fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { - match id { - BlockId::Number(num) => self.ommers_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: EIP-1898 question, see above - // here it is not handled - self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) - } - } + self.consistent_provider()?.ommers_by_id(id) } } @@ -1569,49 +715,7 @@ impl StorageChangeSetReader for BlockchainProvider2 { &self, block_number: BlockNumber, ) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block_number) { - let changesets = state - .block() - .execution_output - .bundle - .reverts - .clone() - .into_plain_state_reverts() - .storage - .into_iter() - .flatten() - .flat_map(|revert: PlainStorageRevert| { - revert.storage_revert.into_iter().map(move |(key, value)| { - ( - BlockNumberAddress((block_number, revert.address)), - StorageEntry { key: key.into(), value: value.to_previous_value() }, - ) - }) - }) - .collect(); - Ok(changesets) - } else { - // Perform checks on whether or not changesets exist for the block. 
- let provider = self.database.provider()?; - - // No prune checkpoint means history should exist and we should `unwrap_or(true)` - let storage_history_exists = provider - .get_prune_checkpoint(PruneSegment::StorageHistory)? - .and_then(|checkpoint| { - // return true if the block number is ahead of the prune checkpoint. - // - // The checkpoint stores the highest pruned block number, so we should make - // sure the block_number is strictly greater. - checkpoint.block_number.map(|checkpoint| block_number > checkpoint) - }) - .unwrap_or(true); - - if !storage_history_exists { - return Err(ProviderError::StateAtBlockPruned(block_number)) - } - - provider.storage_changeset(block_number) - } + self.consistent_provider()?.storage_changeset(block_number) } } @@ -1620,50 +724,14 @@ impl ChangeSetReader for BlockchainProvider2 { &self, block_number: BlockNumber, ) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block_number) { - let changesets = state - .block_ref() - .execution_output - .bundle - .reverts - .clone() - .into_plain_state_reverts() - .accounts - .into_iter() - .flatten() - .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) - .collect(); - Ok(changesets) - } else { - // Perform checks on whether or not changesets exist for the block. - let provider = self.database.provider()?; - // No prune checkpoint means history should exist and we should `unwrap_or(true)` - let account_history_exists = provider - .get_prune_checkpoint(PruneSegment::AccountHistory)? - .and_then(|checkpoint| { - // return true if the block number is ahead of the prune checkpoint. - // - // The checkpoint stores the highest pruned block number, so we should make - // sure the block_number is strictly greater. 
- checkpoint.block_number.map(|checkpoint| block_number > checkpoint) - }) - .unwrap_or(true); - - if !account_history_exists { - return Err(ProviderError::StateAtBlockPruned(block_number)) - } - - provider.account_block_changeset(block_number) - } + self.consistent_provider()?.account_block_changeset(block_number) } } impl AccountReader for BlockchainProvider2 { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { - // use latest state provider - let state_provider = self.latest()?; - state_provider.basic_account(address) + self.consistent_provider()?.basic_account(address) } } @@ -1678,12 +746,7 @@ impl StateReader for BlockchainProvider2 { /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the /// first place. fn get_state(&self, block: BlockNumber) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block) { - let state = state.block_ref().execution_outcome().clone(); - Ok(Some(state)) - } else { - self.get_state(block..=block) - } + StateReader::get_state(&self.consistent_provider()?, block) } } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs new file mode 100644 index 00000000000..d6847fa1b8f --- /dev/null +++ b/crates/storage/provider/src/providers/consistent.rs @@ -0,0 +1,1871 @@ +use super::{DatabaseProviderRO, ProviderFactory, ProviderNodeTypes}; +use crate::{ + providers::StaticFileProvider, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, + BlockReader, BlockReaderIdExt, BlockSource, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, + HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, + StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, +}; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, 
HashOrNumber}; +use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; +use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_db::models::BlockNumberAddress; +use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; +use reth_evm::ConfigureEvmEnv; +use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; +use reth_primitives::{ + Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Withdrawal, Withdrawals, +}; +use reth_prune_types::{PruneCheckpoint, PruneSegment}; +use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::{DatabaseProviderFactory, StateProvider, StorageChangeSetReader}; +use reth_storage_errors::provider::ProviderResult; +use revm::{ + db::states::PlainStorageRevert, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, +}; +use std::{ + collections::{hash_map, HashMap}, + ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, + sync::Arc, +}; +use tracing::trace; + +/// Type that interacts with a snapshot view of the blockchain (storage and in-memory) at time of +/// instantiation, EXCEPT for pending, safe and finalized block which might change while holding +/// this provider. +/// +/// CAUTION: Avoid holding this provider for too long or the inner database transaction will +/// time-out. +#[derive(Debug)] +pub struct ConsistentProvider { + /// Storage provider. + storage_provider: as DatabaseProviderFactory>::Provider, + /// Head block at time of [`Self`] creation + head_block: Option>, + /// In-memory canonical state. This is not a snapshot, and can change! Use with caution. 
+ canonical_in_memory_state: CanonicalInMemoryState, +} + +impl ConsistentProvider { + /// Create a new provider using [`ProviderFactory`] and [`CanonicalInMemoryState`], + /// + /// Underneath it will take a snapshot by fetching [`CanonicalInMemoryState::head_state`] and + /// [`ProviderFactory::database_provider_ro`] effectively maintaining one single snapshotted + /// view of memory and database. + pub fn new( + storage_provider_factory: ProviderFactory, + state: CanonicalInMemoryState, + ) -> ProviderResult { + // Each one provides a snapshot at the time of instantiation, but its order matters. + // + // If we acquire first the database provider, it's possible that before the in-memory chain + // snapshot is instantiated, it will flush blocks to disk. This would + // mean that our database provider would not have access to the flushed blocks (since it's + // working under an older view), while the in-memory state may have deleted them + // entirely. Resulting in gaps on the range. + let head_block = state.head_state(); + let storage_provider = storage_provider_factory.database_provider_ro()?; + Ok(Self { storage_provider, head_block, canonical_in_memory_state: state }) + } + + // Helper function to convert range bounds + fn convert_range_bounds( + &self, + range: impl RangeBounds, + end_unbounded: impl FnOnce() -> T, + ) -> (T, T) + where + T: Copy + Add + Sub + From, + { + let start = match range.start_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n + T::from(1u8), + Bound::Unbounded => T::from(0u8), + }; + + let end = match range.end_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n - T::from(1u8), + Bound::Unbounded => end_unbounded(), + }; + + (start, end) + } + + /// Storage provider for latest block + fn latest_ref<'a>(&'a self) -> ProviderResult> { + trace!(target: "providers::blockchain", "Getting latest block state provider"); + + // use latest state provider if the head state exists + if let Some(state) = 
&self.head_block { + trace!(target: "providers::blockchain", "Using head state for latest state provider"); + Ok(self.block_state_provider_ref(state)?.boxed()) + } else { + trace!(target: "providers::blockchain", "Using database state for latest state provider"); + self.storage_provider.latest() + } + } + + fn history_by_block_hash_ref<'a>( + &'a self, + block_hash: BlockHash, + ) -> ProviderResult> { + trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); + + self.get_in_memory_or_storage_by_block( + block_hash.into(), + |_| self.storage_provider.history_by_block_hash(block_hash), + |block_state| { + let state_provider = self.block_state_provider_ref(block_state)?; + Ok(Box::new(state_provider)) + }, + ) + } + + /// Returns a state provider indexed by the given block number or tag. + fn state_by_block_number_ref<'a>( + &'a self, + number: BlockNumber, + ) -> ProviderResult> { + let hash = + self.block_hash(number)?.ok_or_else(|| ProviderError::HeaderNotFound(number.into()))?; + self.history_by_block_hash_ref(hash) + } + + /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. + /// + /// If the range is empty, or there are no blocks for the given range, then this returns `None`. + pub fn get_state( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + if range.is_empty() { + return Ok(None) + } + let start_block_number = *range.start(); + let end_block_number = *range.end(); + + // We are not removing block meta as it is used to get block changesets. + let mut block_bodies = Vec::new(); + for block_num in range.clone() { + let block_body = self + .block_body_indices(block_num)? 
+ .ok_or(ProviderError::BlockBodyIndicesNotFound(block_num))?; + block_bodies.push((block_num, block_body)) + } + + // get transaction receipts + let Some(from_transaction_num) = block_bodies.first().map(|body| body.1.first_tx_num()) + else { + return Ok(None) + }; + let Some(to_transaction_num) = block_bodies.last().map(|body| body.1.last_tx_num()) else { + return Ok(None) + }; + + let mut account_changeset = Vec::new(); + for block_num in range.clone() { + let changeset = + self.account_block_changeset(block_num)?.into_iter().map(|elem| (block_num, elem)); + account_changeset.extend(changeset); + } + + let mut storage_changeset = Vec::new(); + for block_num in range { + let changeset = self.storage_changeset(block_num)?; + storage_changeset.extend(changeset); + } + + let (state, reverts) = + self.populate_bundle_state(account_changeset, storage_changeset, end_block_number)?; + + let mut receipt_iter = + self.receipts_by_tx_range(from_transaction_num..=to_transaction_num)?.into_iter(); + + let mut receipts = Vec::with_capacity(block_bodies.len()); + // loop break if we are at the end of the blocks. + for (_, block_body) in block_bodies { + let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); + for tx_num in block_body.tx_num_range() { + let receipt = receipt_iter + .next() + .ok_or_else(|| ProviderError::ReceiptNotFound(tx_num.into()))?; + block_receipts.push(Some(receipt)); + } + receipts.push(block_receipts); + } + + Ok(Some(ExecutionOutcome::new_init( + state, + reverts, + // We skip new contracts since we never delete them from the database + Vec::new(), + receipts.into(), + start_block_number, + Vec::new(), + ))) + } + + /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the + /// [`reth_db::PlainAccountState`] and [`reth_db::PlainStorageState`] tables, based on the given + /// storage and account changesets. 
+ fn populate_bundle_state( + &self, + account_changeset: Vec<(u64, AccountBeforeTx)>, + storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, + block_range_end: BlockNumber, + ) -> ProviderResult<(BundleStateInit, RevertsInit)> { + let mut state: BundleStateInit = HashMap::new(); + let mut reverts: RevertsInit = HashMap::new(); + let state_provider = self.state_by_block_number_ref(block_range_end)?; + + // add account changeset changes + for (block_number, account_before) in account_changeset.into_iter().rev() { + let AccountBeforeTx { info: old_info, address } = account_before; + match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let new_info = state_provider.basic_account(address)?; + entry.insert((old_info, new_info, HashMap::new())); + } + hash_map::Entry::Occupied(mut entry) => { + // overwrite old account state. + entry.get_mut().0 = old_info; + } + } + // insert old info into reverts. + reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); + } + + // add storage changeset changes + for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { + let BlockNumberAddress((block_number, address)) = block_and_address; + // get account state or insert from plain state. + let account_state = match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let present_info = state_provider.basic_account(address)?; + entry.insert((present_info, present_info, HashMap::new())) + } + hash_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + // match storage. 
+ match account_state.2.entry(old_storage.key) { + hash_map::Entry::Vacant(entry) => { + let new_storage_value = + state_provider.storage(address, old_storage.key)?.unwrap_or_default(); + entry.insert((old_storage.value, new_storage_value)); + } + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().0 = old_storage.value; + } + }; + + reverts + .entry(block_number) + .or_default() + .entry(address) + .or_default() + .1 + .push(old_storage); + } + + Ok((state, reverts)) + } + + /// Fetches a range of data from both in-memory state and persistent storage while a predicate + /// is met. + /// + /// Creates a snapshot of the in-memory chain state and database provider to prevent + /// inconsistencies. Splits the range into in-memory and storage sections, prioritizing + /// recent in-memory blocks in case of overlaps. + /// + /// * `fetch_db_range` function (`F`) provides access to the database provider, allowing the + /// user to retrieve the required items from the database using [`RangeInclusive`]. + /// * `map_block_state_item` function (`G`) provides each block of the range in the in-memory + /// state, allowing for selection or filtering for the desired data. + fn get_in_memory_or_storage_by_block_range_while( + &self, + range: impl RangeBounds, + fetch_db_range: F, + map_block_state_item: G, + mut predicate: P, + ) -> ProviderResult> + where + F: FnOnce( + &DatabaseProviderRO, + RangeInclusive, + &mut P, + ) -> ProviderResult>, + G: Fn(&BlockState, &mut P) -> Option, + P: FnMut(&T) -> bool, + { + // Each one provides a snapshot at the time of instantiation, but its order matters. + // + // If we acquire first the database provider, it's possible that before the in-memory chain + // snapshot is instantiated, it will flush blocks to disk. This would + // mean that our database provider would not have access to the flushed blocks (since it's + // working under an older view), while the in-memory state may have deleted them + // entirely. 
Resulting in gaps on the range. + let mut in_memory_chain = + self.head_block.as_ref().map(|b| b.chain().collect::>()).unwrap_or_default(); + let db_provider = &self.storage_provider; + + let (start, end) = self.convert_range_bounds(range, || { + // the first block is the highest one. + in_memory_chain + .first() + .map(|b| b.number()) + .unwrap_or_else(|| db_provider.last_block_number().unwrap_or_default()) + }); + + if start > end { + return Ok(vec![]) + } + + // Split range into storage_range and in-memory range. If the in-memory range is not + // necessary drop it early. + // + // The last block of `in_memory_chain` is the lowest block number. + let (in_memory, storage_range) = match in_memory_chain.last().as_ref().map(|b| b.number()) { + Some(lowest_memory_block) if lowest_memory_block <= end => { + let highest_memory_block = + in_memory_chain.first().as_ref().map(|b| b.number()).expect("qed"); + + // Database will for a time overlap with in-memory-chain blocks. In + // case of a re-org, it can mean that the database blocks are of a forked chain, and + // so, we should prioritize the in-memory overlapped blocks. + let in_memory_range = + lowest_memory_block.max(start)..=end.min(highest_memory_block); + + // If requested range is in the middle of the in-memory range, remove the necessary + // lowest blocks + in_memory_chain.truncate( + in_memory_chain + .len() + .saturating_sub(start.saturating_sub(lowest_memory_block) as usize), + ); + + let storage_range = + (lowest_memory_block > start).then(|| start..=lowest_memory_block - 1); + + (Some((in_memory_chain, in_memory_range)), storage_range) + } + _ => { + // Drop the in-memory chain so we don't hold blocks in memory. 
+ drop(in_memory_chain); + + (None, Some(start..=end)) + } + }; + + let mut items = Vec::with_capacity((end - start + 1) as usize); + + if let Some(storage_range) = storage_range { + let mut db_items = fetch_db_range(db_provider, storage_range.clone(), &mut predicate)?; + items.append(&mut db_items); + + // The predicate was not met, if the number of items differs from the expected. So, we + // return what we have. + if items.len() as u64 != storage_range.end() - storage_range.start() + 1 { + return Ok(items) + } + } + + if let Some((in_memory_chain, in_memory_range)) = in_memory { + for (num, block) in in_memory_range.zip(in_memory_chain.into_iter().rev()) { + debug_assert!(num == block.number()); + if let Some(item) = map_block_state_item(block, &mut predicate) { + items.push(item); + } else { + break + } + } + } + + Ok(items) + } + + /// This uses a given [`BlockState`] to initialize a state provider for that block. + fn block_state_provider_ref( + &self, + state: &BlockState, + ) -> ProviderResult> { + let anchor_hash = state.anchor().hash; + let latest_historical = self.history_by_block_hash_ref(anchor_hash)?; + let in_memory = state.chain().map(|block_state| block_state.block()).collect(); + Ok(MemoryOverlayStateProviderRef::new(latest_historical, in_memory)) + } + + /// Fetches data from either in-memory state or persistent storage for a range of transactions. + /// + /// * `fetch_from_db`: has a `DatabaseProviderRO` and the storage specific range. + /// * `fetch_from_block_state`: has a [`RangeInclusive`] of elements that should be fetched from + /// [`BlockState`]. [`RangeInclusive`] is necessary to handle partial look-ups of a block. 
+ fn get_in_memory_or_storage_by_tx_range( + &self, + range: impl RangeBounds, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult> + where + S: FnOnce( + &DatabaseProviderRO, + RangeInclusive, + ) -> ProviderResult>, + M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, + { + let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); + let provider = &self.storage_provider; + + // Get the last block number stored in the storage which does NOT overlap with in-memory + // chain. + let last_database_block_number = in_mem_chain + .last() + .map(|b| Ok(b.anchor().number)) + .unwrap_or_else(|| provider.last_block_number())?; + + // Get the next tx number for the last block stored in the storage, which marks the start of + // the in-memory state. + let last_block_body_index = provider + .block_body_indices(last_database_block_number)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; + let mut in_memory_tx_num = last_block_body_index.next_tx_num(); + + let (start, end) = self.convert_range_bounds(range, || { + in_mem_chain + .iter() + .map(|b| b.block_ref().block().body.transactions.len() as u64) + .sum::() + + last_block_body_index.last_tx_num() + }); + + if start > end { + return Ok(vec![]) + } + + let mut tx_range = start..=end; + + // If the range is entirely before the first in-memory transaction number, fetch from + // storage + if *tx_range.end() < in_memory_tx_num { + return fetch_from_db(provider, tx_range); + } + + let mut items = Vec::with_capacity((tx_range.end() - tx_range.start() + 1) as usize); + + // If the range spans storage and memory, get elements from storage first. + if *tx_range.start() < in_memory_tx_num { + // Determine the range that needs to be fetched from storage. 
+ let db_range = *tx_range.start()..=in_memory_tx_num.saturating_sub(1); + + // Set the remaining transaction range for in-memory + tx_range = in_memory_tx_num..=*tx_range.end(); + + items.extend(fetch_from_db(provider, db_range)?); + } + + // Iterate from the lowest block to the highest in-memory chain + for block_state in in_mem_chain.iter().rev() { + let block_tx_count = block_state.block_ref().block().body.transactions.len(); + let remaining = (tx_range.end() - tx_range.start() + 1) as usize; + + // If the transaction range start is equal or higher than the next block first + // transaction, advance + if *tx_range.start() >= in_memory_tx_num + block_tx_count as u64 { + in_memory_tx_num += block_tx_count as u64; + continue + } + + // This should only be more than 0 once, in case of a partial range inside a block. + let skip = (tx_range.start() - in_memory_tx_num) as usize; + + items.extend(fetch_from_block_state( + skip..=skip + (remaining.min(block_tx_count - skip) - 1), + block_state, + )?); + + in_memory_tx_num += block_tx_count as u64; + + // Break if the range has been fully processed + if in_memory_tx_num > *tx_range.end() { + break + } + + // Set updated range + tx_range = in_memory_tx_num..=*tx_range.end(); + } + + Ok(items) + } + + /// Fetches data from either in-memory state or persistent storage by transaction + /// [`HashOrNumber`]. + fn get_in_memory_or_storage_by_tx( + &self, + id: HashOrNumber, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult> + where + S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, + M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, + { + let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); + let provider = &self.storage_provider; + + // Get the last block number stored in the database which does NOT overlap with in-memory + // chain. 
+ let last_database_block_number = in_mem_chain + .last() + .map(|b| Ok(b.anchor().number)) + .unwrap_or_else(|| provider.last_block_number())?; + + // Get the next tx number for the last block stored in the database and consider it the + // first tx number of the in-memory state + let last_block_body_index = provider + .block_body_indices(last_database_block_number)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; + let mut in_memory_tx_num = last_block_body_index.next_tx_num(); + + // If the transaction number is less than the first in-memory transaction number, make a + // database lookup + if let HashOrNumber::Number(id) = id { + if id < in_memory_tx_num { + return fetch_from_db(provider) + } + } + + // Iterate from the lowest block to the highest + for block_state in in_mem_chain.iter().rev() { + let executed_block = block_state.block_ref(); + let block = executed_block.block(); + + for tx_index in 0..block.body.transactions.len() { + match id { + HashOrNumber::Hash(tx_hash) => { + if tx_hash == block.body.transactions[tx_index].hash() { + return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) + } + } + HashOrNumber::Number(id) => { + if id == in_memory_tx_num { + return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) + } + } + } + + in_memory_tx_num += 1; + } + } + + // Not found in-memory, so check database. + if let HashOrNumber::Hash(_) = id { + return fetch_from_db(provider) + } + + Ok(None) + } + + /// Fetches data from either in-memory state or persistent storage by [`BlockHashOrNumber`]. 
+ pub(crate) fn get_in_memory_or_storage_by_block( + &self, + id: BlockHashOrNumber, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult + where + S: FnOnce(&DatabaseProviderRO) -> ProviderResult, + M: Fn(&BlockState) -> ProviderResult, + { + if let Some(Some(block_state)) = self.head_block.as_ref().map(|b| b.block_on_chain(id)) { + return fetch_from_block_state(block_state) + } + fetch_from_db(&self.storage_provider) + } +} + +impl ConsistentProvider { + /// Ensures that the given block number is canonical (synced) + /// + /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are + /// out of range and would lead to invalid results, mainly during initial sync. + /// + /// Verifying the `block_number` would be expensive since we need to lookup sync table + /// Instead, we ensure that the `block_number` is within the range of the + /// [`Self::best_block_number`] which is updated when a block is synced. + #[inline] + pub(crate) fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { + let latest = self.best_block_number()?; + if block_number > latest { + Err(ProviderError::HeaderNotFound(block_number.into())) + } else { + Ok(()) + } + } +} + +impl StaticFileProviderFactory for ConsistentProvider { + fn static_file_provider(&self) -> StaticFileProvider { + self.storage_provider.static_file_provider() + } +} + +impl HeaderProvider for ConsistentProvider { + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + (*block_hash).into(), + |db_provider| db_provider.header(block_hash), + |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + ) + } + + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + num.into(), + |db_provider| db_provider.header_by_number(num), + |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + ) + } + + 
fn header_td(&self, hash: &BlockHash) -> ProviderResult> { + if let Some(num) = self.block_number(*hash)? { + self.header_td_by_number(num) + } else { + Ok(None) + } + } + + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { + let number = if self.head_block.as_ref().map(|b| b.block_on_chain(number.into())).is_some() + { + // If the block exists in memory, we should return a TD for it. + // + // The canonical in memory state should only store post-merge blocks. Post-merge blocks + // have zero difficulty. This means we can use the total difficulty for the last + // finalized block number if present (so that we are not affected by reorgs), if not the + // last number in the database will be used. + if let Some(last_finalized_num_hash) = + self.canonical_in_memory_state.get_finalized_num_hash() + { + last_finalized_num_hash.number + } else { + self.last_block_number()? + } + } else { + // Otherwise, return what we have on disk for the input block + number + }; + self.storage_provider.header_td_by_number(number) + } + + fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.headers_range(range), + |block_state, _| Some(block_state.block_ref().block().header.header().clone()), + |_| true, + ) + } + + fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.sealed_header(number), + |block_state| Ok(Some(block_state.block_ref().block().header.clone())), + ) + } + + fn sealed_headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.sealed_headers_range(range), + |block_state, _| Some(block_state.block_ref().block().header.clone()), + |_| true, + ) + } + + fn sealed_headers_while( + &self, + range: impl RangeBounds, + 
predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), + |block_state, predicate| { + let header = &block_state.block_ref().block().header; + predicate(header).then(|| header.clone()) + }, + predicate, + ) + } +} + +impl BlockHashReader for ConsistentProvider { + fn block_hash(&self, number: u64) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.block_hash(number), + |block_state| Ok(Some(block_state.hash())), + ) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + start..end, + |db_provider, inclusive_range, _| { + db_provider + .canonical_hashes_range(*inclusive_range.start(), *inclusive_range.end() + 1) + }, + |block_state, _| Some(block_state.hash()), + |_| true, + ) + } +} + +impl BlockNumReader for ConsistentProvider { + fn chain_info(&self) -> ProviderResult { + let best_number = self.best_block_number()?; + Ok(ChainInfo { best_hash: self.block_hash(best_number)?.unwrap_or_default(), best_number }) + } + + fn best_block_number(&self) -> ProviderResult { + self.head_block.as_ref().map(|b| Ok(b.number())).unwrap_or_else(|| self.last_block_number()) + } + + fn last_block_number(&self) -> ProviderResult { + self.storage_provider.last_block_number() + } + + fn block_number(&self, hash: B256) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + hash.into(), + |db_provider| db_provider.block_number(hash), + |block_state| Ok(Some(block_state.number())), + ) + } +} + +impl BlockIdReader for ConsistentProvider { + fn pending_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block_num_hash()) + } + + fn safe_block_num_hash(&self) -> ProviderResult> { + 
Ok(self.canonical_in_memory_state.get_safe_num_hash()) + } + + fn finalized_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.get_finalized_num_hash()) + } +} + +impl BlockReader for ConsistentProvider { + fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + match source { + BlockSource::Any | BlockSource::Canonical => { + // Note: it's fine to return the unsealed block because the caller already has + // the hash + self.get_in_memory_or_storage_by_block( + hash.into(), + |db_provider| db_provider.find_block_by_hash(hash, source), + |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), + ) + } + BlockSource::Pending => { + Ok(self.canonical_in_memory_state.pending_block().map(|block| block.unseal())) + } + } + } + + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.block(id), + |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), + ) + } + + fn pending_block(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block()) + } + + fn pending_block_with_senders(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block_with_senders()) + } + + fn pending_block_and_receipts(&self) -> ProviderResult)>> { + Ok(self.canonical_in_memory_state.pending_block_and_receipts()) + } + + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.ommers(id), + |block_state| { + if self.chain_spec().final_paris_total_difficulty(block_state.number()).is_some() { + return Ok(Some(Vec::new())) + } + + Ok(Some(block_state.block_ref().block().body.ommers.clone())) + }, + ) + } + + fn block_body_indices( + &self, + number: BlockNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.block_body_indices(number), + |block_state| { + 
// Find the last block indices on database + let last_storage_block_number = block_state.anchor().number; + let mut stored_indices = self + .storage_provider + .block_body_indices(last_storage_block_number)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(last_storage_block_number))?; + + // Prepare our block indices + stored_indices.first_tx_num = stored_indices.next_tx_num(); + stored_indices.tx_count = 0; + + // Iterate from the lowest block in memory until our target block + for state in block_state.chain().collect::>().into_iter().rev() { + let block_tx_count = state.block_ref().block.body.transactions.len() as u64; + if state.block_ref().block().number == number { + stored_indices.tx_count = block_tx_count; + } else { + stored_indices.first_tx_num += block_tx_count; + } + } + + Ok(Some(stored_indices)) + }, + ) + } + + /// Returns the block with senders with matching number or hash from database. + /// + /// **NOTE: If [`TransactionVariant::NoHash`] is provided then the transactions have invalid + /// hashes, since they would need to be calculated on the spot, and we want fast querying.** + /// + /// Returns `None` if block is not found. 
+ fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.block_with_senders(id, transaction_kind), + |block_state| Ok(Some(block_state.block_with_senders())), + ) + } + + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), + |block_state| Ok(Some(block_state.sealed_block_with_senders())), + ) + } + + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.block_range(range), + |block_state, _| Some(block_state.block_ref().block().clone().unseal()), + |_| true, + ) + } + + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.block_with_senders_range(range), + |block_state, _| Some(block_state.block_with_senders()), + |_| true, + ) + } + + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), + |block_state, _| Some(block_state.sealed_block_with_senders()), + |_| true, + ) + } +} + +impl TransactionsProvider for ConsistentProvider { + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + tx_hash.into(), + |db_provider| db_provider.transaction_id(tx_hash), + |_, tx_number, _| Ok(Some(tx_number)), + ) + } + + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| 
provider.transaction_by_id(id), + |tx_index, _, block_state| { + Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) + }, + ) + } + + fn transaction_by_id_no_hash( + &self, + id: TxNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_by_id_no_hash(id), + |tx_index, _, block_state| { + Ok(block_state + .block_ref() + .block() + .body + .transactions + .get(tx_index) + .cloned() + .map(Into::into)) + }, + ) + } + + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + if let Some(tx) = self.head_block.as_ref().and_then(|b| b.transaction_on_chain(hash)) { + return Ok(Some(tx)) + } + + self.storage_provider.transaction_by_hash(hash) + } + + fn transaction_by_hash_with_meta( + &self, + tx_hash: TxHash, + ) -> ProviderResult> { + if let Some((tx, meta)) = + self.head_block.as_ref().and_then(|b| b.transaction_meta_on_chain(tx_hash)) + { + return Ok(Some((tx, meta))) + } + + self.storage_provider.transaction_by_hash_with_meta(tx_hash) + } + + fn transaction_block(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_block(id), + |_, _, block_state| Ok(Some(block_state.block_ref().block().number)), + ) + } + + fn transactions_by_block( + &self, + id: BlockHashOrNumber, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + id, + |provider| provider.transactions_by_block(id), + |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), + ) + } + + fn transactions_by_block_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.transactions_by_block_range(range), + |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), + |_| true, + ) + } + + fn transactions_by_tx_range( + &self, + range: impl RangeBounds, + ) -> 
ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), + |index_range, block_state| { + Ok(block_state.block_ref().block().body.transactions[index_range] + .iter() + .cloned() + .map(Into::into) + .collect()) + }, + ) + } + + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.senders_by_tx_range(db_range), + |index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()), + ) + } + + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_sender(id), + |tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()), + ) + } +} + +impl ReceiptProvider for ConsistentProvider { + fn receipt(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.receipt(id), + |tx_index, _, block_state| { + Ok(block_state.executed_block_receipts().get(tx_index).cloned()) + }, + ) + } + + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + for block_state in self.head_block.iter().flat_map(|b| b.chain()) { + let executed_block = block_state.block_ref(); + let block = executed_block.block(); + let receipts = block_state.executed_block_receipts(); + + // assuming 1:1 correspondence between transactions and receipts + debug_assert_eq!( + block.body.transactions.len(), + receipts.len(), + "Mismatch between transaction and receipt count" + ); + + if let Some(tx_index) = block.body.transactions.iter().position(|tx| tx.hash() == hash) + { + // safe to use tx_index for receipts due to 1:1 correspondence + return Ok(receipts.get(tx_index).cloned()); + } + } + + self.storage_provider.receipt_by_hash(hash) + } + + fn receipts_by_block(&self, block: BlockHashOrNumber) -> 
ProviderResult>> { + self.get_in_memory_or_storage_by_block( + block, + |db_provider| db_provider.receipts_by_block(block), + |block_state| Ok(Some(block_state.executed_block_receipts())), + ) + } + + fn receipts_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), + |index_range, block_state| { + Ok(block_state.executed_block_receipts().drain(index_range).collect()) + }, + ) + } +} + +impl ReceiptProviderIdExt for ConsistentProvider { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + match block { + BlockId::Hash(rpc_block_hash) => { + let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; + if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { + if let Some(state) = self + .head_block + .as_ref() + .and_then(|b| b.block_on_chain(rpc_block_hash.block_hash.into())) + { + receipts = Some(state.executed_block_receipts()); + } + } + Ok(receipts) + } + BlockId::Number(num_tag) => match num_tag { + BlockNumberOrTag::Pending => Ok(self + .canonical_in_memory_state + .pending_state() + .map(|block_state| block_state.executed_block_receipts())), + _ => { + if let Some(num) = self.convert_block_number(num_tag)? 
{ + self.receipts_by_block(num.into()) + } else { + Ok(None) + } + } + }, + } + } +} + +impl WithdrawalsProvider for ConsistentProvider { + fn withdrawals_by_block( + &self, + id: BlockHashOrNumber, + timestamp: u64, + ) -> ProviderResult> { + if !self.chain_spec().is_shanghai_active_at_timestamp(timestamp) { + return Ok(None) + } + + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.withdrawals_by_block(id, timestamp), + |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()), + ) + } + + fn latest_withdrawal(&self) -> ProviderResult> { + let best_block_num = self.best_block_number()?; + + self.get_in_memory_or_storage_by_block( + best_block_num.into(), + |db_provider| db_provider.latest_withdrawal(), + |block_state| { + Ok(block_state + .block_ref() + .block() + .body + .withdrawals + .clone() + .and_then(|mut w| w.pop())) + }, + ) + } +} + +impl StageCheckpointReader for ConsistentProvider { + fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { + self.storage_provider.get_stage_checkpoint(id) + } + + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { + self.storage_provider.get_stage_checkpoint_progress(id) + } + + fn get_all_checkpoints(&self) -> ProviderResult> { + self.storage_provider.get_all_checkpoints() + } +} + +impl EvmEnvProvider for ConsistentProvider { + fn fill_env_at( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + block_env: &mut BlockEnv, + at: BlockHashOrNumber, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv
, + { + let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; + let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; + self.fill_env_with_header(cfg, block_env, &header, evm_config) + } + + fn fill_env_with_header( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + block_env: &mut BlockEnv, + header: &Header, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv
, + { + let total_difficulty = self + .header_td_by_number(header.number)? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); + Ok(()) + } + + fn fill_cfg_env_at( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + at: BlockHashOrNumber, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv
, + { + let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; + let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; + self.fill_cfg_env_with_header(cfg, &header, evm_config) + } + + fn fill_cfg_env_with_header( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + header: &Header, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv
, + { + let total_difficulty = self + .header_td_by_number(header.number)? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + evm_config.fill_cfg_env(cfg, header, total_difficulty); + Ok(()) + } +} + +impl PruneCheckpointReader for ConsistentProvider { + fn get_prune_checkpoint( + &self, + segment: PruneSegment, + ) -> ProviderResult> { + self.storage_provider.get_prune_checkpoint(segment) + } + + fn get_prune_checkpoints(&self) -> ProviderResult> { + self.storage_provider.get_prune_checkpoints() + } +} + +impl ChainSpecProvider for ConsistentProvider { + type ChainSpec = N::ChainSpec; + + fn chain_spec(&self) -> Arc { + ChainSpecProvider::chain_spec(&self.storage_provider) + } +} + +impl BlockReaderIdExt for ConsistentProvider { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { + match id { + BlockId::Number(num) => self.block_by_number_or_tag(num), + BlockId::Hash(hash) => { + // TODO: should we only apply this for the RPCs that are listed in EIP-1898? + // so not at the provider level? 
+ // if we decide to do this at a higher level, then we can make this an automatic + // trait impl + if Some(true) == hash.require_canonical { + // check the database, canonical blocks are only stored in the database + self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) + } else { + self.block_by_hash(hash.block_hash) + } + } + } + } + + fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + Ok(match id { + BlockNumberOrTag::Latest => { + Some(self.canonical_in_memory_state.get_canonical_head().unseal()) + } + BlockNumberOrTag::Finalized => { + self.canonical_in_memory_state.get_finalized_header().map(|h| h.unseal()) + } + BlockNumberOrTag::Safe => { + self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal()) + } + BlockNumberOrTag::Earliest => self.header_by_number(0)?, + BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(), + + BlockNumberOrTag::Number(num) => self.header_by_number(num)?, + }) + } + + fn sealed_header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { + match id { + BlockNumberOrTag::Latest => { + Ok(Some(self.canonical_in_memory_state.get_canonical_head())) + } + BlockNumberOrTag::Finalized => { + Ok(self.canonical_in_memory_state.get_finalized_header()) + } + BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), + BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( + || Ok(None), + |h| { + let sealed = h.seal_slow(); + let (header, seal) = sealed.into_parts(); + Ok(Some(SealedHeader::new(header, seal))) + }, + ), + BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), + BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( + || Ok(None), + |h| { + let sealed = h.seal_slow(); + let (header, seal) = sealed.into_parts(); + Ok(Some(SealedHeader::new(header, seal))) + }, + ), + } + } + + fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + 
Ok(match id { + BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { + let sealed = h.seal_slow(); + let (header, seal) = sealed.into_parts(); + SealedHeader::new(header, seal) + }), + }) + } + + fn header_by_id(&self, id: BlockId) -> ProviderResult> { + Ok(match id { + BlockId::Number(num) => self.header_by_number_or_tag(num)?, + BlockId::Hash(hash) => self.header(&hash.block_hash)?, + }) + } + + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { + match id { + BlockId::Number(num) => self.ommers_by_number_or_tag(num), + BlockId::Hash(hash) => { + // TODO: EIP-1898 question, see above + // here it is not handled + self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) + } + } + } +} + +impl StorageChangeSetReader for ConsistentProvider { + fn storage_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + let changesets = state + .block() + .execution_output + .bundle + .reverts + .clone() + .into_plain_state_reverts() + .storage + .into_iter() + .flatten() + .flat_map(|revert: PlainStorageRevert| { + revert.storage_revert.into_iter().map(move |(key, value)| { + ( + BlockNumberAddress((block_number, revert.address)), + StorageEntry { key: key.into(), value: value.to_previous_value() }, + ) + }) + }) + .collect(); + Ok(changesets) + } else { + // Perform checks on whether or not changesets exist for the block. + + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let storage_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::StorageHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. 
+ checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !storage_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + self.storage_provider.storage_changeset(block_number) + } + } +} + +impl ChangeSetReader for ConsistentProvider { + fn account_block_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + let changesets = state + .block_ref() + .execution_output + .bundle + .reverts + .clone() + .into_plain_state_reverts() + .accounts + .into_iter() + .flatten() + .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) + .collect(); + Ok(changesets) + } else { + // Perform checks on whether or not changesets exist for the block. + + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let account_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::AccountHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !account_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + self.storage_provider.account_block_changeset(block_number) + } + } +} + +impl AccountReader for ConsistentProvider { + /// Get basic account information. + fn basic_account(&self, address: Address) -> ProviderResult> { + // use latest state provider + let state_provider = self.latest_ref()?; + state_provider.basic_account(address) + } +} + +impl StateReader for ConsistentProvider { + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. 
+ /// + /// If data for the block does not exist, this will return [`None`]. + /// + /// NOTE: This cannot be called safely in a loop outside of the blockchain tree thread. This is + /// because the [`CanonicalInMemoryState`] could change during a reorg, causing results to be + /// inconsistent. Currently this can safely be called within the blockchain tree thread, + /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the + /// first place. + fn get_state(&self, block: BlockNumber) -> ProviderResult> { + if let Some(state) = self.head_block.as_ref().and_then(|b| b.block_on_chain(block.into())) { + let state = state.block_ref().execution_outcome().clone(); + Ok(Some(state)) + } else { + Self::get_state(self, block..=block) + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + providers::blockchain_provider::BlockchainProvider2, + test_utils::create_test_provider_factory, BlockWriter, + }; + use alloy_eips::BlockHashOrNumber; + use alloy_primitives::B256; + use itertools::Itertools; + use rand::Rng; + use reth_chain_state::{ExecutedBlock, NewCanonicalChain}; + use reth_db::models::AccountBeforeTx; + use reth_execution_types::ExecutionOutcome; + use reth_primitives::SealedBlock; + use reth_storage_api::{BlockReader, BlockSource, ChangeSetReader}; + use reth_testing_utils::generators::{ + self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams, + }; + use revm::db::BundleState; + use std::{ + ops::{Bound, Range, RangeBounds}, + sync::Arc, + }; + + const TEST_BLOCKS_COUNT: usize = 5; + + fn random_blocks( + rng: &mut impl Rng, + database_blocks: usize, + in_memory_blocks: usize, + requests_count: Option>, + withdrawals_count: Option>, + tx_count: impl RangeBounds, + ) -> (Vec, Vec) { + let block_range = (database_blocks + in_memory_blocks - 1) as u64; + + let tx_start = match tx_count.start_bound() { + Bound::Included(&n) | Bound::Excluded(&n) => n, + Bound::Unbounded => u8::MIN, + }; + let tx_end 
= match tx_count.end_bound() { + Bound::Included(&n) | Bound::Excluded(&n) => n + 1, + Bound::Unbounded => u8::MAX, + }; + + let blocks = random_block_range( + rng, + 0..=block_range, + BlockRangeParams { + parent: Some(B256::ZERO), + tx_count: tx_start..tx_end, + requests_count, + withdrawals_count, + }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(database_blocks); + (database_blocks.to_vec(), in_memory_blocks.to_vec()) + } + + #[test] + fn test_block_reader_find_block_by_hash() -> eyre::Result<()> { + // Initialize random number generator and provider factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks and split into database and in-memory blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(5); + + // Insert first 5 blocks into the database + let provider_rw = factory.provider_rw()?; + for block in database_blocks { + provider_rw.insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + )?; + } + provider_rw.commit()?; + + // Create a new provider + let provider = BlockchainProvider2::new(factory)?; + let consistent_provider = provider.consistent_provider()?; + + // Useful blocks + let first_db_block = database_blocks.first().unwrap(); + let first_in_mem_block = in_memory_blocks.first().unwrap(); + let last_in_mem_block = in_memory_blocks.last().unwrap(); + + // No block in memory before setting in memory state + assert_eq!( + consistent_provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Any)?, + None + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Canonical)?, + None + ); + // No pending block in memory + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), 
BlockSource::Pending)?, + None + ); + + // Insert first block into the in-memory state + let in_memory_block_senders = + first_in_mem_block.senders().expect("failed to recover senders"); + let chain = NewCanonicalChain::Commit { + new: vec![ExecutedBlock::new( + Arc::new(first_in_mem_block.clone()), + Arc::new(in_memory_block_senders), + Default::default(), + Default::default(), + Default::default(), + )], + }; + consistent_provider.canonical_in_memory_state.update_chain(chain); + let consistent_provider = provider.consistent_provider()?; + + // Now the block should be found in memory + assert_eq!( + consistent_provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Any)?, + Some(first_in_mem_block.clone().into()) + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Canonical)?, + Some(first_in_mem_block.clone().into()) + ); + + // Find the first block in database by hash + assert_eq!( + consistent_provider.find_block_by_hash(first_db_block.hash(), BlockSource::Any)?, + Some(first_db_block.clone().into()) + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_db_block.hash(), BlockSource::Canonical)?, + Some(first_db_block.clone().into()) + ); + + // No pending block in database + assert_eq!( + consistent_provider.find_block_by_hash(first_db_block.hash(), BlockSource::Pending)?, + None + ); + + // Insert the last block into the pending state + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + block: Arc::new(last_in_mem_block.clone()), + senders: Default::default(), + execution_output: Default::default(), + hashed_state: Default::default(), + trie: Default::default(), + }); + + // Now the last block should be found in memory + assert_eq!( + consistent_provider + .find_block_by_hash(last_in_mem_block.hash(), BlockSource::Pending)?, + Some(last_in_mem_block.clone().into()) + ); + + Ok(()) + } + + #[test] + fn test_block_reader_block() -> eyre::Result<()> { + // Initialize 
random number generator and provider factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks and split into database and in-memory blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(5); + + // Insert first 5 blocks into the database + let provider_rw = factory.provider_rw()?; + for block in database_blocks { + provider_rw.insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + )?; + } + provider_rw.commit()?; + + // Create a new provider + let provider = BlockchainProvider2::new(factory)?; + let consistent_provider = provider.consistent_provider()?; + + // First in memory block + let first_in_mem_block = in_memory_blocks.first().unwrap(); + // First database block + let first_db_block = database_blocks.first().unwrap(); + + // First in memory block should not be found yet as not integrated to the in-memory state + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Hash(first_in_mem_block.hash()))?, + None + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_in_mem_block.number))?, + None + ); + + // Insert first block into the in-memory state + let in_memory_block_senders = + first_in_mem_block.senders().expect("failed to recover senders"); + let chain = NewCanonicalChain::Commit { + new: vec![ExecutedBlock::new( + Arc::new(first_in_mem_block.clone()), + Arc::new(in_memory_block_senders), + Default::default(), + Default::default(), + Default::default(), + )], + }; + consistent_provider.canonical_in_memory_state.update_chain(chain); + + let consistent_provider = provider.consistent_provider()?; + + // First in memory block should be found + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Hash(first_in_mem_block.hash()))?, + 
Some(first_in_mem_block.clone().into()) + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_in_mem_block.number))?, + Some(first_in_mem_block.clone().into()) + ); + + // First database block should be found + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Hash(first_db_block.hash()))?, + Some(first_db_block.clone().into()) + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_db_block.number))?, + Some(first_db_block.clone().into()) + ); + + Ok(()) + } + + #[test] + fn test_changeset_reader() -> eyre::Result<()> { + let mut rng = generators::rng(); + + let (database_blocks, in_memory_blocks) = + random_blocks(&mut rng, TEST_BLOCKS_COUNT, 1, None, None, 0..1); + + let first_database_block = database_blocks.first().map(|block| block.number).unwrap(); + let last_database_block = database_blocks.last().map(|block| block.number).unwrap(); + let first_in_memory_block = in_memory_blocks.first().map(|block| block.number).unwrap(); + + let accounts = random_eoa_accounts(&mut rng, 2); + + let (database_changesets, database_state) = random_changeset_range( + &mut rng, + &database_blocks, + accounts.into_iter().map(|(address, account)| (address, (account, Vec::new()))), + 0..0, + 0..0, + ); + let (in_memory_changesets, in_memory_state) = random_changeset_range( + &mut rng, + &in_memory_blocks, + database_state + .iter() + .map(|(address, (account, storage))| (*address, (*account, storage.clone()))), + 0..0, + 0..0, + ); + + let factory = create_test_provider_factory(); + + let provider_rw = factory.provider_rw()?; + provider_rw.append_blocks_with_state( + database_blocks + .into_iter() + .map(|b| b.seal_with_senders().expect("failed to seal block with senders")) + .collect(), + ExecutionOutcome { + bundle: BundleState::new( + database_state.into_iter().map(|(address, (account, _))| { + (address, None, Some(account.into()), Default::default()) + }), + database_changesets + .iter() + .map(|block_changesets| { + 
block_changesets.iter().map(|(address, account, _)| { + (*address, Some(Some((*account).into())), []) + }) + }) + .collect::>(), + Vec::new(), + ), + first_block: first_database_block, + ..Default::default() + }, + Default::default(), + Default::default(), + )?; + provider_rw.commit()?; + + let provider = BlockchainProvider2::new(factory)?; + + let in_memory_changesets = in_memory_changesets.into_iter().next().unwrap(); + let chain = NewCanonicalChain::Commit { + new: vec![in_memory_blocks + .first() + .map(|block| { + let senders = block.senders().expect("failed to recover senders"); + ExecutedBlock::new( + Arc::new(block.clone()), + Arc::new(senders), + Arc::new(ExecutionOutcome { + bundle: BundleState::new( + in_memory_state.into_iter().map(|(address, (account, _))| { + (address, None, Some(account.into()), Default::default()) + }), + [in_memory_changesets.iter().map(|(address, account, _)| { + (*address, Some(Some((*account).into())), Vec::new()) + })], + [], + ), + first_block: first_in_memory_block, + ..Default::default() + }), + Default::default(), + Default::default(), + ) + }) + .unwrap()], + }; + provider.canonical_in_memory_state.update_chain(chain); + + let consistent_provider = provider.consistent_provider()?; + + assert_eq!( + consistent_provider.account_block_changeset(last_database_block).unwrap(), + database_changesets + .into_iter() + .last() + .unwrap() + .into_iter() + .sorted_by_key(|(address, _, _)| *address) + .map(|(address, account, _)| AccountBeforeTx { address, info: Some(account) }) + .collect::>() + ); + assert_eq!( + consistent_provider.account_block_changeset(first_in_memory_block).unwrap(), + in_memory_changesets + .into_iter() + .sorted_by_key(|(address, _, _)| *address) + .map(|(address, account, _)| AccountBeforeTx { address, info: Some(account) }) + .collect::>() + ); + + Ok(()) + } +} diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index a67ebf89ba6..c81ef05d2ea 100644 --- 
a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -61,6 +61,9 @@ pub use consistent_view::{ConsistentDbView, ConsistentViewError}; mod blockchain_provider; pub use blockchain_provider::BlockchainProvider2; +mod consistent; +pub use consistent::ConsistentProvider; + /// Helper trait keeping common requirements of providers for [`NodeTypesWithDB`]. pub trait ProviderNodeTypes: NodeTypesWithDB {} From e98a050dc7ce868d079587d8f417b51d80a36cc8 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 23 Oct 2024 15:27:52 +0100 Subject: [PATCH 114/970] fix(trie): account for existing nodes when revealing a node (#11836) --- crates/trie/sparse/src/errors.rs | 10 + crates/trie/sparse/src/trie.rs | 301 +++++++++++++++++++++++++++---- 2 files changed, 280 insertions(+), 31 deletions(-) diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs index f60d1736c06..506b206fdd7 100644 --- a/crates/trie/sparse/src/errors.rs +++ b/crates/trie/sparse/src/errors.rs @@ -4,6 +4,8 @@ use alloy_primitives::{Bytes, B256}; use reth_trie::Nibbles; use thiserror::Error; +use crate::SparseNode; + /// Result type with [`SparseStateTrieError`] as error. pub type SparseStateTrieResult = Result; @@ -43,6 +45,14 @@ pub enum SparseTrieError { /// Node hash hash: B256, }, + /// Encountered unexpected node at path when revealing. + #[error("encountered an invalid node at path {path:?} when revealing: {node:?}")] + Reveal { + /// Path to the node. + path: Nibbles, + /// Node that was at the path when revealing. + node: Box, + }, /// RLP error. 
#[error(transparent)] Rlp(#[from] alloy_rlp::Error), diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 39deb50e7a6..fae1141ec1c 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -142,21 +142,55 @@ impl RevealedSparseTrie { stack_ptr += 1; } } - self.nodes - .insert(path, SparseNode::Branch { state_mask: branch.state_mask, hash: None }); - } - TrieNode::Extension(ext) => { - let mut child_path = path.clone(); - child_path.extend_from_slice_unchecked(&ext.key); - self.reveal_node_or_hash(child_path, &ext.child)?; - self.nodes.insert(path, SparseNode::Extension { key: ext.key, hash: None }); - } - TrieNode::Leaf(leaf) => { - let mut full = path.clone(); - full.extend_from_slice_unchecked(&leaf.key); - self.values.insert(full, leaf.value); - self.nodes.insert(path, SparseNode::new_leaf(leaf.key)); + + match self.nodes.get(&path) { + // Blinded and non-existent nodes can be replaced. + Some(SparseNode::Hash(_)) | None => { + self.nodes.insert( + path, + SparseNode::Branch { state_mask: branch.state_mask, hash: None }, + ); + } + // Branch node already exists, or an extension node was placed where a + // branch node was before. + Some(SparseNode::Branch { .. } | SparseNode::Extension { .. }) => {} + // All other node types can't be handled. + Some(node @ (SparseNode::Empty | SparseNode::Leaf { .. })) => { + return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) + } + } } + TrieNode::Extension(ext) => match self.nodes.get(&path) { + Some(SparseNode::Hash(_)) | None => { + let mut child_path = path.clone(); + child_path.extend_from_slice_unchecked(&ext.key); + self.reveal_node_or_hash(child_path, &ext.child)?; + self.nodes.insert(path, SparseNode::Extension { key: ext.key, hash: None }); + } + // Extension node already exists, or an extension node was placed where a branch + // node was before. + Some(SparseNode::Extension { .. } | SparseNode::Branch { .. 
}) => {} + // All other node types can't be handled. + Some(node @ (SparseNode::Empty | SparseNode::Leaf { .. })) => { + return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) + } + }, + TrieNode::Leaf(leaf) => match self.nodes.get(&path) { + Some(SparseNode::Hash(_)) | None => { + let mut full = path.clone(); + full.extend_from_slice_unchecked(&leaf.key); + self.values.insert(full, leaf.value); + self.nodes.insert(path, SparseNode::new_leaf(leaf.key)); + } + // Left node already exists. + Some(SparseNode::Leaf { .. }) => {} + // All other node types can't be handled. + Some( + node @ (SparseNode::Empty | + SparseNode::Extension { .. } | + SparseNode::Branch { .. }), + ) => return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }), + }, } Ok(()) @@ -164,8 +198,18 @@ impl RevealedSparseTrie { fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { if child.len() == B256::len_bytes() + 1 { - // TODO: revise insert to not overwrite existing entries - self.nodes.insert(path, SparseNode::Hash(B256::from_slice(&child[1..]))); + let hash = B256::from_slice(&child[1..]); + match self.nodes.get(&path) { + // Hash node with a different hash can't be handled. + Some(node @ SparseNode::Hash(previous_hash)) if previous_hash != &hash => { + return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) + } + None => { + self.nodes.insert(path, SparseNode::Hash(hash)); + } + // All other node types mean that it has already been revealed. + Some(_) => {} + } return Ok(()) } @@ -273,6 +317,10 @@ impl RevealedSparseTrie { // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry // in `nodes`, but not in the `values`. + // If the path wasn't present in `values`, we still need to walk the trie and ensure that + // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry + // in `nodes`, but not in the `values`. 
+ let mut removed_nodes = self.take_nodes_for_path(path)?; debug!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); // Pop the first node from the stack which is the leaf node we want to remove. @@ -1159,36 +1207,32 @@ mod tests { // Empty pretty_assertions::assert_eq!( sparse.nodes.clone().into_iter().collect::>(), - BTreeMap::from_iter([(Nibbles::default(), SparseNode::Empty),]) + BTreeMap::from_iter([(Nibbles::default(), SparseNode::Empty)]) ); } #[test] fn sparse_trie_remove_leaf_blinded() { - let mut sparse = RevealedSparseTrie::default(); - let leaf = LeafNode::new( Nibbles::default(), alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), ); + let branch = TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )); + + let mut sparse = RevealedSparseTrie::from_root(branch.clone()).unwrap(); // Reveal a branch node and one of its children // // Branch (Mask = 11) // ├── 0 -> Hash (Path = 0) // └── 1 -> Leaf (Path = 1) - sparse - .reveal_node( - Nibbles::default(), - TrieNode::Branch(BranchNode::new( - vec![ - RlpNode::word_rlp(&B256::repeat_byte(1)), - RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), - ], - TrieMask::new(0b11), - )), - ) - .unwrap(); + sparse.reveal_node(Nibbles::default(), branch).unwrap(); sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf)).unwrap(); // Removing a blinded leaf should result in an error @@ -1279,4 +1323,199 @@ mod tests { ) )| { test(updates.into_iter().collect()) }); } + + /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has + /// only nodes 0x00 and 0x01, and we have proofs for them. Node B is new and inserted in the + /// sparse trie first. + /// + /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie. + /// 2. Insert leaf 0x01 into the sparse trie. + /// 3. 
Reveal the hash builder proof to leaf 0x02 in the sparse trie. + /// + /// The hash builder proof to the leaf 0x02 didn't have the leaf 0x01 at the corresponding + /// nibble of the branch node, so we need to adjust the branch node instead of fully + /// replacing it. + #[test] + fn sparse_trie_reveal_node_1() { + let key1 = || Nibbles::from_nibbles_unchecked([0x00]); + let key2 = || Nibbles::from_nibbles_unchecked([0x01]); + let key3 = || Nibbles::from_nibbles_unchecked([0x02]); + let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key3(), value())], + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::from_root( + TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + ) + .unwrap(); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, proof_nodes) = + hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key1()]); + for (path, node) in proof_nodes.nodes_sorted() { + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } + + // Check that the branch node exists with only two nibbles set + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b101.into())) + ); + + // Insert the leaf for the second key + sparse.update_leaf(key2(), value().to_vec()).unwrap(); + + // Check that the branch node was updated and another nibble was set + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b111.into())) + ); + + // Generate the proof for the third key and reveal it in the sparse trie + let (_, proof_nodes_3) = + hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key3()]); + for (path, node) in proof_nodes_3.nodes_sorted() { + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } 
+ + // Check that nothing changed in the branch node + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b111.into())) + ); + + // Generate the nodes for the full trie with all three key using the hash builder, and + // compare them to the sparse trie + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value()), (key3(), value())], + [key1(), key2(), key3()], + ); + + assert_eq_sparse_trie_proof_nodes(&sparse, proof_nodes); + } + + /// We have three leaves: 0x0000, 0x0101, and 0x0102. Hash builder trie has all nodes, and we + /// have proofs for them. + /// + /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie. + /// 2. Remove leaf 0x00 from the sparse trie (that will remove the branch node and create an + /// extension node with the key 0x0000). + /// 3. Reveal the hash builder proof to leaf 0x0101 in the sparse trie. + /// + /// The hash builder proof to the leaf 0x0101 had a branch node in the path, but we turned it + /// into an extension node, so it should ignore this node. 
+ #[test] + fn sparse_trie_reveal_node_2() { + let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x00]); + let key2 = || Nibbles::from_nibbles_unchecked([0x01, 0x01]); + let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x02]); + let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value()), (key3(), value())], + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::from_root( + TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + ) + .unwrap(); + + // Generate the proof for the children of the root branch node and reveal it in the sparse + // trie + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value()), (key3(), value())], + [key1(), Nibbles::from_nibbles_unchecked([0x01])], + ); + for (path, node) in proof_nodes.nodes_sorted() { + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } + + // Check that the branch node exists + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b11.into())) + ); + + // Remove the leaf for the first key + sparse.remove_leaf(&key1()).unwrap(); + + // Check that the branch node was turned into an extension node + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01]))) + ); + + // Generate the proof for the third key and reveal it in the sparse trie + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value()), (key3(), value())], + [key2()], + ); + for (path, node) in proof_nodes.nodes_sorted() { + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } + + // Check that nothing changed in the extension node + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + 
Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01]))) + ); + } + + /// We have two leaves that share the same prefix: 0x0001 and 0x0002, and a leaf with a + /// different prefix: 0x0100. Hash builder trie has only the first two leaves, and we have + /// proofs for them. + /// + /// 1. Insert the leaf 0x0100 into the sparse trie, and check that the root extensino node was + /// turned into a branch node. + /// 2. Reveal the leaf 0x0001 in the sparse trie, and check that the root branch node wasn't + /// overwritten with the extension node from the proof. + #[test] + fn sparse_trie_reveal_node_3() { + let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x01]); + let key2 = || Nibbles::from_nibbles_unchecked([0x00, 0x02]); + let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x00]); + let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, proof_nodes) = hash_builder_root_with_proofs( + [(key1(), value()), (key2(), value())], + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::from_root( + TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + ) + .unwrap(); + + // Check that the root extension node exists + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Extension { key, hash: None }) if *key == Nibbles::from_nibbles([0x00]) + ); + + // Insert the leaf with a different prefix + sparse.update_leaf(key3(), value().to_vec()).unwrap(); + + // Check that the extension node was turned into a branch node + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + ); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, proof_nodes) = + hash_builder_root_with_proofs([(key1(), value()), (key2(), value())], [key1()]); + for (path, node) in proof_nodes.nodes_sorted() { + 
sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + } + + // Check that the branch node wasn't overwritten by the extension node in the proof + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + ); + } } From fa30a4f758e5f166e73c99bc25fbff34962ea3be Mon Sep 17 00:00:00 2001 From: Oliver Date: Wed, 23 Oct 2024 16:29:32 +0200 Subject: [PATCH 115/970] feat: add osaka hardfork (#11984) --- crates/chainspec/src/spec.rs | 8 ++++++++ crates/ethereum-forks/src/hardfork/ethereum.rs | 2 ++ crates/ethereum-forks/src/hardforks/ethereum.rs | 5 +++++ crates/ethereum/cli/src/chainspec.rs | 4 +++- crates/ethereum/evm/src/config.rs | 4 +++- 5 files changed, 21 insertions(+), 2 deletions(-) diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index a7f45727dd8..0e38d866b0d 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -617,6 +617,7 @@ impl From for ChainSpec { (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), + (EthereumHardfork::Osaka.boxed(), genesis.config.osaka_time), ]; let mut time_hardforks = time_hardfork_opts @@ -864,6 +865,13 @@ impl ChainSpecBuilder { self } + /// Enable Osaka at genesis. + pub fn osaka_activated(mut self) -> Self { + self = self.prague_activated(); + self.hardforks.insert(EthereumHardfork::Osaka, ForkCondition::Timestamp(0)); + self + } + /// Build the resulting [`ChainSpec`]. 
/// /// # Panics diff --git a/crates/ethereum-forks/src/hardfork/ethereum.rs b/crates/ethereum-forks/src/hardfork/ethereum.rs index 3d85b54a960..4e13b001786 100644 --- a/crates/ethereum-forks/src/hardfork/ethereum.rs +++ b/crates/ethereum-forks/src/hardfork/ethereum.rs @@ -49,6 +49,8 @@ hardfork!( Cancun, /// Prague: Prague, + /// Osaka: + Osaka, } ); diff --git a/crates/ethereum-forks/src/hardforks/ethereum.rs b/crates/ethereum-forks/src/hardforks/ethereum.rs index 3069367158f..086d2d3b46e 100644 --- a/crates/ethereum-forks/src/hardforks/ethereum.rs +++ b/crates/ethereum-forks/src/hardforks/ethereum.rs @@ -21,6 +21,11 @@ pub trait EthereumHardforks: Hardforks { self.is_fork_active_at_timestamp(EthereumHardfork::Prague, timestamp) } + /// Convenience method to check if [`EthereumHardfork::Osaka`] is active at a given timestamp. + fn is_osaka_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(EthereumHardfork::Osaka, timestamp) + } + /// Convenience method to check if [`EthereumHardfork::Byzantium`] is active at a given block /// number. 
fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { diff --git a/crates/ethereum/cli/src/chainspec.rs b/crates/ethereum/cli/src/chainspec.rs index cbcce9f69f6..a60d7017942 100644 --- a/crates/ethereum/cli/src/chainspec.rs +++ b/crates/ethereum/cli/src/chainspec.rs @@ -89,7 +89,8 @@ mod tests { "terminalTotalDifficulty": 0, "shanghaiTime": 0, "cancunTime": 0, - "pragueTime": 0 + "pragueTime": 0, + "osakaTime": 0 } }"#; @@ -97,5 +98,6 @@ mod tests { assert!(spec.is_shanghai_active_at_timestamp(0)); assert!(spec.is_cancun_active_at_timestamp(0)); assert!(spec.is_prague_active_at_timestamp(0)); + assert!(spec.is_osaka_active_at_timestamp(0)); } } diff --git a/crates/ethereum/evm/src/config.rs b/crates/ethereum/evm/src/config.rs index e5253307b33..9d6b6d8796c 100644 --- a/crates/ethereum/evm/src/config.rs +++ b/crates/ethereum/evm/src/config.rs @@ -11,7 +11,9 @@ pub fn revm_spec_by_timestamp_after_merge( chain_spec: &ChainSpec, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.is_prague_active_at_timestamp(timestamp) { + if chain_spec.is_osaka_active_at_timestamp(timestamp) { + revm_primitives::OSAKA + } else if chain_spec.is_prague_active_at_timestamp(timestamp) { revm_primitives::PRAGUE } else if chain_spec.is_cancun_active_at_timestamp(timestamp) { revm_primitives::CANCUN From b7167a9ddc8377a235f6d1d50e4581127f2a8bf0 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:30:57 +0200 Subject: [PATCH 116/970] test(tx-pool): add unit test for `GetPooledTransactionLimit` (#11975) --- crates/transaction-pool/src/traits.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index cedec56063b..00cda8e1cbe 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1594,4 +1594,27 @@ mod tests { assert_eq!(pooled_tx.blob_sidecar, 
EthBlobTransactionSidecar::None); assert_eq!(pooled_tx.cost, U256::from(100) + U256::from(10 * 1000)); } + + #[test] + fn test_pooled_transaction_limit() { + // No limit should never exceed + let limit_none = GetPooledTransactionLimit::None; + // Any size should return false + assert!(!limit_none.exceeds(1000)); + + // Size limit of 2MB (2 * 1024 * 1024 bytes) + let size_limit_2mb = GetPooledTransactionLimit::ResponseSizeSoftLimit(2 * 1024 * 1024); + + // Test with size below the limit + // 1MB is below 2MB, should return false + assert!(!size_limit_2mb.exceeds(1024 * 1024)); + + // Test with size exactly at the limit + // 2MB equals the limit, should return false + assert!(!size_limit_2mb.exceeds(2 * 1024 * 1024)); + + // Test with size exceeding the limit + // 3MB is above the 2MB limit, should return true + assert!(size_limit_2mb.exceeds(3 * 1024 * 1024)); + } } From 889a7e0b981e4a36394f6b4e554483daaabfeb25 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:31:22 +0200 Subject: [PATCH 117/970] primitive-traits: use alloy `_DURATION` constants (#11960) --- Cargo.lock | 2 ++ crates/node/core/Cargo.toml | 15 ++++----------- crates/node/core/src/args/payload_builder.rs | 3 ++- crates/payload/basic/Cargo.toml | 1 + crates/payload/basic/src/lib.rs | 4 ++-- crates/primitives-traits/src/constants/mod.rs | 10 ---------- 6 files changed, 11 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2d134cb85f..92a9d6887d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6370,6 +6370,7 @@ name = "reth-basic-payload-builder" version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "futures-core", @@ -7911,6 +7912,7 @@ name = "reth-node-core" version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "clap", diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 
73c552f4d7a..0c9672d1777 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -40,6 +40,7 @@ reth-stages-types.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jwt"] } alloy-consensus.workspace = true +alloy-eips.workspace = true # misc eyre.workspace = true @@ -76,18 +77,10 @@ proptest.workspace = true tokio.workspace = true [features] -optimism = [ - "reth-primitives/optimism", - "reth-db/optimism" -] +optimism = ["reth-primitives/optimism", "reth-db/optimism"] # Features for vergen to generate correct env vars -jemalloc = [ - "reth-cli-util/jemalloc" -] -asm-keccak = [ - "reth-primitives/asm-keccak", - "alloy-primitives/asm-keccak" -] +jemalloc = ["reth-cli-util/jemalloc"] +asm-keccak = ["reth-primitives/asm-keccak", "alloy-primitives/asm-keccak"] [build-dependencies] vergen = { version = "8.0.0", features = ["build", "cargo", "git", "gitcl"] } diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index 4a18fd5b0b7..dceb10726ff 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -1,11 +1,12 @@ use crate::{cli::config::PayloadBuilderConfig, version::default_extradata}; use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; +use alloy_eips::merge::SLOT_DURATION; use clap::{ builder::{RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; use reth_cli_util::{parse_duration_from_secs, parse_duration_from_secs_or_ms}; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, SLOT_DURATION}; +use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use std::{borrow::Cow, ffi::OsStr, time::Duration}; /// Parameters for configuring the Payload Builder diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 9047768892a..88ab99272db 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -27,6 +27,7 @@ 
alloy-rlp.workspace = true alloy-primitives.workspace = true revm.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true # async tokio = { workspace = true, features = ["sync", "time"] } diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index fcc8be9a88e..4274d451e43 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -10,6 +10,7 @@ use crate::metrics::PayloadBuilderMetrics; use alloy_consensus::constants::EMPTY_WITHDRAWALS; +use alloy_eips::merge::SLOT_DURATION; use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; @@ -22,8 +23,7 @@ use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; use reth_primitives::{ - constants::{RETH_CLIENT_VERSION, SLOT_DURATION}, - proofs, BlockNumberOrTag, SealedBlock, Withdrawals, + constants::RETH_CLIENT_VERSION, proofs, BlockNumberOrTag, SealedBlock, Withdrawals, }; use reth_provider::{ BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 2874c596a8a..a4091a4a9d9 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,7 +1,6 @@ //! Ethereum protocol-related constants use alloy_primitives::{address, b256, Address, B256, U256}; -use core::time::Duration; /// Gas units, for example [`GIGAGAS`]. pub mod gas_units; @@ -13,15 +12,6 @@ pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION" /// An EPOCH is a series of 32 slots. pub const EPOCH_SLOTS: u64 = 32; -/// The duration of a slot in seconds. -/// -/// This is the time period of 12 seconds in which a randomly chosen validator has time to propose a -/// block. 
-pub const SLOT_DURATION: Duration = Duration::from_secs(12); - -/// An EPOCH is a series of 32 slots (~6.4min). -pub const EPOCH_DURATION: Duration = Duration::from_secs(12 * EPOCH_SLOTS); - /// The default block nonce in the beacon consensus pub const BEACON_NONCE: u64 = 0u64; From 89eb73f3d23f6d6771e472e6588633e8cb284943 Mon Sep 17 00:00:00 2001 From: Oliver Date: Wed, 23 Oct 2024 16:44:37 +0200 Subject: [PATCH 118/970] refactor: replace extra fields with `ExecutionPayloadSidecar` in engine (#11901) --- Cargo.lock | 2 - .../src/commands/debug_cmd/replay_engine.rs | 6 +- crates/consensus/beacon/Cargo.toml | 13 ++- crates/consensus/beacon/src/engine/handle.rs | 15 +--- crates/consensus/beacon/src/engine/message.rs | 12 +-- crates/consensus/beacon/src/engine/mod.rs | 50 +++++------ .../consensus/beacon/src/engine/test_utils.rs | 10 +-- crates/engine/local/src/miner.rs | 9 +- crates/engine/tree/src/tree/mod.rs | 34 +++----- crates/engine/util/src/engine_store.rs | 17 ++-- crates/engine/util/src/reorg.rs | 85 ++++++++----------- crates/engine/util/src/skip_new_payload.rs | 16 +--- crates/payload/validator/Cargo.toml | 1 - crates/payload/validator/src/lib.rs | 21 ++--- crates/rpc/rpc-engine-api/src/engine_api.rs | 44 +++++++--- crates/rpc/rpc-engine-api/tests/it/payload.rs | 19 +++-- .../rpc-types-compat/src/engine/payload.rs | 40 ++++----- 17 files changed, 179 insertions(+), 215 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 92a9d6887d6..350e6a77ae5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6394,7 +6394,6 @@ dependencies = [ name = "reth-beacon-consensus" version = "1.1.0" dependencies = [ - "alloy-eips", "alloy-genesis", "alloy-primitives", "alloy-rpc-types-engine", @@ -8356,7 +8355,6 @@ dependencies = [ name = "reth-payload-validator" version = "1.1.0" dependencies = [ - "alloy-eips", "alloy-rpc-types", "reth-chainspec", "reth-primitives", diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs 
index de497cbe007..e7b3de6b6c1 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -170,10 +170,8 @@ impl> Command { beacon_engine_handle.fork_choice_updated(state, payload_attrs).await?; debug!(target: "reth::cli", ?response, "Received for forkchoice updated"); } - StoredEngineApiMessage::NewPayload { payload, cancun_fields } => { - // todo: prague (last arg) - let response = - beacon_engine_handle.new_payload(payload, cancun_fields, None).await?; + StoredEngineApiMessage::NewPayload { payload, sidecar } => { + let response = beacon_engine_handle.new_payload(payload, sidecar).await?; debug!(target: "reth::cli", ?response, "Received for new payload"); } }; diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index dd1e339319b..b141bd34edb 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -31,7 +31,6 @@ reth-node-types.workspace = true reth-chainspec = { workspace = true, optional = true } # ethereum -alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true @@ -78,10 +77,10 @@ assert_matches.workspace = true [features] optimism = [ - "reth-chainspec", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-blockchain-tree/optimism", - "reth-db/optimism", - "reth-db-api/optimism" + "reth-chainspec", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-blockchain-tree/optimism", + "reth-db/optimism", + "reth-db-api/optimism", ] diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index bb5c4dee174..4aafc6e07c1 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -4,9 +4,8 @@ use crate::{ engine::message::OnForkChoiceUpdated, BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, }; -use 
alloy_eips::eip7685::Requests; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use futures::TryFutureExt; use reth_engine_primitives::EngineTypes; @@ -47,18 +46,10 @@ where pub async fn new_payload( &self, payload: ExecutionPayload, - cancun_fields: Option, - execution_requests: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { let (tx, rx) = oneshot::channel(); - // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary - // workaround. - let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - execution_requests, - tx, - }); + let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, sidecar, tx }); rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? } diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 56328f03db0..e33decbd848 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -1,7 +1,6 @@ use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; -use alloy_eips::eip7685::Requests; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, + ExecutionPayload, ExecutionPayloadSidecar, ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; use futures::{future::Either, FutureExt}; @@ -145,12 +144,9 @@ pub enum BeaconEngineMessage { NewPayload { /// The execution payload received by Engine API. payload: ExecutionPayload, - /// The cancun-related newPayload fields, if any. - cancun_fields: Option, - // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary - // workaround. 
- /// The pectra EIP-7685 execution requests. - execution_requests: Option, + /// The execution payload sidecar with additional version-specific fields received by + /// engine API. + sidecar: ExecutionPayloadSidecar, /// The sender for returning payload status result. tx: oneshot::Sender>, }, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index cff648b2843..5af1e26acca 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,7 +1,6 @@ -use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; use futures::{stream::BoxStream, Future, StreamExt}; @@ -1081,14 +1080,11 @@ where /// /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip(self, payload, cancun_fields), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] + #[instrument(level = "trace", skip(self, payload, sidecar), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] fn on_new_payload( &mut self, payload: ExecutionPayload, - cancun_fields: Option, - // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary - // workaround. 
- execution_requests: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result, BeaconOnNewPayloadError> { self.metrics.new_payload_messages.increment(1); @@ -1118,11 +1114,7 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. let parent_hash = payload.parent_hash(); - let block = match self.payload_validator.ensure_well_formed_payload( - payload, - cancun_fields.into(), - execution_requests, - ) { + let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { Ok(block) => block, Err(error) => { error!(target: "consensus::engine", %error, "Invalid payload"); @@ -1867,13 +1859,8 @@ where BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { this.on_forkchoice_updated(state, payload_attrs, tx); } - BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - execution_requests, - tx, - } => { - match this.on_new_payload(payload, cancun_fields, execution_requests) { + BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { + match this.on_new_payload(payload, sidecar) { Ok(Either::Right(block)) => { this.set_blockchain_tree_action( BlockchainTreeAction::InsertNewPayload { block, tx }, @@ -2061,7 +2048,12 @@ mod tests { assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because no FCUs were received - let _ = env.send_new_payload(block_to_payload_v1(SealedBlock::default()), None).await; + let _ = env + .send_new_payload( + block_to_payload_v1(SealedBlock::default()), + ExecutionPayloadSidecar::none(), + ) + .await; assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); @@ -2626,7 +2618,7 @@ mod tests { 0, BlockParams { ommers_count: Some(0), ..Default::default() }, )), - None, + ExecutionPayloadSidecar::none(), ) .await; @@ -2641,7 +2633,7 @@ mod tests { 1, BlockParams { ommers_count: Some(0), ..Default::default() }, )), - None, + ExecutionPayloadSidecar::none(), ) .await; @@ -2719,7 +2711,10 @@ mod tests { // Send new 
payload let result = env - .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing( + block_to_payload_v1(block2.clone()), + ExecutionPayloadSidecar::none(), + ) .await .unwrap(); @@ -2854,7 +2849,9 @@ mod tests { 2, BlockParams { parent: Some(parent), ommers_count: Some(0), ..Default::default() }, ); - let res = env.send_new_payload(block_to_payload_v1(block), None).await; + let res = env + .send_new_payload(block_to_payload_v1(block), ExecutionPayloadSidecar::none()) + .await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2924,7 +2921,10 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing( + block_to_payload_v1(block2.clone()), + ExecutionPayloadSidecar::none(), + ) .await .unwrap(); diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 7e9e1ec6b26..912f0a871bf 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -6,7 +6,7 @@ use crate::{ }; use alloy_primitives::{BlockNumber, Sealable, B256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, @@ -68,9 +68,9 @@ impl TestEnv { pub async fn send_new_payload>( &self, payload: T, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { - self.engine_handle.new_payload(payload.into(), cancun_fields, None).await + self.engine_handle.new_payload(payload.into(), sidecar).await } /// Sends the 
`ExecutionPayload` message to the consensus engine and retries if the engine @@ -78,11 +78,11 @@ impl TestEnv { pub async fn send_new_payload_retry_on_syncing>( &self, payload: T, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { let payload: ExecutionPayload = payload.into(); loop { - let result = self.send_new_payload(payload.clone(), cancun_fields.clone()).await?; + let result = self.send_new_payload(payload.clone(), sidecar.clone()).await?; if !result.is_syncing() { return Ok(result) } diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 552cbd04776..706ddc43de3 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -1,7 +1,7 @@ //! Contains the implementation of the mining mode for the local engine. use alloy_primitives::{TxHash, B256}; -use alloy_rpc_types_engine::{CancunPayloadFields, ForkchoiceState}; +use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar, ForkchoiceState}; use eyre::OptionExt; use futures_util::{stream::Fuse, StreamExt}; use reth_beacon_consensus::BeaconEngineMessage; @@ -221,9 +221,10 @@ where let (tx, rx) = oneshot::channel(); self.to_engine.send(BeaconEngineMessage::NewPayload { payload: block_to_payload(payload.block().clone()), - cancun_fields, - // todo: prague - execution_requests: None, + // todo: prague support + sidecar: cancun_fields + .map(ExecutionPayloadSidecar::v3) + .unwrap_or_else(ExecutionPayloadSidecar::none), tx, })?; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 021b3149ad5..555cf89164f 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -10,7 +10,7 @@ use alloy_primitives::{ BlockNumber, B256, U256, }; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, 
PayloadValidationError, }; use reth_beacon_consensus::{ @@ -70,7 +70,6 @@ use crate::{ engine::{EngineApiKind, EngineApiRequest}, tree::metrics::EngineApiMetrics, }; -use alloy_eips::eip7685::Requests; pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; @@ -722,8 +721,7 @@ where fn on_new_payload( &mut self, payload: ExecutionPayload, - cancun_fields: Option, - execution_requests: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result, InsertBlockFatalError> { trace!(target: "engine::tree", "invoked new payload"); self.metrics.engine.new_payload_messages.increment(1); @@ -754,11 +752,7 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. let parent_hash = payload.parent_hash(); - let block = match self.payload_validator.ensure_well_formed_payload( - payload, - cancun_fields.into(), - execution_requests, - ) { + let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { Ok(block) => block, Err(error) => { error!(target: "engine::tree", %error, "Invalid payload"); @@ -1241,14 +1235,8 @@ where error!(target: "engine::tree", "Failed to send event: {err:?}"); } } - BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - execution_requests, - tx, - } => { - let output = - self.on_new_payload(payload, cancun_fields, execution_requests); + BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { + let output = self.on_new_payload(payload, sidecar); if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { reth_beacon_consensus::BeaconOnNewPayloadError::Internal( Box::new(e), @@ -2585,6 +2573,7 @@ mod tests { use crate::persistence::PersistenceAction; use alloy_primitives::{Bytes, Sealable}; use alloy_rlp::Decodable; + use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar}; use assert_matches::assert_matches; use reth_beacon_consensus::{EthBeaconConsensus, 
ForkchoiceStatus}; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; @@ -2862,11 +2851,10 @@ mod tests { self.tree .on_new_payload( payload.into(), - Some(CancunPayloadFields { + ExecutionPayloadSidecar::v3(CancunPayloadFields { parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), versioned_hashes: vec![], }), - None, ) .unwrap(); } @@ -3129,7 +3117,10 @@ mod tests { let mut test_harness = TestHarness::new(HOLESKY.clone()); - let outcome = test_harness.tree.on_new_payload(payload.into(), None, None).unwrap(); + let outcome = test_harness + .tree + .on_new_payload(payload.into(), ExecutionPayloadSidecar::none()) + .unwrap(); assert!(outcome.outcome.is_syncing()); // ensure block is buffered @@ -3173,8 +3164,7 @@ mod tests { .on_engine_message(FromEngine::Request( BeaconEngineMessage::NewPayload { payload: payload.clone().into(), - cancun_fields: None, - execution_requests: None, + sidecar: ExecutionPayloadSidecar::none(), tx, } .into(), diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs index de193bf3bbe..85c5e126fa4 100644 --- a/crates/engine/util/src/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -1,6 +1,6 @@ //! Stores engine API messages to disk for later inspection and replay. -use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayload, ForkchoiceState}; +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState}; use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconEngineMessage; use reth_engine_primitives::EngineTypes; @@ -30,8 +30,9 @@ pub enum StoredEngineApiMessage { NewPayload { /// The [`ExecutionPayload`] sent in the persisted call. payload: ExecutionPayload, - /// The Cancun-specific fields sent in the persisted call, if any. - cancun_fields: Option, + /// The execution payload sidecar with additional version-specific fields received by + /// engine API. 
+ sidecar: ExecutionPayloadSidecar, }, } @@ -73,20 +74,14 @@ impl EngineMessageStore { })?, )?; } - // todo(onbjerg): execution requests - BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - execution_requests: _, - tx: _tx, - } => { + BeaconEngineMessage::NewPayload { payload, sidecar, tx: _tx } => { let filename = format!("{}-new_payload-{}.json", timestamp, payload.block_hash()); fs::write( self.path.join(filename), serde_json::to_vec( &StoredEngineApiMessage::::NewPayload { payload: payload.clone(), - cancun_fields: cancun_fields.clone(), + sidecar: sidecar.clone(), }, )?, )?; diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 85216e32fad..d109fb9e94a 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -1,10 +1,9 @@ //! Stream wrapper that simulates reorgs. use alloy_consensus::Transaction; -use alloy_eips::eip7685::Requests; use alloy_primitives::U256; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, + CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, }; use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use itertools::Either; @@ -150,12 +149,7 @@ where let next = ready!(this.stream.poll_next_unpin(cx)); let item = match (next, &this.last_forkchoice_state) { ( - Some(BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - execution_requests, - tx, - }), + Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }), Some(last_forkchoice_state), ) if this.forkchoice_states_forwarded > this.frequency && // Only enter reorg state if new payload attaches to current head. @@ -170,29 +164,26 @@ where // forkchoice state. We will rely on CL to reorg us back to canonical chain. // TODO: This is an expensive blocking operation, ideally it's spawned as a task // so that the stream could yield the control back. 
- let (reorg_payload, reorg_cancun_fields, reorg_execution_requests) = - match create_reorg_head( - this.provider, - this.evm_config, - this.payload_validator, - *this.depth, - payload.clone(), - cancun_fields.clone(), - execution_requests.clone(), - ) { - Ok(result) => result, - Err(error) => { - error!(target: "engine::stream::reorg", %error, "Error attempting to create reorg head"); - // Forward the payload and attempt to create reorg on top of - // the next one - return Poll::Ready(Some(BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - execution_requests, - tx, - })) - } - }; + let (reorg_payload, reorg_sidecar) = match create_reorg_head( + this.provider, + this.evm_config, + this.payload_validator, + *this.depth, + payload.clone(), + sidecar.clone(), + ) { + Ok(result) => result, + Err(error) => { + error!(target: "engine::stream::reorg", %error, "Error attempting to create reorg head"); + // Forward the payload and attempt to create reorg on top of + // the next one + return Poll::Ready(Some(BeaconEngineMessage::NewPayload { + payload, + sidecar, + tx, + })) + } + }; let reorg_forkchoice_state = ForkchoiceState { finalized_block_hash: last_forkchoice_state.finalized_block_hash, safe_block_hash: last_forkchoice_state.safe_block_hash, @@ -208,17 +199,11 @@ where let queue = VecDeque::from([ // Current payload - BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - execution_requests, - tx, - }, + BeaconEngineMessage::NewPayload { payload, sidecar, tx }, // Reorg payload BeaconEngineMessage::NewPayload { payload: reorg_payload, - cancun_fields: reorg_cancun_fields, - execution_requests: reorg_execution_requests, + sidecar: reorg_sidecar, tx: reorg_payload_tx, }, // Reorg forkchoice state @@ -252,9 +237,8 @@ fn create_reorg_head( payload_validator: &ExecutionPayloadValidator, mut depth: usize, next_payload: ExecutionPayload, - next_cancun_fields: Option, - next_execution_requests: Option, -) -> RethResult<(ExecutionPayload, Option, 
Option)> + next_sidecar: ExecutionPayloadSidecar, +) -> RethResult<(ExecutionPayload, ExecutionPayloadSidecar)> where Provider: BlockReader + StateProviderFactory, Evm: ConfigureEvm
, @@ -264,11 +248,7 @@ where // Ensure next payload is valid. let next_block = payload_validator - .ensure_well_formed_payload( - next_payload, - next_cancun_fields.into(), - next_execution_requests, - ) + .ensure_well_formed_payload(next_payload, next_sidecar) .map_err(RethError::msg)?; // Fetch reorg target block depending on its depth and its parent. @@ -439,11 +419,16 @@ where Ok(( block_to_payload(reorg_block), + // todo(onbjerg): how do we support execution requests? reorg_target .header .parent_beacon_block_root - .map(|root| CancunPayloadFields { parent_beacon_block_root: root, versioned_hashes }), - // todo(prague) - None, + .map(|root| { + ExecutionPayloadSidecar::v3(CancunPayloadFields { + parent_beacon_block_root: root, + versioned_hashes, + }) + }) + .unwrap_or_else(ExecutionPayloadSidecar::none), )) } diff --git a/crates/engine/util/src/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs index 47c48282eef..16f2e98197c 100644 --- a/crates/engine/util/src/skip_new_payload.rs +++ b/crates/engine/util/src/skip_new_payload.rs @@ -41,19 +41,14 @@ where loop { let next = ready!(this.stream.poll_next_unpin(cx)); let item = match next { - Some(BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - execution_requests, - tx, - }) => { + Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }) => { if this.skipped < this.threshold { *this.skipped += 1; tracing::warn!( target: "engine::stream::skip_new_payload", block_number = payload.block_number(), block_hash = %payload.block_hash(), - ?cancun_fields, + ?sidecar, threshold=this.threshold, skipped=this.skipped, "Skipping new payload" ); @@ -61,12 +56,7 @@ where continue } *this.skipped = 0; - Some(BeaconEngineMessage::NewPayload { - payload, - cancun_fields, - execution_requests, - tx, - }) + Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }) } next => next, }; diff --git a/crates/payload/validator/Cargo.toml b/crates/payload/validator/Cargo.toml index 
619b99f28de..2662b987f88 100644 --- a/crates/payload/validator/Cargo.toml +++ b/crates/payload/validator/Cargo.toml @@ -18,5 +18,4 @@ reth-primitives.workspace = true reth-rpc-types-compat.workspace = true # alloy -alloy-eips.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 3ec7b206a5b..9952815fd98 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -8,8 +8,9 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_eips::eip7685::Requests; -use alloy_rpc_types::engine::{ExecutionPayload, MaybeCancunPayloadFields, PayloadError}; +use alloy_rpc_types::engine::{ + ExecutionPayload, ExecutionPayloadSidecar, MaybeCancunPayloadFields, PayloadError, +}; use reth_chainspec::EthereumHardforks; use reth_primitives::SealedBlock; use reth_rpc_types_compat::engine::payload::try_into_block; @@ -112,15 +113,12 @@ impl ExecutionPayloadValidator { pub fn ensure_well_formed_payload( &self, payload: ExecutionPayload, - cancun_fields: MaybeCancunPayloadFields, - execution_requests: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { let expected_hash = payload.block_hash(); // First parse the block - let sealed_block = - try_into_block(payload, cancun_fields.parent_beacon_block_root(), execution_requests)? 
- .seal_slow(); + let sealed_block = try_into_block(payload, &sidecar)?.seal_slow(); // Ensure the hash included in the payload matches the block hash if expected_hash != sealed_block.hash() { @@ -139,7 +137,7 @@ impl ExecutionPayloadValidator { // cancun active but excess blob gas not present return Err(PayloadError::PostCancunBlockWithoutExcessBlobGas) } - if cancun_fields.as_ref().is_none() { + if sidecar.cancun().is_none() { // cancun active but cancun fields not present return Err(PayloadError::PostCancunWithoutCancunFields) } @@ -156,7 +154,7 @@ impl ExecutionPayloadValidator { // cancun not active but excess blob gas present return Err(PayloadError::PreCancunBlockWithExcessBlobGas) } - if cancun_fields.as_ref().is_some() { + if sidecar.cancun().is_some() { // cancun not active but cancun fields present return Err(PayloadError::PreCancunWithCancunFields) } @@ -175,7 +173,10 @@ impl ExecutionPayloadValidator { } // EIP-4844 checks - self.ensure_matching_blob_versioned_hashes(&sealed_block, &cancun_fields)?; + self.ensure_matching_blob_versioned_hashes( + &sealed_block, + &sidecar.cancun().cloned().into(), + )?; Ok(sealed_block) } diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index ca055a77ea1..eb280408ecd 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -5,8 +5,8 @@ use alloy_eips::{eip4844::BlobAndProofV1, eip7685::Requests}; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, - ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, - ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, + ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV3, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, }; use 
async_trait::async_trait; use jsonrpsee_core::RpcResult; @@ -140,7 +140,11 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V1, payload_or_attrs)?; - Ok(self.inner.beacon_consensus.new_payload(payload, None, None).await?) + Ok(self + .inner + .beacon_consensus + .new_payload(payload, ExecutionPayloadSidecar::none()) + .await?) } /// See also @@ -156,7 +160,11 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V2, payload_or_attrs)?; - Ok(self.inner.beacon_consensus.new_payload(payload, None, None).await?) + Ok(self + .inner + .beacon_consensus + .new_payload(payload, ExecutionPayloadSidecar::none()) + .await?) } /// See also @@ -176,9 +184,17 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V3, payload_or_attrs)?; - let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; - - Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields), None).await?) + Ok(self + .inner + .beacon_consensus + .new_payload( + payload, + ExecutionPayloadSidecar::v3(CancunPayloadFields { + versioned_hashes, + parent_beacon_block_root, + }), + ) + .await?) } /// See also @@ -187,8 +203,6 @@ where payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, - // TODO(onbjerg): Figure out why we even get these here, since we'll check the requests - // from execution against the requests root in the header. execution_requests: Requests, ) -> EngineApiResult { let payload = ExecutionPayload::from(payload); @@ -201,14 +215,16 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V4, payload_or_attrs)?; - let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; - - // HACK(onbjerg): We should have a pectra payload fields struct, this is just a temporary - // workaround. 
Ok(self .inner .beacon_consensus - .new_payload(payload, Some(cancun_fields), Some(execution_requests)) + .new_payload( + payload, + ExecutionPayloadSidecar::v4( + CancunPayloadFields { versioned_hashes, parent_beacon_block_root }, + execution_requests, + ), + ) .await?) } diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 007a62db045..febbc291e35 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -3,7 +3,8 @@ use alloy_primitives::{Bytes, Sealable, U256}; use alloy_rlp::{Decodable, Error as RlpError}; use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, + ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadSidecar, ExecutionPayloadV1, + PayloadError, }; use assert_matches::assert_matches; use reth_primitives::{proofs, Block, SealedBlock, SealedHeader, TransactionSigned, Withdrawals}; @@ -75,7 +76,10 @@ fn payload_validation() { b }); - assert_matches!(try_into_sealed_block(block_with_valid_extra_data, None, None), Ok(_)); + assert_matches!( + try_into_sealed_block(block_with_valid_extra_data, &ExecutionPayloadSidecar::none()), + Ok(_) + ); // Invalid extra data let block_with_invalid_extra_data = Bytes::from_static(&[0; 33]); @@ -84,7 +88,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(invalid_extra_data_block, None, None), + try_into_sealed_block(invalid_extra_data_block, &ExecutionPayloadSidecar::none()), Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data ); @@ -94,7 +98,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_zero_base_fee, None, None), + try_into_sealed_block(block_with_zero_base_fee, &ExecutionPayloadSidecar::none()), Err(PayloadError::BaseFee(val)) if val.is_zero() ); @@ -113,7 +117,7 @@ fn payload_validation() { b }); assert_matches!( - 
try_into_sealed_block(block_with_ommers.clone(), None, None), + try_into_sealed_block(block_with_ommers.clone(), &ExecutionPayloadSidecar::none()), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_ommers.block_hash() ); @@ -124,7 +128,7 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_difficulty.clone(), None, None), + try_into_sealed_block(block_with_difficulty.clone(), &ExecutionPayloadSidecar::none()), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_difficulty.block_hash() ); @@ -134,9 +138,8 @@ fn payload_validation() { b }); assert_matches!( - try_into_sealed_block(block_with_nonce.clone(), None, None), + try_into_sealed_block(block_with_nonce.clone(), &ExecutionPayloadSidecar::none()), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_nonce.block_hash() - ); // Valid block diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index b63b7453aeb..b4c45a61781 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -9,7 +9,8 @@ use alloy_eips::{ use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, + ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV2, + ExecutionPayloadV3, PayloadError, }; use reth_primitives::{ proofs::{self}, @@ -248,17 +249,18 @@ pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayload } } -/// Tries to create a new block (without a block hash) from the given payload and optional parent -/// beacon block root. +/// Tries to create a new unsealed block from the given payload and payload sidecar. 
+/// /// Performs additional validation of `extra_data` and `base_fee_per_gas` fields. /// -/// NOTE: The log bloom is assumed to be validated during serialization. +/// # Note +/// +/// The log bloom is assumed to be validated during serialization. /// /// See pub fn try_into_block( value: ExecutionPayload, - parent_beacon_block_root: Option, - execution_requests: Option, + sidecar: &ExecutionPayloadSidecar, ) -> Result { let mut base_payload = match value { ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload)?, @@ -266,29 +268,30 @@ pub fn try_into_block( ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload)?, }; - base_payload.header.parent_beacon_block_root = parent_beacon_block_root; - base_payload.header.requests_hash = execution_requests.map(|reqs| reqs.requests_hash()); + base_payload.header.parent_beacon_block_root = sidecar.parent_beacon_block_root(); + base_payload.header.requests_hash = sidecar.requests().map(Requests::requests_hash); Ok(base_payload) } -/// Tries to create a new block from the given payload and optional parent beacon block root. -/// -/// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and -/// comparing the value with `payload.block_hash`. +/// Tries to create a sealed new block from the given payload and payload sidecar. /// /// Uses [`try_into_block`] to convert from the [`ExecutionPayload`] to [`Block`] and seals the /// block with its hash. /// /// Uses [`validate_block_hash`] to validate the payload block hash and ultimately return the /// [`SealedBlock`]. +/// +/// # Note +/// +/// Empty ommers, nonce, difficulty, and execution request values are validated upon computing block +/// hash and comparing the value with `payload.block_hash`. 
pub fn try_into_sealed_block( payload: ExecutionPayload, - parent_beacon_block_root: Option, - execution_requests: Option, + sidecar: &ExecutionPayloadSidecar, ) -> Result { let block_hash = payload.block_hash(); - let base_payload = try_into_block(payload, parent_beacon_block_root, execution_requests)?; + let base_payload = try_into_block(payload, sidecar)?; // validate block hash and return validate_block_hash(block_hash, base_payload) @@ -356,8 +359,8 @@ mod tests { }; use alloy_primitives::{b256, hex, Bytes, U256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, - ExecutionPayloadV3, + CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, + ExecutionPayloadV2, ExecutionPayloadV3, }; #[test] @@ -575,8 +578,7 @@ mod tests { let cancun_fields = CancunPayloadFields { parent_beacon_block_root, versioned_hashes }; // convert into block - let block = - try_into_block(payload, Some(cancun_fields.parent_beacon_block_root), None).unwrap(); + let block = try_into_block(payload, &ExecutionPayloadSidecar::v3(cancun_fields)).unwrap(); // Ensure the actual hash is calculated if we set the fields to what they should be validate_block_hash(block_hash_with_blob_fee_fields, block).unwrap(); From bf612bee50cada67624791baa7707f260acf1be4 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 23 Oct 2024 17:03:25 +0200 Subject: [PATCH 119/970] chore(hive): update expected failures (#12006) --- .github/assets/hive/expected_failures.yaml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index d4b3d2bcbd3..ec7bd054900 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -41,8 +41,6 @@ engine-withdrawals: - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org (Paris) (reth) - Withdrawals Fork on Canonical Block 8 
/ Side Block 9 - 10 Block Re-Org Sync (Paris) (reth) -# https://github.com/paradigmxyz/reth/issues/8305 -# https://github.com/paradigmxyz/reth/issues/6217 engine-api: [] # https://github.com/paradigmxyz/reth/issues/8305 @@ -58,6 +56,4 @@ engine-cancun: - Invalid NewPayload, Incomplete VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - Invalid NewPayload, Extra VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) -# https://github.com/paradigmxyz/reth/issues/8579 -sync: - - sync reth -> reth +sync: [] From f3853e71b36314a370ace6bf3ac699531e635be7 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 23 Oct 2024 16:22:59 +0100 Subject: [PATCH 120/970] test(trie): get sparse trie nodes at depth (#12007) --- crates/trie/sparse/src/trie.rs | 86 ++++++++++++++++++++++++++++++++-- 1 file changed, 82 insertions(+), 4 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index fae1141ec1c..4d195cbf34c 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -569,6 +569,17 @@ impl RevealedSparseTrie { /// Update hashes of the nodes that are located at a level deeper than or equal to the provided /// depth. Root node has a level of 0. pub fn update_rlp_node_level(&mut self, depth: usize) { + let targets = self.get_nodes_at_depth(depth); + let mut prefix_set = self.prefix_set.clone().freeze(); + for target in targets { + self.rlp_node(target, &mut prefix_set); + } + } + + /// Returns a list of paths to the nodes that are located at the provided depth when counting + /// from the root node. If there's a leaf at a depth less than the provided depth, it will be + /// included in the result. 
+ fn get_nodes_at_depth(&self, depth: usize) -> HashSet { let mut paths = Vec::from([(Nibbles::default(), 0)]); let mut targets = HashSet::::default(); @@ -602,10 +613,7 @@ impl RevealedSparseTrie { } } - let mut prefix_set = self.prefix_set.clone().freeze(); - for target in targets { - self.rlp_node(target, &mut prefix_set); - } + targets } fn rlp_node(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { @@ -1518,4 +1526,74 @@ mod tests { Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) ); } + + #[test] + fn sparse_trie_get_nodes_at_depth() { + let mut sparse = RevealedSparseTrie::default(); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + // Extension (Key = 5) – Level 0 + // └── Branch (Mask = 1011) – Level 1 + // ├── 0 -> Extension (Key = 23) – Level 2 + // │ └── Branch (Mask = 0101) – Level 3 + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) – Level 4 + // │ └── 3 -> Leaf (Key = 3, Path = 50233) – Level 4 + // ├── 2 -> Leaf (Key = 013, Path = 52013) – Level 2 + // └── 3 -> Branch (Mask = 0101) – Level 2 + // ├── 1 -> Leaf (Key = 3102, Path = 53102) – Level 3 + // └── 3 -> Branch (Mask = 1010) – Level 3 + // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 + // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); + + assert_eq!(sparse.get_nodes_at_depth(0), HashSet::from([Nibbles::default()])); + 
assert_eq!( + sparse.get_nodes_at_depth(1), + HashSet::from([Nibbles::from_nibbles_unchecked([0x5])]) + ); + assert_eq!( + sparse.get_nodes_at_depth(2), + HashSet::from([ + Nibbles::from_nibbles_unchecked([0x5, 0x0]), + Nibbles::from_nibbles_unchecked([0x5, 0x2]), + Nibbles::from_nibbles_unchecked([0x5, 0x3]) + ]) + ); + assert_eq!( + sparse.get_nodes_at_depth(3), + HashSet::from([ + Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3]), + Nibbles::from_nibbles_unchecked([0x5, 0x2]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3]) + ]) + ); + assert_eq!( + sparse.get_nodes_at_depth(4), + HashSet::from([ + Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x1]), + Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x3]), + Nibbles::from_nibbles_unchecked([0x5, 0x2]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x0]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x2]) + ]) + ); + } } From b73261936ee0995818030d696ad1c8778ecd449a Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 23 Oct 2024 16:59:22 +0100 Subject: [PATCH 121/970] chore(trie): prefix set doc comment clarification (#12010) --- crates/trie/trie/src/prefix_set.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/trie/src/prefix_set.rs index da912fbbdad..0cf16f939d7 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/trie/src/prefix_set.rs @@ -168,8 +168,7 @@ pub struct PrefixSet { } impl PrefixSet { - /// Returns `true` if any of the keys in the set has the given prefix or - /// if the given prefix is a prefix of any key in the set. 
+ /// Returns `true` if any of the keys in the set has the given prefix #[inline] pub fn contains(&mut self, prefix: &[u8]) -> bool { if self.all { From 57a21fcb9efc274675ea00a644ebbfa627940c6d Mon Sep 17 00:00:00 2001 From: Julian Meyer Date: Wed, 23 Oct 2024 09:22:51 -0700 Subject: [PATCH 122/970] chore: increase max proof window (#12001) --- crates/rpc/rpc-server-types/src/constants.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index 0bc44181932..126bc722fd5 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -51,9 +51,9 @@ pub const DEFAULT_MAX_SIMULATE_BLOCKS: u64 = 256; /// The default eth historical proof window. pub const DEFAULT_ETH_PROOF_WINDOW: u64 = 0; -/// Maximum eth historical proof window. Equivalent to roughly one and a half months of data on a 12 -/// second block time, and a week on a 2 second block time. -pub const MAX_ETH_PROOF_WINDOW: u64 = 7 * 24 * 60 * 60 / 2; +/// Maximum eth historical proof window. Equivalent to roughly 6 months of data on a 12 +/// second block time, and a month on a 2 second block time. 
+pub const MAX_ETH_PROOF_WINDOW: u64 = 28 * 24 * 60 * 60 / 2; /// GPO specific constants pub mod gas_oracle { From d6f5a89a277a9490137d634b21c4cca4e728015f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 23 Oct 2024 20:40:57 +0200 Subject: [PATCH 123/970] test: tests for empty block bodies (#12013) --- crates/net/eth-wire-types/src/blocks.rs | 9 +++++++++ crates/net/eth-wire-types/src/message.rs | 16 +++++++++++++++- crates/primitives/src/block.rs | 9 +++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index d60c63fc1f6..878b4573f2b 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -497,4 +497,13 @@ mod tests { let result = RequestPair::decode(&mut &data[..]).unwrap(); assert_eq!(result, expected); } + + #[test] + fn empty_block_bodies_rlp() { + let body = BlockBodies::default(); + let mut buf = Vec::new(); + body.encode(&mut buf); + let decoded = BlockBodies::decode(&mut buf.as_slice()).unwrap(); + assert_eq!(body, decoded); + } } diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 9ef8e6c7147..4afcb34e13b 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -494,7 +494,8 @@ where mod tests { use super::MessageError; use crate::{ - message::RequestPair, EthMessage, EthMessageID, GetNodeData, NodeData, ProtocolMessage, + message::RequestPair, EthMessage, EthMessageID, EthVersion, GetNodeData, NodeData, + ProtocolMessage, }; use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable, Error}; @@ -566,4 +567,17 @@ mod tests { let result = RequestPair::>::decode(&mut &*raw_pair); assert!(matches!(result, Err(Error::UnexpectedLength))); } + + #[test] + fn empty_block_bodies_protocol() { + let empty_block_bodies = ProtocolMessage::from(EthMessage::BlockBodies(RequestPair { + request_id: 0, + message: 
Default::default(), + })); + let mut buf = Vec::new(); + empty_block_bodies.encode(&mut buf); + let decoded = + ProtocolMessage::decode_message(EthVersion::Eth68, &mut buf.as_slice()).unwrap(); + assert_eq!(empty_block_bodies, decoded); + } } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 717b0446bea..a06979300ac 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1090,4 +1090,13 @@ mod tests { let block = block.seal_slow(); assert_eq!(sealed, block.hash()); } + + #[test] + fn empty_block_rlp() { + let body = BlockBody::default(); + let mut buf = Vec::new(); + body.encode(&mut buf); + let decoded = BlockBody::decode(&mut buf.as_slice()).unwrap(); + assert_eq!(body, decoded); + } } From 2fb63b04911affa850ada5a336953942f4f11b0f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 23 Oct 2024 21:40:29 +0200 Subject: [PATCH 124/970] chore: dont log if nothing to evict (#12015) --- crates/net/discv4/src/lib.rs | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 779c7ee637a..a99906bdf09 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -1513,11 +1513,12 @@ impl Discv4Service { true }); - trace!(target: "discv4", num=%failed_pings.len(), "evicting nodes due to failed pong"); - - // remove nodes that failed to pong - for node_id in failed_pings { - self.remove_node(node_id); + if !failed_pings.is_empty() { + // remove nodes that failed to pong + trace!(target: "discv4", num=%failed_pings.len(), "evicting nodes due to failed pong"); + for node_id in failed_pings { + self.remove_node(node_id); + } } let mut failed_lookups = Vec::new(); @@ -1528,11 +1529,13 @@ impl Discv4Service { } true }); - trace!(target: "discv4", num=%failed_lookups.len(), "evicting nodes due to failed lookup"); - // remove nodes that failed the e2e lookup process, so we can restart it - for 
node_id in failed_lookups { - self.remove_node(node_id); + if !failed_lookups.is_empty() { + // remove nodes that failed the e2e lookup process, so we can restart it + trace!(target: "discv4", num=%failed_lookups.len(), "evicting nodes due to failed lookup"); + for node_id in failed_lookups { + self.remove_node(node_id); + } } self.evict_failed_find_nodes(now); @@ -1553,6 +1556,10 @@ impl Discv4Service { true }); + if failed_find_nodes.is_empty() { + return + } + trace!(target: "discv4", num=%failed_find_nodes.len(), "processing failed find nodes"); for node_id in failed_find_nodes { From 565e4b400d6e3a4125efdcd50bb5a2c2d60f65ca Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 24 Oct 2024 01:15:15 +0200 Subject: [PATCH 125/970] refactor(primitive-traits): use alloy `ETHEREUM_BLOCK_GAS_LIMIT` constant (#12019) --- Cargo.lock | 1 + crates/chainspec/src/spec.rs | 6 ++--- crates/node/core/src/args/payload_builder.rs | 3 +-- crates/node/core/src/args/txpool.rs | 3 ++- crates/optimism/chainspec/Cargo.toml | 22 ++++++++++--------- crates/optimism/chainspec/src/op.rs | 2 +- crates/optimism/chainspec/src/op_sepolia.rs | 2 +- crates/primitives-traits/src/constants/mod.rs | 3 --- crates/rpc/rpc-server-types/src/constants.rs | 5 ++--- crates/rpc/rpc/src/eth/helpers/state.rs | 2 +- crates/rpc/rpc/src/eth/helpers/transaction.rs | 2 +- crates/transaction-pool/src/config.rs | 3 ++- crates/transaction-pool/src/noop.rs | 4 ++-- crates/transaction-pool/src/pool/txpool.rs | 5 ++--- crates/transaction-pool/tests/it/evict.rs | 3 ++- 15 files changed, 32 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 350e6a77ae5..b66da618106 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8060,6 +8060,7 @@ version = "1.1.0" dependencies = [ "alloy-chains", "alloy-consensus", + "alloy-eips", "alloy-genesis", "alloy-primitives", "derive_more 1.0.0", diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 
0e38d866b0d..bebf7ca2602 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -9,6 +9,7 @@ use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use derive_more::From; use alloy_consensus::constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH}; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use reth_ethereum_forks::{ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Hardforks, Head, DEV_HARDFORKS, @@ -18,10 +19,7 @@ use reth_network_peers::{ sepolia_nodes, NodeRecord, }; use reth_primitives_traits::{ - constants::{ - EIP1559_INITIAL_BASE_FEE, ETHEREUM_BLOCK_GAS_LIMIT, HOLESKY_GENESIS_HASH, - SEPOLIA_GENESIS_HASH, - }, + constants::{EIP1559_INITIAL_BASE_FEE, HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH}, Header, SealedHeader, }; use reth_trie_common::root::state_root_ref_unhashed; diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index dceb10726ff..524a93195de 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -1,12 +1,11 @@ use crate::{cli::config::PayloadBuilderConfig, version::default_extradata}; use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; -use alloy_eips::merge::SLOT_DURATION; +use alloy_eips::{eip1559::ETHEREUM_BLOCK_GAS_LIMIT, merge::SLOT_DURATION}; use clap::{ builder::{RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; use reth_cli_util::{parse_duration_from_secs, parse_duration_from_secs_or_ms}; -use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use std::{borrow::Cow, ffi::OsStr, time::Duration}; /// Parameters for configuring the Payload Builder diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 63f6c566ca2..282313555f7 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -1,9 +1,10 @@ //! 
Transaction pool arguments use crate::cli::config::RethTransactionPoolConfig; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::Address; use clap::Args; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; +use reth_primitives::constants::MIN_PROTOCOL_BASE_FEE; use reth_transaction_pool::{ blobstore::disk::DEFAULT_MAX_CACHED_BLOBS, pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 6b068dabbf0..4e573ce2994 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -26,6 +26,7 @@ alloy-chains.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true # op op-alloy-rpc-types.workspace = true @@ -45,14 +46,15 @@ op-alloy-rpc-types.workspace = true [features] default = ["std"] std = [ - "alloy-chains/std", - "alloy-genesis/std", - "alloy-primitives/std", - "op-alloy-rpc-types/std", - "reth-chainspec/std", - "reth-ethereum-forks/std", - "reth-primitives-traits/std", - "reth-optimism-forks/std", - "alloy-consensus/std", - "once_cell/std" + "alloy-chains/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-eips/std", + "op-alloy-rpc-types/std", + "reth-chainspec/std", + "reth-ethereum-forks/std", + "reth-primitives-traits/std", + "reth-optimism-forks/std", + "alloy-consensus/std", + "once_cell/std", ] diff --git a/crates/optimism/chainspec/src/op.rs b/crates/optimism/chainspec/src/op.rs index 8c0da5320f9..5afb236cd33 100644 --- a/crates/optimism/chainspec/src/op.rs +++ b/crates/optimism/chainspec/src/op.rs @@ -3,11 +3,11 @@ use alloc::{sync::Arc, vec}; use alloy_chains::Chain; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use 
reth_ethereum_forks::EthereumHardfork; use reth_optimism_forks::OptimismHardfork; -use reth_primitives_traits::constants::ETHEREUM_BLOCK_GAS_LIMIT; use crate::{LazyLock, OpChainSpec}; diff --git a/crates/optimism/chainspec/src/op_sepolia.rs b/crates/optimism/chainspec/src/op_sepolia.rs index d3243ebd534..31c9eda6bdd 100644 --- a/crates/optimism/chainspec/src/op_sepolia.rs +++ b/crates/optimism/chainspec/src/op_sepolia.rs @@ -3,11 +3,11 @@ use alloc::{sync::Arc, vec}; use alloy_chains::{Chain, NamedChain}; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; use reth_optimism_forks::OptimismHardfork; -use reth_primitives_traits::constants::ETHEREUM_BLOCK_GAS_LIMIT; use crate::{LazyLock, OpChainSpec}; diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index a4091a4a9d9..377f66cf004 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -15,9 +15,6 @@ pub const EPOCH_SLOTS: u64 = 32; /// The default block nonce in the beacon consensus pub const BEACON_NONCE: u64 = 0u64; -/// The default Ethereum block gas limit. -pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000; - /// The minimum tx fee below which the txpool will reject the transaction. /// /// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559 diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index 126bc722fd5..48019745a34 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -80,9 +80,8 @@ pub mod gas_oracle { /// The default gas limit for `eth_call` and adjacent calls. 
/// - /// This is different from the default to regular 30M block gas limit - /// [`ETHEREUM_BLOCK_GAS_LIMIT`](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow - /// for more complex calls. + /// This is different from the default to regular 30M block gas limit `ETHEREUM_BLOCK_GAS_LIMIT` + /// to allow for more complex calls. pub const RPC_DEFAULT_GAS_CAP: u64 = 50_000_000; /// Allowed error ratio for gas estimation diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 8a35842798b..429a10333d1 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -45,11 +45,11 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{Address, StorageKey, StorageValue, U256}; use reth_chainspec::MAINNET; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; use reth_rpc_eth_api::helpers::EthState; use reth_rpc_eth_types::{ diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 8bd9997f6e8..24a13cb8062 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -55,11 +55,11 @@ where #[cfg(test)] mod tests { + use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{hex_literal::hex, Bytes}; use reth_chainspec::ChainSpecProvider; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::test_utils::NoopProvider; use reth_rpc_eth_api::helpers::EthTransactions; use reth_rpc_eth_types::{ diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 30703f888c3..8fe49f47652 100644 --- 
a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -3,8 +3,9 @@ use crate::{ PoolSize, TransactionOrigin, }; use alloy_consensus::constants::EIP4844_TX_TYPE_ID; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::Address; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; +use reth_primitives::constants::MIN_PROTOCOL_BASE_FEE; use std::{collections::HashSet, ops::Mul}; /// Guarantees max transactions for one sender, compatible with geth/erigon diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 4464ae1fc8a..11c5e7eea29 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -16,10 +16,10 @@ use crate::{ PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::{eip1559::ETHEREUM_BLOCK_GAS_LIMIT, eip4844::BlobAndProofV1}; use alloy_primitives::{Address, TxHash, B256, U256}; use reth_eth_wire_types::HandleMempoolData; -use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, BlobTransactionSidecar}; +use reth_primitives::BlobTransactionSidecar; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 03e69c39067..a3f192992d1 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -22,10 +22,9 @@ use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{Address, TxHash, B256}; -use reth_primitives::constants::{ - eip4844::BLOB_TX_MIN_BLOB_GASPRICE, 
ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, -}; +use reth_primitives::constants::{eip4844::BLOB_TX_MIN_BLOB_GASPRICE, MIN_PROTOCOL_BASE_FEE}; use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index c7438c9964e..c1d0bbaa642 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -1,8 +1,9 @@ //! Transaction pool eviction tests. +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{Address, B256}; use rand::distributions::Uniform; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; +use reth_primitives::constants::MIN_PROTOCOL_BASE_FEE; use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{ From 044e2d6aea9fbc214563a45ff432c2ef01b42206 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 24 Oct 2024 01:15:45 +0200 Subject: [PATCH 126/970] refactor(primitive-traits): use alloy `EPOCH_SLOTS` constant (#12018) --- Cargo.lock | 1 + crates/consensus/beacon/Cargo.toml | 1 + crates/consensus/beacon/src/engine/mod.rs | 5 ++--- crates/primitives-traits/src/constants/mod.rs | 3 --- crates/storage/provider/src/providers/state/historical.rs | 3 ++- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b66da618106..155cb3417bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6394,6 +6394,7 @@ dependencies = [ name = "reth-beacon-consensus" version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-genesis", "alloy-primitives", "alloy-rpc-types-engine", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index b141bd34edb..1abc09b2a44 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -33,6 +33,7 @@ reth-chainspec = { workspace = true, optional = true } # ethereum alloy-primitives.workspace = true 
alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 5af1e26acca..2363b907840 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,3 +1,4 @@ +use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, @@ -19,9 +20,7 @@ use reth_node_types::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{ - constants::EPOCH_SLOTS, BlockNumHash, Head, Header, SealedBlock, SealedHeader, -}; +use reth_primitives::{BlockNumHash, Head, Header, SealedBlock, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 377f66cf004..86bd9349585 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -9,9 +9,6 @@ pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; /// The client version: `reth/v{major}.{minor}.{patch}` pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); -/// An EPOCH is a series of 32 slots. 
-pub const EPOCH_SLOTS: u64 = 32; - /// The default block nonce in the beacon consensus pub const BEACON_NONCE: u64 = 0u64; diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 640041e0801..56a1d057e70 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -2,6 +2,7 @@ use crate::{ providers::{state::macros::delegate_provider_impls, StaticFileProvider}, AccountReader, BlockHashReader, ProviderError, StateProvider, StateRootProvider, }; +use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, @@ -13,7 +14,7 @@ use reth_db_api::{ table::Table, transaction::DbTx, }; -use reth_primitives::{constants::EPOCH_SLOTS, Account, Bytecode, StaticFileSegment}; +use reth_primitives::{Account, Bytecode, StaticFileSegment}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ From f2195026ccbc1d74298362901f4edede0d363023 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 24 Oct 2024 02:10:00 +0200 Subject: [PATCH 127/970] test: more unit tests for `TreeState` (#11687) --- crates/blockchain-tree/Cargo.toml | 1 + crates/blockchain-tree/src/state.rs | 300 ++++++++++++++++++++++++++++ 2 files changed, 301 insertions(+) diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index aa8fab16fa5..3fa6de2b402 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -57,6 +57,7 @@ reth-consensus = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true reth-revm.workspace = true reth-evm-ethereum.workspace = true +reth-execution-types.workspace = true parking_lot.workspace = true assert_matches.workspace = true 
alloy-genesis.workspace = true diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index b76db9e6a9c..ca8af6f9b58 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -61,6 +61,7 @@ impl TreeState { pub(crate) fn block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { self.block_with_senders_by_hash(block_hash).map(|block| &block.block) } + /// Returns the block with matching hash from any side-chain. /// /// Caution: This will not return blocks from the canonical chain. @@ -128,3 +129,302 @@ impl From for SidechainId { Self(value) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::canonical_chain::CanonicalChain; + use alloy_primitives::B256; + use reth_execution_types::Chain; + use reth_provider::ExecutionOutcome; + + #[test] + fn test_tree_state_initialization() { + // Set up some dummy data for initialization + let last_finalized_block_number = 10u64; + let last_canonical_hashes = vec![(9u64, B256::random()), (10u64, B256::random())]; + let buffer_limit = 5; + + // Initialize the tree state + let tree_state = TreeState::new( + last_finalized_block_number, + last_canonical_hashes.clone(), + buffer_limit, + ); + + // Verify the tree state after initialization + assert_eq!(tree_state.block_chain_id_generator, 0); + assert_eq!(tree_state.block_indices().last_finalized_block(), last_finalized_block_number); + assert_eq!( + *tree_state.block_indices.canonical_chain().inner(), + *CanonicalChain::new(last_canonical_hashes.into_iter().collect()).inner() + ); + assert!(tree_state.chains.is_empty()); + assert!(tree_state.buffered_blocks.lru.is_empty()); + } + + #[test] + fn test_tree_state_next_id() { + // Initialize the tree state + let mut tree_state = TreeState::new(0, vec![], 5); + + // Generate a few sidechain IDs + let first_id = tree_state.next_id(); + let second_id = tree_state.next_id(); + + // Verify the generated sidechain IDs and the updated generator state + 
assert_eq!(first_id, SidechainId(0)); + assert_eq!(second_id, SidechainId(1)); + assert_eq!(tree_state.block_chain_id_generator, 2); + } + + #[test] + fn test_tree_state_insert_chain() { + // Initialize tree state + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create a chain with two blocks + let block = SealedBlockWithSenders::default(); + let block1_hash = B256::random(); + let block2_hash = B256::random(); + + let mut block1 = block.clone(); + let mut block2 = block; + + block1.block.header.set_hash(block1_hash); + block1.block.header.set_block_number(9); + block2.block.header.set_hash(block2_hash); + block2.block.header.set_block_number(10); + + let chain = AppendableChain::new(Chain::new( + [block1, block2], + Default::default(), + Default::default(), + )); + + // Insert the chain into the TreeState + let chain_id = tree_state.insert_chain(chain).unwrap(); + + // Verify the chain ID and that it was added to the chains collection + assert_eq!(chain_id, SidechainId(0)); + assert!(tree_state.chains.contains_key(&chain_id)); + + // Ensure that the block indices are updated + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(), + SidechainId(0) + ); + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(), + SidechainId(0) + ); + + // Ensure that the block chain ID generator was updated + assert_eq!(tree_state.block_chain_id_generator, 1); + + // Create an empty chain + let chain_empty = AppendableChain::new(Chain::default()); + + // Insert the empty chain into the tree state + let chain_id = tree_state.insert_chain(chain_empty); + + // Ensure that the empty chain was not inserted + assert!(chain_id.is_none()); + + // Nothing should have changed and no new chain should have been added + assert!(tree_state.chains.contains_key(&SidechainId(0))); + assert!(!tree_state.chains.contains_key(&SidechainId(1))); + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(), + 
SidechainId(0) + ); + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(), + SidechainId(0) + ); + assert_eq!(tree_state.block_chain_id_generator, 1); + } + + #[test] + fn test_block_by_hash_side_chain() { + // Initialize a tree state with some dummy data + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create two side-chain blocks with random hashes + let block1_hash = B256::random(); + let block2_hash = B256::random(); + + let mut block1 = SealedBlockWithSenders::default(); + let mut block2 = SealedBlockWithSenders::default(); + + block1.block.header.set_hash(block1_hash); + block1.block.header.set_block_number(9); + block2.block.header.set_hash(block2_hash); + block2.block.header.set_block_number(10); + + // Create an chain with these blocks + let chain = AppendableChain::new(Chain::new( + vec![block1.clone(), block2.clone()], + Default::default(), + Default::default(), + )); + + // Insert the side chain into the TreeState + tree_state.insert_chain(chain).unwrap(); + + // Retrieve the blocks by their hashes + let retrieved_block1 = tree_state.block_by_hash(block1_hash); + assert_eq!(*retrieved_block1.unwrap(), block1.block); + + let retrieved_block2 = tree_state.block_by_hash(block2_hash); + assert_eq!(*retrieved_block2.unwrap(), block2.block); + + // Test block_by_hash with a random hash that doesn't exist + let non_existent_hash = B256::random(); + let result = tree_state.block_by_hash(non_existent_hash); + + // Ensure that no block is found + assert!(result.is_none()); + } + + #[test] + fn test_block_with_senders_by_hash() { + // Initialize a tree state with some dummy data + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create two side-chain blocks with random hashes + let block1_hash = B256::random(); + let block2_hash = B256::random(); + + let mut block1 = SealedBlockWithSenders::default(); + let mut block2 = SealedBlockWithSenders::default(); + + block1.block.header.set_hash(block1_hash); + 
block1.block.header.set_block_number(9); + block2.block.header.set_hash(block2_hash); + block2.block.header.set_block_number(10); + + // Create a chain with these blocks + let chain = AppendableChain::new(Chain::new( + vec![block1.clone(), block2.clone()], + Default::default(), + Default::default(), + )); + + // Insert the side chain into the TreeState + tree_state.insert_chain(chain).unwrap(); + + // Test to retrieve the blocks with senders by their hashes + let retrieved_block1 = tree_state.block_with_senders_by_hash(block1_hash); + assert_eq!(*retrieved_block1.unwrap(), block1); + + let retrieved_block2 = tree_state.block_with_senders_by_hash(block2_hash); + assert_eq!(*retrieved_block2.unwrap(), block2); + + // Test block_with_senders_by_hash with a random hash that doesn't exist + let non_existent_hash = B256::random(); + let result = tree_state.block_with_senders_by_hash(non_existent_hash); + + // Ensure that no block is found + assert!(result.is_none()); + } + + #[test] + fn test_get_buffered_block() { + // Initialize a tree state with some dummy data + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create a block with a random hash and add it to the buffer + let block_hash = B256::random(); + let mut block = SealedBlockWithSenders::default(); + block.block.header.set_hash(block_hash); + + // Add the block to the buffered blocks in the TreeState + tree_state.buffered_blocks.insert_block(block.clone()); + + // Test get_buffered_block to retrieve the block by its hash + let retrieved_block = tree_state.get_buffered_block(&block_hash); + assert_eq!(*retrieved_block.unwrap(), block); + + // Test get_buffered_block with a non-existent hash + let non_existent_hash = B256::random(); + let result = tree_state.get_buffered_block(&non_existent_hash); + + // Ensure that no block is found + assert!(result.is_none()); + } + + #[test] + fn test_lowest_buffered_ancestor() { + // Initialize a tree state with some dummy data + let mut tree_state = 
TreeState::new(0, vec![], 5); + + // Create blocks with random hashes and set up parent-child relationships + let ancestor_hash = B256::random(); + let descendant_hash = B256::random(); + + let mut ancestor_block = SealedBlockWithSenders::default(); + let mut descendant_block = SealedBlockWithSenders::default(); + + ancestor_block.block.header.set_hash(ancestor_hash); + descendant_block.block.header.set_hash(descendant_hash); + descendant_block.block.header.set_parent_hash(ancestor_hash); + + // Insert the blocks into the buffer + tree_state.buffered_blocks.insert_block(ancestor_block.clone()); + tree_state.buffered_blocks.insert_block(descendant_block.clone()); + + // Test lowest_buffered_ancestor for the descendant block + let lowest_ancestor = tree_state.lowest_buffered_ancestor(&descendant_hash); + assert!(lowest_ancestor.is_some()); + assert_eq!(lowest_ancestor.unwrap().block.header.hash(), ancestor_hash); + + // Test lowest_buffered_ancestor with a non-existent hash + let non_existent_hash = B256::random(); + let result = tree_state.lowest_buffered_ancestor(&non_existent_hash); + + // Ensure that no ancestor is found + assert!(result.is_none()); + } + + #[test] + fn test_receipts_by_block_hash() { + // Initialize a tree state with some dummy data + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create a block with a random hash and receipts + let block_hash = B256::random(); + let receipt1 = Receipt::default(); + let receipt2 = Receipt::default(); + + let mut block = SealedBlockWithSenders::default(); + block.block.header.set_hash(block_hash); + + let receipts = vec![receipt1, receipt2]; + + // Create a chain with the block and its receipts + let chain = AppendableChain::new(Chain::new( + vec![block.clone()], + ExecutionOutcome { receipts: receipts.clone().into(), ..Default::default() }, + Default::default(), + )); + + // Insert the chain into the TreeState + tree_state.insert_chain(chain).unwrap(); + + // Test receipts_by_block_hash for the 
inserted block + let retrieved_receipts = tree_state.receipts_by_block_hash(block_hash); + assert!(retrieved_receipts.is_some()); + + // Check if the correct receipts are returned + let receipts_ref: Vec<&Receipt> = receipts.iter().collect(); + assert_eq!(retrieved_receipts.unwrap(), receipts_ref); + + // Test receipts_by_block_hash with a non-existent block hash + let non_existent_hash = B256::random(); + let result = tree_state.receipts_by_block_hash(non_existent_hash); + + // Ensure that no receipts are found + assert!(result.is_none()); + } +} From 40935321e3127d02c11552a253e93fcb2a02bc37 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 24 Oct 2024 02:28:16 +0200 Subject: [PATCH 128/970] chore(cli): engine cli options conflict with legacy (#11993) --- bin/reth/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index f424163a24f..e146912c06f 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -33,11 +33,11 @@ pub struct EngineArgs { pub legacy: bool, /// Configure persistence threshold for engine experimental. - #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + #[arg(long = "engine.persistence-threshold", conflicts_with = "legacy", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] pub persistence_threshold: u64, /// Configure the target number of blocks to keep in memory. 
- #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + #[arg(long = "engine.memory-block-buffer-target", conflicts_with = "legacy", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] pub memory_block_buffer_target: u64, } From 082f2cd2356262d4ffd34ea9565bd4c1aba1a5f4 Mon Sep 17 00:00:00 2001 From: caglarkaya Date: Thu, 24 Oct 2024 04:19:39 +0300 Subject: [PATCH 129/970] refactor: use op-alloy deposit signature (#12016) --- Cargo.lock | 1 - crates/optimism/chainspec/src/lib.rs | 8 +-- crates/optimism/evm/src/execute.rs | 5 +- crates/primitives/Cargo.toml | 52 ++++++++----------- crates/primitives/src/transaction/mod.rs | 9 ++-- .../primitives/src/transaction/signature.rs | 5 +- 6 files changed, 31 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 155cb3417bd..63f730b1b66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8394,7 +8394,6 @@ dependencies = [ "reth-chainspec", "reth-codecs", "reth-ethereum-forks", - "reth-optimism-chainspec", "reth-primitives-traits", "reth-static-file-types", "reth-testing-utils", diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 98c6589d1ce..83c499de525 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -20,7 +20,7 @@ mod op_sepolia; use alloc::{vec, vec::Vec}; use alloy_chains::Chain; use alloy_genesis::Genesis; -use alloy_primitives::{Parity, Signature, B256, U256}; +use alloy_primitives::{B256, U256}; pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; use core::fmt::Display; @@ -178,12 +178,6 @@ pub struct OpChainSpec { pub inner: ChainSpec, } -/// Returns the signature for the optimism deposit transactions, which don't include a -/// signature. 
-pub fn optimism_deposit_tx_signature() -> Signature { - Signature::new(U256::ZERO, U256::ZERO, Parity::Parity(false)) -} - impl EthChainSpec for OpChainSpec { fn chain(&self) -> alloy_chains::Chain { self.inner.chain() diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 748e57e6b33..1cd92409847 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -297,9 +297,10 @@ mod tests { use crate::OpChainSpec; use alloy_consensus::TxEip1559; use alloy_primitives::{b256, Address, StorageKey, StorageValue}; + use op_alloy_consensus::TxDeposit; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; - use reth_optimism_chainspec::{optimism_deposit_tx_signature, OpChainSpecBuilder}; + use reth_optimism_chainspec::OpChainSpecBuilder; use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, @@ -465,7 +466,7 @@ mod tests { gas_limit: MIN_TRANSACTION_GAS, ..Default::default() }), - optimism_deposit_tx_signature(), + TxDeposit::signature(), ); let provider = executor_provider(chain_spec); diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 566a114bebf..107c218c758 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -20,9 +20,6 @@ reth-trie-common.workspace = true revm-primitives = { workspace = true, features = ["serde"] } reth-codecs = { workspace = true, optional = true } -# op-reth -reth-optimism-chainspec = { workspace = true, optional = true } - # ethereum alloy-consensus.workspace = true alloy-primitives = { workspace = true, features = ["rand", "rlp"] } @@ -34,14 +31,15 @@ alloy-eips = { workspace = true, features = ["serde"] } # optimism op-alloy-rpc-types = { workspace = true, optional = true } op-alloy-consensus = { workspace = true, 
features = [ - "arbitrary", + "arbitrary", + "serde", ], optional = true } # crypto secp256k1 = { workspace = true, features = [ - "global-context", - "recovery", - "rand", + "global-context", + "recovery", + "rand", ], optional = true } k256.workspace = true # for eip-4844 @@ -83,9 +81,9 @@ test-fuzz.workspace = true criterion.workspace = true pprof = { workspace = true, features = [ - "flamegraph", - "frame-pointer", - "criterion", + "flamegraph", + "frame-pointer", + "criterion", ] } [features] @@ -101,13 +99,10 @@ std = [ "once_cell/std", "revm-primitives/std", "secp256k1?/std", - "serde/std" + "serde/std", ] reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] -asm-keccak = [ - "alloy-primitives/asm-keccak", - "revm-primitives/asm-keccak" -] +asm-keccak = ["alloy-primitives/asm-keccak", "revm-primitives/asm-keccak"] arbitrary = [ "dep:arbitrary", "alloy-eips/arbitrary", @@ -124,38 +119,37 @@ arbitrary = [ "alloy-rpc-types?/arbitrary", "alloy-serde?/arbitrary", "op-alloy-consensus?/arbitrary", - "op-alloy-rpc-types?/arbitrary" + "op-alloy-rpc-types?/arbitrary", ] secp256k1 = ["dep:secp256k1"] c-kzg = [ - "dep:c-kzg", - "alloy-consensus/kzg", - "alloy-eips/kzg", - "revm-primitives/c-kzg", + "dep:c-kzg", + "alloy-consensus/kzg", + "alloy-eips/kzg", + "revm-primitives/c-kzg", ] optimism = [ - "dep:op-alloy-consensus", - "dep:reth-optimism-chainspec", - "reth-codecs?/optimism", - "revm-primitives/optimism", + "dep:op-alloy-consensus", + "reth-codecs?/optimism", + "revm-primitives/optimism", ] alloy-compat = [ - "dep:alloy-rpc-types", - "dep:alloy-serde", - "dep:op-alloy-rpc-types", + "dep:alloy-rpc-types", + "dep:alloy-serde", + "dep:op-alloy-rpc-types", ] test-utils = [ "reth-primitives-traits/test-utils", "reth-chainspec/test-utils", "reth-codecs?/test-utils", - "reth-trie-common/test-utils" + "reth-trie-common/test-utils", ] serde-bincode-compat = [ "alloy-consensus/serde-bincode-compat", "op-alloy-consensus?/serde-bincode-compat", 
"reth-primitives-traits/serde-bincode-compat", "serde_with", - "alloy-eips/serde-bincode-compat" + "alloy-eips/serde-bincode-compat", ] [[bench]] diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 1d410da1ea8..7798433d05d 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -56,8 +56,6 @@ mod variant; #[cfg(feature = "optimism")] use op_alloy_consensus::TxDeposit; #[cfg(feature = "optimism")] -use reth_optimism_chainspec::optimism_deposit_tx_signature; -#[cfg(feature = "optimism")] pub use tx_type::DEPOSIT_TX_TYPE_ID; #[cfg(any(test, feature = "reth-codec"))] use tx_type::{ @@ -955,7 +953,7 @@ impl TransactionSignedNoHash { // transactions with an empty signature // // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if self.is_legacy() && self.signature == optimism_deposit_tx_signature() { + if self.is_legacy() && self.signature == TxDeposit::signature() { return Some(Address::ZERO) } } @@ -1530,7 +1528,7 @@ impl Decodable2718 for TransactionSigned { #[cfg(feature = "optimism")] TxType::Deposit => Ok(Self::from_transaction_and_signature( Transaction::Deposit(TxDeposit::decode(buf)?), - optimism_deposit_tx_signature(), + TxDeposit::signature(), )), } } @@ -1575,8 +1573,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { } #[cfg(feature = "optimism")] - let signature = - if transaction.is_deposit() { optimism_deposit_tx_signature() } else { signature }; + let signature = if transaction.is_deposit() { TxDeposit::signature() } else { signature }; Ok(Self::from_transaction_and_signature(transaction, signature)) } diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 39c0f92fda8..5bfdab8e68e 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -4,9 +4,6 @@ use alloy_rlp::{Decodable, Error as RlpError}; pub use 
alloy_primitives::Signature; -#[cfg(feature = "optimism")] -use reth_optimism_chainspec::optimism_deposit_tx_signature; - /// The order of the secp256k1 curve, divided by two. Signatures that should be checked according /// to EIP-2 should have an S value less than or equal to this. /// @@ -82,7 +79,7 @@ pub fn legacy_parity(signature: &Signature, chain_id: Option) -> Parity { // transactions with an empty signature // // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if *signature == optimism_deposit_tx_signature() { + if *signature == op_alloy_consensus::TxDeposit::signature() { return Parity::Parity(false) } Parity::NonEip155(signature.v().y_parity()) From 7a06298cf70fec4338cb5014f12a6a561d7962f2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 24 Oct 2024 06:46:07 +0200 Subject: [PATCH 130/970] chore(rpc): make `TransactionCompat::fill` stateful (#11732) --- Cargo.lock | 1 + crates/optimism/rpc/Cargo.toml | 2 +- crates/optimism/rpc/src/eth/mod.rs | 19 +++++++++-- crates/optimism/rpc/src/eth/transaction.rs | 30 ++++++++++++----- crates/optimism/rpc/src/lib.rs | 2 +- crates/rpc/rpc-builder/src/eth.rs | 4 ++- crates/rpc/rpc-builder/src/lib.rs | 9 +++-- crates/rpc/rpc-eth-api/src/core.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/block.rs | 3 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 3 +- .../rpc-eth-api/src/helpers/transaction.rs | 8 +++-- crates/rpc/rpc-eth-api/src/types.rs | 7 ++++ crates/rpc/rpc-eth-types/src/simulate.rs | 4 ++- crates/rpc/rpc-eth-types/src/transaction.rs | 6 ++-- crates/rpc/rpc-types-compat/Cargo.toml | 3 ++ crates/rpc/rpc-types-compat/src/block.rs | 8 +++-- .../rpc-types-compat/src/transaction/mod.rs | 32 ++++++++++++++---- crates/rpc/rpc/src/eth/core.rs | 13 ++++++-- crates/rpc/rpc/src/eth/filter.rs | 21 +++++++----- crates/rpc/rpc/src/eth/helpers/types.rs | 6 +++- crates/rpc/rpc/src/eth/pubsub.rs | 33 ++++++++++++------- crates/rpc/rpc/src/txpool.rs | 28 ++++++++-------- 22 files changed, 168 
insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 63f730b1b66..f749672fe9d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8877,6 +8877,7 @@ dependencies = [ "alloy-serde", "reth-primitives", "reth-trie-common", + "serde", "serde_json", ] diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index dc0f96c4012..17ebec7ff74 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -56,7 +56,7 @@ serde_json.workspace = true # misc thiserror.workspace = true tracing.workspace = true -derive_more.workspace = true +derive_more = { workspace = true, features = ["constructor", "deref"] } [dev-dependencies] reth-optimism-chainspec.workspace = true diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index d65dd8edd1d..04774a4651c 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -39,7 +39,7 @@ use reth_tasks::{ }; use reth_transaction_pool::TransactionPool; -use crate::{OpEthApiError, OpTxBuilder, SequencerClient}; +use crate::{OpEthApiError, SequencerClient}; /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend = EthApiInner< @@ -59,7 +59,7 @@ pub type EthApiNodeBackend = EthApiInner< /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. -#[derive(Clone, Deref)] +#[derive(Deref)] pub struct OpEthApi { /// Gateway to node's core components. 
#[deref] @@ -102,7 +102,11 @@ where { type Error = OpEthApiError; type NetworkTypes = Optimism; - type TransactionCompat = OpTxBuilder; + type TransactionCompat = Self; + + fn tx_resp_builder(&self) -> &Self::TransactionCompat { + self + } } impl EthApiSpec for OpEthApi @@ -249,3 +253,12 @@ impl fmt::Debug for OpEthApi { f.debug_struct("OpEthApi").finish_non_exhaustive() } } + +impl Clone for OpEthApi +where + N: FullNodeComponents, +{ + fn clone(&self) -> Self { + Self { inner: self.inner.clone(), sequencer_client: self.sequencer_client.clone() } + } +} diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index b7575c24416..4ac2d7e6b74 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -6,7 +6,7 @@ use alloy_rpc_types::TransactionInfo; use op_alloy_rpc_types::Transaction; use reth_node_api::FullNodeComponents; use reth_primitives::TransactionSignedEcRecovered; -use reth_provider::{BlockReaderIdExt, TransactionsProvider}; +use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; use reth_rpc::eth::EthTxBuilder; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, @@ -88,22 +88,34 @@ where } } -/// Builds OP transaction response type. 
-#[derive(Clone, Debug, Copy)] -pub struct OpTxBuilder; - -impl TransactionCompat for OpTxBuilder { +impl TransactionCompat for OpEthApi +where + N: FullNodeComponents, +{ type Transaction = Transaction; - fn fill(tx: TransactionSignedEcRecovered, tx_info: TransactionInfo) -> Self::Transaction { + fn fill( + &self, + tx: TransactionSignedEcRecovered, + tx_info: TransactionInfo, + ) -> Self::Transaction { let signed_tx = tx.clone().into_signed(); + let hash = tx.hash; - let mut inner = EthTxBuilder::fill(tx, tx_info).inner; + let mut inner = EthTxBuilder.fill(tx, tx_info).inner; if signed_tx.is_deposit() { inner.gas_price = Some(signed_tx.max_fee_per_gas()) } + let deposit_receipt_version = self + .inner + .provider() + .receipt_by_hash(hash) + .ok() // todo: change sig to return result + .flatten() + .and_then(|receipt| receipt.deposit_receipt_version); + Transaction { inner, source_hash: signed_tx.source_hash(), @@ -111,7 +123,7 @@ impl TransactionCompat for OpTxBuilder { // only include is_system_tx if true: is_system_tx: (signed_tx.is_deposit() && signed_tx.is_system_transaction()) .then_some(true), - deposit_receipt_version: None, // todo: how to fill this field? 
+ deposit_receipt_version, } } diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index e3fef7adb5b..0ff1451d05b 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -15,5 +15,5 @@ pub mod eth; pub mod sequencer; pub use error::{OpEthApiError, OptimismInvalidTransactionError, SequencerClientError}; -pub use eth::{transaction::OpTxBuilder, OpEthApi, OpReceiptBuilder}; +pub use eth::{OpEthApi, OpReceiptBuilder}; pub use sequencer::SequencerClient; diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 613652678a2..40acecfedf3 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -22,7 +22,7 @@ pub struct EthHandlers { /// Polling based filter handler available on all transports pub filter: EthFilter, /// Handler for subscriptions only available for transports that support it (ws, ipc) - pub pubsub: EthPubSub, + pub pubsub: EthPubSub, } impl EthHandlers @@ -94,6 +94,7 @@ where ctx.cache.clone(), ctx.config.filter_config(), Box::new(ctx.executor.clone()), + api.tx_resp_builder().clone(), ); let pubsub = EthPubSub::with_spawner( @@ -102,6 +103,7 @@ where ctx.events.clone(), ctx.network.clone(), Box::new(ctx.executor.clone()), + api.tx_resp_builder().clone(), ); Self { api, cache: ctx.cache, filter, pubsub } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index cd93aeb620e..72b53efe674 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1199,9 +1199,12 @@ where .into_rpc() .into(), RethRpcModule::Web3 => Web3Api::new(self.network.clone()).into_rpc().into(), - RethRpcModule::Txpool => { - TxPoolApi::<_, EthApi>::new(self.pool.clone()).into_rpc().into() - } + RethRpcModule::Txpool => TxPoolApi::new( + self.pool.clone(), + self.eth.api.tx_resp_builder().clone(), + ) + .into_rpc() + .into(), RethRpcModule::Rpc => RPCApi::new( namespaces .iter() diff --git 
a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 20edf96d810..66bc5a44d2d 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -502,7 +502,7 @@ where trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); Ok(EthTransactions::transaction_by_hash(self, hash) .await? - .map(|tx| tx.into_transaction::())) + .map(|tx| tx.into_transaction(self.tx_resp_builder()))) } /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 9bf35d850af..da5f275ef0c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -63,11 +63,12 @@ pub trait EthBlocks: LoadBlock { .map_err(Self::Error::from_eth_err)?; } - let block = from_block::( + let block = from_block( (*block).clone().unseal(), total_difficulty.unwrap_or_default(), full.into(), Some(block_hash), + self.tx_resp_builder(), ) .map_err(Self::Error::from_eth_err)?; Ok(Some(block)) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 0acf6646294..1510233c505 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -191,7 +191,7 @@ pub trait EthCall: Call + LoadPendingBlock { results.push((env.tx.caller, res.result)); } - let block = simulate::build_block::( + let block = simulate::build_block( results, transactions, &block_env, @@ -199,6 +199,7 @@ pub trait EthCall: Call + LoadPendingBlock { total_difficulty, return_full_transactions, &db, + this.tx_resp_builder(), )?; parent_hash = block.inner.header.hash; diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index d29787d7a23..0d16a5c9145 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -209,9 +209,10 
@@ pub trait EthTransactions: LoadTransaction { index: Some(index as u64), }; - return Ok(Some(from_recovered_with_block_context::( + return Ok(Some(from_recovered_with_block_context( tx.clone().with_signer(*signer), tx_info, + self.tx_resp_builder(), ))) } } @@ -237,7 +238,7 @@ pub trait EthTransactions: LoadTransaction { LoadState::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { let transaction = tx.transaction.clone().into_consensus(); - return Ok(Some(from_recovered::(transaction.into()))); + return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder()))); } } @@ -288,9 +289,10 @@ pub trait EthTransactions: LoadTransaction { base_fee: base_fee_per_gas.map(u128::from), index: Some(index as u64), }; - from_recovered_with_block_context::( + from_recovered_with_block_context( tx.clone().with_signer(*signer), tx_info, + self.tx_resp_builder(), ) }) }) diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 9ddc23ea32e..653730ed3c9 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -23,12 +23,19 @@ pub trait EthApiTypes: Send + Sync + Clone { type NetworkTypes: Network; /// Conversion methods for transaction RPC type. type TransactionCompat: Send + Sync + Clone + fmt::Debug; + + /// Returns reference to transaction response builder. + fn tx_resp_builder(&self) -> &Self::TransactionCompat; } impl EthApiTypes for () { type Error = EthApiError; type NetworkTypes = AnyNetwork; type TransactionCompat = (); + + fn tx_resp_builder(&self) -> &Self::TransactionCompat { + self + } } /// Adapter for network specific transaction type. diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 77db511e625..1d443861d4f 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -172,6 +172,7 @@ where } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. 
+#[expect(clippy::too_many_arguments)] pub fn build_block( results: Vec<(Address, ExecutionResult)>, transactions: Vec, @@ -180,6 +181,7 @@ pub fn build_block( total_difficulty: U256, full_transactions: bool, db: &CacheDB>>, + tx_resp_builder: &T, ) -> Result>, EthApiError> { let mut calls: Vec = Vec::with_capacity(results.len()); let mut senders = Vec::with_capacity(results.len()); @@ -304,6 +306,6 @@ pub fn build_block( let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; - let block = from_block::(block, total_difficulty, txs_kind, None)?; + let block = from_block(block, total_difficulty, txs_kind, None, tx_resp_builder)?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index c3ca1b503ae..7d2237a1b7f 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -41,9 +41,9 @@ impl TransactionSource { } /// Conversion into network specific transaction type. 
- pub fn into_transaction(self) -> T::Transaction { + pub fn into_transaction(self, resp_builder: &T) -> T::Transaction { match self { - Self::Pool(tx) => from_recovered::(tx), + Self::Pool(tx) => from_recovered(tx, resp_builder), Self::Block { transaction, index, block_hash, block_number, base_fee } => { let tx_info = TransactionInfo { hash: Some(transaction.hash()), @@ -53,7 +53,7 @@ impl TransactionSource { base_fee: base_fee.map(u128::from), }; - from_recovered_with_block_context::(transaction, tx_info) + from_recovered_with_block_context(transaction, tx_info, resp_builder) } } } diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index 8e436f0d393..7d5eac9dbb9 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -26,5 +26,8 @@ alloy-serde.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true +# io +serde.workspace = true + [dev-dependencies] serde_json.workspace = true \ No newline at end of file diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index a650a69c1c1..8cddc8c4497 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -20,12 +20,15 @@ pub fn from_block( total_difficulty: U256, kind: BlockTransactionsKind, block_hash: Option, + tx_resp_builder: &T, ) -> Result, BlockError> { match kind { BlockTransactionsKind::Hashes => { Ok(from_block_with_tx_hashes::(block, total_difficulty, block_hash)) } - BlockTransactionsKind::Full => from_block_full::(block, total_difficulty, block_hash), + BlockTransactionsKind::Full => { + from_block_full::(block, total_difficulty, block_hash, tx_resp_builder) + } } } @@ -60,6 +63,7 @@ pub fn from_block_full( mut block: BlockWithSenders, total_difficulty: U256, block_hash: Option, + tx_resp_builder: &T, ) -> Result, BlockError> { let block_hash = block_hash.unwrap_or_else(|| block.block.header.hash_slow()); 
let block_number = block.block.number; @@ -83,7 +87,7 @@ pub fn from_block_full( index: Some(idx as u64), }; - from_recovered_with_block_context::(signed_tx_ec_recovered, tx_info) + from_recovered_with_block_context::(signed_tx_ec_recovered, tx_info, tx_resp_builder) }) .collect::>(); diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 7ffd48cb1f7..f8b46454dc2 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -2,6 +2,7 @@ mod signature; pub use signature::*; + use std::fmt; use alloy_consensus::Transaction as _; @@ -11,6 +12,7 @@ use alloy_rpc_types::{ }; use alloy_serde::WithOtherFields; use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered, TxType}; +use serde::{Deserialize, Serialize}; /// Create a new rpc transaction result for a mined transaction, using the given block hash, /// number, and tx index fields to populate the corresponding fields in the rpc result. @@ -20,21 +22,33 @@ use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered, TxType}; pub fn from_recovered_with_block_context( tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, + resp_builder: &T, ) -> T::Transaction { - T::fill(tx, tx_info) + resp_builder.fill(tx, tx_info) } /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. -pub fn from_recovered(tx: TransactionSignedEcRecovered) -> T::Transaction { - T::fill(tx, TransactionInfo::default()) +pub fn from_recovered( + tx: TransactionSignedEcRecovered, + resp_builder: &T, +) -> T::Transaction { + resp_builder.fill(tx, TransactionInfo::default()) } /// Builds RPC transaction w.r.t. network. pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { /// RPC transaction response type. 
- type Transaction: Send + Clone + Default + fmt::Debug; - + type Transaction: Serialize + + for<'de> Deserialize<'de> + + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug; + + /// /// Formats gas price and max fee per gas for RPC transaction response w.r.t. network specific /// transaction type. fn gas_price(signed_tx: &TransactionSigned, base_fee: Option) -> GasPrice { @@ -63,7 +77,7 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. - fn fill(tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo) -> Self::Transaction; + fn fill(&self, tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo) -> Self::Transaction; /// Truncates the input of a transaction to only the first 4 bytes. // todo: remove in favour of using constructor on `TransactionResponse` or similar @@ -80,7 +94,11 @@ impl TransactionCompat for () { // `alloy_network::AnyNetwork` type Transaction = WithOtherFields; - fn fill(_tx: TransactionSignedEcRecovered, _tx_info: TransactionInfo) -> Self::Transaction { + fn fill( + &self, + _tx: TransactionSignedEcRecovered, + _tx_info: TransactionInfo, + ) -> Self::Transaction { WithOtherFields::default() } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 5c7fbbd0023..21787873e96 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -36,12 +36,15 @@ use crate::eth::EthTxBuilder; #[derive(Deref)] pub struct EthApi { /// All nested fields bundled together. + #[deref] pub(super) inner: Arc>, + /// Transaction RPC response builder. 
+ pub tx_resp_builder: EthTxBuilder, } impl Clone for EthApi { fn clone(&self) -> Self { - Self { inner: self.inner.clone() } + Self { inner: self.inner.clone(), tx_resp_builder: EthTxBuilder } } } @@ -81,7 +84,7 @@ where proof_permits, ); - Self { inner: Arc::new(inner) } + Self { inner: Arc::new(inner), tx_resp_builder: EthTxBuilder } } } @@ -119,7 +122,7 @@ where ctx.config.proof_permits, ); - Self { inner: Arc::new(inner) } + Self { inner: Arc::new(inner), tx_resp_builder: EthTxBuilder } } } @@ -131,6 +134,10 @@ where // todo: replace with alloy_network::Ethereum type NetworkTypes = AnyNetwork; type TransactionCompat = EthTxBuilder; + + fn tx_resp_builder(&self) -> &Self::TransactionCompat { + &self.tx_resp_builder + } } impl std::fmt::Debug diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index b136861c796..24058da1734 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -4,7 +4,6 @@ use std::{ collections::HashMap, fmt, iter::StepBy, - marker::PhantomData, ops::RangeInclusive, sync::Arc, time::{Duration, Instant}, @@ -44,7 +43,7 @@ pub struct EthFilter { /// All nested fields bundled together inner: Arc>>, /// Assembles response data w.r.t. network. 
- _tx_resp_builder: PhantomData, + tx_resp_builder: Eth::TransactionCompat, } impl Clone for EthFilter @@ -52,7 +51,7 @@ where Eth: EthApiTypes, { fn clone(&self) -> Self { - Self { inner: self.inner.clone(), _tx_resp_builder: PhantomData } + Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } } } @@ -76,6 +75,7 @@ where eth_cache: EthStateCache, config: EthFilterConfig, task_spawner: Box, + tx_resp_builder: Eth::TransactionCompat, ) -> Self { let EthFilterConfig { max_blocks_per_filter, max_logs_per_response, stale_filter_ttl } = config; @@ -93,7 +93,7 @@ where max_logs_per_response: max_logs_per_response.unwrap_or(usize::MAX), }; - let eth_filter = Self { inner: Arc::new(inner), _tx_resp_builder: PhantomData }; + let eth_filter = Self { inner: Arc::new(inner), tx_resp_builder }; let this = eth_filter.clone(); eth_filter.inner.task_spawner.spawn_critical( @@ -278,7 +278,7 @@ where PendingTransactionFilterKind::Full => { let stream = self.inner.pool.new_pending_pool_transactions_listener(); let full_txs_receiver = - FullTransactionsReceiver::<_, Eth::TransactionCompat>::new(stream); + FullTransactionsReceiver::new(stream, self.tx_resp_builder.clone()); FilterKind::PendingTransaction(PendingTransactionKind::FullTransaction(Arc::new( full_txs_receiver, ))) @@ -603,7 +603,7 @@ impl PendingTransactionsReceiver { #[derive(Debug, Clone)] struct FullTransactionsReceiver { txs_stream: Arc>>, - _tx_resp_builder: PhantomData, + tx_resp_builder: TxCompat, } impl FullTransactionsReceiver @@ -612,8 +612,8 @@ where TxCompat: TransactionCompat, { /// Creates a new `FullTransactionsReceiver` encapsulating the provided transaction stream. 
- fn new(stream: NewSubpoolTransactionStream) -> Self { - Self { txs_stream: Arc::new(Mutex::new(stream)), _tx_resp_builder: PhantomData } + fn new(stream: NewSubpoolTransactionStream, tx_resp_builder: TxCompat) -> Self { + Self { txs_stream: Arc::new(Mutex::new(stream)), tx_resp_builder } } /// Returns all new pending transactions received since the last poll. @@ -625,7 +625,10 @@ where let mut prepared_stream = self.txs_stream.lock().await; while let Ok(tx) = prepared_stream.try_recv() { - pending_txs.push(from_recovered::(tx.transaction.to_recovered_transaction())) + pending_txs.push(from_recovered( + tx.transaction.to_recovered_transaction(), + &self.tx_resp_builder, + )) } FilterChanges::Transactions(pending_txs) } diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index ab7b1a268e0..848bcdc365a 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -21,7 +21,11 @@ where { type Transaction = ::TransactionResponse; - fn fill(tx: TransactionSignedEcRecovered, tx_info: TransactionInfo) -> Self::Transaction { + fn fill( + &self, + tx: TransactionSignedEcRecovered, + tx_info: TransactionInfo, + ) -> Self::Transaction { let signer = tx.signer(); let signed_tx = tx.into_signed(); diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 7bd1fd03d3b..ac962610ef8 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -1,6 +1,6 @@ //! 
`eth_` `PubSub` RPC handler implementation -use std::{marker::PhantomData, sync::Arc}; +use std::sync::Arc; use alloy_primitives::TxHash; use alloy_rpc_types::{ @@ -17,7 +17,7 @@ use jsonrpsee::{ }; use reth_network_api::NetworkInfo; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; -use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, FullEthApiTypes, RpcTransaction}; +use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, TransactionCompat}; use reth_rpc_eth_types::logs_utils; use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_rpc_types_compat::transaction::from_recovered; @@ -38,7 +38,7 @@ pub struct EthPubSub { inner: Arc>, /// The type that's used to spawn subscription tasks. subscription_task_spawner: Box, - _tx_resp_builder: PhantomData, + tx_resp_builder: Eth, } // === impl EthPubSub === @@ -47,13 +47,20 @@ impl EthPubSub Self { + pub fn new( + provider: Provider, + pool: Pool, + chain_events: Events, + network: Network, + tx_resp_builder: Eth, + ) -> Self { Self::with_spawner( provider, pool, chain_events, network, Box::::default(), + tx_resp_builder, ) } @@ -64,21 +71,22 @@ impl EthPubSub, + tx_resp_builder: Eth, ) -> Self { let inner = EthPubSubInner { provider, pool, chain_events, network }; - Self { inner: Arc::new(inner), subscription_task_spawner, _tx_resp_builder: PhantomData } + Self { inner: Arc::new(inner), subscription_task_spawner, tx_resp_builder } } } #[async_trait::async_trait] -impl EthPubSubApiServer> +impl EthPubSubApiServer for EthPubSub where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, Events: CanonStateSubscriptions + Clone + 'static, Network: NetworkInfo + Clone + 'static, - Eth: FullEthApiTypes + 'static, + Eth: TransactionCompat + 'static, { /// Handler for `eth_subscribe` async fn subscribe( @@ -89,8 +97,9 @@ where ) -> jsonrpsee::core::SubscriptionResult { let sink = pending.accept().await?; let pubsub = self.inner.clone(); + let 
resp_builder = self.tx_resp_builder.clone(); self.subscription_task_spawner.spawn(Box::pin(async move { - let _ = handle_accepted::<_, _, _, _, Eth>(pubsub, sink, kind, params).await; + let _ = handle_accepted(pubsub, sink, kind, params, resp_builder).await; })); Ok(()) @@ -103,13 +112,14 @@ async fn handle_accepted( accepted_sink: SubscriptionSink, kind: SubscriptionKind, params: Option, + tx_resp_builder: Eth, ) -> Result<(), ErrorObject<'static>> where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, Events: CanonStateSubscriptions + Clone + 'static, Network: NetworkInfo + Clone + 'static, - Eth: FullEthApiTypes, + Eth: TransactionCompat, { match kind { SubscriptionKind::NewHeads => { @@ -140,10 +150,9 @@ where Params::Bool(true) => { // full transaction objects requested let stream = pubsub.full_pending_transaction_stream().map(|tx| { - EthSubscriptionResult::FullTransaction(Box::new(from_recovered::< - Eth::TransactionCompat, - >( + EthSubscriptionResult::FullTransaction(Box::new(from_recovered( tx.transaction.to_recovered_transaction(), + &tx_resp_builder, ))) }); return pipe_from_stream(accepted_sink, stream).await diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 47aaac0bbfd..d03e10ca75a 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, marker::PhantomData}; +use std::collections::BTreeMap; use alloy_consensus::Transaction; use alloy_primitives::Address; @@ -9,7 +9,6 @@ use async_trait::async_trait; use jsonrpsee::core::RpcResult as Result; use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_api::TxPoolApiServer; -use reth_rpc_eth_api::{FullEthApiTypes, RpcTransaction}; use reth_rpc_types_compat::{transaction::from_recovered, TransactionCompat}; use reth_transaction_pool::{AllPoolTransactions, PoolTransaction, TransactionPool}; use tracing::trace; @@ -21,33 +20,34 @@ use tracing::trace; pub struct 
TxPoolApi { /// An interface to interact with the pool pool: Pool, - _tx_resp_builder: PhantomData, + tx_resp_builder: Eth, } impl TxPoolApi { /// Creates a new instance of `TxpoolApi`. - pub const fn new(pool: Pool) -> Self { - Self { pool, _tx_resp_builder: PhantomData } + pub const fn new(pool: Pool, tx_resp_builder: Eth) -> Self { + Self { pool, tx_resp_builder } } } impl TxPoolApi where Pool: TransactionPool + 'static, - Eth: FullEthApiTypes, + Eth: TransactionCompat, { - fn content(&self) -> TxpoolContent> { + fn content(&self) -> TxpoolContent { #[inline] fn insert( tx: &Tx, content: &mut BTreeMap>, + resp_builder: &RpcTxB, ) where Tx: PoolTransaction>, RpcTxB: TransactionCompat, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), - from_recovered::(tx.clone().into_consensus().into()), + from_recovered(tx.clone().into_consensus().into(), resp_builder), ); } @@ -55,10 +55,10 @@ where let mut content = TxpoolContent { pending: BTreeMap::new(), queued: BTreeMap::new() }; for pending in pending { - insert::<_, Eth::TransactionCompat>(&pending.transaction, &mut content.pending); + insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder); } for queued in queued { - insert::<_, Eth::TransactionCompat>(&queued.transaction, &mut content.queued); + insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder); } content @@ -66,10 +66,10 @@ where } #[async_trait] -impl TxPoolApiServer> for TxPoolApi +impl TxPoolApiServer for TxPoolApi where Pool: TransactionPool + 'static, - Eth: FullEthApiTypes + 'static, + Eth: TransactionCompat + 'static, { /// Returns the number of transactions currently pending for inclusion in the next block(s), as /// well as the ones that are being scheduled for future execution only. 
@@ -131,7 +131,7 @@ where async fn txpool_content_from( &self, from: Address, - ) -> Result>> { + ) -> Result> { trace!(target: "rpc::eth", ?from, "Serving txpool_contentFrom"); Ok(self.content().remove_from(&from)) } @@ -141,7 +141,7 @@ where /// /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content) for more details /// Handler for `txpool_content` - async fn txpool_content(&self) -> Result>> { + async fn txpool_content(&self) -> Result> { trace!(target: "rpc::eth", "Serving txpool_content"); Ok(self.content()) } From d7f08cd8762dbe81c7c83655f7f94d5ce628daa3 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 24 Oct 2024 12:09:14 +0700 Subject: [PATCH 131/970] chore: remove some clones (#12008) --- crates/evm/src/system_calls/mod.rs | 4 ++-- crates/optimism/payload/src/builder.rs | 14 ++++---------- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index d71dcfedabb..daaf1d1414f 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -171,7 +171,7 @@ where DB::Error: Display, { let result_and_state = eip2935::transact_blockhashes_contract_call( - &self.evm_config.clone(), + &self.evm_config, &self.chain_spec, timestamp, block_number, @@ -226,7 +226,7 @@ where DB::Error: Display, { let result_and_state = eip4788::transact_beacon_root_contract_call( - &self.evm_config.clone(), + &self.evm_config, &self.chain_spec, timestamp, block_number, diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 3ed00c49aec..a0569049020 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -105,13 +105,7 @@ where args: BuildArguments, ) -> Result, PayloadBuilderError> { let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - optimism_payload( - self.evm_config.clone(), - 
args, - cfg_env, - block_env, - self.compute_pending_block, - ) + optimism_payload(&self.evm_config, args, cfg_env, block_env, self.compute_pending_block) } fn on_missing_payload( @@ -140,7 +134,7 @@ where best_payload: None, }; let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - optimism_payload(self.evm_config.clone(), args, cfg_env, block_env, false)? + optimism_payload(&self.evm_config, args, cfg_env, block_env, false)? .into_payload() .ok_or_else(|| PayloadBuilderError::MissingPayload) } @@ -156,7 +150,7 @@ where /// a result indicating success with the payload or an error in case of failure. #[inline] pub(crate) fn optimism_payload( - evm_config: EvmConfig, + evm_config: &EvmConfig, args: BuildArguments, initialized_cfg: CfgEnvWithHandlerCfg, initialized_block_env: BlockEnv, @@ -430,7 +424,7 @@ where &mut db, &chain_spec, attributes.payload_attributes.timestamp, - attributes.clone().payload_attributes.withdrawals, + attributes.payload_attributes.withdrawals.clone(), )?; // merge all transitions into bundle state, this would apply the withdrawal balance changes From fcca8b1523fa7c1917754389fd08c9b21eb57987 Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Thu, 24 Oct 2024 13:26:25 +0800 Subject: [PATCH 132/970] refactor: BlockchainTestCase::run rm repetitive convert ForkSpec to ChainSpec (#11896) Co-authored-by: Matthias Seitz --- testing/ef-tests/src/cases/blockchain_test.rs | 14 ++++++-------- testing/ef-tests/src/models.rs | 2 +- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index d29aafa8212..7d80ec6c47f 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -6,6 +6,7 @@ use crate::{ }; use alloy_rlp::Decodable; use rayon::iter::{ParallelBridge, ParallelIterator}; +use reth_chainspec::ChainSpec; use reth_primitives::{BlockBody, SealedBlock, 
StaticFileSegment}; use reth_provider::{ providers::StaticFileWriter, test_utils::create_test_provider_factory_with_chain_spec, @@ -83,11 +84,10 @@ impl Case for BlockchainTestCase { .par_bridge() .try_for_each(|case| { // Create a new test database and initialize a provider for the test case. - let provider = create_test_provider_factory_with_chain_spec(Arc::new( - case.network.clone().into(), - )) - .database_provider_rw() - .unwrap(); + let chain_spec: Arc = Arc::new(case.network.into()); + let provider = create_test_provider_factory_with_chain_spec(chain_spec.clone()) + .database_provider_rw() + .unwrap(); // Insert initial test state into the provider. provider.insert_historical_block( @@ -127,9 +127,7 @@ impl Case for BlockchainTestCase { // Execute the execution stage using the EVM processor factory for the test case // network. let _ = ExecutionStage::new_with_executor( - reth_evm_ethereum::execute::EthExecutorProvider::ethereum(Arc::new( - case.network.clone().into(), - )), + reth_evm_ethereum::execute::EthExecutorProvider::ethereum(chain_spec), ) .execute( &provider, diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 30e5e5bb20c..b5dc073c1da 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -257,7 +257,7 @@ impl Account { } /// Fork specification. 
-#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Deserialize)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy, Deserialize)] pub enum ForkSpec { /// Frontier Frontier, From e04d1b4b4a3a6ab6e4f784ad129a88054bab460e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 24 Oct 2024 08:04:57 +0200 Subject: [PATCH 133/970] perf(net): P2P sink, revert pull/11658 (#11712) --- crates/net/eth-wire/src/p2pstream.rs | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 9882e39787e..76075838bc7 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -614,25 +614,24 @@ where /// Returns `Poll::Ready(Ok(()))` when no buffered items remain. fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); - loop { - match ready!(this.inner.as_mut().poll_flush(cx)) { - Err(err) => { - trace!(target: "net::p2p", - %err, - "error flushing p2p stream" - ); - return Poll::Ready(Err(err.into())) - } - Ok(()) => { + let poll_res = loop { + match this.inner.as_mut().poll_ready(cx) { + Poll::Pending => break Poll::Pending, + Poll::Ready(Err(err)) => break Poll::Ready(Err(err.into())), + Poll::Ready(Ok(())) => { let Some(message) = this.outgoing_messages.pop_front() else { - return Poll::Ready(Ok(())) + break Poll::Ready(Ok(())) }; if let Err(err) = this.inner.as_mut().start_send(message) { - return Poll::Ready(Err(err.into())) + break Poll::Ready(Err(err.into())) } } } - } + }; + + ready!(this.inner.as_mut().poll_flush(cx))?; + + poll_res } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { From 0df3148357cbfb9b811fd04da6a69fca24e59be2 Mon Sep 17 00:00:00 2001 From: Hoa Nguyen Date: Thu, 24 Oct 2024 13:29:07 +0700 Subject: [PATCH 134/970] feat(payload): introduce payload freezing for predetermined blocks (#11790) --- 
crates/payload/basic/src/lib.rs | 74 ++++++++++++++++++++++++--------- 1 file changed, 55 insertions(+), 19 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 4274d451e43..b8eab3c0fea 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -178,7 +178,7 @@ where deadline, // ticks immediately interval: tokio::time::interval(self.config.interval), - best_payload: None, + best_payload: PayloadState::Missing, pending_block: None, cached_reads, payload_task_guard: self.payload_task_guard.clone(), @@ -324,8 +324,8 @@ where deadline: Pin>, /// The interval at which the job should build a new payload after the last. interval: Interval, - /// The best payload so far. - best_payload: Option, + /// The best payload so far and its state. + best_payload: PayloadState, /// Receiver for the block that is currently being built. pending_block: Option>, /// Restricts how many generator tasks can be executed at once. @@ -362,7 +362,7 @@ where let _cancel = cancel.clone(); let guard = self.payload_task_guard.clone(); let payload_config = self.config.clone(); - let best_payload = self.best_payload.clone(); + let best_payload = self.best_payload.payload().cloned(); self.metrics.inc_initiated_payload_builds(); let cached_reads = self.cached_reads.take().unwrap_or_default(); let builder = self.builder.clone(); @@ -407,8 +407,9 @@ where // check if the interval is reached while this.interval.poll_tick(cx).is_ready() { - // start a new job if there is no pending block and we haven't reached the deadline - if this.pending_block.is_none() { + // start a new job if there is no pending block, we haven't reached the deadline, + // and the payload isn't frozen + if this.pending_block.is_none() && !this.best_payload.is_frozen() { this.spawn_build_job(); } } @@ -420,7 +421,11 @@ where BuildOutcome::Better { payload, cached_reads } => { this.cached_reads = Some(cached_reads); debug!(target: "payload_builder", value = 
%payload.fees(), "built better payload"); - this.best_payload = Some(payload); + this.best_payload = PayloadState::Best(payload); + } + BuildOutcome::Freeze(payload) => { + debug!(target: "payload_builder", "payload frozen, no further building will occur"); + this.best_payload = PayloadState::Frozen(payload); } BuildOutcome::Aborted { fees, cached_reads } => { this.cached_reads = Some(cached_reads); @@ -459,17 +464,18 @@ where type BuiltPayload = Builder::BuiltPayload; fn best_payload(&self) -> Result { - if let Some(ref payload) = self.best_payload { - return Ok(payload.clone()) + if let Some(payload) = self.best_payload.payload() { + Ok(payload.clone()) + } else { + // No payload has been built yet, but we need to return something that the CL then + // can deliver, so we need to return an empty payload. + // + // Note: it is assumed that this is unlikely to happen, as the payload job is + // started right away and the first full block should have been + // built by the time CL is requesting the payload. + self.metrics.inc_requested_empty_payload(); + self.builder.build_empty_payload(&self.client, self.config.clone()) } - // No payload has been built yet, but we need to return something that the CL then can - // deliver, so we need to return an empty payload. - // - // Note: it is assumed that this is unlikely to happen, as the payload job is started right - // away and the first full block should have been built by the time CL is requesting the - // payload. 
- self.metrics.inc_requested_empty_payload(); - self.builder.build_empty_payload(&self.client, self.config.clone()) } fn payload_attributes(&self) -> Result { @@ -480,8 +486,7 @@ where &mut self, kind: PayloadKind, ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { - let best_payload = self.best_payload.take(); - + let best_payload = self.best_payload.payload().cloned(); if best_payload.is_none() && self.pending_block.is_none() { // ensure we have a job scheduled if we don't have a best payload yet and none is active self.spawn_build_job(); @@ -545,6 +550,34 @@ where } } +/// Represents the current state of a payload being built. +#[derive(Debug, Clone)] +pub enum PayloadState

{ + /// No payload has been built yet. + Missing, + /// The best payload built so far, which may still be improved upon. + Best(P), + /// The payload is frozen and no further building should occur. + /// + /// Contains the final payload `P` that should be used. + Frozen(P), +} + +impl

PayloadState

{ + /// Checks if the payload is frozen. + pub const fn is_frozen(&self) -> bool { + matches!(self, Self::Frozen(_)) + } + + /// Returns the payload if it exists (either Best or Frozen). + pub const fn payload(&self) -> Option<&P> { + match self { + Self::Missing => None, + Self::Best(p) | Self::Frozen(p) => Some(p), + } + } +} + /// The future that returns the best payload to be served to the consensus layer. /// /// This returns the payload that's supposed to be sent to the CL. @@ -725,6 +758,9 @@ pub enum BuildOutcome { }, /// Build job was cancelled Cancelled, + + /// The payload is final and no further building should occur + Freeze(Payload), } impl BuildOutcome { From 84a30b0404e73d11d16208cc5898afff1b9f4f03 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:53:15 +0200 Subject: [PATCH 135/970] primitive-traits: use alloy `INITIAL_BASE_FEE` constant (#12022) --- Cargo.lock | 1 + crates/blockchain-tree/src/blockchain_tree.rs | 8 ++++---- crates/chain-state/src/test_utils.rs | 9 ++++----- crates/chainspec/src/spec.rs | 6 +++--- crates/consensus/common/Cargo.toml | 1 + crates/consensus/common/src/validation.rs | 2 +- crates/ethereum/evm/src/lib.rs | 4 ++-- crates/primitives-traits/src/constants/mod.rs | 3 --- 8 files changed, 16 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f749672fe9d..e261297ae51 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6741,6 +6741,7 @@ name = "reth-consensus-common" version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "mockall", "rand 0.8.5", diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 95c0361f31f..4468d82052c 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1375,6 +1375,7 @@ where mod tests { use super::*; use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; + use 
alloy_eips::eip1559::INITIAL_BASE_FEE; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{keccak256, Address, Sealable, B256}; use assert_matches::assert_matches; @@ -1386,7 +1387,6 @@ mod tests { use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::{ - constants::EIP1559_INITIAL_BASE_FEE, proofs::{calculate_receipt_root, calculate_transaction_root}, revm_primitives::AccountInfo, Account, BlockBody, Header, Signature, Transaction, TransactionSigned, @@ -1560,7 +1560,7 @@ mod tests { provider_rw.commit().unwrap(); } - let single_tx_cost = U256::from(EIP1559_INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); + let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { TransactionSigned::from_transaction_and_signature( Transaction::Eip1559(TxEip1559 { @@ -1568,7 +1568,7 @@ mod tests { nonce, gas_limit: MIN_TRANSACTION_GAS, to: Address::ZERO.into(), - max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, + max_fee_per_gas: INITIAL_BASE_FEE as u128, ..Default::default() }), Signature::test_signature(), @@ -1605,7 +1605,7 @@ mod tests { gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, gas_limit: chain_spec.max_gas_limit, mix_hash: B256::random(), - base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE), + base_fee_per_gas: Some(INITIAL_BASE_FEE), transactions_root, receipts_root, state_root: state_root_unhashed(HashMap::from([( diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index f1648ab6bff..564df9fe341 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -3,7 +3,7 @@ use crate::{ CanonStateSubscriptions, }; use alloy_consensus::{Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; -use alloy_eips::eip7685::Requests; +use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7685::Requests}; use alloy_primitives::{Address, BlockNumber, Sealable, B256, U256}; 
use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; @@ -11,7 +11,6 @@ use rand::{thread_rng, Rng}; use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - constants::EIP1559_INITIAL_BASE_FEE, proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, BlockBody, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, @@ -75,7 +74,7 @@ impl TestBlockBuilder { /// Gas cost of a single transaction generated by the block builder. pub fn single_tx_cost() -> U256 { - U256::from(EIP1559_INITIAL_BASE_FEE * MIN_TRANSACTION_GAS) + U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS) } /// Generates a random [`SealedBlockWithSenders`]. @@ -92,7 +91,7 @@ impl TestBlockBuilder { nonce, gas_limit: MIN_TRANSACTION_GAS, to: Address::random().into(), - max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, + max_fee_per_gas: INITIAL_BASE_FEE as u128, max_priority_fee_per_gas: 1, ..Default::default() }); @@ -136,7 +135,7 @@ impl TestBlockBuilder { gas_used: transactions.len() as u64 * MIN_TRANSACTION_GAS, gas_limit: self.chain_spec.max_gas_limit, mix_hash: B256::random(), - base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE), + base_fee_per_gas: Some(INITIAL_BASE_FEE), transactions_root: calculate_transaction_root(&transactions), receipts_root: calculate_receipt_root(&receipts), beneficiary: Address::random(), diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index bebf7ca2602..b1a23f1fa62 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -3,7 +3,7 @@ pub use alloy_eips::eip1559::BaseFeeParams; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; use alloy_consensus::constants::EMPTY_WITHDRAWALS; -use alloy_eips::eip7685::EMPTY_REQUESTS_HASH; +use 
alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7685::EMPTY_REQUESTS_HASH}; use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use derive_more::From; @@ -19,7 +19,7 @@ use reth_network_peers::{ sepolia_nodes, NodeRecord, }; use reth_primitives_traits::{ - constants::{EIP1559_INITIAL_BASE_FEE, HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH}, + constants::{HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH}, Header, SealedHeader, }; use reth_trie_common::root::state_root_ref_unhashed; @@ -314,7 +314,7 @@ impl ChainSpec { pub fn initial_base_fee(&self) -> Option { // If the base fee is set in the genesis block, we use that instead of the default. let genesis_base_fee = - self.genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(EIP1559_INITIAL_BASE_FEE); + self.genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(INITIAL_BASE_FEE); // If London is activated at genesis, we set the initial base fee as per EIP-1559. self.hardforks.fork(EthereumHardfork::London).active_at_block(0).then_some(genesis_base_fee) diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index eaae1301b46..c83312577e9 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -20,6 +20,7 @@ reth-consensus.workspace = true alloy-primitives.workspace = true revm-primitives.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true [dev-dependencies] reth-storage-api.workspace = true diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index dabb8c3c34d..1070bbdbc0f 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -204,7 +204,7 @@ pub fn validate_against_parent_eip1559_base_fee Date: Thu, 24 Oct 2024 11:56:05 +0200 Subject: [PATCH 136/970] primitive-traits: rm `ALLOWED_FUTURE_BLOCK_TIME_SECONDS` constant (#12028) --- crates/primitives-traits/src/constants/mod.rs | 9 --------- 1 
file changed, 9 deletions(-) diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 19decfb2d13..6bd21eb8227 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -87,15 +87,6 @@ pub const OP_SYSTEM_TX_TO_ADDR: Address = address!("4200000000000000000000000000 /// the database. pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3; -/// Max seconds from current time allowed for blocks, before they're considered future blocks. -/// -/// This is only used when checking whether or not the timestamp for pre-merge blocks is in the -/// future. -/// -/// See: -/// -pub const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15; - #[cfg(test)] mod tests { use super::*; From 8bfb7f9ce9b527dbbfc94327519abbafb666a1d8 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:58:50 +0200 Subject: [PATCH 137/970] primitive-traits: use alloy `BEACON_NONCE` constant (#12029) --- crates/ethereum/payload/src/lib.rs | 4 ++-- crates/optimism/payload/src/builder.rs | 2 +- crates/primitives-traits/src/constants/mod.rs | 3 --- crates/rpc/rpc-eth-api/src/helpers/pending_block.rs | 4 ++-- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 7f94acf723c..951a909b91c 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -10,7 +10,7 @@ #![allow(clippy::useless_let_if_seq)] use alloy_consensus::EMPTY_OMMER_ROOT_HASH; -use alloy_eips::eip7685::Requests; +use alloy_eips::{eip7685::Requests, merge::BEACON_NONCE}; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, @@ -25,7 +25,7 @@ use reth_execution_types::ExecutionOutcome; use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; use 
reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ - constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, + constants::eip4844::MAX_DATA_GAS_PER_BLOCK, proofs::{self}, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, Block, BlockBody, EthereumHardforks, Header, Receipt, diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index a0569049020..c85abfad7c4 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_eips::merge::BEACON_NONCE; use alloy_primitives::U256; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; @@ -14,7 +15,6 @@ use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; use reth_optimism_forks::OptimismHardfork; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ - constants::BEACON_NONCE, proofs, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, Block, BlockBody, Header, Receipt, TxType, diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 6bd21eb8227..c11a6296399 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -9,9 +9,6 @@ pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; /// The client version: `reth/v{major}.{minor}.{patch}` pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); -/// The default block nonce in the beacon consensus -pub const BEACON_NONCE: u64 = 0u64; - /// The minimum tx fee below which the txpool will reject the transaction. 
/// /// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559 diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 407ddf1874a..872f17ee910 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -6,7 +6,7 @@ use std::time::{Duration, Instant}; use crate::{EthApiTypes, FromEthApiError, FromEvmError}; use alloy_consensus::EMPTY_OMMER_ROOT_HASH; -use alloy_eips::eip7685::EMPTY_REQUESTS_HASH; +use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE}; use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rpc_types::BlockNumberOrTag; use futures::Future; @@ -17,7 +17,7 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, + constants::eip4844::MAX_DATA_GAS_PER_BLOCK, proofs::calculate_transaction_root, revm_primitives::{ BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, From ebd1ed9fb822377d816c798585c0911be45a2b32 Mon Sep 17 00:00:00 2001 From: Deil Urba Date: Thu, 24 Oct 2024 11:49:14 +0100 Subject: [PATCH 138/970] feat(exex): notifications trait (#11972) --- crates/exex/exex/src/context.rs | 2 +- crates/exex/exex/src/notifications.rs | 64 ++++++++++++++++++--------- 2 files changed, 45 insertions(+), 21 deletions(-) diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 9af12e260a7..c4b4f351baa 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -7,7 +7,7 @@ use reth_primitives::Head; use reth_tasks::TaskExecutor; use tokio::sync::mpsc::UnboundedSender; -use crate::{ExExEvent, ExExNotifications}; +use crate::{ExExEvent, ExExNotifications, ExExNotificationsStream}; /// Captures the context that an `ExEx` has access to. 
pub struct ExExContext { diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index d0c94d34f64..90a0ee230a4 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -21,6 +21,40 @@ pub struct ExExNotifications { inner: ExExNotificationsInner, } +/// A trait, that represents a stream of [`ExExNotification`]s. The stream will emit notifications +/// for all blocks. If the stream is configured with a head via [`ExExNotifications::set_with_head`] +/// or [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. +pub trait ExExNotificationsStream: Stream> + Unpin { + /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s without a head. + /// + /// It's a no-op if the stream has already been configured without a head. + /// + /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. + fn set_without_head(&mut self); + + /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s with the provided + /// head. + /// + /// It's a no-op if the stream has already been configured with a head. + /// + /// See the documentation of [`ExExNotificationsWithHead`] for more details. + fn set_with_head(&mut self, exex_head: ExExHead); + + /// Returns a new [`ExExNotificationsStream`] without a head. + /// + /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. + fn without_head(self) -> Self + where + Self: Sized; + + /// Returns a new [`ExExNotificationsStream`] with the provided head. + /// + /// See the documentation of [`ExExNotificationsWithHead`] for more details. + fn with_head(self, exex_head: ExExHead) -> Self + where + Self: Sized; +} + #[derive(Debug)] enum ExExNotificationsInner { /// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. 
@@ -52,13 +86,14 @@ impl ExExNotifications { )), } } +} - /// Sets [`ExExNotifications`] to a stream of [`ExExNotification`]s without a head. - /// - /// It's a no-op if the stream has already been configured without a head. - /// - /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. - pub fn set_without_head(&mut self) { +impl ExExNotificationsStream for ExExNotifications +where + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider + Clone + Unpin + 'static, +{ + fn set_without_head(&mut self) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); self.inner = ExExNotificationsInner::WithoutHead(match current { ExExNotificationsInner::WithoutHead(notifications) => notifications, @@ -73,20 +108,12 @@ impl ExExNotifications { }); } - /// Returns a new [`ExExNotifications`] without a head. - /// - /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. - pub fn without_head(mut self) -> Self { + fn without_head(mut self) -> Self { self.set_without_head(); self } - /// Sets [`ExExNotifications`] to a stream of [`ExExNotification`]s with the provided head. - /// - /// It's a no-op if the stream has already been configured with a head. - /// - /// See the documentation of [`ExExNotificationsWithHead`] for more details. - pub fn set_with_head(&mut self, exex_head: ExExHead) { + fn set_with_head(&mut self, exex_head: ExExHead) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); self.inner = ExExNotificationsInner::WithHead(match current { ExExNotificationsInner::WithoutHead(notifications) => { @@ -104,10 +131,7 @@ impl ExExNotifications { }); } - /// Returns a new [`ExExNotifications`] with the provided head. - /// - /// See the documentation of [`ExExNotificationsWithHead`] for more details. 
- pub fn with_head(mut self, exex_head: ExExHead) -> Self { + fn with_head(mut self, exex_head: ExExHead) -> Self { self.set_with_head(exex_head); self } From d50da7fcd65da3c37ad77b7b5f645f12ffc055b6 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 24 Oct 2024 12:50:43 +0200 Subject: [PATCH 139/970] primitives-traits: use alloy `SEPOLIA_GENESIS_HASH` constant (#12024) --- crates/chainspec/src/spec.rs | 7 ++----- crates/primitives-traits/src/constants/mod.rs | 4 ---- crates/primitives/src/lib.rs | 2 +- crates/storage/db-common/src/init.rs | 4 ++-- 4 files changed, 5 insertions(+), 12 deletions(-) diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index b1a23f1fa62..02f4b5ca983 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -8,7 +8,7 @@ use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use derive_more::From; -use alloy_consensus::constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH}; +use alloy_consensus::constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use reth_ethereum_forks::{ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, @@ -18,10 +18,7 @@ use reth_network_peers::{ base_nodes, base_testnet_nodes, holesky_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, sepolia_nodes, NodeRecord, }; -use reth_primitives_traits::{ - constants::{HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH}, - Header, SealedHeader, -}; +use reth_primitives_traits::{constants::HOLESKY_GENESIS_HASH, Header, SealedHeader}; use reth_trie_common::root::state_root_ref_unhashed; use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec, LazyLock, OnceLock}; diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index c11a6296399..c5ee6bc3e83 100644 --- 
a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -60,10 +60,6 @@ pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; /// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. pub const BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 10; -/// Sepolia genesis hash: `0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9` -pub const SEPOLIA_GENESIS_HASH: B256 = - b256!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9"); - /// Holesky genesis hash: `0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4` pub const HOLESKY_GENESIS_HASH: B256 = b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 7a9a6bf457f..4e3f1d3bd24 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -39,7 +39,7 @@ pub use block::{ }; #[cfg(feature = "reth-codec")] pub use compression::*; -pub use constants::{HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; +pub use constants::HOLESKY_GENESIS_HASH; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index f0695421ec5..014751733e6 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -581,7 +581,7 @@ struct GenesisAccountWithAddress { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::constants::MAINNET_GENESIS_HASH; + use alloy_consensus::constants::{MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; use alloy_genesis::Genesis; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; use reth_db::DatabaseEnv; @@ -592,7 +592,7 @@ mod tests { transaction::DbTx, Database, }; - use reth_primitives::{HOLESKY_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; + use 
reth_primitives::HOLESKY_GENESIS_HASH; use reth_primitives_traits::IntegerList; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, From 2d83f2048937a1ddd9f9646bd513a3e52364af78 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 24 Oct 2024 13:12:13 +0200 Subject: [PATCH 140/970] chore: reuse alloy-primitives logs bloom (#12031) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives-traits/src/log.rs | 15 --------------- crates/primitives/src/receipt.rs | 4 ++-- crates/rpc/rpc-eth-types/src/simulate.rs | 5 +++-- crates/trie/sparse/src/trie.rs | 14 ++++++++++---- 7 files changed, 19 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e261297ae51..ab521843740 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -286,9 +286,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f35429a652765189c1c5092870d8360ee7b7769b09b06d89ebaefd34676446" +checksum = "c71738eb20c42c5fb149571e76536a0f309d142f3957c28791662b96baf77a3d" dependencies = [ "alloy-rlp", "arbitrary", diff --git a/Cargo.toml b/Cargo.toml index 22a78979dfc..e23efdeb315 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -427,7 +427,7 @@ revm-primitives = { version = "13.0.0", features = [ # eth alloy-chains = "0.1.32" alloy-dyn-abi = "0.8.0" -alloy-primitives = { version = "0.8.7", default-features = false } +alloy-primitives = { version = "0.8.9", default-features = false } alloy-rlp = "0.3.4" alloy-sol-types = "0.8.0" alloy-trie = { version = "0.7", default-features = false } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index a77669ec367..57d1119b035 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -39,7 +39,7 @@ mod error; pub use error::{GotExpected, GotExpectedBoxed}; mod log; -pub use 
log::{logs_bloom, Log, LogData}; +pub use alloy_primitives::{logs_bloom, Log, LogData}; mod storage; pub use storage::StorageEntry; diff --git a/crates/primitives-traits/src/log.rs b/crates/primitives-traits/src/log.rs index 6e6b4733518..0b445aeeba9 100644 --- a/crates/primitives-traits/src/log.rs +++ b/crates/primitives-traits/src/log.rs @@ -1,18 +1,3 @@ -use alloy_primitives::Bloom; -pub use alloy_primitives::{Log, LogData}; - -/// Calculate receipt logs bloom. -pub fn logs_bloom<'a>(logs: impl IntoIterator) -> Bloom { - let mut bloom = Bloom::ZERO; - for log in logs { - bloom.m3_2048(log.address.as_slice()); - for topic in log.topics() { - bloom.m3_2048(topic.as_slice()); - } - } - bloom -} - #[cfg(test)] mod tests { use alloy_primitives::{Address, Bytes, Log as AlloyLog, B256}; diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index b117f8d9615..bb6c0841b8c 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,6 +1,6 @@ #[cfg(feature = "reth-codec")] use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; -use crate::{logs_bloom, TxType}; +use crate::TxType; use alloc::{vec, vec::Vec}; use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, @@ -49,7 +49,7 @@ impl Receipt { /// Calculates [`Log`]'s bloom filter. this is slow operation and [`ReceiptWithBloom`] can /// be used to cache this value. 
pub fn bloom_slow(&self) -> Bloom { - logs_bloom(self.logs.iter()) + alloy_primitives::logs_bloom(self.logs.iter()) } /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] container diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 1d443861d4f..4249c78fe6a 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -9,7 +9,6 @@ use alloy_rpc_types::{ use alloy_rpc_types_eth::transaction::TransactionRequest; use jsonrpsee_types::ErrorObject; use reth_primitives::{ - logs_bloom, proofs::{calculate_receipt_root, calculate_transaction_root}, BlockBody, BlockWithSenders, Receipt, Signature, Transaction, TransactionSigned, TransactionSignedNoHash, @@ -290,7 +289,9 @@ pub fn build_block( receipts_root: calculate_receipt_root(&receipts), transactions_root: calculate_transaction_root(&transactions), state_root, - logs_bloom: logs_bloom(receipts.iter().flat_map(|r| r.receipt.logs.iter())), + logs_bloom: alloy_primitives::logs_bloom( + receipts.iter().flat_map(|r| r.receipt.logs.iter()), + ), mix_hash: block_env.prevrandao.unwrap_or_default(), ..Default::default() }; diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 4d195cbf34c..8d65378f614 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1257,19 +1257,24 @@ mod tests { // to test the sparse trie updates. 
const KEY_NIBBLES_LEN: usize = 3; - fn test(updates: Vec>>) { + fn test(updates: I) + where + I: IntoIterator, + T: IntoIterator)> + Clone, + { let mut rng = generators::rng(); let mut state = BTreeMap::default(); let mut sparse = RevealedSparseTrie::default(); for update in updates { - let keys_to_delete_len = update.len() / 2; - + let mut count = 0; // Insert state updates into the sparse trie and calculate the root for (key, value) in update.clone() { sparse.update_leaf(key, value).unwrap(); + count += 1; } + let keys_to_delete_len = count / 2; let sparse_root = sparse.root(); // Insert state updates into the hash builder and calculate the root @@ -1329,7 +1334,8 @@ mod tests { ), 1..100, ) - )| { test(updates.into_iter().collect()) }); + )| { + test(updates) }); } /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has From 5e4da59b3a83c869647ad38b6f679f7594c11938 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 24 Oct 2024 15:12:34 +0400 Subject: [PATCH 141/970] feat: Add more complex E2E test (#12005) --- Cargo.lock | 5 + crates/e2e-test-utils/Cargo.toml | 2 + crates/e2e-test-utils/src/engine_api.rs | 51 +++++++--- crates/e2e-test-utils/src/lib.rs | 107 +++++++++++++++++--- crates/e2e-test-utils/src/node.rs | 18 +++- crates/e2e-test-utils/src/traits.rs | 15 ++- crates/ethereum/node/Cargo.toml | 3 + crates/ethereum/node/tests/e2e/p2p.rs | 126 +++++++++++++++++++++++- 8 files changed, 298 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab521843740..3449e381f98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7020,6 +7020,7 @@ dependencies = [ "reth", "reth-chainspec", "reth-db", + "reth-engine-local", "reth-network-peers", "reth-node-builder", "reth-payload-builder", @@ -7033,6 +7034,7 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "url", ] [[package]] @@ -7966,8 +7968,11 @@ dependencies = [ "alloy-consensus", "alloy-genesis", "alloy-primitives", + "alloy-provider", + 
"alloy-signer", "eyre", "futures", + "rand 0.8.5", "reth", "reth-auto-seal-consensus", "reth-basic-payload-builder", diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 9fa3e2b60ab..04f031daa58 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -23,9 +23,11 @@ reth-node-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-stages-types.workspace = true reth-network-peers.workspace = true +reth-engine-local.workspace = true # rpc jsonrpsee.workspace = true +url.workspace = true # ethereum alloy-primitives.workspace = true diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index f4aa8fdf5ff..5027b2620a6 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -12,19 +12,22 @@ use reth::{ types::engine::{ForkchoiceState, PayloadStatusEnum}, }, }; +use reth_chainspec::EthereumHardforks; +use reth_node_builder::BuiltPayload; use reth_payload_builder::PayloadId; use reth_rpc_layer::AuthClientService; -use std::marker::PhantomData; +use std::{marker::PhantomData, sync::Arc}; /// Helper for engine api operations #[derive(Debug)] -pub struct EngineApiTestContext { +pub struct EngineApiTestContext { + pub chain_spec: Arc, pub canonical_stream: CanonStateNotificationStream, pub engine_api_client: HttpClient>, pub _marker: PhantomData, } -impl EngineApiTestContext { +impl EngineApiTestContext { /// Retrieves a v3 payload from the engine api pub async fn get_payload_v3( &self, @@ -51,18 +54,40 @@ impl EngineApiTestContext { ) -> eyre::Result where E::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, + E::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, { - // setup payload for submission - let envelope_v3: ::ExecutionPayloadEnvelopeV3 = payload.into(); - // submit payload to engine api - let submission = EngineApiClient::::new_payload_v3( - &self.engine_api_client, - 
envelope_v3.execution_payload(), - versioned_hashes, - payload_builder_attributes.parent_beacon_block_root().unwrap(), - ) - .await?; + let submission = if self + .chain_spec + .is_prague_active_at_timestamp(payload_builder_attributes.timestamp()) + { + let requests = payload + .executed_block() + .unwrap() + .execution_outcome() + .requests + .first() + .unwrap() + .clone(); + let envelope: ::ExecutionPayloadEnvelopeV4 = payload.into(); + EngineApiClient::::new_payload_v4( + &self.engine_api_client, + envelope.execution_payload(), + versioned_hashes, + payload_builder_attributes.parent_beacon_block_root().unwrap(), + requests, + ) + .await? + } else { + let envelope: ::ExecutionPayloadEnvelopeV3 = payload.into(); + EngineApiClient::::new_payload_v3( + &self.engine_api_client, + envelope.execution_payload(), + versioned_hashes, + payload_builder_attributes.parent_beacon_block_root().unwrap(), + ) + .await? + }; assert_eq!(submission.status, expected_status); diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 48e56910e6c..f5ee1e5e669 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -11,11 +11,13 @@ use reth::{ }; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; +use reth_engine_local::LocalPayloadAttributesBuilder; use reth_node_builder::{ - components::NodeComponentsBuilder, rpc::RethRpcAddOns, FullNodeTypesAdapter, Node, NodeAdapter, - NodeComponents, NodeTypesWithDBAdapter, NodeTypesWithEngine, RethFullAdapter, + components::NodeComponentsBuilder, rpc::RethRpcAddOns, EngineNodeLauncher, + FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodeTypesWithDBAdapter, + NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, }; -use reth_provider::providers::BlockchainProvider; +use reth_provider::providers::{BlockchainProvider, BlockchainProvider2}; use tracing::{span, Level}; use wallet::Wallet; @@ -102,21 +104,102 @@ where 
Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) } +/// Creates the initial setup with `num_nodes` started and interconnected. +pub async fn setup_engine( + num_nodes: usize, + chain_spec: Arc, + is_dev: bool, +) -> eyre::Result<( + Vec>>>, + TaskManager, + Wallet, +)> +where + N: Default + + Node>>> + + NodeTypesWithEngine, + N::ComponentsBuilder: NodeComponentsBuilder< + TmpNodeAdapter>>, + Components: NodeComponents< + TmpNodeAdapter>>, + Network: PeersHandleProvider, + >, + >, + N::AddOns: RethRpcAddOns>>>, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< + <::Engine as PayloadTypes>::PayloadAttributes, + >, +{ + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + // Create nodes and peer them + let mut nodes: Vec> = Vec::with_capacity(num_nodes); + + for idx in 0..num_nodes { + let node_config = NodeConfig::new(chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()) + .set_dev(is_dev); + + let span = span!(Level::INFO, "node", idx); + let _enter = span.enter(); + let node = N::default(); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .with_types_and_provider::>() + .with_components(node.components_builder()) + .with_add_ons(node.add_ons()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + Default::default(), + ); + builder.launch_with(launcher) + }) + .await?; + + let mut node = NodeTestContext::new(node).await?; + + // Connect each node in a chain. 
+ if let Some(previous_node) = nodes.last_mut() { + previous_node.connect(&mut node).await; + } + + // Connect last node with the first if there are more than two + if idx + 1 == num_nodes && num_nodes > 2 { + if let Some(first_node) = nodes.first_mut() { + node.connect(first_node).await; + } + } + + nodes.push(node); + } + + Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) +} + // Type aliases type TmpDB = Arc>; -type TmpNodeAdapter = FullNodeTypesAdapter< - NodeTypesWithDBAdapter, - BlockchainProvider>, ->; +type TmpNodeAdapter>> = + FullNodeTypesAdapter, Provider>; /// Type alias for a `NodeAdapter` -pub type Adapter = NodeAdapter< - RethFullAdapter, - <>>::ComponentsBuilder as NodeComponentsBuilder< - RethFullAdapter, +pub type Adapter>> = NodeAdapter< + TmpNodeAdapter, + <>>::ComponentsBuilder as NodeComponentsBuilder< + TmpNodeAdapter, >>::Components, >; /// Type alias for a type of `NodeHelper` -pub type NodeHelperType = NodeTestContext, AO>; +pub type NodeHelperType>> = + NodeTestContext, AO>; diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 776a437a58e..f4007201804 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -18,9 +18,10 @@ use reth::{ }, }; use reth_chainspec::EthereumHardforks; -use reth_node_builder::{rpc::RethRpcAddOns, NodeTypesWithEngine}; +use reth_node_builder::{rpc::RethRpcAddOns, NodeTypes, NodeTypesWithEngine}; use reth_stages_types::StageId; use tokio_stream::StreamExt; +use url::Url; use crate::{ engine_api::EngineApiTestContext, network::NetworkTestContext, payload::PayloadTestContext, @@ -41,7 +42,10 @@ where /// Context for testing network functionalities. pub network: NetworkTestContext, /// Context for testing the Engine API. - pub engine_api: EngineApiTestContext<::Engine>, + pub engine_api: EngineApiTestContext< + ::Engine, + ::ChainSpec, + >, /// Context for testing RPC features. 
pub rpc: RpcTestContext, } @@ -63,6 +67,7 @@ where payload: PayloadTestContext::new(builder).await?, network: NetworkTestContext::new(node.network.clone()), engine_api: EngineApiTestContext { + chain_spec: node.chain_spec(), engine_api_client: node.auth_server_handle().http_client(), canonical_stream: node.provider.canonical_state_stream(), _marker: PhantomData::, @@ -89,6 +94,7 @@ where ) -> eyre::Result> where Engine::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, + Engine::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt + FullEthApiTypes, { let mut chain = Vec::with_capacity(length as usize); @@ -137,6 +143,8 @@ where where ::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, + ::ExecutionPayloadEnvelopeV4: + From + PayloadEnvelopeExt, { let (payload, eth_attr) = self.new_payload(attributes_generator).await?; @@ -236,4 +244,10 @@ where } Ok(()) } + + /// Returns the RPC URL. + pub fn rpc_url(&self) -> Url { + let addr = self.inner.rpc_server_handle().http_local_addr().unwrap(); + format!("http://{}", addr).parse().unwrap() + } } diff --git a/crates/e2e-test-utils/src/traits.rs b/crates/e2e-test-utils/src/traits.rs index 6786492140b..a70bbf7afb7 100644 --- a/crates/e2e-test-utils/src/traits.rs +++ b/crates/e2e-test-utils/src/traits.rs @@ -1,4 +1,5 @@ -use op_alloy_rpc_types_engine::OpExecutionPayloadEnvelopeV3; +use alloy_rpc_types::engine::ExecutionPayloadEnvelopeV4; +use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; use reth::rpc::types::engine::{ExecutionPayloadEnvelopeV3, ExecutionPayloadV3}; /// The execution payload envelope type. 
@@ -13,8 +14,20 @@ impl PayloadEnvelopeExt for OpExecutionPayloadEnvelopeV3 { } } +impl PayloadEnvelopeExt for OpExecutionPayloadEnvelopeV4 { + fn execution_payload(&self) -> ExecutionPayloadV3 { + self.execution_payload.clone() + } +} + impl PayloadEnvelopeExt for ExecutionPayloadEnvelopeV3 { fn execution_payload(&self) -> ExecutionPayloadV3 { self.execution_payload.clone() } } + +impl PayloadEnvelopeExt for ExecutionPayloadEnvelopeV4 { + fn execution_payload(&self) -> ExecutionPayloadV3 { + self.execution_payload.clone() + } +} diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 11555cdc4a5..e7784637a06 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -52,6 +52,9 @@ alloy-genesis.workspace = true tokio.workspace = true serde_json.workspace = true alloy-consensus.workspace = true +alloy-provider.workspace = true +rand.workspace = true +alloy-signer.workspace = true [features] default = [] diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index a40c1b3f4b4..0fae23a0857 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -1,7 +1,19 @@ use crate::utils::eth_payload_attributes; +use alloy_consensus::TxType; +use alloy_primitives::bytes; +use alloy_provider::{ + network::{ + Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder, TransactionBuilder7702, + }, + Provider, ProviderBuilder, SendableTx, +}; +use alloy_signer::SignerSync; +use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use reth::rpc::types::TransactionRequest; use reth_chainspec::{ChainSpecBuilder, MAINNET}; -use reth_e2e_test_utils::{setup, transaction::TransactionTestContext}; +use reth_e2e_test_utils::{setup, setup_engine, transaction::TransactionTestContext}; use reth_node_ethereum::EthereumNode; +use revm::primitives::{AccessListItem, Authorization}; use std::sync::Arc; #[tokio::test] @@ -45,3 +57,115 @@ async fn can_sync() -> 
eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn e2e_test_send_transactions() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::thread_rng().gen(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {:?}", seed); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(2, chain_spec.clone(), false).await?; + let mut node = nodes.pop().unwrap(); + let signers = wallet.gen(); + let provider = ProviderBuilder::new().with_recommended_fillers().on_http(node.rpc_url()); + + // simple contract which writes to storage on any call + let dummy_bytecode = bytes!("6080604052348015600f57600080fd5b50602880601d6000396000f3fe4360a09081523360c0526040608081905260e08152902080805500fea164736f6c6343000810000a"); + let mut call_destinations = signers.iter().map(|s| s.address()).collect::>(); + + // Produce 100 random blocks with random transactions + for _ in 0..100 { + let tx_count = rng.gen_range(1..20); + + let mut pending = vec![]; + for _ in 0..tx_count { + let signer = signers.choose(&mut rng).unwrap(); + let tx_type = TxType::try_from(rng.gen_range(0..=4)).unwrap(); + + let mut tx = TransactionRequest::default().with_from(signer.address()); + + let should_create = + rng.gen::() && tx_type != TxType::Eip4844 && tx_type != TxType::Eip7702; + if should_create { + tx = tx.into_create().with_input(dummy_bytecode.clone()); + } else { + tx = tx.with_to(*call_destinations.choose(&mut rng).unwrap()).with_input( + (0..rng.gen_range(0..10000)).map(|_| rng.gen()).collect::>(), + ); + } + + if matches!(tx_type, TxType::Legacy | TxType::Eip2930) { + tx = tx.with_gas_price(provider.get_gas_price().await?); + } + + if rng.gen::() || tx_type == TxType::Eip2930 { + tx = tx.with_access_list( + vec![AccessListItem { + 
address: *call_destinations.choose(&mut rng).unwrap(), + storage_keys: (0..rng.gen_range(0..100)).map(|_| rng.gen()).collect(), + }] + .into(), + ); + } + + if tx_type == TxType::Eip7702 { + let signer = signers.choose(&mut rng).unwrap(); + let auth = Authorization { + chain_id: provider.get_chain_id().await?, + address: *call_destinations.choose(&mut rng).unwrap(), + nonce: provider.get_transaction_count(signer.address()).await?, + }; + let sig = signer.sign_hash_sync(&auth.signature_hash())?; + tx = tx.with_authorization_list(vec![auth.into_signed(sig)]) + } + + let SendableTx::Builder(tx) = provider.fill(tx).await? else { unreachable!() }; + let tx = + NetworkWallet::::sign_request(&EthereumWallet::new(signer.clone()), tx) + .await?; + + pending.push(provider.send_tx_envelope(tx).await?); + } + + let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + assert!(payload.block().raw_transactions().len() == tx_count); + + for pending in pending { + let receipt = pending.get_receipt().await?; + if let Some(address) = receipt.contract_address { + call_destinations.push(address); + } + } + } + + let second_node = nodes.pop().unwrap(); + let second_provider = + ProviderBuilder::new().with_recommended_fillers().on_http(second_node.rpc_url()); + + assert_eq!(second_provider.get_block_number().await?, 0); + + let head = provider.get_block_by_number(Default::default(), false).await?.unwrap().header.hash; + second_node.engine_api.update_forkchoice(head, head).await?; + + let start = std::time::Instant::now(); + + while provider.get_block_number().await? != second_provider.get_block_number().await? 
{ + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + assert!(start.elapsed() <= std::time::Duration::from_secs(10), "timed out"); + } + + Ok(()) +} From 8b317f206f89e5618e0a6ce77a4e999c34597b51 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 24 Oct 2024 14:36:52 +0200 Subject: [PATCH 142/970] fix: exclude withdrawals from uncle block (#12038) --- crates/rpc/rpc-types-compat/src/block.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 8cddc8c4497..a954e05e4f6 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -183,14 +183,14 @@ fn from_block_with_transactions( /// an Uncle from its header. pub fn uncle_block_from_header(header: PrimitiveHeader) -> Block { let hash = header.hash_slow(); - let rpc_header = from_primitive_with_hash(SealedHeader::new(header.clone(), hash)); let uncle_block = PrimitiveBlock { header, ..Default::default() }; let size = Some(U256::from(uncle_block.length())); + let rpc_header = from_primitive_with_hash(SealedHeader::new(uncle_block.header, hash)); Block { uncles: vec![], header: rpc_header, transactions: BlockTransactions::Uncle, - withdrawals: Some(vec![]), + withdrawals: None, size, } } From bea6e278dfdb052ce7bbcec5d42349221aa04b84 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 24 Oct 2024 14:29:24 +0200 Subject: [PATCH 143/970] primitive-traits: use alloy `MIN_PROTOCOL_BASE_FEE` constant (#12037) --- crates/node/core/src/args/txpool.rs | 3 +-- crates/primitives-traits/src/constants/mod.rs | 26 +------------------ crates/transaction-pool/src/config.rs | 3 +-- crates/transaction-pool/src/pool/txpool.rs | 4 +-- crates/transaction-pool/src/test_utils/gen.rs | 6 ++--- .../transaction-pool/src/test_utils/mock.rs | 5 ++-- crates/transaction-pool/tests/it/evict.rs | 3 +-- 7 files changed, 10 
insertions(+), 40 deletions(-) diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 282313555f7..538315101ad 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -1,10 +1,9 @@ //! Transaction pool arguments use crate::cli::config::RethTransactionPoolConfig; -use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; +use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::Address; use clap::Args; -use reth_primitives::constants::MIN_PROTOCOL_BASE_FEE; use reth_transaction_pool::{ blobstore::disk::DEFAULT_MAX_CACHED_BLOBS, pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index c5ee6bc3e83..6ed88ce592c 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,6 +1,6 @@ //! Ethereum protocol-related constants -use alloy_primitives::{address, b256, Address, B256, U256}; +use alloy_primitives::{address, b256, Address, B256}; /// Gas units, for example [`GIGAGAS`]. pub mod gas_units; @@ -9,20 +9,6 @@ pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; /// The client version: `reth/v{major}.{minor}.{patch}` pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); -/// The minimum tx fee below which the txpool will reject the transaction. -/// -/// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559 -/// parameters. `BASE_FEE_MAX_CHANGE_DENOMINATOR` -/// is `8`, or 12.5%. Once the base fee has dropped to `7` WEI it cannot decrease further because -/// 12.5% of 7 is less than 1. -/// -/// Note that min base fee under different 1559 parameterizations may differ, but there's no -/// significant harm in leaving this setting as is. 
-pub const MIN_PROTOCOL_BASE_FEE: u64 = 7; - -/// Same as [`MIN_PROTOCOL_BASE_FEE`] but as a U256. -pub const MIN_PROTOCOL_BASE_FEE_U256: U256 = U256::from_limbs([7u64, 0, 0, 0]); - /// Base fee max change denominator as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) pub const EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8; @@ -79,13 +65,3 @@ pub const OP_SYSTEM_TX_TO_ADDR: Address = address!("4200000000000000000000000000 /// Unwind depth of `3` blocks significantly reduces the chance that the reorged block is kept in /// the database. pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3; - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn min_protocol_sanity() { - assert_eq!(MIN_PROTOCOL_BASE_FEE_U256.to::(), MIN_PROTOCOL_BASE_FEE); - } -} diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 8fe49f47652..d4518846258 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -3,9 +3,8 @@ use crate::{ PoolSize, TransactionOrigin, }; use alloy_consensus::constants::EIP4844_TX_TYPE_ID; -use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; +use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::Address; -use reth_primitives::constants::MIN_PROTOCOL_BASE_FEE; use std::{collections::HashSet, ops::Mul}; /// Guarantees max transactions for one sender, compatible with geth/erigon diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index a3f192992d1..42de860db79 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -22,9 +22,9 @@ use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; -use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; +use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use 
alloy_primitives::{Address, TxHash, B256}; -use reth_primitives::constants::{eip4844::BLOB_TX_MIN_BLOB_GASPRICE, MIN_PROTOCOL_BASE_FEE}; +use reth_primitives::constants::eip4844::BLOB_TX_MIN_BLOB_GASPRICE; use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index d51bf80270d..858098ec91a 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -1,12 +1,10 @@ use crate::EthPooledTransaction; use alloy_consensus::{TxEip1559, TxEip4844, TxLegacy}; -use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList}; +use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2718::Encodable2718, eip2930::AccessList}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use rand::Rng; use reth_chainspec::MAINNET; -use reth_primitives::{ - constants::MIN_PROTOCOL_BASE_FEE, sign_message, Transaction, TransactionSigned, -}; +use reth_primitives::{sign_message, Transaction, TransactionSigned}; /// A generator for transactions for testing purposes. 
#[derive(Debug)] diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 474cf5cc8f8..99b0caaf48a 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -11,7 +11,7 @@ use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID}, TxEip1559, TxEip2930, TxEip4844, TxLegacy, }; -use alloy_eips::eip2930::AccessList; +use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2930::AccessList}; use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; use paste::paste; use rand::{ @@ -19,8 +19,7 @@ use rand::{ prelude::Distribution, }; use reth_primitives::{ - constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, - transaction::TryFromRecoveredTransactionError, + constants::eip4844::DATA_GAS_PER_BLOB, transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, BlobTransactionValidationError, PooledTransactionsElementEcRecovered, Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, }; diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index c1d0bbaa642..fea50962fd9 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -1,9 +1,8 @@ //! Transaction pool eviction tests. 
-use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; +use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::{Address, B256}; use rand::distributions::Uniform; -use reth_primitives::constants::MIN_PROTOCOL_BASE_FEE; use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{ From 11149d295e195fcdb480fe33e72c8c5c72cf900c Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Thu, 24 Oct 2024 15:13:05 +0200 Subject: [PATCH 144/970] feat(payload builder): transaction pool filter (#10542) Co-authored-by: Matthias Seitz --- Cargo.lock | 2 +- crates/transaction-pool/src/pool/best.rs | 2 +- crates/transaction-pool/src/traits.rs | 30 ++++++++++++++++++++++++ 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3449e381f98..c3003c991fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11913,4 +11913,4 @@ checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", -] +] \ No newline at end of file diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 52f25a9db8d..77cd35d8a6b 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -208,7 +208,7 @@ pub struct BestTransactionFilter { impl BestTransactionFilter { /// Create a new [`BestTransactionFilter`] with the given predicate. 
- pub(crate) const fn new(best: I, predicate: P) -> Self { + pub const fn new(best: I, predicate: P) -> Self { Self { best, predicate } } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 00cda8e1cbe..eb9d35ad56c 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -814,6 +814,36 @@ impl BestTransactions for std::iter::Empty { fn set_skip_blobs(&mut self, _skip_blobs: bool) {} } +/// A filter that allows to check if a transaction satisfies a set of conditions +pub trait TransactionFilter { + /// The type of the transaction to check. + type Transaction; + + /// Returns true if the transaction satisfies the conditions. + fn is_valid(&self, transaction: &Self::Transaction) -> bool; +} + +/// A no-op implementation of [`TransactionFilter`] which +/// marks all transactions as valid. +#[derive(Debug, Clone)] +pub struct NoopTransactionFilter(std::marker::PhantomData); + +// We can't derive Default because this forces T to be +// Default as well, which isn't necessary. +impl Default for NoopTransactionFilter { + fn default() -> Self { + Self(std::marker::PhantomData) + } +} + +impl TransactionFilter for NoopTransactionFilter { + type Transaction = T; + + fn is_valid(&self, _transaction: &Self::Transaction) -> bool { + true + } +} + /// A Helper type that bundles the best transactions attributes together. 
#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct BestTransactionsAttributes { From 53c547663796edc1891bf4c6c5a8b4c0f65597cb Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 24 Oct 2024 15:20:11 +0200 Subject: [PATCH 145/970] chore(primitive-traits): rm `EIP1559_` constants (#12036) --- crates/primitives-traits/src/constants/mod.rs | 34 ------------------- 1 file changed, 34 deletions(-) diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 6ed88ce592c..f3dd28e1929 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -9,43 +9,9 @@ pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; /// The client version: `reth/v{major}.{minor}.{patch}` pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); -/// Base fee max change denominator as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) -pub const EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8; - -/// Elasticity multiplier as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) -pub const EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u64 = 2; - /// Minimum gas limit allowed for transactions. pub const MINIMUM_GAS_LIMIT: u64 = 5000; -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; - -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism Canyon -/// hardfork. -pub const OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; - -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. 
-pub const OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism Canyon -/// hardfork. -pub const OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; - -/// Base fee max change denominator for Base Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. 
-pub const BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 10; - /// Holesky genesis hash: `0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4` pub const HOLESKY_GENESIS_HASH: B256 = b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); From e74f185d95df7a18734899826890dbd12811cb36 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 24 Oct 2024 15:28:25 +0200 Subject: [PATCH 146/970] feat: add BestTransactions::filter_transactions (#12041) --- crates/transaction-pool/src/traits.rs | 13 +++++++++++++ crates/transaction-pool/tests/it/best.rs | 10 ++++++++++ crates/transaction-pool/tests/it/main.rs | 2 ++ 3 files changed, 25 insertions(+) create mode 100644 crates/transaction-pool/tests/it/best.rs diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index eb9d35ad56c..fa5e22ec0aa 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -761,6 +761,19 @@ pub trait BestTransactions: Iterator + Send { /// /// If set to true, no blob transactions will be returned. fn set_skip_blobs(&mut self, skip_blobs: bool); + + /// Creates an iterator which uses a closure to determine whether a transaction should be + /// returned by the iterator. + /// + /// All items the closure returns false for are marked as invalid via [`Self::mark_invalid`] and + /// descendant transactions will be skipped. + fn filter_transactions

(self, predicate: P) -> BestTransactionFilter + where + P: FnMut(&Self::Item) -> bool, + Self: Sized, + { + BestTransactionFilter::new(self, predicate) + } } impl BestTransactions for Box diff --git a/crates/transaction-pool/tests/it/best.rs b/crates/transaction-pool/tests/it/best.rs new file mode 100644 index 00000000000..cd7a93eaedb --- /dev/null +++ b/crates/transaction-pool/tests/it/best.rs @@ -0,0 +1,10 @@ +//! Best transaction and filter testing + +use reth_transaction_pool::{noop::NoopTransactionPool, BestTransactions, TransactionPool}; + +#[test] +fn test_best_transactions() { + let noop = NoopTransactionPool::default(); + let mut best = noop.best_transactions().filter_transactions(|_| true); + assert!(best.next().is_none()); +} diff --git a/crates/transaction-pool/tests/it/main.rs b/crates/transaction-pool/tests/it/main.rs index ead33a328dd..7db2b14c953 100644 --- a/crates/transaction-pool/tests/it/main.rs +++ b/crates/transaction-pool/tests/it/main.rs @@ -9,4 +9,6 @@ mod listeners; #[cfg(feature = "test-utils")] mod pending; +mod best; + const fn main() {} From 2fba3c027b7396db868b162e16635197cd584822 Mon Sep 17 00:00:00 2001 From: Deil Urba Date: Thu, 24 Oct 2024 14:32:44 +0100 Subject: [PATCH 147/970] feat: `ExExContext`'s dynamic config (#11983) --- crates/chainspec/src/api.rs | 7 ++- crates/exex/exex/src/context.rs | 21 ++++++- crates/exex/exex/src/dyn_context.rs | 86 +++++++++++++++++++++++++++ crates/exex/exex/src/lib.rs | 3 + crates/exex/exex/src/notifications.rs | 8 ++- crates/node/types/Cargo.toml | 2 +- crates/optimism/chainspec/src/lib.rs | 7 +-- 7 files changed, 121 insertions(+), 13 deletions(-) create mode 100644 crates/exex/exex/src/dyn_context.rs diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index f7061ff18fe..3751789cac8 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -4,7 +4,8 @@ use alloy_chains::Chain; use alloy_eips::eip1559::BaseFeeParams; use alloy_genesis::Genesis; use 
alloy_primitives::B256; -use core::fmt::{Debug, Display}; +use core::fmt::Debug; +use reth_ethereum_forks::DisplayHardforks; use reth_network_peers::NodeRecord; use reth_primitives_traits::Header; @@ -38,7 +39,7 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { fn prune_delete_limit(&self) -> usize; /// Returns a string representation of the hardforks. - fn display_hardforks(&self) -> impl Display; + fn display_hardforks(&self) -> DisplayHardforks; /// The genesis header. fn genesis_header(&self) -> &Header; @@ -83,7 +84,7 @@ impl EthChainSpec for ChainSpec { self.prune_delete_limit } - fn display_hardforks(&self) -> impl Display { + fn display_hardforks(&self) -> DisplayHardforks { self.display_hardforks() } diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index c4b4f351baa..70972f9be7f 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -7,7 +7,7 @@ use reth_primitives::Head; use reth_tasks::TaskExecutor; use tokio::sync::mpsc::UnboundedSender; -use crate::{ExExEvent, ExExNotifications, ExExNotificationsStream}; +use crate::{ExExContextDyn, ExExEvent, ExExNotifications, ExExNotificationsStream}; /// Captures the context that an `ExEx` has access to. pub struct ExExContext { @@ -55,7 +55,24 @@ where } } -impl ExExContext { +impl ExExContext +where + Node: FullNodeComponents, + Node::Provider: Debug, + Node::Executor: Debug, +{ + /// Returns dynamic version of the context + pub fn into_dyn(self) -> ExExContextDyn { + ExExContextDyn::from(self) + } +} + +impl ExExContext +where + Node: FullNodeComponents, + Node::Provider: Debug, + Node::Executor: Debug, +{ /// Returns the transaction pool of the node. pub fn pool(&self) -> &Node::Pool { self.components.pool() diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs new file mode 100644 index 00000000000..19d5a0e5062 --- /dev/null +++ b/crates/exex/exex/src/dyn_context.rs @@ -0,0 +1,86 @@ +//! 
Mirrored version of [`ExExContext`](`crate::ExExContext`) +//! without generic abstraction over [Node](`reth_node_api::FullNodeComponents`) + +use std::{fmt::Debug, sync::Arc}; + +use reth_chainspec::{EthChainSpec, Head}; +use reth_node_api::FullNodeComponents; +use reth_node_core::node_config::NodeConfig; +use tokio::sync::mpsc; + +use crate::{ExExContext, ExExEvent, ExExNotificationsStream}; + +// TODO(0xurb) - add `node` after abstractions +/// Captures the context that an `ExEx` has access to. +pub struct ExExContextDyn { + /// The current head of the blockchain at launch. + pub head: Head, + /// The config of the node + pub config: NodeConfig>, + /// The loaded node config + pub reth_config: reth_config::Config, + /// Channel used to send [`ExExEvent`]s to the rest of the node. + /// + /// # Important + /// + /// The exex should emit a `FinishedHeight` whenever a processed block is safe to prune. + /// Additionally, the exex can pre-emptively emit a `FinishedHeight` event to specify what + /// blocks to receive notifications for. + pub events: mpsc::UnboundedSender, + /// Channel to receive [`ExExNotification`](crate::ExExNotification)s. + /// + /// # Important + /// + /// Once an [`ExExNotification`](crate::ExExNotification) is sent over the channel, it is + /// considered delivered by the node. 
+ pub notifications: Box, +} + +impl Debug for ExExContextDyn { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ExExContext") + .field("head", &self.head) + .field("config", &self.config) + .field("reth_config", &self.reth_config) + .field("events", &self.events) + .field("notifications", &self.notifications) + .finish() + } +} + +impl From> for ExExContextDyn +where + Node: FullNodeComponents, + Node::Provider: Debug, + Node::Executor: Debug, +{ + fn from(ctx: ExExContext) -> Self { + // convert `NodeConfig` with generic over chainspec into `NodeConfig` + let chain: Arc> = + Arc::new(Box::new(ctx.config.chain) as Box); + let config = NodeConfig { + chain, + datadir: ctx.config.datadir, + config: ctx.config.config, + metrics: ctx.config.metrics, + instance: ctx.config.instance, + network: ctx.config.network, + rpc: ctx.config.rpc, + txpool: ctx.config.txpool, + builder: ctx.config.builder, + debug: ctx.config.debug, + db: ctx.config.db, + dev: ctx.config.dev, + pruning: ctx.config.pruning, + }; + let notifications = Box::new(ctx.notifications) as Box; + + Self { + head: ctx.head, + config, + reth_config: ctx.reth_config, + events: ctx.events, + notifications, + } + } +} diff --git a/crates/exex/exex/src/lib.rs b/crates/exex/exex/src/lib.rs index edc9e40d449..ce6641ff673 100644 --- a/crates/exex/exex/src/lib.rs +++ b/crates/exex/exex/src/lib.rs @@ -40,6 +40,9 @@ pub use backfill::*; mod context; pub use context::*; +mod dyn_context; +pub use dyn_context::*; + mod event; pub use event::*; diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 90a0ee230a4..ea112219a3f 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -24,7 +24,9 @@ pub struct ExExNotifications { /// A trait, that represents a stream of [`ExExNotification`]s. The stream will emit notifications /// for all blocks. 
If the stream is configured with a head via [`ExExNotifications::set_with_head`] /// or [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. -pub trait ExExNotificationsStream: Stream> + Unpin { +pub trait ExExNotificationsStream: + Debug + Stream> + Unpin +{ /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s without a head. /// /// It's a no-op if the stream has already been configured without a head. @@ -90,8 +92,8 @@ impl ExExNotifications { impl ExExNotificationsStream for ExExNotifications where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Debug + Unpin + 'static, + E: BlockExecutorProvider + Clone + Debug + Unpin + 'static, { fn set_without_head(&mut self) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml index b28dcfba591..5747abe9c34 100644 --- a/crates/node/types/Cargo.toml +++ b/crates/node/types/Cargo.toml @@ -16,4 +16,4 @@ reth-chainspec.workspace = true reth-db-api.workspace = true reth-engine-primitives.workspace = true reth-primitives.workspace = true -reth-primitives-traits.workspace = true \ No newline at end of file +reth-primitives-traits.workspace = true diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 83c499de525..03ce75aec04 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -23,7 +23,6 @@ use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; -use core::fmt::Display; use derive_more::{Constructor, Deref, From, Into}; pub use dev::OP_DEV; #[cfg(not(feature = "std"))] @@ -31,8 +30,8 @@ pub(crate) use once_cell::sync::Lazy as LazyLock; pub use op::OP_MAINNET; pub 
use op_sepolia::OP_SEPOLIA; use reth_chainspec::{ - BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, EthChainSpec, - EthereumHardforks, ForkFilter, ForkId, Hardforks, Head, + BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, + DisplayHardforks, EthChainSpec, EthereumHardforks, ForkFilter, ForkId, Hardforks, Head, }; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; use reth_network_peers::NodeRecord; @@ -203,7 +202,7 @@ impl EthChainSpec for OpChainSpec { self.inner.prune_delete_limit() } - fn display_hardforks(&self) -> impl Display { + fn display_hardforks(&self) -> DisplayHardforks { self.inner.display_hardforks() } From ba78e439385469925c6fc9df36444f1ef5a6a1bc Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 24 Oct 2024 17:52:13 +0100 Subject: [PATCH 148/970] fix(exex): relax `ExExContext` trait bounds (#12055) --- crates/exex/exex/src/context.rs | 33 +++++++++++++++++++++++++-- crates/exex/exex/src/dyn_context.rs | 2 +- crates/exex/exex/src/notifications.rs | 8 +++---- 3 files changed, 35 insertions(+), 8 deletions(-) diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 70972f9be7f..23d772b738a 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -70,8 +70,6 @@ where impl ExExContext where Node: FullNodeComponents, - Node::Provider: Debug, - Node::Executor: Debug, { /// Returns the transaction pool of the node. 
pub fn pool(&self) -> &Node::Pool { @@ -123,3 +121,34 @@ where self.notifications.set_with_head(head); } } + +#[cfg(test)] +mod tests { + use reth_exex_types::ExExHead; + use reth_node_api::FullNodeComponents; + + use crate::ExExContext; + + /// + #[test] + const fn issue_12054() { + #[allow(dead_code)] + struct ExEx { + ctx: ExExContext, + } + + impl ExEx { + async fn _test_bounds(mut self) -> eyre::Result<()> { + self.ctx.pool(); + self.ctx.block_executor(); + self.ctx.provider(); + self.ctx.network(); + self.ctx.payload_builder(); + self.ctx.task_executor(); + self.ctx.set_notifications_without_head(); + self.ctx.set_notifications_with_head(ExExHead { block: Default::default() }); + Ok(()) + } + } + } +} diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs index 19d5a0e5062..226f3a3feb9 100644 --- a/crates/exex/exex/src/dyn_context.rs +++ b/crates/exex/exex/src/dyn_context.rs @@ -43,7 +43,7 @@ impl Debug for ExExContextDyn { .field("config", &self.config) .field("reth_config", &self.reth_config) .field("events", &self.events) - .field("notifications", &self.notifications) + .field("notifications", &"...") .finish() } } diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index ea112219a3f..90a0ee230a4 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -24,9 +24,7 @@ pub struct ExExNotifications { /// A trait, that represents a stream of [`ExExNotification`]s. The stream will emit notifications /// for all blocks. If the stream is configured with a head via [`ExExNotifications::set_with_head`] /// or [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. -pub trait ExExNotificationsStream: - Debug + Stream> + Unpin -{ +pub trait ExExNotificationsStream: Stream> + Unpin { /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s without a head. 
/// /// It's a no-op if the stream has already been configured without a head. @@ -92,8 +90,8 @@ impl ExExNotifications { impl ExExNotificationsStream for ExExNotifications where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Debug + Unpin + 'static, - E: BlockExecutorProvider + Clone + Debug + Unpin + 'static, + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider + Clone + Unpin + 'static, { fn set_without_head(&mut self) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); From 777417ad8a1b68d3c1b27e0790cb5be3918ffad1 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 25 Oct 2024 03:34:12 +0900 Subject: [PATCH 149/970] feat: add `reth test-vectors compact --write|--read` (#11954) --- .github/workflows/compact.yml | 42 +++ Cargo.lock | 18 ++ crates/cli/commands/Cargo.toml | 32 ++- .../cli/commands/src/test_vectors/compact.rs | 257 ++++++++++++++++++ crates/cli/commands/src/test_vectors/mod.rs | 26 +- .../cli/commands/src/test_vectors/tables.rs | 6 +- crates/engine/tree/Cargo.toml | 3 +- crates/evm/Cargo.toml | 15 +- crates/net/eth-wire/Cargo.toml | 6 +- crates/optimism/bin/Cargo.toml | 4 + crates/optimism/cli/Cargo.toml | 13 + crates/optimism/cli/src/commands/mod.rs | 7 + .../optimism/cli/src/commands/test_vectors.rs | 72 +++++ crates/optimism/cli/src/lib.rs | 2 + crates/primitives-traits/Cargo.toml | 3 +- crates/primitives/Cargo.toml | 3 +- crates/primitives/src/receipt.rs | 12 + crates/primitives/src/transaction/mod.rs | 6 +- crates/prune/types/Cargo.toml | 11 + crates/prune/types/src/checkpoint.rs | 2 +- crates/prune/types/src/mode.rs | 16 +- crates/revm/Cargo.toml | 9 +- crates/stages/api/Cargo.toml | 3 +- crates/stages/stages/Cargo.toml | 3 +- crates/stages/types/Cargo.toml | 13 + crates/stages/types/src/checkpoints.rs | 18 +- crates/storage/codecs/Cargo.toml | 27 +- 
.../storage/codecs/src/alloy/access_list.rs | 2 + .../codecs/src/alloy/authorization_list.rs | 9 +- .../codecs/src/alloy/genesis_account.rs | 20 +- crates/storage/codecs/src/alloy/header.rs | 14 +- crates/storage/codecs/src/alloy/mod.rs | 36 ++- crates/storage/codecs/src/alloy/signature.rs | 3 +- .../codecs/src/alloy/transaction/eip1559.rs | 10 +- .../codecs/src/alloy/transaction/eip2930.rs | 8 +- .../codecs/src/alloy/transaction/eip4844.rs | 60 +++- .../codecs/src/alloy/transaction/eip7702.rs | 8 +- .../codecs/src/alloy/transaction/legacy.rs | 10 +- .../codecs/src/alloy/transaction/mod.rs | 82 +++++- .../codecs/src/alloy/transaction/optimism.rs | 8 +- crates/storage/codecs/src/alloy/withdrawal.rs | 8 +- crates/storage/codecs/src/lib.rs | 5 + crates/storage/codecs/src/test_utils.rs | 9 + crates/storage/db-api/Cargo.toml | 9 +- crates/storage/db-models/Cargo.toml | 3 +- crates/storage/db-models/src/accounts.rs | 2 +- crates/storage/db/Cargo.toml | 8 +- crates/storage/provider/Cargo.toml | 7 +- crates/trie/common/Cargo.toml | 3 +- crates/trie/common/src/nibbles.rs | 2 + crates/trie/trie/Cargo.toml | 3 +- 51 files changed, 857 insertions(+), 101 deletions(-) create mode 100644 .github/workflows/compact.yml create mode 100644 crates/cli/commands/src/test_vectors/compact.rs create mode 100644 crates/optimism/cli/src/commands/test_vectors.rs diff --git a/.github/workflows/compact.yml b/.github/workflows/compact.yml new file mode 100644 index 00000000000..c7435220c0f --- /dev/null +++ b/.github/workflows/compact.yml @@ -0,0 +1,42 @@ +# Ensures that `Compact` codec changes are backwards compatible. +# +# 1) checkout `main` +# 2) randomly generate and serialize to disk many different type vectors with `Compact` (eg. 
Header, Transaction, etc) +# 3) checkout `pr` +# 4) deserialize previously generated test vectors + +on: + workflow_dispatch: + +env: + CARGO_TERM_COLOR: always + +name: compact-codec +jobs: + compact-codec: + runs-on: + group: Reth + strategy: + matrix: + bin: + - cargo run --bin reth --features "dev" + - cargo run --bin op-reth --features "optimism dev" --manifest-path crates/optimism/bin/Cargo.toml + steps: + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Checkout base + uses: actions/checkout@v4 + with: + ref: ${{ github.base_ref || 'main' }} + # On `main` branch, generates test vectors and serializes them to disk using `Compact`. + - name: Generate compact vectors + run: ${{ matrix.bin }} -- test-vectors compact --write + - name: Checkout PR + uses: actions/checkout@v4 + with: + clean: false + # On incoming merge try to read and decode previously generated vectors with `Compact` + - name: Read vectors + run: ${{ matrix.bin }} -- test-vectors compact --read diff --git a/Cargo.lock b/Cargo.lock index c3003c991fb..a73a4231533 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6615,6 +6615,7 @@ dependencies = [ "reth-cli", "reth-cli-runner", "reth-cli-util", + "reth-codecs", "reth-config", "reth-consensus", "reth-db", @@ -6638,10 +6639,13 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-prune", + "reth-prune-types", "reth-stages", + "reth-stages-types", "reth-static-file", "reth-static-file-types", "reth-trie", + "reth-trie-common", "reth-trie-db", "secp256k1", "serde", @@ -6696,6 +6700,7 @@ dependencies = [ "serde", "serde_json", "test-fuzz", + "visibility", ] [[package]] @@ -8090,6 +8095,8 @@ dependencies = [ "clap", "eyre", "futures-util", + "op-alloy-consensus", + "proptest", "reth-chainspec", "reth-cli", "reth-cli-commands", @@ -11233,6 +11240,17 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "visibility" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.80", +] + [[package]] name = "wait-timeout" version = "0.2.0" diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 6f4b1008f29..ef66a99410f 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -17,6 +17,7 @@ reth-cli.workspace = true reth-ethereum-cli.workspace = true reth-cli-runner.workspace = true reth-cli-util.workspace = true +reth-codecs = { workspace = true, optional = true } reth-config.workspace = true reth-consensus.workspace = true reth-db = { workspace = true, features = ["mdbx"] } @@ -38,11 +39,14 @@ reth-node-metrics.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true +reth-prune-types = { workspace = true, optional = true } reth-stages.workspace = true +reth-stages-types = { workspace = true, optional = true } reth-static-file-types = { workspace = true, features = ["clap"] } reth-static-file.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } +reth-trie-common = { workspace = true, optional = true } # ethereum alloy-eips.workspace = true @@ -89,14 +93,22 @@ reth-discv4.workspace = true [features] default = [] arbitrary = [ - "dep:proptest", - "dep:arbitrary", - "dep:proptest-arbitrary-interop", - "reth-primitives/arbitrary", - "reth-db-api/arbitrary", - "reth-eth-wire/arbitrary", - "reth-db/arbitrary", - "reth-chainspec/arbitrary", - "alloy-eips/arbitrary", - "alloy-primitives/arbitrary", + "dep:proptest", + "dep:arbitrary", + "dep:proptest-arbitrary-interop", + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + 
"reth-eth-wire/arbitrary", + "reth-db/arbitrary", + "reth-chainspec/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-codecs/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", + "reth-trie-common/test-utils", + "reth-codecs?/arbitrary", + "reth-prune-types?/arbitrary", + "reth-stages-types?/arbitrary", + "reth-trie-common?/arbitrary" ] diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs new file mode 100644 index 00000000000..cda7d5bd578 --- /dev/null +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -0,0 +1,257 @@ +use alloy_primitives::{hex, private::getrandom::getrandom, TxKind}; +use arbitrary::Arbitrary; +use eyre::{Context, Result}; +use proptest::{ + prelude::{ProptestConfig, RngCore}, + test_runner::{TestRng, TestRunner}, +}; +use reth_codecs::alloy::{ + authorization_list::Authorization, + genesis_account::GenesisAccount, + header::{Header, HeaderExt}, + transaction::{ + eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, + legacy::TxLegacy, + }, + withdrawal::Withdrawal, +}; +use reth_db::{ + models::{AccountBeforeTx, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals}, + ClientVersion, +}; +use reth_fs_util as fs; +use reth_primitives::{ + Account, Log, LogData, Receipt, ReceiptWithBloom, StorageEntry, Transaction, + TransactionSignedNoHash, TxType, Withdrawals, +}; +use reth_prune_types::{PruneCheckpoint, PruneMode}; +use reth_stages_types::{ + AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, + HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, StageUnitCheckpoint, + StorageHashingCheckpoint, +}; +use reth_trie::{hash_builder::HashBuilderValue, TrieMask}; +use reth_trie_common::{hash_builder::HashBuilderState, StoredNibbles, StoredNibblesSubKey}; +use std::{fs::File, io::BufReader}; + +pub const VECTORS_FOLDER: &str = 
"testdata/micro/compact"; +pub const VECTOR_SIZE: usize = 100; + +#[macro_export] +macro_rules! compact_types { + (regular: [$($regular_ty:ident),*], identifier: [$($id_ty:ident),*]) => { + pub const GENERATE_VECTORS: &[fn(&mut TestRunner) -> eyre::Result<()>] = &[ + $( + generate_vector::<$regular_ty> as fn(&mut TestRunner) -> eyre::Result<()>, + )* + $( + generate_vector::<$id_ty> as fn(&mut TestRunner) -> eyre::Result<()>, + )* + ]; + + pub const READ_VECTORS: &[fn() -> eyre::Result<()>] = &[ + $( + read_vector::<$regular_ty> as fn() -> eyre::Result<()>, + )* + $( + read_vector::<$id_ty> as fn() -> eyre::Result<()>, + )* + ]; + + pub static IDENTIFIER_TYPE: std::sync::LazyLock> = std::sync::LazyLock::new(|| { + let mut map = std::collections::HashSet::new(); + $( + map.insert(type_name::<$id_ty>()); + )* + map + }); + }; +} + +// The type that **actually** implements `Compact` should go here. If it's an alloy type, import the +// auxiliary type from reth_codecs::alloy instead. +compact_types!( + regular: [ + // reth-primitives + Account, + Receipt, + Withdrawals, + ReceiptWithBloom, + // reth_codecs::alloy + Authorization, + GenesisAccount, + Header, + HeaderExt, + Withdrawal, + TxEip2930, + TxEip1559, + TxEip4844, + TxEip7702, + TxLegacy, + HashBuilderValue, + LogData, + Log, + // BranchNodeCompact, // todo requires arbitrary + TrieMask, + // TxDeposit, TODO(joshie): optimism + // reth_prune_types + PruneCheckpoint, + PruneMode, + // reth_stages_types + AccountHashingCheckpoint, + StorageHashingCheckpoint, + ExecutionCheckpoint, + HeadersCheckpoint, + IndexHistoryCheckpoint, + EntitiesCheckpoint, + CheckpointBlockRange, + StageCheckpoint, + StageUnitCheckpoint, + // reth_db_api + StoredBlockOmmers, + StoredBlockBodyIndices, + StoredBlockWithdrawals, + // Manual implementations + TransactionSignedNoHash, + // Bytecode, // todo revm arbitrary + StorageEntry, + // MerkleCheckpoint, // todo storedsubnode -> branchnodecompact arbitrary + AccountBeforeTx, + 
ClientVersion, + StoredNibbles, + StoredNibblesSubKey, + // StorageTrieEntry, // todo branchnodecompact arbitrary + // StoredSubNode, // todo branchnodecompact arbitrary + HashBuilderState + ], + // These types require an extra identifier which is usually stored elsewhere (eg. parent type). + identifier: [ + // Signature todo we for v we only store parity(true || false), while v can take more values + Transaction, + TxType, + TxKind + ] +); + +/// Generates a vector of type `T` to a file. +pub fn generate_vectors() -> Result<()> { + generate_vectors_with(GENERATE_VECTORS) +} + +pub fn read_vectors() -> Result<()> { + read_vectors_with(READ_VECTORS) +} + +/// Generates a vector of type `T` to a file. +pub fn generate_vectors_with(gen: &[fn(&mut TestRunner) -> eyre::Result<()>]) -> Result<()> { + // Prepare random seed for test (same method as used by proptest) + let mut seed = [0u8; 32]; + getrandom(&mut seed)?; + println!("Seed for compact test vectors: {:?}", hex::encode_prefixed(seed)); + + // Start the runner with the seed + let config = ProptestConfig::default(); + let rng = TestRng::from_seed(config.rng_algorithm, &seed); + let mut runner = TestRunner::new_with_rng(config, rng); + + fs::create_dir_all(VECTORS_FOLDER)?; + + for generate_fn in gen { + generate_fn(&mut runner)?; + } + + Ok(()) +} + +/// Reads multiple vectors of different types ensuring their correctness by decoding and +/// re-encoding. +pub fn read_vectors_with(read: &[fn() -> eyre::Result<()>]) -> Result<()> { + fs::create_dir_all(VECTORS_FOLDER)?; + + for read_fn in read { + read_fn()?; + } + + Ok(()) +} + +/// Generates test vectors for a specific type `T`. 
+pub fn generate_vector(runner: &mut TestRunner) -> Result<()> +where + T: for<'a> Arbitrary<'a> + reth_codecs::Compact, +{ + let type_name = type_name::(); + print!("{}", &type_name); + + let mut bytes = std::iter::repeat(0u8).take(256).collect::>(); + let mut compact_buffer = vec![]; + + let mut values = Vec::with_capacity(VECTOR_SIZE); + for _ in 0..VECTOR_SIZE { + runner.rng().fill_bytes(&mut bytes); + compact_buffer.clear(); + + let obj = T::arbitrary(&mut arbitrary::Unstructured::new(&bytes))?; + let res = obj.to_compact(&mut compact_buffer); + + if IDENTIFIER_TYPE.contains(&type_name) { + compact_buffer.push(res as u8); + } + + values.push(hex::encode(&compact_buffer)); + } + + serde_json::to_writer( + std::io::BufWriter::new( + std::fs::File::create(format!("{VECTORS_FOLDER}/{}.json", &type_name)).unwrap(), + ), + &values, + )?; + + println!(" ✅"); + + Ok(()) +} + +/// Reads a vector of type `T` from a file and compares each item with its reconstructed version +/// using `T::from_compact`. +pub fn read_vector() -> Result<()> +where + T: reth_codecs::Compact, +{ + let type_name = type_name::(); + print!("{}", &type_name); + + // Read the file where the vectors are stored + let file_path = format!("{VECTORS_FOLDER}/{}.json", &type_name); + let file = File::open(&file_path).wrap_err_with(|| { + "Failed to open vector. Make sure to run `reth test-vectors compact --write` first." 
+ })?; + let reader = BufReader::new(file); + + let stored_values: Vec = serde_json::from_reader(reader)?; + let mut buffer = vec![]; + + for hex_str in stored_values { + let mut compact_bytes = hex::decode(hex_str)?; + let mut identifier = None; + buffer.clear(); + + if IDENTIFIER_TYPE.contains(&type_name) { + identifier = compact_bytes.pop().map(|b| b as usize); + } + let len_or_identifier = identifier.unwrap_or(compact_bytes.len()); + + let (reconstructed, _) = T::from_compact(&compact_bytes, len_or_identifier); + reconstructed.to_compact(&mut buffer); + assert_eq!(buffer, compact_bytes); + } + + println!(" ✅"); + + Ok(()) +} + +pub fn type_name() -> String { + std::any::type_name::().replace("::", "__") +} diff --git a/crates/cli/commands/src/test_vectors/mod.rs b/crates/cli/commands/src/test_vectors/mod.rs index 999c0bc9132..001d0c2e862 100644 --- a/crates/cli/commands/src/test_vectors/mod.rs +++ b/crates/cli/commands/src/test_vectors/mod.rs @@ -2,7 +2,8 @@ use clap::{Parser, Subcommand}; -mod tables; +pub mod compact; +pub mod tables; /// Generate test-vectors for different data types. #[derive(Debug, Parser)] @@ -19,6 +20,22 @@ pub enum Subcommands { /// List of table names. Case-sensitive. names: Vec, }, + /// Randomly generate test vectors for each `Compact` type using the `--write` flag. + /// + /// The generated vectors are serialized in both `json` and `Compact` formats and saved to a + /// file. + /// + /// Use the `--read` flag to read and validate the previously generated vectors from file. + #[group(multiple = false, required = true)] + Compact { + /// Write test vectors to a file. + #[arg(long)] + write: bool, + + /// Read test vectors from a file. + #[arg(long)] + read: bool, + }, } impl Command { @@ -28,6 +45,13 @@ impl Command { Subcommands::Tables { names } => { tables::generate_vectors(names)?; } + Subcommands::Compact { write, .. 
} => { + if write { + compact::generate_vectors()?; + } else { + compact::read_vectors()?; + } + } } Ok(()) } diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 112685251d0..29ba50c8d83 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -1,4 +1,4 @@ -use alloy_primitives::private::getrandom::getrandom; +use alloy_primitives::{hex, private::getrandom::getrandom}; use arbitrary::Arbitrary; use eyre::Result; use proptest::{ @@ -17,11 +17,11 @@ const VECTORS_FOLDER: &str = "testdata/micro/db"; const PER_TABLE: usize = 1000; /// Generates test vectors for specified `tables`. If list is empty, then generate for all tables. -pub(crate) fn generate_vectors(mut tables: Vec) -> Result<()> { +pub fn generate_vectors(mut tables: Vec) -> Result<()> { // Prepare random seed for test (same method as used by proptest) let mut seed = [0u8; 32]; getrandom(&mut seed)?; - println!("Seed for test vectors: {:?}", seed); + println!("Seed for table test vectors: {:?}", hex::encode_prefixed(seed)); // Start the runner with the seed let config = ProptestConfig::default(); diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 6fe741db883..dee0bcaf7ce 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -95,5 +95,6 @@ test-utils = [ "reth-revm/test-utils", "reth-stages-api/test-utils", "reth-provider/test-utils", - "reth-trie/test-utils" + "reth-trie/test-utils", + "reth-prune-types?/test-utils" ] diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index d97a5786419..90fd532828e 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -57,11 +57,12 @@ std = [ "revm/std", ] test-utils = [ - "dep:parking_lot", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-revm/test-utils", - "revm/test-utils", + 
"dep:parking_lot", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils" ] diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index b0e256fdf63..83a3e163ebc 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -71,7 +71,8 @@ arbitrary = [ "dep:arbitrary", "reth-chainspec/arbitrary", "alloy-eips/arbitrary", - "alloy-primitives/arbitrary" + "alloy-primitives/arbitrary", + "reth-codecs/arbitrary" ] serde = [ "dep:serde", @@ -80,7 +81,8 @@ serde = [ "alloy-primitives/serde", "bytes/serde", "rand/serde", - "secp256k1/serde" + "secp256k1/serde", + "reth-codecs/serde" ] [[test]] diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index f60ef36a466..77166763100 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -47,6 +47,10 @@ optimism = [ "reth-provider/optimism" ] +dev = [ + "reth-optimism-cli/dev" +] + min-error-logs = ["tracing/release_max_level_error"] min-warn-logs = ["tracing/release_max_level_warn"] min-info-logs = ["tracing/release_max_level_info"] diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 7db41ccbe84..a2ba71214f5 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -65,6 +65,13 @@ tokio-util = { workspace = true, features = ["codec"] } tracing.workspace = true eyre.workspace = true +# reth test-vectors +proptest = { workspace = true, optional = true } +op-alloy-consensus = { workspace = true, features = [ + "arbitrary", +], optional = true } + + [dev-dependencies] tempfile.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } @@ -94,3 +101,9 @@ jemalloc = [ "reth-node-core/jemalloc", "reth-node-metrics/jemalloc" ] + +dev = [ + "dep:proptest", + "reth-cli-commands/arbitrary", + "op-alloy-consensus" +] 
diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs index a7674ec2c9b..d51f8993296 100644 --- a/crates/optimism/cli/src/commands/mod.rs +++ b/crates/optimism/cli/src/commands/mod.rs @@ -16,6 +16,9 @@ pub mod import; pub mod import_receipts; pub mod init_state; +#[cfg(feature = "dev")] +pub mod test_vectors; + /// Commands to be executed #[derive(Debug, Subcommand)] pub enum Commands @@ -55,4 +58,8 @@ pub enum Commands), + /// Generate Test Vectors + #[cfg(feature = "dev")] + #[command(name = "test-vectors")] + TestVectors(test_vectors::Command), } diff --git a/crates/optimism/cli/src/commands/test_vectors.rs b/crates/optimism/cli/src/commands/test_vectors.rs new file mode 100644 index 00000000000..093d63148ee --- /dev/null +++ b/crates/optimism/cli/src/commands/test_vectors.rs @@ -0,0 +1,72 @@ +//! Command for generating test vectors. + +use clap::{Parser, Subcommand}; +use op_alloy_consensus::TxDeposit; +use proptest::test_runner::TestRunner; +use reth_cli_commands::{ + compact_types, + test_vectors::{ + compact, + compact::{ + generate_vector, read_vector, GENERATE_VECTORS as ETH_GENERATE_VECTORS, + READ_VECTORS as ETH_READ_VECTORS, + }, + tables, + }, +}; + +/// Generate test-vectors for different data types. +#[derive(Debug, Parser)] +pub struct Command { + #[command(subcommand)] + command: Subcommands, +} + +#[derive(Subcommand, Debug)] +/// `reth test-vectors` subcommands +pub enum Subcommands { + /// Generates test vectors for specified tables. If no table is specified, generate for all. + Tables { + /// List of table names. Case-sensitive. + names: Vec, + }, + /// Generates test vectors for `Compact` types with `--write`. Reads and checks generated + /// vectors with `--read`. + #[group(multiple = false, required = true)] + Compact { + /// Write test vectors to a file. + #[arg(long)] + write: bool, + + /// Read test vectors from a file. 
+ #[arg(long)] + read: bool, + }, +} + +impl Command { + /// Execute the command + pub async fn execute(self) -> eyre::Result<()> { + match self.command { + Subcommands::Tables { names } => { + tables::generate_vectors(names)?; + } + Subcommands::Compact { write, .. } => { + compact_types!( + regular: [ + TxDeposit + ], identifier: [] + ); + + if write { + compact::generate_vectors_with(ETH_GENERATE_VECTORS)?; + compact::generate_vectors_with(GENERATE_VECTORS)?; + } else { + compact::read_vectors_with(ETH_READ_VECTORS)?; + compact::read_vectors_with(READ_VECTORS)?; + } + } + } + Ok(()) + } +} diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 235b4455969..43d12616484 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -169,6 +169,8 @@ where runner.run_command_until_exit(|ctx| command.execute::(ctx)) } Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), + #[cfg(feature = "dev")] + Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), } } diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 9634da40f47..4319232f824 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -74,7 +74,8 @@ arbitrary = [ "dep:proptest", "dep:proptest-arbitrary-interop", "alloy-eips/arbitrary", - "revm-primitives/arbitrary" + "revm-primitives/arbitrary", + "reth-codecs/arbitrary" ] serde-bincode-compat = [ "serde_with", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 107c218c758..5e761f41fe2 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -62,7 +62,7 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] # eth reth-chainspec.workspace = true -reth-codecs.workspace = true +reth-codecs = { workspace = true, features = ["test-utils"] } reth-primitives-traits = { workspace = true, features = ["arbitrary"] } 
reth-testing-utils.workspace = true revm-primitives = { workspace = true, features = ["arbitrary"] } @@ -120,6 +120,7 @@ arbitrary = [ "alloy-serde?/arbitrary", "op-alloy-consensus?/arbitrary", "op-alloy-rpc-types?/arbitrary", + "reth-codecs?/arbitrary" ] secp256k1 = ["dep:secp256k1"] c-kzg = [ diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index bb6c0841b8c..940b491e335 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -503,6 +503,18 @@ mod tests { use super::*; use alloy_primitives::{address, b256, bytes, hex_literal::hex}; + #[test] + fn test_decode_receipt() { + #[cfg(not(feature = "optimism"))] + reth_codecs::test_utils::test_decode::(&hex!( + "c428b52ffd23fc42696156b10200f034792b6a94c3850215c2fef7aea361a0c31b79d9a32652eefc0d4e2e730036061cff7344b6fc6132b50cda0ed810a991ae58ef013150c12b2522533cb3b3a8b19b7786a8b5ff1d3cdc84225e22b02def168c8858df" + )); + #[cfg(feature = "optimism")] + reth_codecs::test_utils::test_decode::(&hex!( + "c30328b52ffd23fc426961a00105007eb0042307705a97e503562eacf2b95060cce9de6de68386b6c155b73a9650021a49e2f8baad17f30faff5899d785c4c0873e45bc268bcf07560106424570d11f9a59e8f3db1efa4ceec680123712275f10d92c3411e1caaa11c7c5d591bc11487168e09934a9986848136da1b583babf3a7188e3aed007a1520f1cf4c1ca7d3482c6c28d37c298613c70a76940008816c4c95644579fd08471dc34732fd0f24" + )); + } + // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 #[test] fn encode_legacy_receipt() { diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 7798433d05d..b09fff9e2b6 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -649,10 +649,12 @@ impl reth_codecs::Compact for Transaction { let (tx, buf) = TxDeposit::from_compact(buf, buf.len()); (Self::Deposit(tx), buf) } - _ => unreachable!("Junk data in database: unknown Transaction variant"), + _ => unreachable!( + "Junk data in database: unknown 
Transaction variant: {identifier}" + ), } } - _ => unreachable!("Junk data in database: unknown Transaction variant"), + _ => unreachable!("Junk data in database: unknown Transaction variant: {identifier}"), } } } diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index 13def8eaa8b..5446d6f76ff 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -20,6 +20,7 @@ derive_more.workspace = true modular-bitfield.workspace = true serde.workspace = true thiserror.workspace = true +arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } @@ -29,3 +30,13 @@ proptest-arbitrary-interop.workspace = true serde_json.workspace = true test-fuzz.workspace = true toml.workspace = true + +[features] +test-utils = [ + "dep:arbitrary", + "reth-codecs/test-utils" +] +arbitrary = [ + "alloy-primitives/arbitrary", + "reth-codecs/arbitrary" +] diff --git a/crates/prune/types/src/checkpoint.rs b/crates/prune/types/src/checkpoint.rs index f654fba7df1..e0397c5afc8 100644 --- a/crates/prune/types/src/checkpoint.rs +++ b/crates/prune/types/src/checkpoint.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; /// Saves the pruning progress of a stage. #[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(Default, arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(Default, arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct PruneCheckpoint { /// Highest pruned block number. If it's [None], the pruning for block `0` is not finished yet. diff --git a/crates/prune/types/src/mode.rs b/crates/prune/types/src/mode.rs index 3465882993b..9a8e55bb383 100644 --- a/crates/prune/types/src/mode.rs +++ b/crates/prune/types/src/mode.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; /// Prune mode. 
#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] #[serde(rename_all = "lowercase")] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub enum PruneMode { /// Prune all blocks. @@ -17,6 +17,13 @@ pub enum PruneMode { Before(BlockNumber), } +#[cfg(any(test, feature = "test-utils"))] +impl Default for PruneMode { + fn default() -> Self { + Self::Full + } +} + impl PruneMode { /// Prune blocks up to the specified block number. The specified block number is also pruned. /// @@ -69,13 +76,6 @@ impl PruneMode { } } -#[cfg(test)] -impl Default for PruneMode { - fn default() -> Self { - Self::Full - } -} - #[cfg(test)] mod tests { use crate::{ diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 3f2a39a0b44..3ee68010108 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -43,10 +43,11 @@ std = [ "alloy-consensus/std", ] test-utils = [ - "dep:reth-trie", - "reth-primitives/test-utils", - "reth-trie?/test-utils", - "revm/test-utils", + "dep:reth-trie", + "reth-primitives/test-utils", + "reth-trie?/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils" ] serde = [ "revm/serde", diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index cba569a2a43..88a8e3b96d1 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -50,5 +50,6 @@ test-utils = [ "reth-consensus/test-utils", "reth-network-p2p/test-utils", "reth-primitives-traits/test-utils", - "reth-provider/test-utils" + "reth-provider/test-utils", + "reth-stages-types/test-utils" ] diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 0b26cb6a1e7..65bb2637b62 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -110,7 +110,8 @@ test-utils = [ "reth-codecs/test-utils", "reth-db-api/test-utils", "reth-trie-db/test-utils", - "reth-trie/test-utils" 
+ "reth-trie/test-utils", + "reth-prune-types/test-utils" ] [[bench]] diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index 54b14b335cb..a466b21b6f9 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -19,6 +19,7 @@ alloy-primitives.workspace = true modular-bitfield.workspace = true bytes.workspace = true serde.workspace = true +arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } @@ -26,3 +27,15 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true rand.workspace = true + +[features] +test-utils = [ + "dep:arbitrary", + "reth-codecs/test-utils", + "reth-trie-common/test-utils" +] +arbitrary = [ + "alloy-primitives/arbitrary", + "reth-codecs/arbitrary", + "reth-trie-common/arbitrary" +] diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index 79e896bf4d9..87225f1eec4 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -76,7 +76,7 @@ impl Compact for MerkleCheckpoint { /// Saves the progress of AccountHashing stage. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct AccountHashingCheckpoint { /// The next account to start hashing from. @@ -89,7 +89,7 @@ pub struct AccountHashingCheckpoint { /// Saves the progress of StorageHashing stage. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct StorageHashingCheckpoint { /// The next account to start hashing from. 
@@ -104,7 +104,7 @@ pub struct StorageHashingCheckpoint { /// Saves the progress of Execution stage. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct ExecutionCheckpoint { /// Block range which this checkpoint is valid for. @@ -115,7 +115,7 @@ pub struct ExecutionCheckpoint { /// Saves the progress of Headers stage. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct HeadersCheckpoint { /// Block range which this checkpoint is valid for. @@ -126,7 +126,7 @@ pub struct HeadersCheckpoint { /// Saves the progress of Index History stages. #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct IndexHistoryCheckpoint { /// Block range which this checkpoint is valid for. @@ -137,7 +137,7 @@ pub struct IndexHistoryCheckpoint { /// Saves the progress of abstract stage iterating over or downloading entities. #[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct EntitiesCheckpoint { /// Number of entities already processed. @@ -166,7 +166,7 @@ impl EntitiesCheckpoint { /// Saves the block range. Usually, it's used to check the validity of some stage checkpoint across /// multiple executions. 
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct CheckpointBlockRange { /// The first block of the range, inclusive. @@ -189,7 +189,7 @@ impl From<&RangeInclusive> for CheckpointBlockRange { /// Saves the progress of a stage. #[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct StageCheckpoint { /// The maximum block processed by the stage. @@ -256,7 +256,7 @@ impl StageCheckpoint { // is not a Copy type. /// Stage-specific checkpoint metrics. #[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub enum StageUnitCheckpoint { /// Saves the progress of AccountHashing stage. 
diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 2525b4e8d7f..20a0673dff6 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -27,6 +27,9 @@ op-alloy-consensus = { workspace = true, optional = true } # misc bytes.workspace = true modular-bitfield = { workspace = true, optional = true } +visibility = { version = "0.1.1", optional = true} +serde.workspace = true +arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] alloy-eips = { workspace = true, default-features = false, features = [ @@ -45,7 +48,6 @@ serde_json.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -serde.workspace = true [features] default = ["std", "alloy"] @@ -66,4 +68,25 @@ alloy = [ "dep:alloy-trie", ] optimism = ["alloy", "dep:op-alloy-consensus"] -test-utils = [] +test-utils = [ + "std", + "alloy", + "arbitrary", + "dep:visibility", + "dep:arbitrary" +] +serde = [ + "alloy-consensus?/serde", + "alloy-eips?/serde", + "alloy-primitives/serde", + "alloy-trie?/serde", + "bytes/serde", + "op-alloy-consensus?/serde" +] +arbitrary = [ + "alloy-consensus?/arbitrary", + "alloy-eips?/arbitrary", + "alloy-primitives/arbitrary", + "alloy-trie?/arbitrary", + "op-alloy-consensus?/arbitrary" +] diff --git a/crates/storage/codecs/src/alloy/access_list.rs b/crates/storage/codecs/src/alloy/access_list.rs index 306b64d7e4b..304b6bd388c 100644 --- a/crates/storage/codecs/src/alloy/access_list.rs +++ b/crates/storage/codecs/src/alloy/access_list.rs @@ -1,3 +1,5 @@ +//! 
Compact implementation for [`AccessList`] + use crate::Compact; use alloc::vec::Vec; use alloy_eips::eip2930::{AccessList, AccessListItem}; diff --git a/crates/storage/codecs/src/alloy/authorization_list.rs b/crates/storage/codecs/src/alloy/authorization_list.rs index 6dc36956d24..3fc9518a637 100644 --- a/crates/storage/codecs/src/alloy/authorization_list.rs +++ b/crates/storage/codecs/src/alloy/authorization_list.rs @@ -1,16 +1,21 @@ -use core::ops::Deref; +//! Compact implementation for [`AlloyAuthorization`] use crate::Compact; use alloy_eips::eip7702::{Authorization as AlloyAuthorization, SignedAuthorization}; use alloy_primitives::{Address, U256}; use bytes::Buf; +use core::ops::Deref; use reth_codecs_derive::add_arbitrary_tests; /// Authorization acts as bridge which simplifies Compact implementation for AlloyAuthorization. /// /// Notice: Make sure this struct is 1:1 with `alloy_eips::eip7702::Authorization` #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[add_arbitrary_tests(compact)] pub(crate) struct Authorization { chain_id: u64, diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs index 938ad1375b1..b29fe526df4 100644 --- a/crates/storage/codecs/src/alloy/genesis_account.rs +++ b/crates/storage/codecs/src/alloy/genesis_account.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyGenesisAccount`] + use crate::Compact; use alloc::vec::Vec; use alloy_genesis::GenesisAccount as AlloyGenesisAccount; @@ -22,8 +24,14 @@ pub(crate) struct GenesisAccountRef<'a> { private_key: Option<&'a B256>, } +/// Acts as bridge which simplifies Compact implementation for +/// `AlloyGenesisAccount`. 
#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[add_arbitrary_tests(compact)] pub(crate) struct GenesisAccount { /// The nonce of the account at genesis. @@ -39,14 +47,20 @@ pub(crate) struct GenesisAccount { } #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] #[add_arbitrary_tests(compact)] pub(crate) struct StorageEntries { entries: Vec, } #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] #[add_arbitrary_tests(compact)] pub(crate) struct StorageEntry { key: B256, diff --git a/crates/storage/codecs/src/alloy/header.rs b/crates/storage/codecs/src/alloy/header.rs index 90e67b1e312..78f2029c32e 100644 --- a/crates/storage/codecs/src/alloy/header.rs +++ b/crates/storage/codecs/src/alloy/header.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyHeader`] + use crate::Compact; use alloy_consensus::Header as AlloyHeader; use alloy_primitives::{Address, BlockNumber, Bloom, Bytes, B256, U256}; @@ -10,7 +12,11 @@ use alloy_primitives::{Address, BlockNumber, Bloom, Bytes, B256, U256}; /// will automatically apply to this type. 
/// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::Header`] -#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(serde::Serialize, serde::Deserialize, arbitrary::Arbitrary) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] pub(crate) struct Header { parent_hash: B256, @@ -42,7 +48,11 @@ pub(crate) struct Header { /// used as a field of [`Header`] for backwards compatibility. /// /// More information: & [`reth_codecs_derive::Compact`]. -#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(serde::Serialize, serde::Deserialize, arbitrary::Arbitrary) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] pub(crate) struct HeaderExt { requests_hash: Option, diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index ed77876c5ce..f1bf6a00e69 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1,13 +1,29 @@ -mod access_list; -mod authorization_list; -mod genesis_account; -mod header; -mod log; -mod signature; -mod transaction; -mod trie; -mod txkind; -mod withdrawal; +//! Implements Compact for alloy types. + +/// Will make it a pub mod if test-utils is enabled +macro_rules! 
cond_mod { + ($($mod_name:ident),*) => { + $( + #[cfg(feature = "test-utils")] + pub mod $mod_name; + #[cfg(not(feature = "test-utils"))] + mod $mod_name; + )* + }; +} + +cond_mod!( + access_list, + authorization_list, + genesis_account, + header, + log, + signature, + transaction, + trie, + txkind, + withdrawal +); #[cfg(test)] mod tests { diff --git a/crates/storage/codecs/src/alloy/signature.rs b/crates/storage/codecs/src/alloy/signature.rs index 70290ea96c1..0cc4774d0f8 100644 --- a/crates/storage/codecs/src/alloy/signature.rs +++ b/crates/storage/codecs/src/alloy/signature.rs @@ -1,6 +1,7 @@ -use alloy_primitives::{Parity, Signature, U256}; +//! Compact implementation for [`Signature`] use crate::Compact; +use alloy_primitives::{Parity, Signature, U256}; impl Compact for Signature { fn to_compact(&self, buf: &mut B) -> usize diff --git a/crates/storage/codecs/src/alloy/transaction/eip1559.rs b/crates/storage/codecs/src/alloy/transaction/eip1559.rs index 8e7594951fa..0e7f44cdec1 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip1559.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip1559.rs @@ -1,3 +1,5 @@ +//! 
Compact implementation for [`AlloyTxEip1559`] + use crate::Compact; use alloy_consensus::TxEip1559 as AlloyTxEip1559; use alloy_eips::eip2930::AccessList; @@ -11,8 +13,12 @@ use alloy_primitives::{Bytes, ChainId, TxKind, U256}; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip1559`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Compact, Default)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[cfg_attr(test, crate::add_arbitrary_tests(compact))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(any(test, feature = "test-utils"), crate::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] pub(crate) struct TxEip1559 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/eip2930.rs b/crates/storage/codecs/src/alloy/transaction/eip2930.rs index e0c78a3e4c0..75cab9e8a09 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip2930.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip2930.rs @@ -1,3 +1,5 @@ +//! 
Compact implementation for [`AlloyTxEip2930`] + use crate::Compact; use alloy_consensus::TxEip2930 as AlloyTxEip2930; use alloy_eips::eip2930::AccessList; @@ -13,7 +15,11 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip2930`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[add_arbitrary_tests(compact)] pub(crate) struct TxEip2930 { chain_id: ChainId, diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs index 27c6b924090..5ec36e06bf5 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip4844.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyTxEip4844`] + use crate::{Compact, CompactPlaceholder}; use alloc::vec::Vec; use alloy_consensus::TxEip4844 as AlloyTxEip4844; @@ -14,7 +16,8 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip4844`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "test-utils"), derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[add_arbitrary_tests(compact)] pub(crate) struct TxEip4844 { chain_id: ChainId, @@ -25,6 +28,13 @@ pub(crate) struct TxEip4844 { /// TODO(debt): this should be removed if we break the DB. 
/// Makes sure that the Compact bitflag struct has one bit after the above field: /// + #[cfg_attr( + feature = "test-utils", + serde( + serialize_with = "serialize_placeholder", + deserialize_with = "deserialize_placeholder" + ) + )] placeholder: Option, to: Address, value: U256, @@ -75,6 +85,54 @@ impl Compact for AlloyTxEip4844 { } } +#[cfg(any(test, feature = "test-utils"))] +impl<'a> arbitrary::Arbitrary<'a> for TxEip4844 { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + Ok(Self { + chain_id: ChainId::arbitrary(u)?, + nonce: u64::arbitrary(u)?, + gas_limit: u64::arbitrary(u)?, + max_fee_per_gas: u128::arbitrary(u)?, + max_priority_fee_per_gas: u128::arbitrary(u)?, + // Should always be Some for TxEip4844 + placeholder: Some(()), + to: Address::arbitrary(u)?, + value: U256::arbitrary(u)?, + access_list: AccessList::arbitrary(u)?, + blob_versioned_hashes: Vec::::arbitrary(u)?, + max_fee_per_blob_gas: u128::arbitrary(u)?, + input: Bytes::arbitrary(u)?, + }) + } +} + +#[cfg(any(test, feature = "test-utils"))] +fn serialize_placeholder(value: &Option<()>, serializer: S) -> Result +where + S: serde::Serializer, +{ + // Required otherwise `serde_json` will serialize it as null and would be `None` when decoding + // it again. 
+ match value { + Some(()) => serializer.serialize_str("placeholder"), // Custom serialization + None => serializer.serialize_none(), + } +} + +#[cfg(any(test, feature = "test-utils"))] +fn deserialize_placeholder<'de, D>(deserializer: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, +{ + use serde::de::Deserialize; + let s: Option = Option::deserialize(deserializer)?; + match s.as_deref() { + Some("placeholder") => Ok(Some(())), + None => Ok(None), + _ => Err(serde::de::Error::custom("unexpected value")), + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/storage/codecs/src/alloy/transaction/eip7702.rs b/crates/storage/codecs/src/alloy/transaction/eip7702.rs index e714be1c3f6..8acf59425f2 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip7702.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip7702.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyTxEip7702`] + use crate::Compact; use alloc::vec::Vec; use alloy_consensus::TxEip7702 as AlloyTxEip7702; @@ -14,7 +16,11 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip7702`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[add_arbitrary_tests(compact)] pub(crate) struct TxEip7702 { chain_id: ChainId, diff --git a/crates/storage/codecs/src/alloy/transaction/legacy.rs b/crates/storage/codecs/src/alloy/transaction/legacy.rs index 27e799a790e..c83626aa4cf 100644 --- a/crates/storage/codecs/src/alloy/transaction/legacy.rs +++ b/crates/storage/codecs/src/alloy/transaction/legacy.rs @@ -1,11 +1,17 @@ +//! 
Compact implementation for [`AlloyTxLegacy`] + use crate::Compact; use alloy_consensus::TxLegacy as AlloyTxLegacy; use alloy_primitives::{Bytes, ChainId, TxKind, U256}; /// Legacy transaction. #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[cfg_attr(test, crate::add_arbitrary_tests(compact))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize), + crate::add_arbitrary_tests(compact) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] pub(crate) struct TxLegacy { /// Added as EIP-155: Simple replay attack protection chain_id: Option, diff --git a/crates/storage/codecs/src/alloy/transaction/mod.rs b/crates/storage/codecs/src/alloy/transaction/mod.rs index 5b1d173a528..dc27eacfacc 100644 --- a/crates/storage/codecs/src/alloy/transaction/mod.rs +++ b/crates/storage/codecs/src/alloy/transaction/mod.rs @@ -1,10 +1,18 @@ -pub(crate) mod eip1559; -pub(crate) mod eip2930; -pub(crate) mod eip4844; -pub(crate) mod eip7702; -pub(crate) mod legacy; -#[cfg(feature = "optimism")] -pub(crate) mod optimism; +//! 
Compact implementation for transaction types + +cond_mod!( + eip1559, + eip2930, + eip4844, + eip7702, + legacy +); + + +#[cfg(all(feature = "test-utils", feature = "optimism"))] +pub mod optimism; +#[cfg(all(not(feature = "test-utils"), feature = "optimism"))] +mod optimism; #[cfg(test)] mod tests { @@ -15,9 +23,13 @@ mod tests { // this check is to ensure we do not inadvertently add too many fields to a struct which would // expand the flags field and break backwards compatibility - use crate::alloy::transaction::{ - eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, - legacy::TxLegacy, + use alloy_primitives::hex; + use crate::{ + alloy::{header::Header, transaction::{ + eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, + legacy::TxLegacy, + }}, + test_utils::test_decode, }; #[test] @@ -34,4 +46,54 @@ mod tests { fn test_ensure_backwards_compatibility_optimism() { assert_eq!(crate::alloy::transaction::optimism::TxDeposit::bitflag_encoded_bytes(), 2); } + + #[test] + fn test_decode_header() { + test_decode::

(&hex!( + "01000000fbbb564baeafd064b979c2ac032df5cd987098066a8c6969514dfb8ecfbf043e667fa19efcc00d1dd197c309a3cc42dec820cd627af8f7f38f3274f842406891b22624431d0ea858422db8415b1181f8d19befbd21287debaf98a94e84b3ec20be846f35abfbf743ee3eda4fdda6a6f9124d295da97e26eaa1cedd09936f0a3c560b6bc10316dba5e82abd21afcf519a985feb09a6ce7fba2e8163b10f06c99828b8049c29b993d88d1d112dca60a03ebd8ebc6d69a7e1f301ca6d67c21fe0949d67bca251edf36c96a2cf7c84d98fc60a53988ac95820f434eb35280d98c8ba4d7484e7ee8fefd63591ad4c937ccaaea23871d05c77bac754c5759b34cf9b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + )); + } + + #[test] + fn test_decode_eip1559() { + test_decode::(&hex!( + "88086110b81b05bc5bb59ec3e4cd44e895a9dcb2656d5003e2f64ecb2e15443898cc1cc19af19ca96fc2b4eafc4abc26e4bbd70a3ddb10b7530b65eea128f4095c97164f712c04239902c1b08acf3949d4687123cdd72d5c73df113d2dc6ed7e519f410ace5553ca805975240a208b57013532de78c5cb407423ea11921ab11b13e93ef35d4d01c9a23166c4d627987545fe4675528d0ab111b0a1dc83fba0a4e1cd5c826a94db3f" + )); + } + + #[test] + fn test_decode_eip2930() { + test_decode::(&hex!( + "7810833fce14e3e2921e94fd3727eb71e91551d2c1e029697a654bfab510f3963aa57074015e152065d1c807f8830079fb0aeadc251d248eaec7147e78580ed638c4e667827775e24270edd5aad475776533ece65373afa71722bfeba3c900" + )); + } + + #[test] + fn test_decode_eip4844() { + test_decode::(&hex!( + 
"88086110025c359180ea680b5007c856f9e1ad4d1be7a5019feb42133f4fc4bdf74da1b457ab787462385a28a1bf8edb401adabf3ff21ac18f695e30180348ea67246fc4dc25e88add12b7c317651a0ce08946d98dbbe5b38883aa758a0f247e23b0fe3ac1bcc43d7212c984d6ccc770d70135890c9a07d715cacb9032c90d539d0b3d209a8d600178bcfb416fd489e5d5dd56d9cfc6addae810ae70bdaee65672b871dc2b3f35ec00dbaa0d872f78cb58b3199984c608c8ba" + )); + } + + #[test] + fn test_decode_eip7702() { + test_decode::(&hex!( + "8808210881415c034feba383d7a6efd3f2601309b33a6d682ad47168cac0f7a5c5136a33370e5e7ca7f570d5530d7a0d18bf5eac33583fdc27b6580f61e8cbd34d6de596f925c1f353188feb2c1e9e20de82a80b57f0be425d8c5896280d4f5f66cdcfba256d0c9ac8abd833859a62ec019501b4585fa176f048de4f88b93bdefecfcaf4d8f0dd04767bc683a4569c893632e44ba9d53f90d758125c9b24c0192a649166520cd5eecbc110b53eda400cf184b8ef9932c81d0deb2ea27dfa863392a87bfd53af3ec67379f20992501e76e387cbe3933861beead1b49649383cf8b2a2d5c6d04b7edc376981ed9b12cf7199fe7fabf5198659e001bed40922969b82a6cd000000000000" + )); + } + + #[test] + fn test_decode_legacy() { + test_decode::(&hex!( + "112210080a8ba06a8d108540bb3140e9f71a0812c46226f9ea77ae880d98d19fe27e5911801175c3b32620b2e887af0296af343526e439b775ee3b1c06750058e9e5fc4cd5965c3010f86184" + )); + } + + #[cfg(feature = "optimism")] + #[test] + fn test_decode_deposit() { + test_decode::(&hex!( + "8108ac8f15983d59b6ae4911a00ff7bfcd2e53d2950926f8c82c12afad02861c46fcb293e776204052725e1c08ff2e9ff602ca916357601fa972a14094891fe3598b718758f22c46f163c18bcaa6296ce87e5267ef3fd932112842fbbf79011548cdf067d93ce6098dfc0aaf5a94531e439f30d6dfd0c6" + )); + } } diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index f4fdcf5ee44..22f508fd4ce 100644 --- a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -1,3 +1,5 @@ +//! 
Compact implementation for [`AlloyTxDeposit`] + use crate::Compact; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use op_alloy_consensus::TxDeposit as AlloyTxDeposit; @@ -12,7 +14,11 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`op_alloy_consensus::TxDeposit`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[add_arbitrary_tests(compact)] pub(crate) struct TxDeposit { source_hash: B256, diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs index 16324c280cc..0f3347cec1a 100644 --- a/crates/storage/codecs/src/alloy/withdrawal.rs +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyWithdrawal`] + use crate::Compact; use alloy_eips::eip4895::Withdrawal as AlloyWithdrawal; use alloy_primitives::Address; @@ -7,7 +9,11 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with `alloy_eips::eip4895::Withdrawal` #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[add_arbitrary_tests(compact)] pub(crate) struct Withdrawal { /// Monotonically increasing identifier issued by consensus layer. 
diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index c432400a576..598f2131bde 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -18,6 +18,7 @@ #![cfg_attr(not(feature = "std"), no_std)] pub use reth_codecs_derive::*; +use serde as _; use alloy_primitives::{Address, Bloom, Bytes, FixedBytes, U256}; use bytes::{Buf, BufMut}; @@ -25,6 +26,10 @@ use bytes::{Buf, BufMut}; extern crate alloc; use alloc::vec::Vec; +#[cfg(feature = "test-utils")] +pub mod alloy; + +#[cfg(not(feature = "test-utils"))] #[cfg(any(test, feature = "alloy"))] mod alloy; diff --git a/crates/storage/codecs/src/test_utils.rs b/crates/storage/codecs/src/test_utils.rs index bb377c69167..b845645cb1a 100644 --- a/crates/storage/codecs/src/test_utils.rs +++ b/crates/storage/codecs/src/test_utils.rs @@ -79,3 +79,12 @@ impl UnusedBits { matches!(self, Self::NotZero) } } + +/// Tests decoding and re-encoding to ensure correctness. +pub fn test_decode(buf: &[u8]) { + let (decoded, _) = T::from_compact(buf, buf.len()); + let mut encoded = Vec::with_capacity(buf.len()); + + decoded.to_compact(&mut encoded); + assert_eq!(buf, &encoded[..]); +} diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 932a94b98eb..f827e48c8c3 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -62,7 +62,9 @@ test-utils = [ "reth-primitives-traits/test-utils", "reth-codecs/test-utils", "reth-db-models/test-utils", - "reth-trie-common/test-utils" + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils" ] arbitrary = [ "reth-primitives/arbitrary", @@ -72,7 +74,10 @@ arbitrary = [ "reth-primitives-traits/arbitrary", "reth-trie-common/arbitrary", "alloy-primitives/arbitrary", - "parity-scale-codec/arbitrary" + "parity-scale-codec/arbitrary", + "reth-codecs/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary" ] optimism = [ 
"reth-primitives/optimism", diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 31741207cad..d5f773347b0 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -48,5 +48,6 @@ arbitrary = [ "reth-primitives/arbitrary", "dep:arbitrary", "dep:proptest", - "alloy-primitives/arbitrary" + "alloy-primitives/arbitrary", + "reth-codecs/arbitrary" ] diff --git a/crates/storage/db-models/src/accounts.rs b/crates/storage/db-models/src/accounts.rs index b0099d22d5f..acfd45fe34e 100644 --- a/crates/storage/db-models/src/accounts.rs +++ b/crates/storage/db-models/src/accounts.rs @@ -8,7 +8,7 @@ use reth_primitives::Account; /// /// [`Address`] is the subkey. #[derive(Debug, Default, Clone, Eq, PartialEq, Serialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub struct AccountBeforeTx { /// Address for the account. Acts as `DupSort::SubKey`. 
diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 2f437e63109..324411613fc 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -97,7 +97,9 @@ test-utils = [ "reth-primitives-traits/test-utils", "reth-db-api/test-utils", "reth-nippy-jar/test-utils", - "reth-trie-common/test-utils" + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils" ] bench = [] arbitrary = [ @@ -105,7 +107,9 @@ arbitrary = [ "reth-db-api/arbitrary", "reth-primitives-traits/arbitrary", "reth-trie-common/arbitrary", - "alloy-primitives/arbitrary" + "alloy-primitives/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary" ] optimism = [ "reth-primitives/optimism", diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index b93c22cdf67..04a0bf42908 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -108,7 +108,8 @@ serde = [ "notify/serde", "parking_lot/serde", "rand/serde", - "revm/serde" + "revm/serde", + "reth-codecs/serde" ] test-utils = [ "reth-db/test-utils", @@ -124,5 +125,7 @@ test-utils = [ "reth-codecs/test-utils", "reth-db-api/test-utils", "reth-trie-db/test-utils", - "revm/test-utils" + "revm/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils" ] diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 2c6ccbfe689..0616e259710 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -55,5 +55,6 @@ arbitrary = [ "alloy-consensus/arbitrary", "alloy-primitives/arbitrary", "nybbles/arbitrary", - "revm-primitives/arbitrary" + "revm-primitives/arbitrary", + "reth-codecs/arbitrary" ] diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index 991fb68f3c0..cf94f135f54 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -19,6 +19,7 @@ pub use nybbles::Nibbles; 
Deserialize, derive_more::Index, )] +#[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibbles(pub Nibbles); impl From for StoredNibbles { @@ -74,6 +75,7 @@ impl Compact for StoredNibbles { /// The representation of nibbles of the merkle trie stored in the database. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord, Hash, Deref)] +#[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibblesSubKey(pub Nibbles); impl From for StoredNibblesSubKey { diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 112e661c027..134a3055c2b 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -79,7 +79,8 @@ test-utils = [ "triehash", "reth-trie-common/test-utils", "reth-primitives/test-utils", - "revm/test-utils" + "revm/test-utils", + "reth-stages-types/test-utils" ] [[bench]] From ea4fb26063221f2b173412fa64ee1660741181e1 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 25 Oct 2024 09:23:51 +0900 Subject: [PATCH 150/970] ci: enable `compact-codec` job (#12056) --- .github/workflows/compact.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/compact.yml b/.github/workflows/compact.yml index c7435220c0f..63f6f282fa2 100644 --- a/.github/workflows/compact.yml +++ b/.github/workflows/compact.yml @@ -6,7 +6,11 @@ # 4) deserialize previously generated test vectors on: - workflow_dispatch: + + pull_request: + merge_group: + push: + branches: [main] env: CARGO_TERM_COLOR: always From 965dabacad948030059c937ebab0711c9fa56748 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 25 Oct 2024 05:57:09 +0200 Subject: [PATCH 151/970] chore: add missing helpers to BestTransactions (#12044) --- crates/transaction-pool/src/traits.rs | 18 ++++++++++++++++++ crates/transaction-pool/tests/it/best.rs | 3 ++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git 
a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index fa5e22ec0aa..e3591272613 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -747,6 +747,15 @@ pub trait BestTransactions: Iterator + Send { /// listen to pool updates. fn no_updates(&mut self); + /// Convenience function for [`Self::no_updates`] that returns the iterator again. + fn without_updates(mut self) -> Self + where + Self: Sized, + { + self.no_updates(); + self + } + /// Skip all blob transactions. /// /// There's only limited blob space available in a block, once exhausted, EIP-4844 transactions @@ -762,6 +771,15 @@ pub trait BestTransactions: Iterator + Send { /// If set to true, no blob transactions will be returned. fn set_skip_blobs(&mut self, skip_blobs: bool); + /// Convenience function for [`Self::skip_blobs`] that returns the iterator again. + fn without_blobs(mut self) -> Self + where + Self: Sized, + { + self.skip_blobs(); + self + } + /// Creates an iterator which uses a closure to determine whether a transaction should be /// returned by the iterator. 
/// diff --git a/crates/transaction-pool/tests/it/best.rs b/crates/transaction-pool/tests/it/best.rs index cd7a93eaedb..20e83367643 100644 --- a/crates/transaction-pool/tests/it/best.rs +++ b/crates/transaction-pool/tests/it/best.rs @@ -5,6 +5,7 @@ use reth_transaction_pool::{noop::NoopTransactionPool, BestTransactions, Transac #[test] fn test_best_transactions() { let noop = NoopTransactionPool::default(); - let mut best = noop.best_transactions().filter_transactions(|_| true); + let mut best = + noop.best_transactions().filter_transactions(|_| true).without_blobs().without_updates(); assert!(best.next().is_none()); } From 674616cab976d3d297566373127c6b10e2fbd3ca Mon Sep 17 00:00:00 2001 From: Panagiotis Ganelis <50522617+PanGan21@users.noreply.github.com> Date: Fri, 25 Oct 2024 05:56:21 +0200 Subject: [PATCH 152/970] chore: change `net::discv5` log target to `discv5` (#12045) --- crates/net/discv5/src/lib.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index d4e8e928fda..a8154b7bd19 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -167,7 +167,7 @@ impl Discv5 { // let (enr, bc_enr, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config); - trace!(target: "net::discv5", + trace!(target: "discv5", ?enr, "local ENR" ); @@ -271,7 +271,7 @@ impl Discv5 { // to them over RLPx, to be compatible with EL discv5 implementations that don't // enforce this security measure. 
- trace!(target: "net::discv5", + trace!(target: "discv5", ?enr, %socket, "discovered unverifiable enr, source socket doesn't match socket advertised in ENR" @@ -296,7 +296,7 @@ impl Discv5 { let node_record = match self.try_into_reachable(enr, socket) { Ok(enr_bc) => enr_bc, Err(err) => { - trace!(target: "net::discv5", + trace!(target: "discv5", %err, ?enr, "discovered peer is unreachable" @@ -308,7 +308,7 @@ impl Discv5 { } }; if let FilterOutcome::Ignore { reason } = self.filter_discovered_peer(enr) { - trace!(target: "net::discv5", + trace!(target: "discv5", ?enr, reason, "filtered out discovered peer" @@ -324,7 +324,7 @@ impl Discv5 { .then(|| self.get_fork_id(enr).ok()) .flatten(); - trace!(target: "net::discv5", + trace!(target: "discv5", ?fork_id, ?enr, "discovered peer" @@ -491,7 +491,7 @@ pub async fn bootstrap( bootstrap_nodes: HashSet, discv5: &Arc, ) -> Result<(), Error> { - trace!(target: "net::discv5", + trace!(target: "discv5", ?bootstrap_nodes, "adding bootstrap nodes .." 
); @@ -508,7 +508,7 @@ pub async fn bootstrap( let discv5 = discv5.clone(); enr_requests.push(async move { if let Err(err) = discv5.request_enr(enode.to_string()).await { - debug!(target: "net::discv5", + debug!(target: "discv5", ?enode, %err, "failed adding boot node" @@ -545,7 +545,7 @@ pub fn spawn_populate_kbuckets_bg( for i in (0..bootstrap_lookup_countdown).rev() { let target = discv5::enr::NodeId::random(); - trace!(target: "net::discv5", + trace!(target: "discv5", %target, bootstrap_boost_runs_countdown=i, lookup_interval=format!("{:#?}", pulse_lookup_interval), @@ -563,7 +563,7 @@ pub fn spawn_populate_kbuckets_bg( // selection (ref kademlia) let target = get_lookup_target(kbucket_index, local_node_id); - trace!(target: "net::discv5", + trace!(target: "discv5", %target, lookup_interval=format!("{:#?}", lookup_interval), "starting periodic lookup query" @@ -628,11 +628,11 @@ pub async fn lookup( ); match discv5.find_node(target).await { - Err(err) => trace!(target: "net::discv5", + Err(err) => trace!(target: "discv5", %err, "lookup query failed" ), - Ok(peers) => trace!(target: "net::discv5", + Ok(peers) => trace!(target: "discv5", target=format!("{:#?}", target), peers_count=peers.len(), peers=format!("[{:#}]", peers.iter() @@ -645,7 +645,7 @@ pub async fn lookup( // `Discv5::connected_peers` can be subset of sessions, not all peers make it // into kbuckets, e.g. 
incoming sessions from peers with // unreachable enrs - debug!(target: "net::discv5", + debug!(target: "discv5", connected_peers=discv5.connected_peers(), "connected peers in routing table" ); From c3182f2a644c2f363f75bdeeee4c2bc69a7b9faf Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 25 Oct 2024 06:37:20 +0200 Subject: [PATCH 153/970] primitives-traits: small refac for `IntegerList` and more doc (#12049) --- crates/primitives-traits/src/integer_list.rs | 31 +++++++++++--------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs index 570c96c9fda..682fa0cf822 100644 --- a/crates/primitives-traits/src/integer_list.rs +++ b/crates/primitives-traits/src/integer_list.rs @@ -9,8 +9,16 @@ use serde::{ Deserialize, Deserializer, Serialize, Serializer, }; -/// Uses Roaring Bitmaps to hold a list of integers. It provides really good compression with the -/// capability to access its elements without decoding it. +/// A data structure that uses Roaring Bitmaps to efficiently store a list of integers. +/// +/// This structure provides excellent compression while allowing direct access to individual +/// elements without the need for full decompression. +/// +/// Key features: +/// - Efficient compression: the underlying Roaring Bitmaps significantly reduce memory usage. +/// - Direct access: elements can be accessed or queried without needing to decode the entire list. +/// - [`RoaringTreemap`] backing: internally backed by [`RoaringTreemap`], which supports 64-bit +/// integers. #[derive(Clone, PartialEq, Default, Deref)] pub struct IntegerList(pub RoaringTreemap); @@ -22,12 +30,12 @@ impl fmt::Debug for IntegerList { } impl IntegerList { - /// Creates a new empty `IntegerList`. + /// Creates a new empty [`IntegerList`]. 
pub fn empty() -> Self { Self(RoaringTreemap::new()) } - /// Creates an `IntegerList` from a list of integers. + /// Creates an [`IntegerList`] from a list of integers. /// /// Returns an error if the list is not pre-sorted. pub fn new(list: impl IntoIterator) -> Result { @@ -36,7 +44,7 @@ impl IntegerList { .map_err(|_| IntegerListError::UnsortedInput) } - // Creates an IntegerList from a pre-sorted list of integers. + /// Creates an [`IntegerList`] from a pre-sorted list of integers. /// /// # Panics /// @@ -54,11 +62,7 @@ impl IntegerList { /// Pushes a new integer to the list. pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> { - if self.0.push(value) { - Ok(()) - } else { - Err(IntegerListError::UnsortedInput) - } + self.0.push(value).then_some(()).ok_or(IntegerListError::UnsortedInput) } /// Clears the list. @@ -80,10 +84,9 @@ impl IntegerList { /// Deserializes a sequence of bytes into a proper [`IntegerList`]. pub fn from_bytes(data: &[u8]) -> Result { - Ok(Self( - RoaringTreemap::deserialize_from(data) - .map_err(|_| IntegerListError::FailedToDeserialize)?, - )) + RoaringTreemap::deserialize_from(data) + .map(Self) + .map_err(|_| IntegerListError::FailedToDeserialize) } } From d9889787a777738f0a62d6f933587b64ebf2b2d6 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 25 Oct 2024 09:01:44 +0400 Subject: [PATCH 154/970] feat: improve e2e tests API + feeHistory test (#12058) --- Cargo.lock | 24 ++++++ Cargo.toml | 1 + crates/e2e-test-utils/Cargo.toml | 1 + crates/e2e-test-utils/src/engine_api.rs | 3 +- crates/e2e-test-utils/src/lib.rs | 6 +- crates/e2e-test-utils/src/node.rs | 24 ++---- crates/e2e-test-utils/src/payload.rs | 23 +++-- crates/ethereum/node/Cargo.toml | 3 + crates/ethereum/node/tests/e2e/blobs.rs | 16 ++-- crates/ethereum/node/tests/e2e/dev.rs | 4 +- crates/ethereum/node/tests/e2e/eth.rs | 9 +- crates/ethereum/node/tests/e2e/main.rs | 1 + crates/ethereum/node/tests/e2e/p2p.rs | 7 +- crates/ethereum/node/tests/e2e/rpc.rs | 
109 ++++++++++++++++++++++++ crates/optimism/node/tests/e2e/p2p.rs | 2 - crates/optimism/node/tests/e2e/utils.rs | 31 +++---- 16 files changed, 202 insertions(+), 62 deletions(-) create mode 100644 crates/ethereum/node/tests/e2e/rpc.rs diff --git a/Cargo.lock b/Cargo.lock index a73a4231533..d3aa0b2953a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -128,6 +128,26 @@ dependencies = [ "serde_with", ] +[[package]] +name = "alloy-contract" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460ab80ce4bda1c80bcf96fe7460520476f2c7b734581c6567fac2708e2a60ef" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-sol-types", + "alloy-transport", + "futures", + "futures-util", + "thiserror", +] + [[package]] name = "alloy-dyn-abi" version = "0.8.8" @@ -7018,6 +7038,7 @@ dependencies = [ "alloy-rpc-types", "alloy-signer", "alloy-signer-local", + "derive_more 1.0.0", "eyre", "futures-util", "jsonrpsee", @@ -7971,10 +7992,13 @@ name = "reth-node-ethereum" version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-contract", + "alloy-eips", "alloy-genesis", "alloy-primitives", "alloy-provider", "alloy-signer", + "alloy-sol-types", "eyre", "futures", "rand 0.8.5", diff --git a/Cargo.toml b/Cargo.toml index e23efdeb315..d01ee01ce5c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -433,6 +433,7 @@ alloy-sol-types = "0.8.0" alloy-trie = { version = "0.7", default-features = false } alloy-consensus = { version = "0.5.4", default-features = false } +alloy-contract = { version = "0.5.4", default-features = false } alloy-eips = { version = "0.5.4", default-features = false } alloy-genesis = { version = "0.5.4", default-features = false } alloy-json-rpc = { version = "0.5.4", default-features = false } diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 04f031daa58..67bb7455536 100644 
--- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -45,3 +45,4 @@ alloy-rpc-types.workspace = true alloy-network.workspace = true alloy-consensus = { workspace = true, features = ["kzg"] } tracing.workspace = true +derive_more.workspace = true diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 5027b2620a6..729205211ff 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -50,12 +50,13 @@ impl EngineApiTestContext, ) -> eyre::Result where E::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, E::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, { + let versioned_hashes = + payload.block().blob_versioned_hashes_iter().copied().collect::>(); // submit payload to engine api let submission = if self .chain_spec diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index f5ee1e5e669..1e9717d082c 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -49,6 +49,7 @@ pub async fn setup( num_nodes: usize, chain_spec: Arc, is_dev: bool, + attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where N: Default + Node> + NodeTypesWithEngine, @@ -84,7 +85,7 @@ where .launch() .await?; - let mut node = NodeTestContext::new(node).await?; + let mut node = NodeTestContext::new(node, attributes_generator).await?; // Connect each node in a chain. 
if let Some(previous_node) = nodes.last_mut() { @@ -109,6 +110,7 @@ pub async fn setup_engine( num_nodes: usize, chain_spec: Arc, is_dev: bool, + attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<( Vec>>>, TaskManager, @@ -166,7 +168,7 @@ where }) .await?; - let mut node = NodeTestContext::new(node).await?; + let mut node = NodeTestContext::new(node, attributes_generator).await?; // Connect each node in a chain. if let Some(previous_node) = nodes.last_mut() { diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index f4007201804..07df36a33e1 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -59,12 +59,15 @@ where AddOns: RethRpcAddOns, { /// Creates a new test node - pub async fn new(node: FullNode) -> eyre::Result { + pub async fn new( + node: FullNode, + attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes + 'static, + ) -> eyre::Result { let builder = node.payload_builder.clone(); Ok(Self { inner: node.clone(), - payload: PayloadTestContext::new(builder).await?, + payload: PayloadTestContext::new(builder, attributes_generator).await?, network: NetworkTestContext::new(node.network.clone()), engine_api: EngineApiTestContext { chain_spec: node.chain_spec(), @@ -90,7 +93,6 @@ where &mut self, length: u64, tx_generator: impl Fn(u64) -> Pin>>, - attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes + Copy, ) -> eyre::Result> where Engine::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, @@ -101,7 +103,7 @@ where for i in 0..length { let raw_tx = tx_generator(i).await; let tx_hash = self.rpc.inject_tx(raw_tx).await?; - let (payload, eth_attr) = self.advance_block(vec![], attributes_generator).await?; + let (payload, eth_attr) = self.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; self.assert_new_block(tx_hash, block_hash, 
block_number).await?; @@ -116,14 +118,13 @@ where /// It triggers the resolve payload via engine api and expects the built payload event. pub async fn new_payload( &mut self, - attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes, ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where ::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, { // trigger new payload building draining the pool - let eth_attr = self.payload.new_payload(attributes_generator).await.unwrap(); + let eth_attr = self.payload.new_payload().await.unwrap(); // first event is the payload attributes self.payload.expect_attr_event(eth_attr.clone()).await?; // wait for the payload builder to have finished building @@ -137,8 +138,6 @@ where /// Advances the node forward one block pub async fn advance_block( &mut self, - versioned_hashes: Vec, - attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes, ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where ::ExecutionPayloadEnvelopeV3: @@ -146,16 +145,11 @@ where ::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, { - let (payload, eth_attr) = self.new_payload(attributes_generator).await?; + let (payload, eth_attr) = self.new_payload().await?; let block_hash = self .engine_api - .submit_payload( - payload.clone(), - eth_attr.clone(), - PayloadStatusEnum::Valid, - versioned_hashes, - ) + .submit_payload(payload.clone(), eth_attr.clone(), PayloadStatusEnum::Valid) .await?; // trigger forkchoice update via engine api to commit the block to the blockchain diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index a5e4f56ac42..29aa11895b7 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -5,29 +5,36 @@ use reth_payload_primitives::{Events, PayloadBuilder, PayloadTypes}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations -#[derive(Debug)] 
+#[derive(derive_more::Debug)] pub struct PayloadTestContext { pub payload_event_stream: BroadcastStream>, payload_builder: PayloadBuilderHandle, pub timestamp: u64, + #[debug(skip)] + attributes_generator: Box T::PayloadBuilderAttributes>, } impl PayloadTestContext { /// Creates a new payload helper - pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { + pub async fn new( + payload_builder: PayloadBuilderHandle, + attributes_generator: impl Fn(u64) -> T::PayloadBuilderAttributes + 'static, + ) -> eyre::Result { let payload_events = payload_builder.subscribe().await?; let payload_event_stream = payload_events.into_stream(); // Cancun timestamp - Ok(Self { payload_event_stream, payload_builder, timestamp: 1710338135 }) + Ok(Self { + payload_event_stream, + payload_builder, + timestamp: 1710338135, + attributes_generator: Box::new(attributes_generator), + }) } /// Creates a new payload job from static attributes - pub async fn new_payload( - &mut self, - attributes_generator: impl Fn(u64) -> T::PayloadBuilderAttributes, - ) -> eyre::Result { + pub async fn new_payload(&mut self) -> eyre::Result { self.timestamp += 1; - let attributes = attributes_generator(self.timestamp); + let attributes = (self.attributes_generator)(self.timestamp); self.payload_builder.send_new_payload(attributes.clone()).await.unwrap()?; Ok(attributes) } diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index e7784637a06..98224b99e26 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -55,6 +55,9 @@ alloy-consensus.workspace = true alloy-provider.workspace = true rand.workspace = true alloy-signer.workspace = true +alloy-eips.workspace = true +alloy-sol-types.workspace = true +alloy-contract.workspace = true [features] default = [] diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index b4d9a532aeb..976727bc815 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ 
b/crates/ethereum/node/tests/e2e/blobs.rs @@ -41,7 +41,7 @@ async fn can_handle_blobs() -> eyre::Result<()> { .launch() .await?; - let mut node = NodeTestContext::new(node).await?; + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; let wallets = Wallet::new(2).gen(); let blob_wallet = wallets.first().unwrap(); @@ -51,7 +51,7 @@ async fn can_handle_blobs() -> eyre::Result<()> { let raw_tx = TransactionTestContext::transfer_tx_bytes(1, second_wallet.clone()).await; let tx_hash = node.rpc.inject_tx(raw_tx).await?; // build payload with normal tx - let (payload, attributes) = node.new_payload(eth_payload_attributes).await?; + let (payload, attributes) = node.new_payload().await?; // clean the pool node.inner.pool.remove_transactions(vec![tx_hash]); @@ -64,16 +64,14 @@ async fn can_handle_blobs() -> eyre::Result<()> { // fetch it from rpc let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; // validate sidecar - let versioned_hashes = TransactionTestContext::validate_sidecar(envelope); + TransactionTestContext::validate_sidecar(envelope); // build a payload - let (blob_payload, blob_attr) = node.new_payload(eth_payload_attributes).await?; + let (blob_payload, blob_attr) = node.new_payload().await?; // submit the blob payload - let blob_block_hash = node - .engine_api - .submit_payload(blob_payload, blob_attr, PayloadStatusEnum::Valid, versioned_hashes.clone()) - .await?; + let blob_block_hash = + node.engine_api.submit_payload(blob_payload, blob_attr, PayloadStatusEnum::Valid).await?; let (_, _) = tokio::join!( // send fcu with blob hash @@ -83,7 +81,7 @@ async fn can_handle_blobs() -> eyre::Result<()> { ); // submit normal payload - node.engine_api.submit_payload(payload, attributes, PayloadStatusEnum::Valid, vec![]).await?; + node.engine_api.submit_payload(payload, attributes, PayloadStatusEnum::Valid).await?; tokio::time::sleep(std::time::Duration::from_secs(3)).await; diff --git a/crates/ethereum/node/tests/e2e/dev.rs 
b/crates/ethereum/node/tests/e2e/dev.rs index cad2fb34e5d..ead438b5a67 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use crate::utils::eth_payload_attributes; use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; @@ -17,7 +18,8 @@ use reth_tasks::TaskManager; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, _) = setup::(1, custom_chain(), true).await?; + let (mut nodes, _tasks, _) = + setup::(1, custom_chain(), true, eth_payload_attributes).await?; assert_chain_advances(nodes.pop().unwrap().inner).await; Ok(()) diff --git a/crates/ethereum/node/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs index 14bfb92d477..cb7517c0c93 100644 --- a/crates/ethereum/node/tests/e2e/eth.rs +++ b/crates/ethereum/node/tests/e2e/eth.rs @@ -26,6 +26,7 @@ async fn can_run_eth_node() -> eyre::Result<()> { .build(), ), false, + eth_payload_attributes, ) .await?; @@ -36,7 +37,7 @@ async fn can_run_eth_node() -> eyre::Result<()> { let tx_hash = node.rpc.inject_tx(raw_tx).await?; // make the node advance - let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + let (payload, _) = node.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; @@ -74,7 +75,7 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { .node(EthereumNode::default()) .launch() .await?; - let mut node = NodeTestContext::new(node).await?; + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; // Configure wallet from test mnemonic and create dummy transfer tx let wallet = Wallet::default(); @@ -84,7 +85,7 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { let tx_hash = node.rpc.inject_tx(raw_tx).await?; // make the node advance - let (payload, _) = 
node.advance_block(vec![], eth_payload_attributes).await?; + let (payload, _) = node.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; @@ -120,7 +121,7 @@ async fn test_failed_run_eth_node_with_no_auth_engine_api_over_ipc_opts() -> eyr .launch() .await?; - let node = NodeTestContext::new(node).await?; + let node = NodeTestContext::new(node, eth_payload_attributes).await?; // Ensure that the engine api client is not available let client = node.inner.engine_ipc_client().await; diff --git a/crates/ethereum/node/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs index 5dff7be17e1..4ed8ac5fcb6 100644 --- a/crates/ethereum/node/tests/e2e/main.rs +++ b/crates/ethereum/node/tests/e2e/main.rs @@ -4,6 +4,7 @@ mod blobs; mod dev; mod eth; mod p2p; +mod rpc; mod utils; const fn main() {} diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index 0fae23a0857..180b88bbd5a 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -30,6 +30,7 @@ async fn can_sync() -> eyre::Result<()> { .build(), ), false, + eth_payload_attributes, ) .await?; @@ -41,7 +42,7 @@ async fn can_sync() -> eyre::Result<()> { let tx_hash = first_node.rpc.inject_tx(raw_tx).await?; // make the node advance - let (payload, _) = first_node.advance_block(vec![], eth_payload_attributes).await?; + let (payload, _) = first_node.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; @@ -76,7 +77,7 @@ async fn e2e_test_send_transactions() -> eyre::Result<()> { ); let (mut nodes, _tasks, wallet) = - setup_engine::(2, chain_spec.clone(), false).await?; + setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; let mut node = nodes.pop().unwrap(); let signers = wallet.gen(); let provider = ProviderBuilder::new().with_recommended_fillers().on_http(node.rpc_url()); @@ -139,7 +140,7 @@ async fn 
e2e_test_send_transactions() -> eyre::Result<()> { pending.push(provider.send_tx_envelope(tx).await?); } - let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + let (payload, _) = node.advance_block().await?; assert!(payload.block().raw_transactions().len() == tx_count); for pending in pending { diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs new file mode 100644 index 00000000000..ddf3d5cba2a --- /dev/null +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -0,0 +1,109 @@ +use crate::utils::eth_payload_attributes; +use alloy_eips::calc_next_block_base_fee; +use alloy_primitives::U256; +use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder}; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::setup_engine; +use reth_node_ethereum::EthereumNode; +use std::sync::Arc; + +alloy_sol_types::sol! { + #[sol(rpc, bytecode = "6080604052348015600f57600080fd5b5060405160db38038060db833981016040819052602a91607a565b60005b818110156074576040805143602082015290810182905260009060600160408051601f19818403018152919052805160209091012080555080606d816092565b915050602d565b505060b8565b600060208284031215608b57600080fd5b5051919050565b60006001820160b157634e487b7160e01b600052601160045260246000fd5b5060010190565b60168060c56000396000f3fe6080604052600080fdfea164736f6c6343000810000a")] + contract GasWaster { + constructor(uint256 iterations) { + for (uint256 i = 0; i < iterations; i++) { + bytes32 slot = keccak256(abi.encode(block.number, i)); + assembly { + sstore(slot, slot) + } + } + } + } +} + +#[tokio::test] +async fn test_fee_history() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::thread_rng().gen(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {:?}", seed); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + 
.genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + let fee_history = provider.get_fee_history(10, 0_u64.into(), &[]).await?; + + let genesis_base_fee = chain_spec.initial_base_fee().unwrap() as u128; + let expected_first_base_fee = genesis_base_fee - + genesis_base_fee / chain_spec.base_fee_params_at_block(0).max_change_denominator; + assert_eq!(fee_history.base_fee_per_gas[0], genesis_base_fee); + assert_eq!(fee_history.base_fee_per_gas[1], expected_first_base_fee,); + + // Spend some gas + let builder = GasWaster::deploy_builder(&provider, U256::from(500)).send().await?; + node.advance_block().await?; + let receipt = builder.get_receipt().await?; + assert!(receipt.status()); + + let block = provider.get_block_by_number(1.into(), false).await?.unwrap(); + assert_eq!(block.header.gas_used as u128, receipt.gas_used,); + assert_eq!(block.header.base_fee_per_gas.unwrap(), expected_first_base_fee as u64); + + for _ in 0..100 { + let _ = + GasWaster::deploy_builder(&provider, U256::from(rng.gen_range(0..1000))).send().await?; + + node.advance_block().await?; + } + + let latest_block = provider.get_block_number().await?; + + for _ in 0..100 { + let latest_block = rng.gen_range(0..=latest_block); + let block_count = rng.gen_range(1..=(latest_block + 1)); + + let fee_history = provider.get_fee_history(block_count, latest_block.into(), &[]).await?; + + let mut prev_header = provider + .get_block_by_number((latest_block + 1 - block_count).into(), false) + .await? 
+ .unwrap() + .header; + for block in (latest_block + 2 - block_count)..=latest_block { + let expected_base_fee = calc_next_block_base_fee( + prev_header.gas_used, + prev_header.gas_limit, + prev_header.base_fee_per_gas.unwrap(), + chain_spec.base_fee_params_at_block(block), + ); + + let header = provider.get_block_by_number(block.into(), false).await?.unwrap().header; + + assert_eq!(header.base_fee_per_gas.unwrap(), expected_base_fee as u64); + assert_eq!( + header.base_fee_per_gas.unwrap(), + fee_history.base_fee_per_gas[(block + block_count - 1 - latest_block) as usize] + as u64 + ); + + prev_header = header; + } + } + + Ok(()) +} diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index ebd35cc8a5c..30affa9bafb 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -51,7 +51,6 @@ async fn can_sync() -> eyre::Result<()> { side_payload_chain[0].0.clone(), side_payload_chain[0].1.clone(), PayloadStatusEnum::Valid, - Default::default(), ) .await; @@ -81,7 +80,6 @@ async fn can_sync() -> eyre::Result<()> { } .to_string(), }, - Default::default(), ) .await; diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 8ea8df380b0..48175e5b21a 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -21,6 +21,7 @@ pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskMa num_nodes, Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()), false, + optimism_payload_attributes, ) .await } @@ -31,23 +32,19 @@ pub(crate) async fn advance_chain( node: &mut OpNode, wallet: Arc>, ) -> eyre::Result> { - node.advance( - length as u64, - |_| { - let wallet = wallet.clone(); - Box::pin(async move { - let mut wallet = wallet.lock().await; - let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( - wallet.chain_id, - wallet.inner.clone(), - wallet.inner_nonce, - 
); - wallet.inner_nonce += 1; - tx_fut.await - }) - }, - optimism_payload_attributes, - ) + node.advance(length as u64, |_| { + let wallet = wallet.clone(); + Box::pin(async move { + let mut wallet = wallet.lock().await; + let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( + wallet.chain_id, + wallet.inner.clone(), + wallet.inner_nonce, + ); + wallet.inner_nonce += 1; + tx_fut.await + }) + }) .await } From 5a5ec73c37d2f84e27db7bc86d5c7893f641ca87 Mon Sep 17 00:00:00 2001 From: tedison <76473430+edisontim@users.noreply.github.com> Date: Fri, 25 Oct 2024 01:02:43 -0400 Subject: [PATCH 155/970] Change return type of ReceiptBuilder (#11987) --- crates/rpc/rpc-eth-types/src/receipt.rs | 17 ++++------------- crates/rpc/rpc/src/eth/helpers/block.rs | 3 ++- crates/rpc/rpc/src/eth/helpers/receipt.rs | 3 ++- 3 files changed, 8 insertions(+), 15 deletions(-) diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index c3232f2383b..198ca79aa2a 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -2,10 +2,8 @@ use alloy_consensus::Transaction; use alloy_primitives::{Address, TxKind}; -use alloy_rpc_types::{ - AnyReceiptEnvelope, AnyTransactionReceipt, Log, ReceiptWithBloom, TransactionReceipt, -}; -use alloy_serde::{OtherFields, WithOtherFields}; +use alloy_rpc_types::{AnyReceiptEnvelope, Log, ReceiptWithBloom, TransactionReceipt}; +use alloy_serde::OtherFields; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; use revm_primitives::calc_blob_gasprice; @@ -111,15 +109,8 @@ impl ReceiptBuilder { Ok(Self { base, other: Default::default() }) } - /// Adds fields to response body. - pub fn add_other_fields(mut self, mut fields: OtherFields) -> Self { - self.other.append(&mut fields); - self - } - /// Builds a receipt response from the base response body, and any set additional fields. 
- pub fn build(self) -> AnyTransactionReceipt { - let Self { base, other } = self; - WithOtherFields { inner: base, other } + pub fn build(self) -> TransactionReceipt> { + self.base } } diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index b2ff30b88f2..b29a24c38c4 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,6 +1,7 @@ //! Contains RPC handler implementations specific to blocks. use alloy_rpc_types::{AnyTransactionReceipt, BlockId}; +use alloy_serde::WithOtherFields; use reth_primitives::TransactionMeta; use reth_provider::{BlockReaderIdExt, HeaderProvider}; use reth_rpc_eth_api::{ @@ -55,9 +56,9 @@ where excess_blob_gas, timestamp, }; - ReceiptBuilder::new(&tx, meta, receipt, &receipts) .map(|builder| builder.build()) + .map(WithOtherFields::new) }) .collect::, Self::Error>>() .map(Some) diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 2ac36094494..570ec4fa3c0 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,5 +1,6 @@ //! Builds an RPC receipt response w.r.t. data layout of network. +use alloy_serde::WithOtherFields; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; @@ -30,6 +31,6 @@ where .map_err(Self::Error::from_eth_err)? 
.ok_or(EthApiError::HeaderNotFound(hash.into()))?; - Ok(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) + Ok(WithOtherFields::new(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build())) } } From 269d705c706479cebe303bd527d84a900b068070 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 25 Oct 2024 10:38:07 +0200 Subject: [PATCH 156/970] test: ensure we acquire file lock in tests (#12064) --- crates/storage/db/src/lockfile.rs | 51 ++++++++++++++++++------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index 6dc063a167a..a87ab7393f1 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -30,31 +30,35 @@ impl StorageLock { /// Note: In-process exclusivity is not on scope. If called from the same process (or another /// with the same PID), it will succeed. pub fn try_acquire(path: &Path) -> Result { - let file_path = path.join(LOCKFILE_NAME); - #[cfg(feature = "disable-lock")] { + let file_path = path.join(LOCKFILE_NAME); // Too expensive for ef-tests to write/read lock to/from disk. Ok(Self(Arc::new(StorageLockInner { file_path }))) } #[cfg(not(feature = "disable-lock"))] - { - if let Some(process_lock) = ProcessUID::parse(&file_path)? { - if process_lock.pid != (process::id() as usize) && process_lock.is_active() { - error!( - target: "reth::db::lockfile", - path = ?file_path, - pid = process_lock.pid, - start_time = process_lock.start_time, - "Storage lock already taken." - ); - return Err(StorageLockError::Taken(process_lock.pid)) - } - } + Self::try_acquire_file_lock(path) + } - Ok(Self(Arc::new(StorageLockInner::new(file_path)?))) + /// Acquire a file write lock. + #[cfg(any(test, not(feature = "disable-lock")))] + fn try_acquire_file_lock(path: &Path) -> Result { + let file_path = path.join(LOCKFILE_NAME); + if let Some(process_lock) = ProcessUID::parse(&file_path)? 
{ + if process_lock.pid != (process::id() as usize) && process_lock.is_active() { + error!( + target: "reth::db::lockfile", + path = ?file_path, + pid = process_lock.pid, + start_time = process_lock.start_time, + "Storage lock already taken." + ); + return Err(StorageLockError::Taken(process_lock.pid)) + } } + + Ok(Self(Arc::new(StorageLockInner::new(file_path)?))) } } @@ -164,10 +168,10 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); - let lock = StorageLock::try_acquire(temp_dir.path()).unwrap(); + let lock = StorageLock::try_acquire_file_lock(temp_dir.path()).unwrap(); // Same process can re-acquire the lock - assert_eq!(Ok(lock.clone()), StorageLock::try_acquire(temp_dir.path())); + assert_eq!(Ok(lock.clone()), StorageLock::try_acquire_file_lock(temp_dir.path())); // A lock of a non existent PID can be acquired. let lock_file = temp_dir.path().join(LOCKFILE_NAME); @@ -177,18 +181,21 @@ mod tests { fake_pid += 1; } ProcessUID { pid: fake_pid, start_time: u64::MAX }.write(&lock_file).unwrap(); - assert_eq!(Ok(lock.clone()), StorageLock::try_acquire(temp_dir.path())); + assert_eq!(Ok(lock.clone()), StorageLock::try_acquire_file_lock(temp_dir.path())); let mut pid_1 = ProcessUID::new(1).unwrap(); // If a parsed `ProcessUID` exists, the lock can NOT be acquired. pid_1.write(&lock_file).unwrap(); - assert_eq!(Err(StorageLockError::Taken(1)), StorageLock::try_acquire(temp_dir.path())); + assert_eq!( + Err(StorageLockError::Taken(1)), + StorageLock::try_acquire_file_lock(temp_dir.path()) + ); // A lock of a different but existing PID can be acquired ONLY IF the start_time differs. 
pid_1.start_time += 1; pid_1.write(&lock_file).unwrap(); - assert_eq!(Ok(lock), StorageLock::try_acquire(temp_dir.path())); + assert_eq!(Ok(lock), StorageLock::try_acquire_file_lock(temp_dir.path())); } #[test] @@ -198,7 +205,7 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let lock_file = temp_dir.path().join(LOCKFILE_NAME); - let lock = StorageLock::try_acquire(temp_dir.path()).unwrap(); + let lock = StorageLock::try_acquire_file_lock(temp_dir.path()).unwrap(); assert!(lock_file.exists()); drop(lock); From 2ae7ee51e0beb1920d4ab29e3736756321ccdee0 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 25 Oct 2024 19:13:46 +0900 Subject: [PATCH 157/970] fix: increase `arbitrary::Unstructured` buffer size if `NotEnoughData` is thrown (#12069) --- crates/cli/commands/src/test_vectors/compact.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index cda7d5bd578..162ee1ceaa4 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -191,7 +191,21 @@ where runner.rng().fill_bytes(&mut bytes); compact_buffer.clear(); - let obj = T::arbitrary(&mut arbitrary::Unstructured::new(&bytes))?; + // Sometimes type T, might require extra arbitrary data, so we retry it a few times. + let mut tries = 0; + let obj = loop { + match T::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) { + Ok(obj) => break obj, + Err(err) => { + if tries < 5 && matches!(err, arbitrary::Error::NotEnoughData) { + tries += 1; + bytes.extend(std::iter::repeat(0u8).take(256)); + } else { + return Err(err)? 
+ } + } + } + }; let res = obj.to_compact(&mut compact_buffer); if IDENTIFIER_TYPE.contains(&type_name) { From a87d654c55eacc8f4878affd671e08df7fa536a6 Mon Sep 17 00:00:00 2001 From: tedison <76473430+edisontim@users.noreply.github.com> Date: Fri, 25 Oct 2024 07:48:58 -0400 Subject: [PATCH 158/970] feat: introduce iterator for default_ethereum_payload function (#11978) --- crates/ethereum/payload/src/lib.rs | 46 ++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 951a909b91c..bb611441f03 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -33,7 +33,8 @@ use reth_primitives::{ use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, + noop::NoopTransactionPool, BestTransactions, BestTransactionsAttributes, TransactionPool, + ValidPoolTransaction, }; use reth_trie::HashedPostState; use revm::{ @@ -45,6 +46,10 @@ use revm_primitives::calc_excess_blob_gas; use std::sync::Arc; use tracing::{debug, trace, warn}; +type BestTransactionsIter = Box< + dyn BestTransactions::Transaction>>>, +>; + /// Ethereum payload builder #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct EthereumPayloadBuilder { @@ -94,7 +99,11 @@ where args: BuildArguments, ) -> Result, PayloadBuilderError> { let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env) + + let pool = args.pool.clone(); + default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env, |attributes| { + pool.best_transactions_with_attributes(attributes) + }) } fn build_empty_payload( @@ -102,19 +111,25 @@ where client: &Client, config: PayloadConfig, ) -> Result { - let args = BuildArguments 
{ + let args = BuildArguments::new( client, - config, // we use defaults here because for the empty payload we don't need to execute anything - pool: NoopTransactionPool::default(), - cached_reads: Default::default(), - cancel: Default::default(), - best_payload: None, - }; + NoopTransactionPool::default(), + Default::default(), + config, + Default::default(), + None, + ); + let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env)? - .into_payload() - .ok_or_else(|| PayloadBuilderError::MissingPayload) + + let pool = args.pool.clone(); + + default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env, |attributes| { + pool.best_transactions_with_attributes(attributes) + })? + .into_payload() + .ok_or_else(|| PayloadBuilderError::MissingPayload) } } @@ -124,16 +139,18 @@ where /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. #[inline] -pub fn default_ethereum_payload( +pub fn default_ethereum_payload( evm_config: EvmConfig, args: BuildArguments, initialized_cfg: CfgEnvWithHandlerCfg, initialized_block_env: BlockEnv, + best_txs: F, ) -> Result, PayloadBuilderError> where EvmConfig: ConfigureEvm
, Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, + F: FnOnce(BestTransactionsAttributes) -> BestTransactionsIter, { let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; @@ -153,11 +170,10 @@ where let mut executed_txs = Vec::new(); let mut executed_senders = Vec::new(); - let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( + let mut best_txs = best_txs(BestTransactionsAttributes::new( base_fee, initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), )); - let mut total_fees = U256::ZERO; let block_number = initialized_block_env.number.to::(); From 58441c158b27986b904efccf6cbe5953d090e083 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 25 Oct 2024 16:10:20 +0400 Subject: [PATCH 159/970] fix: fail on unwind during `reth import` (#12062) --- crates/cli/commands/src/import.rs | 1 + .../cli/src/commands/build_pipeline.rs | 1 + crates/stages/api/src/error.rs | 3 +++ crates/stages/api/src/pipeline/builder.rs | 19 +++++++++++++++++-- crates/stages/api/src/pipeline/mod.rs | 8 ++++++++ 5 files changed, 30 insertions(+), 2 deletions(-) diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index 6b750d32a3d..a7c81e53052 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -207,6 +207,7 @@ where .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty .with_max_block(max_block) + .with_fail_on_unwind(true) .add_stages( DefaultStages::new( provider_factory.clone(), diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs index f23cb9a7c16..a197f93a8b4 100644 --- a/crates/optimism/cli/src/commands/build_pipeline.rs +++ b/crates/optimism/cli/src/commands/build_pipeline.rs @@ -75,6 +75,7 @@ where .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty 
.with_max_block(max_block) + .with_fail_on_unwind(true) .add_stages( DefaultStages::new( provider_factory.clone(), diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs index 68e1d00fdae..8562b10b6a5 100644 --- a/crates/stages/api/src/error.rs +++ b/crates/stages/api/src/error.rs @@ -188,4 +188,7 @@ pub enum PipelineError { /// Internal error #[error(transparent)] Internal(#[from] RethError), + /// The pipeline encountered an unwind when `fail_on_unwind` was set to `true`. + #[error("unexpected unwind")] + UnexpectedUnwind, } diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 79a4c477ee6..45bdc2d8942 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -14,6 +14,7 @@ pub struct PipelineBuilder { /// A receiver for the current chain tip to sync to. tip_tx: Option>, metrics_tx: Option, + fail_on_unwind: bool, } impl PipelineBuilder { @@ -62,6 +63,12 @@ impl PipelineBuilder { self } + /// Set whether pipeline should fail on unwind. + pub const fn with_fail_on_unwind(mut self, yes: bool) -> Self { + self.fail_on_unwind = yes; + self + } + /// Builds the final [`Pipeline`] using the given database. 
pub fn build( self, @@ -72,7 +79,7 @@ impl PipelineBuilder { N: ProviderNodeTypes, ProviderFactory: DatabaseProviderFactory, { - let Self { stages, max_block, tip_tx, metrics_tx } = self; + let Self { stages, max_block, tip_tx, metrics_tx, fail_on_unwind } = self; Pipeline { provider_factory, stages, @@ -82,13 +89,20 @@ impl PipelineBuilder { event_sender: Default::default(), progress: Default::default(), metrics_tx, + fail_on_unwind, } } } impl Default for PipelineBuilder { fn default() -> Self { - Self { stages: Vec::new(), max_block: None, tip_tx: None, metrics_tx: None } + Self { + stages: Vec::new(), + max_block: None, + tip_tx: None, + metrics_tx: None, + fail_on_unwind: false, + } } } @@ -97,6 +111,7 @@ impl std::fmt::Debug for PipelineBuilder { f.debug_struct("PipelineBuilder") .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::>()) .field("max_block", &self.max_block) + .field("fail_on_unwind", &self.fail_on_unwind) .finish() } } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 14225a59528..399a3ffb4b7 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -78,6 +78,9 @@ pub struct Pipeline { /// A receiver for the current chain tip to sync to. tip_tx: Option>, metrics_tx: Option, + /// Whether an unwind should fail the syncing process. Should only be set when downloading + /// blocks from trusted sources and expecting them to be valid. + fail_on_unwind: bool, } impl Pipeline { @@ -164,6 +167,10 @@ impl Pipeline { loop { let next_action = self.run_loop().await?; + if next_action.is_unwind() && self.fail_on_unwind { + return Err(PipelineError::UnexpectedUnwind) + } + // Terminate the loop early if it's reached the maximum user // configured block. 
if next_action.should_continue() && @@ -586,6 +593,7 @@ impl std::fmt::Debug for Pipeline { .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::>()) .field("max_block", &self.max_block) .field("event_sender", &self.event_sender) + .field("fail_on_unwind", &self.fail_on_unwind) .finish() } } From 26d1b1524bea366e886ecf27a0e76e53d07aa0a5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 25 Oct 2024 14:17:07 +0200 Subject: [PATCH 160/970] fix: ignore discovered peers with tcp port 0 (#12065) --- crates/net/network/src/discovery.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index d366027d680..5b2bb788f47 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -214,6 +214,10 @@ impl Discovery { fn on_node_record_update(&mut self, record: NodeRecord, fork_id: Option) { let peer_id = record.id; let tcp_addr = record.tcp_addr(); + if tcp_addr.port() == 0 { + // useless peer for p2p + return + } let udp_addr = record.udp_addr(); let addr = PeerAddr::new(tcp_addr, Some(udp_addr)); _ = From 07bda5d453749a513fdd5afc5ea48dd90a3ba5a0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 25 Oct 2024 14:17:32 +0200 Subject: [PATCH 161/970] chore: EthBuiltPayload touchups (#12067) --- .../ethereum/engine-primitives/src/payload.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 1ad9c5450ee..50c4852545b 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -38,7 +38,9 @@ pub struct EthBuiltPayload { // === impl BuiltPayload === impl EthBuiltPayload { - /// Initializes the payload with the given initial block. 
+ /// Initializes the payload with the given initial block + /// + /// Caution: This does not set any [`BlobTransactionSidecar`]. pub const fn new( id: PayloadId, block: SealedBlock, @@ -69,9 +71,18 @@ impl EthBuiltPayload { } /// Adds sidecars to the payload. - pub fn extend_sidecars(&mut self, sidecars: Vec) { + pub fn extend_sidecars(&mut self, sidecars: impl IntoIterator) { self.sidecars.extend(sidecars) } + + /// Same as [`Self::extend_sidecars`] but returns the type again. + pub fn with_sidecars( + mut self, + sidecars: impl IntoIterator, + ) -> Self { + self.extend_sidecars(sidecars); + self + } } impl BuiltPayload for EthBuiltPayload { @@ -134,7 +145,7 @@ impl From for ExecutionPayloadEnvelopeV3 { // Spec: // should_override_builder: false, - blobs_bundle: sidecars.into_iter().map(Into::into).collect::>().into(), + blobs_bundle: sidecars.into(), } } } From 09506aa130b4c0dae3fb7029f27845b4f2c58321 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 25 Oct 2024 14:25:52 +0200 Subject: [PATCH 162/970] chore: rm TransactionFilter (#12066) --- crates/transaction-pool/src/traits.rs | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index e3591272613..c21a7a4ea75 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -815,25 +815,6 @@ where } } -/// A subtrait on the [`BestTransactions`] trait that allows to filter transactions. -pub trait BestTransactionsFilter: BestTransactions { - /// Creates an iterator which uses a closure to determine if a transaction should be yielded. - /// - /// Given an element the closure must return true or false. The returned iterator will yield - /// only the elements for which the closure returns true. - /// - /// Descendant transactions will be skipped. - fn filter

(self, predicate: P) -> BestTransactionFilter - where - P: FnMut(&Self::Item) -> bool, - Self: Sized, - { - BestTransactionFilter::new(self, predicate) - } -} - -impl BestTransactionsFilter for T where T: BestTransactions {} - /// A no-op implementation that yields no transactions. impl BestTransactions for std::iter::Empty { fn mark_invalid(&mut self, _tx: &T) {} From e93e373853ae2735fc6ca86ef0273bcacfd15c31 Mon Sep 17 00:00:00 2001 From: Debjit Bhowal Date: Fri, 25 Oct 2024 19:51:17 +0530 Subject: [PATCH 163/970] making `command` public (#12074) --- bin/reth/src/cli/mod.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 01662eb4dcb..192ab670028 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -39,7 +39,7 @@ pub struct Cli, + pub command: Commands, /// The chain this node is running. /// @@ -52,7 +52,7 @@ pub struct Cli, + pub chain: Arc, /// Add a new instance of a node. /// @@ -68,10 +68,11 @@ pub struct Cli Date: Fri, 25 Oct 2024 18:39:52 +0200 Subject: [PATCH 164/970] primitive-traits: add unit tests for `Account` (#12048) --- crates/primitives-traits/src/account.rs | 46 +++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index 063504b2a0e..ae58973edd7 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -256,4 +256,50 @@ mod tests { assert_eq!(decoded, bytecode); assert!(remainder.is_empty()); } + + #[test] + fn test_account_has_bytecode() { + // Account with no bytecode (None) + let acc_no_bytecode = Account { nonce: 1, balance: U256::from(1000), bytecode_hash: None }; + assert!(!acc_no_bytecode.has_bytecode(), "Account should not have bytecode"); + + // Account with bytecode hash set to KECCAK_EMPTY (should have bytecode) + let acc_empty_bytecode = + Account { nonce: 1, balance: U256::from(1000), 
bytecode_hash: Some(KECCAK_EMPTY) }; + assert!(acc_empty_bytecode.has_bytecode(), "Account should have bytecode"); + + // Account with a non-empty bytecode hash + let acc_with_bytecode = Account { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from_slice(&[0x11u8; 32])), + }; + assert!(acc_with_bytecode.has_bytecode(), "Account should have bytecode"); + } + + #[test] + fn test_account_get_bytecode_hash() { + // Account with no bytecode (should return KECCAK_EMPTY) + let acc_no_bytecode = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: None }; + assert_eq!(acc_no_bytecode.get_bytecode_hash(), KECCAK_EMPTY, "Should return KECCAK_EMPTY"); + + // Account with bytecode hash set to KECCAK_EMPTY + let acc_empty_bytecode = + Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(KECCAK_EMPTY) }; + assert_eq!( + acc_empty_bytecode.get_bytecode_hash(), + KECCAK_EMPTY, + "Should return KECCAK_EMPTY" + ); + + // Account with a valid bytecode hash + let bytecode_hash = B256::from_slice(&[0x11u8; 32]); + let acc_with_bytecode = + Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(bytecode_hash) }; + assert_eq!( + acc_with_bytecode.get_bytecode_hash(), + bytecode_hash, + "Should return the bytecode hash" + ); + } } From e676d71d0b5c4f372435277031e41afde349a073 Mon Sep 17 00:00:00 2001 From: tedison <76473430+edisontim@users.noreply.github.com> Date: Fri, 25 Oct 2024 13:08:01 -0400 Subject: [PATCH 165/970] feat: Freeze payload if final (#12078) Co-authored-by: Matthias Seitz --- crates/optimism/payload/src/builder.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index c85abfad7c4..85f687aa803 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -522,6 +522,8 @@ where trie: Arc::new(trie_output), }; + let no_tx_pool = attributes.no_tx_pool; + let mut payload = 
OptimismBuiltPayload::new( attributes.payload_attributes.id, sealed_block, @@ -534,5 +536,12 @@ where // extend the payload with the blob sidecars from the executed txs payload.extend_sidecars(blob_sidecars); - Ok(BuildOutcome::Better { payload, cached_reads }) + if no_tx_pool { + // if `no_tx_pool` is set only transactions from the payload attributes will be included in + // the payload. In other words, the payload is deterministic and we can freeze it once we've + // successfully built it. + Ok(BuildOutcome::Freeze(payload)) + } else { + Ok(BuildOutcome::Better { payload, cached_reads }) + } } From d91cacd14ac56495100a7785352e1a1ed8c8a42d Mon Sep 17 00:00:00 2001 From: Jeff Date: Fri, 25 Oct 2024 13:11:24 -0400 Subject: [PATCH 166/970] feat(rpc): rpc rate limiter impl (#11952) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/rpc/rpc-builder/Cargo.toml | 2 + crates/rpc/rpc-builder/src/lib.rs | 3 + crates/rpc/rpc-builder/src/rate_limiter.rs | 116 +++++++++++++++++++++ 4 files changed, 122 insertions(+) create mode 100644 crates/rpc/rpc-builder/src/rate_limiter.rs diff --git a/Cargo.lock b/Cargo.lock index d3aa0b2953a..a99803e1bc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8744,6 +8744,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", + "tokio-util", "tower 0.4.13", "tower-http", "tracing", diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index cc72c2ebf92..b9b511a078b 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -50,6 +50,8 @@ metrics.workspace = true serde = { workspace = true, features = ["derive"] } thiserror.workspace = true tracing.workspace = true +tokio-util = { workspace = true } +tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } [dev-dependencies] reth-chainspec.workspace = true diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 72b53efe674..ceafe206531 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ 
b/crates/rpc/rpc-builder/src/lib.rs @@ -226,6 +226,9 @@ pub use eth::EthHandlers; mod metrics; pub use metrics::{MeteredRequestFuture, RpcRequestMetricsService}; +// Rpc rate limiter +pub mod rate_limiter; + /// Convenience function for starting a server in one step. #[allow(clippy::too_many_arguments)] pub async fn launch( diff --git a/crates/rpc/rpc-builder/src/rate_limiter.rs b/crates/rpc/rpc-builder/src/rate_limiter.rs new file mode 100644 index 00000000000..85df0eee61c --- /dev/null +++ b/crates/rpc/rpc-builder/src/rate_limiter.rs @@ -0,0 +1,116 @@ +//! [`jsonrpsee`] helper layer for rate limiting certain methods. + +use jsonrpsee::{server::middleware::rpc::RpcServiceT, types::Request, MethodResponse}; +use std::{ + future::Future, + pin::Pin, + sync::Arc, + task::{ready, Context, Poll}, +}; +use tokio::sync::{OwnedSemaphorePermit, Semaphore}; +use tokio_util::sync::PollSemaphore; +use tower::Layer; + +/// Rate limiter for the RPC server. +/// +/// Rate limits expensive calls such as debug_ and trace_. +#[derive(Debug, Clone)] +pub struct RpcRequestRateLimiter { + inner: Arc, +} + +impl RpcRequestRateLimiter { + /// Create a new rate limit layer with the given number of permits. + pub fn new(rate_limit: usize) -> Self { + Self { + inner: Arc::new(RpcRequestRateLimiterInner { + call_guard: PollSemaphore::new(Arc::new(Semaphore::new(rate_limit))), + }), + } + } +} + +impl Layer for RpcRequestRateLimiter { + type Service = RpcRequestRateLimitingService; + + fn layer(&self, inner: S) -> Self::Service { + RpcRequestRateLimitingService::new(inner, self.clone()) + } +} + +/// Rate Limiter for the RPC server +#[derive(Debug, Clone)] +struct RpcRequestRateLimiterInner { + /// Semaphore to rate limit calls + call_guard: PollSemaphore, +} + +/// A [`RpcServiceT`] middleware that rate limits RPC calls to the server. 
+#[derive(Debug, Clone)] +pub struct RpcRequestRateLimitingService { + /// The rate limiter for RPC requests + rate_limiter: RpcRequestRateLimiter, + /// The inner service being wrapped + inner: S, +} + +impl RpcRequestRateLimitingService { + /// Create a new rate limited service. + pub const fn new(service: S, rate_limiter: RpcRequestRateLimiter) -> Self { + Self { inner: service, rate_limiter } + } +} + +impl<'a, S> RpcServiceT<'a> for RpcRequestRateLimitingService +where + S: RpcServiceT<'a> + Send + Sync + Clone + 'static, +{ + type Future = RateLimitingRequestFuture; + + fn call(&self, req: Request<'a>) -> Self::Future { + let method_name = req.method_name(); + if method_name.starts_with("trace_") || method_name.starts_with("debug_") { + RateLimitingRequestFuture { + fut: self.inner.call(req), + guard: Some(self.rate_limiter.inner.call_guard.clone()), + permit: None, + } + } else { + // if we don't need to rate limit, then there + // is no need to get a semaphore permit + RateLimitingRequestFuture { fut: self.inner.call(req), guard: None, permit: None } + } + } +} + +/// Response future. 
+#[pin_project::pin_project] +pub struct RateLimitingRequestFuture { + #[pin] + fut: F, + guard: Option, + permit: Option, +} + +impl std::fmt::Debug for RateLimitingRequestFuture { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("RateLimitingRequestFuture") + } +} + +impl> Future for RateLimitingRequestFuture { + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + if let Some(guard) = this.guard.as_mut() { + *this.permit = ready!(guard.poll_acquire(cx)); + *this.guard = None; + } + let res = this.fut.poll(cx); + if res.is_ready() { + *this.permit = None; + } + res + } +} From 16b64d8284a87888bae296992a7e4789594e8d16 Mon Sep 17 00:00:00 2001 From: Kien Trinh <51135161+kien6034@users.noreply.github.com> Date: Sat, 26 Oct 2024 03:22:02 +0700 Subject: [PATCH 167/970] feat(make): add docs lint (#12082) --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 5ad7abac675..ac4ad103858 100644 --- a/Makefile +++ b/Makefile @@ -497,6 +497,7 @@ test: pr: make lint && \ make update-book-cli && \ + cargo docs --document-private-items && \ make test check-features: From a349919b5c1b3f4b28959acebfa55f6786845fe9 Mon Sep 17 00:00:00 2001 From: AJStonewee Date: Fri, 25 Oct 2024 18:44:00 -0400 Subject: [PATCH 168/970] docs: remove deleted op-sync workflow from docs (#12086) Co-authored-by: Oliver --- docs/repo/ci.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/repo/ci.md b/docs/repo/ci.md index 5ed2cec0091..863a18f9c38 100644 --- a/docs/repo/ci.md +++ b/docs/repo/ci.md @@ -7,8 +7,7 @@ The CI runs a couple of workflows: - **[unit]**: Runs unit tests (tests in `src/`) and doc tests - **[integration]**: Runs integration tests (tests in `tests/` and sync tests) - **[bench]**: Runs benchmarks -- **[eth-sync]**: Runs Ethereum mainnet sync tests -- **[op-sync]**: Runs base mainnet sync tests for Optimism +- **[sync]**: 
Runs sync tests - **[stage]**: Runs all `stage run` commands ### Docs @@ -38,8 +37,7 @@ The CI runs a couple of workflows: [unit]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/unit.yml [integration]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/integration.yml [bench]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/bench.yml -[eth-sync]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/eth-sync.yml -[op-sync]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/op-sync.yml +[sync]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/sync.yml [stage]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/stage.yml [book]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/book.yml [deny]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/deny.yml From fa59bd512e877e4677890b5c154b80816c07480e Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 26 Oct 2024 03:18:34 +0400 Subject: [PATCH 169/970] fix: correctly detect first sync on headers stage (#12085) --- crates/stages/stages/src/stages/headers.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 199e015c2dc..49e687a96a1 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -2,7 +2,7 @@ use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; use reth_consensus::Consensus; -use reth_db::{tables, RawKey, RawTable, RawValue}; +use reth_db::{tables, transaction::DbTx, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, transaction::DbTxMut, @@ -155,11 +155,13 @@ where // If we only have the genesis block hash, then we are at first sync, and we can remove it, // add it to the collector and use tx.append on all 
hashes. - if let Some((hash, block_number)) = cursor_header_numbers.last()? { - if block_number.value()? == 0 { - self.hash_collector.insert(hash.key()?, 0)?; - cursor_header_numbers.delete_current()?; - first_sync = true; + if provider.tx_ref().entries::>()? == 1 { + if let Some((hash, block_number)) = cursor_header_numbers.last()? { + if block_number.value()? == 0 { + self.hash_collector.insert(hash.key()?, 0)?; + cursor_header_numbers.delete_current()?; + first_sync = true; + } } } From e0ad59834de2e77c0283f1a5c1fc2fd7718a9875 Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Sat, 26 Oct 2024 08:10:15 +0200 Subject: [PATCH 170/970] dev: add `requests` to `EthBuiltPayload` (#12072) --- Cargo.lock | 3 ++- crates/e2e-test-utils/src/engine_api.rs | 9 +------ .../ethereum/engine-primitives/src/payload.rs | 26 +++++++++++-------- crates/ethereum/payload/src/lib.rs | 5 ++-- crates/optimism/payload/src/payload.rs | 10 ++++++- crates/payload/builder/src/lib.rs | 2 +- crates/payload/builder/src/test_utils.rs | 1 + crates/payload/primitives/Cargo.toml | 1 + crates/payload/primitives/src/traits.rs | 4 +++ 9 files changed, 37 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a99803e1bc4..bf73d7eef39 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8373,6 +8373,7 @@ dependencies = [ name = "reth-payload-primitives" version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types", "async-trait", @@ -11956,4 +11957,4 @@ checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", -] \ No newline at end of file +] diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 729205211ff..cfa245e1de0 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -62,14 +62,7 @@ impl EngineApiTestContext::ExecutionPayloadEnvelopeV4 = payload.into(); 
EngineApiClient::::new_payload_v4( &self.engine_api_client, diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 50c4852545b..420352cf2b9 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -33,6 +33,8 @@ pub struct EthBuiltPayload { /// The blobs, proofs, and commitments in the block. If the block is pre-cancun, this will be /// empty. pub(crate) sidecars: Vec, + /// The requests of the payload + pub(crate) requests: Option, } // === impl BuiltPayload === @@ -46,8 +48,9 @@ impl EthBuiltPayload { block: SealedBlock, fees: U256, executed_block: Option, + requests: Option, ) -> Self { - Self { id, block, executed_block, fees, sidecars: Vec::new() } + Self { id, block, executed_block, fees, sidecars: Vec::new(), requests } } /// Returns the identifier of the payload. @@ -97,6 +100,10 @@ impl BuiltPayload for EthBuiltPayload { fn executed_block(&self) -> Option { self.executed_block.clone() } + + fn requests(&self) -> Option { + self.requests.clone() + } } impl BuiltPayload for &EthBuiltPayload { @@ -111,6 +118,10 @@ impl BuiltPayload for &EthBuiltPayload { fn executed_block(&self) -> Option { self.executed_block.clone() } + + fn requests(&self) -> Option { + self.requests.clone() + } } // V1 engine_getPayloadV1 response @@ -152,15 +163,8 @@ impl From for ExecutionPayloadEnvelopeV3 { impl From for ExecutionPayloadEnvelopeV4 { fn from(value: EthBuiltPayload) -> Self { - let EthBuiltPayload { block, fees, sidecars, executed_block, .. } = value; - - // if we have an executed block, we pop off the first set of requests from the execution - // outcome. the assumption here is that there will always only be one block in the execution - // outcome. 
- let execution_requests = executed_block - .and_then(|block| block.execution_outcome().requests.first().cloned()) - .map(Requests::take) - .unwrap_or_default(); + let EthBuiltPayload { block, fees, sidecars, requests, .. } = value; + Self { execution_payload: block_to_payload_v3(block), block_value: fees, @@ -174,7 +178,7 @@ impl From for ExecutionPayloadEnvelopeV4 { // should_override_builder: false, blobs_bundle: sidecars.into_iter().map(Into::into).collect::>().into(), - execution_requests, + execution_requests: requests.unwrap_or_default().take(), } } } diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index bb611441f03..f14c145889c 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -360,7 +360,7 @@ where db.take_bundle(), vec![receipts].into(), block_number, - vec![requests.unwrap_or_default()], + vec![requests.clone().unwrap_or_default()], ); let receipts_root = execution_outcome.receipts_root_slow(block_number).expect("Number is in range"); @@ -449,7 +449,8 @@ where trie: Arc::new(trie_output), }; - let mut payload = EthBuiltPayload::new(attributes.id, sealed_block, total_fees, Some(executed)); + let mut payload = + EthBuiltPayload::new(attributes.id, sealed_block, total_fees, Some(executed), requests); // extend the payload with the blob sidecars from the executed txs payload.extend_sidecars(blob_sidecars); diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index d5d1620e54b..98b0e41b0f5 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -2,7 +2,7 @@ //! 
Optimism builder support -use alloy_eips::eip2718::Decodable2718; +use alloy_eips::{eip2718::Decodable2718, eip7685::Requests}; use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; @@ -178,6 +178,10 @@ impl BuiltPayload for OptimismBuiltPayload { fn executed_block(&self) -> Option { self.executed_block.clone() } + + fn requests(&self) -> Option { + None + } } impl BuiltPayload for &OptimismBuiltPayload { @@ -192,6 +196,10 @@ impl BuiltPayload for &OptimismBuiltPayload { fn executed_block(&self) -> Option { self.executed_block.clone() } + + fn requests(&self) -> Option { + None + } } // V1 engine_getPayloadV1 response diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 0df15f5b0de..7af61ac4c68 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -65,7 +65,7 @@ //! }, //! ..Default::default() //! }; -//! let payload = EthBuiltPayload::new(self.attributes.id, payload.seal_slow(), U256::ZERO, None); +//! let payload = EthBuiltPayload::new(self.attributes.id, payload.seal_slow(), U256::ZERO, None, None); //! Ok(payload) //! } //! 
diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 6990dc9b174..676e60d912f 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -89,6 +89,7 @@ impl PayloadJob for TestPayloadJob { Block::default().seal_slow(), U256::ZERO, Some(ExecutedBlock::default()), + Some(Default::default()), )) } diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index 27418ccd899..ad8ce63a7e9 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -20,6 +20,7 @@ reth-transaction-pool.workspace = true reth-chain-state.workspace = true # alloy +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } op-alloy-rpc-types-engine.workspace = true diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index ce98fcad32e..df76149028a 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,4 +1,5 @@ use crate::{PayloadEvents, PayloadKind, PayloadTypes}; +use alloy_eips::eip7685::Requests; use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types::{ engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}, @@ -65,6 +66,9 @@ pub trait BuiltPayload: Send + Sync + std::fmt::Debug { fn executed_block(&self) -> Option { None } + + /// Returns the EIP-7865 requests for the payload if any. + fn requests(&self) -> Option; } /// This can be implemented by types that describe a currently running payload job. 
From cecdf611e948257a79e7c262501bdadbd174e44d Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Sat, 26 Oct 2024 08:11:27 +0200 Subject: [PATCH 171/970] feat: `map_chainspec` for `NodeConfig` (#12068) --- crates/exex/exex/src/dyn_context.rs | 22 +++------------------- crates/node/core/src/node_config.rs | 23 +++++++++++++++++++++++ 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs index 226f3a3feb9..b48a6ebc951 100644 --- a/crates/exex/exex/src/dyn_context.rs +++ b/crates/exex/exex/src/dyn_context.rs @@ -1,7 +1,7 @@ //! Mirrored version of [`ExExContext`](`crate::ExExContext`) //! without generic abstraction over [Node](`reth_node_api::FullNodeComponents`) -use std::{fmt::Debug, sync::Arc}; +use std::fmt::Debug; use reth_chainspec::{EthChainSpec, Head}; use reth_node_api::FullNodeComponents; @@ -55,24 +55,8 @@ where Node::Executor: Debug, { fn from(ctx: ExExContext) -> Self { - // convert `NodeConfig` with generic over chainspec into `NodeConfig` - let chain: Arc> = - Arc::new(Box::new(ctx.config.chain) as Box); - let config = NodeConfig { - chain, - datadir: ctx.config.datadir, - config: ctx.config.config, - metrics: ctx.config.metrics, - instance: ctx.config.instance, - network: ctx.config.network, - rpc: ctx.config.rpc, - txpool: ctx.config.txpool, - builder: ctx.config.builder, - debug: ctx.config.debug, - db: ctx.config.db, - dev: ctx.config.dev, - pruning: ctx.config.pruning, - }; + let config = + ctx.config.map_chainspec(|chainspec| Box::new(chainspec) as Box); let notifications = Box::new(ctx.notifications) as Box; Self { diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index a8799d80df1..80fb5152e7b 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -422,6 +422,29 @@ impl NodeConfig { Err(e) => Err(eyre!("Failed to load configuration: {e}")), } } + + 
/// Modifies the [`ChainSpec`] generic of the config using the provided closure. + pub fn map_chainspec(self, f: F) -> NodeConfig + where + F: FnOnce(Arc) -> C, + { + let chain = Arc::new(f(self.chain)); + NodeConfig { + chain, + datadir: self.datadir, + config: self.config, + metrics: self.metrics, + instance: self.instance, + network: self.network, + rpc: self.rpc, + txpool: self.txpool, + builder: self.builder, + debug: self.debug, + db: self.db, + dev: self.dev, + pruning: self.pruning, + } + } } impl Default for NodeConfig { From ac329bfce1d6ebf90e06fc977ff43e40f20cf084 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Sat, 26 Oct 2024 03:44:47 -0400 Subject: [PATCH 172/970] perf: improve debug_traceBlock performance (#11979) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/debug.rs | 78 +++++++++++++++++++++++++------------ 1 file changed, 53 insertions(+), 25 deletions(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 2d9d6f7822e..5a20bee975f 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -112,6 +112,7 @@ where let mut results = Vec::with_capacity(transactions.len()); let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut transactions = transactions.into_iter().enumerate().peekable(); + let mut inspector = None; while let Some((index, tx)) = transactions.next() { let tx_hash = tx.hash; @@ -124,7 +125,7 @@ where handler_cfg: cfg.handler_cfg, }; let (result, state_changes) = this.trace_transaction( - opts.clone(), + &opts, env, &mut db, Some(TransactionContext { @@ -132,8 +133,11 @@ where tx_hash: Some(tx_hash), tx_index: Some(index), }), + &mut inspector, )?; + inspector = inspector.map(|insp| insp.fused()); + results.push(TraceResult::Success { result, tx_hash: Some(tx_hash) }); if transactions.peek().is_some() { // need to apply the state changes of this transaction before executing the @@ -295,7 +299,7 @@ where }; 
this.trace_transaction( - opts, + &opts, env, &mut db, Some(TransactionContext { @@ -303,6 +307,7 @@ where tx_index: Some(index), tx_hash: Some(tx.hash), }), + &mut None, ) .map(|(trace, _)| trace) }) @@ -573,6 +578,7 @@ where let Bundle { transactions, block_override } = bundle; let block_overrides = block_override.map(Box::new); + let mut inspector = None; let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { @@ -588,8 +594,15 @@ where overrides, )?; - let (trace, state) = - this.trace_transaction(tracing_options.clone(), env, &mut db, None)?; + let (trace, state) = this.trace_transaction( + &tracing_options, + env, + &mut db, + None, + &mut inspector, + )?; + + inspector = inspector.map(|insp| insp.fused()); // If there is more transactions, commit the database // If there is no transactions, but more bundles, commit to the database too @@ -692,6 +705,13 @@ where /// Executes the configured transaction with the environment on the given database. /// + /// It optionally takes fused inspector ([`TracingInspector::fused`]) to avoid re-creating the + /// inspector for each transaction. This is useful when tracing multiple transactions in a + /// block. This is only useful for block tracing which uses the same tracer for all transactions + /// in the block. + /// + /// Caution: If the inspector is provided then `opts.tracer_config` is ignored. + /// /// Returns the trace frame and the state that got updated after executing the transaction. /// /// Note: this does not apply any state overrides if they're configured in the `opts`. @@ -699,10 +719,11 @@ where /// Caution: this is blocking and should be performed on a blocking task. 
fn trace_transaction( &self, - opts: GethDebugTracingOptions, + opts: &GethDebugTracingOptions, env: EnvWithHandlerCfg, db: &mut StateCacheDb<'_>, transaction_context: Option, + fused_inspector: &mut Option, ) -> Result<(GethTrace, revm_primitives::EvmState), Eth::Error> { let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts; @@ -716,35 +737,42 @@ where } GethDebugBuiltInTracerType::CallTracer => { let call_config = tracer_config + .clone() .into_call_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_call_config(&call_config), - ); + let mut inspector = fused_inspector.get_or_insert_with(|| { + TracingInspector::new(TracingInspectorConfig::from_geth_call_config( + &call_config, + )) + }); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; + inspector.set_transaction_gas_limit(env.tx.gas_limit); + let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() + .geth_builder() .geth_call_traces(call_config, res.result.gas_used()); return Ok((frame.into(), res.state)) } GethDebugBuiltInTracerType::PreStateTracer => { let prestate_config = tracer_config + .clone() .into_pre_state_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_prestate_config(&prestate_config), - ); + let mut inspector = fused_inspector.get_or_insert_with(|| { + TracingInspector::new( + TracingInspectorConfig::from_geth_prestate_config(&prestate_config), + ) + }); let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; + inspector.set_transaction_gas_limit(env.tx.gas_limit); let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() + .geth_builder() .geth_prestate_traces(&res, &prestate_config, db) .map_err(Eth::Error::from_eth_err)?; @@ -755,6 +783,7 @@ where } GethDebugBuiltInTracerType::MuxTracer => { let 
mux_config = tracer_config + .clone() .into_mux_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; @@ -769,6 +798,7 @@ where } GethDebugBuiltInTracerType::FlatCallTracer => { let flat_call_config = tracer_config + .clone() .into_flat_call_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; @@ -799,10 +829,10 @@ where } #[cfg(feature = "js-tracer")] GethDebugTracerType::JsTracer(code) => { - let config = tracer_config.into_json(); + let config = tracer_config.clone().into_json(); let mut inspector = revm_inspectors::tracing::js::JsInspector::with_transaction_context( - code, + code.clone(), config, transaction_context.unwrap_or_default(), ) @@ -818,17 +848,15 @@ where } // default structlog tracer - let inspector_config = TracingInspectorConfig::from_geth_config(&config); - - let mut inspector = TracingInspector::new(inspector_config); - + let mut inspector = fused_inspector.get_or_insert_with(|| { + let inspector_config = TracingInspectorConfig::from_geth_config(config); + TracingInspector::new(inspector_config) + }); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; let gas_used = res.result.gas_used(); let return_value = res.result.into_output().unwrap_or_default(); - let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() - .geth_traces(gas_used, return_value, config); + inspector.set_transaction_gas_limit(env.tx.gas_limit); + let frame = inspector.geth_builder().geth_traces(gas_used, return_value, *config); Ok((frame.into(), res.state)) } From 44e4c47803f26585872ddf0f25848092a4f29c73 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 26 Oct 2024 18:55:26 +0800 Subject: [PATCH 173/970] chore(sdk): add helper trait to node API to simplify type definition (#10616) --- Cargo.lock | 5 +- crates/consensus/debug-client/Cargo.toml | 2 +- crates/e2e-test-utils/src/rpc.rs | 7 ++- crates/ethereum/node/tests/e2e/dev.rs | 2 +- crates/node/api/Cargo.toml | 1 - crates/node/api/src/lib.rs | 2 - 
crates/node/builder/src/launch/mod.rs | 17 ++++--- crates/node/builder/src/node.rs | 2 +- crates/node/builder/src/rpc.rs | 6 ++- crates/node/core/Cargo.toml | 4 +- crates/node/core/src/lib.rs | 9 ---- crates/optimism/rpc/src/eth/mod.rs | 40 ++++++++-------- crates/rpc/rpc-eth-api/Cargo.toml | 1 + crates/rpc/rpc-eth-api/src/lib.rs | 2 + crates/rpc/rpc-eth-api/src/node.rs | 58 ++++++++++++++++++++++++ crates/rpc/rpc/Cargo.toml | 1 - crates/rpc/rpc/src/eth/filter.rs | 5 +- crates/rpc/rpc/src/eth/mod.rs | 2 +- 18 files changed, 105 insertions(+), 61 deletions(-) create mode 100644 crates/rpc/rpc-eth-api/src/node.rs diff --git a/Cargo.lock b/Cargo.lock index bf73d7eef39..f4dc7dae4e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7866,7 +7866,6 @@ dependencies = [ "reth-payload-primitives", "reth-primitives", "reth-provider", - "reth-rpc-eth-api", "reth-tasks", "reth-transaction-pool", ] @@ -7966,8 +7965,6 @@ dependencies = [ "reth-network-peers", "reth-primitives", "reth-prune-types", - "reth-rpc-api", - "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-rpc-server-types", "reth-rpc-types-compat", @@ -8629,7 +8626,6 @@ dependencies = [ "reth-network-api", "reth-network-peers", "reth-network-types", - "reth-node-api", "reth-primitives", "reth-provider", "reth-revm", @@ -8812,6 +8808,7 @@ dependencies = [ "reth-evm", "reth-execution-types", "reth-network-api", + "reth-node-api", "reth-primitives", "reth-provider", "reth-revm", diff --git a/crates/consensus/debug-client/Cargo.toml b/crates/consensus/debug-client/Cargo.toml index c37beef1074..e73125a80bd 100644 --- a/crates/consensus/debug-client/Cargo.toml +++ b/crates/consensus/debug-client/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth reth-node-api.workspace = true -reth-rpc-api.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-builder.workspace = true reth-tracing.workspace = true diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 
b8cbe4d77ad..7b7dabdf240 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -4,12 +4,15 @@ use alloy_primitives::{Bytes, B256}; use reth::{ builder::{rpc::RpcRegistry, FullNodeComponents}, rpc::api::{ - eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, + eth::{ + helpers::{EthApiSpec, EthTransactions, TraceExt}, + EthApiTypes, + }, DebugApiServer, }, }; use reth_chainspec::EthereumHardforks; -use reth_node_builder::{EthApiTypes, NodeTypes}; +use reth_node_builder::NodeTypes; #[allow(missing_debug_implementations)] pub struct RpcTestContext { diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index ead438b5a67..f0fcaf64524 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -4,7 +4,7 @@ use crate::utils::eth_payload_attributes; use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; -use reth::{args::DevArgs, core::rpc::eth::helpers::EthTransactions}; +use reth::{args::DevArgs, rpc::api::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; use reth_e2e_test_utils::setup; use reth_node_api::FullNodeComponents; diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index c2c3eb46326..6b263d6c532 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -21,7 +21,6 @@ reth-transaction-pool.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true -reth-rpc-eth-api.workspace = true reth-network-api.workspace = true reth-node-types.workspace = true reth-primitives.workspace = true diff --git a/crates/node/api/src/lib.rs b/crates/node/api/src/lib.rs index 7692ed6f2ca..099cf82b5fe 100644 --- a/crates/node/api/src/lib.rs +++ b/crates/node/api/src/lib.rs @@ -25,5 +25,3 @@ pub use node::*; // re-export for convenience pub use reth_node_types::*; pub use reth_provider::FullProvider; - -pub use 
reth_rpc_eth_api::EthApiTypes; diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 36aa55541e0..50438e79d2b 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -23,15 +23,14 @@ use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider}; -use reth_node_api::{ - AddOnsContext, FullNodeComponents, FullNodeTypes, NodeTypesWithDB, NodeTypesWithEngine, -}; +use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_provider::providers::BlockchainProvider; +use reth_rpc::eth::RpcNodeCore; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; @@ -47,14 +46,14 @@ use crate::{ AddOns, NodeBuilderWithComponents, NodeHandle, }; -/// Alias for [`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`FullNodeComponents`]. +/// Alias for [`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`RpcNodeCore`]. pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< - ::Provider, - ::Pool, - ::Evm, - ::Network, + ::Provider, + ::Pool, + ::Evm, + ::Network, TaskExecutor, - ::Provider, + ::Provider, >; /// A general purpose trait that launches a new node of any kind. 
diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 3e3d5b696c3..3b2f467d61c 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -11,10 +11,10 @@ use reth_node_api::{EngineTypes, FullNodeComponents}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, - rpc::api::EngineApiClient, }; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::ChainSpecProvider; +use reth_rpc_api::EngineApiClient; use reth_rpc_builder::{auth::AuthServerHandle, RpcServerHandle}; use reth_tasks::TaskExecutor; diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 18293118dc6..4c1ea32d045 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -14,12 +14,14 @@ use reth_node_api::{ }; use reth_node_core::{ node_config::NodeConfig, - rpc::eth::{EthApiTypes, FullEthApiServer}, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::providers::ProviderNodeTypes; -use reth_rpc::EthApi; +use reth_rpc::{ + eth::{EthApiTypes, FullEthApiServer}, + EthApi, +}; use reth_rpc_api::eth::helpers::AddDevSigners; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 0c9672d1777..1c6c9d98c80 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -23,8 +23,6 @@ reth-network-p2p.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-rpc-types-compat.workspace = true -reth-rpc-api = { workspace = true, features = ["client"] } -reth-rpc-eth-api = { workspace = true, features = ["client"] } reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true @@ -38,7 +36,7 @@ reth-stages-types.workspace = true # ethereum alloy-primitives.workspace = true -alloy-rpc-types-engine = { workspace 
= true, features = ["jwt"] } +alloy-rpc-types-engine = { workspace = true, features = ["std", "jwt"] } alloy-consensus.workspace = true alloy-eips.workspace = true diff --git a/crates/node/core/src/lib.rs b/crates/node/core/src/lib.rs index 6af822e22ee..a69a255a3c6 100644 --- a/crates/node/core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -22,15 +22,6 @@ pub mod primitives { /// Re-export of `reth_rpc_*` crates. pub mod rpc { - /// Re-exported from `reth_rpc_api`. - pub mod api { - pub use reth_rpc_api::*; - } - /// Re-exported from `reth_rpc::eth`. - pub mod eth { - pub use reth_rpc_eth_api::*; - } - /// Re-exported from `reth_rpc::rpc`. pub mod result { pub use reth_rpc_server_types::result::*; diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 04774a4651c..a1a9f6e8f04 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -17,12 +17,12 @@ use op_alloy_network::Optimism; use reth_chainspec::EthereumHardforks; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; +use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_node_builder::EthApiBuilderCtx; use reth_primitives::Header; use reth_provider::{ - BlockIdReader, BlockNumReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, - StageCheckpointReader, StateProviderFactory, + BlockIdReader, BlockNumReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, + HeaderProvider, StageCheckpointReader, StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -30,7 +30,7 @@ use reth_rpc_eth_api::{ AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState, SpawnBlocking, Trace, }, - EthApiTypes, + EthApiTypes, RpcNodeCore, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_tasks::{ @@ -43,10 +43,10 @@ use crate::{OpEthApiError, 
SequencerClient}; /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend = EthApiInner< - ::Provider, - ::Pool, - ::Network, - ::Evm, + ::Provider, + ::Pool, + ::Network, + ::Evm, >; /// OP-Reth `Eth` API implementation. @@ -59,8 +59,8 @@ pub type EthApiNodeBackend = EthApiInner< /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. -#[derive(Deref)] -pub struct OpEthApi { +#[derive(Deref, Clone)] +pub struct OpEthApi { /// Gateway to node's core components. #[deref] inner: Arc>, @@ -69,7 +69,12 @@ pub struct OpEthApi { sequencer_client: Option, } -impl OpEthApi { +impl OpEthApi +where + N: RpcNodeCore< + Provider: BlockReaderIdExt + ChainSpecProvider + CanonStateSubscriptions + Clone + 'static, + >, +{ /// Creates a new instance for given context. pub fn new(ctx: &EthApiBuilderCtx, sequencer_http: Option) -> Self { let blocking_task_pool = @@ -98,7 +103,7 @@ impl OpEthApi { impl EthApiTypes for OpEthApi where Self: Send + Sync, - N: FullNodeComponents, + N: RpcNodeCore, { type Error = OpEthApiError; type NetworkTypes = Optimism; @@ -248,17 +253,8 @@ where } } -impl fmt::Debug for OpEthApi { +impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() } } - -impl Clone for OpEthApi -where - N: FullNodeComponents, -{ - fn clone(&self) -> Self { - Self { inner: self.inner.clone(), sequencer_client: self.sequencer_client.clone() } - } -} diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 9d0f6cfd83d..edfd57b201d 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -30,6 +30,7 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-api.workspace = true reth-trie.workspace = true +reth-node-api.workspace = true # 
ethereum alloy-eips.workspace = true diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index 849c8e2e4c8..bc46d526c6f 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -16,6 +16,7 @@ pub mod bundle; pub mod core; pub mod filter; pub mod helpers; +pub mod node; pub mod pubsub; pub mod types; @@ -25,6 +26,7 @@ pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; pub use core::{EthApiServer, FullEthApiServer}; pub use filter::EthFilterApiServer; pub use helpers::error::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; +pub use node::RpcNodeCore; pub use pubsub::EthPubSubApiServer; pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs new file mode 100644 index 00000000000..8488677e32f --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -0,0 +1,58 @@ +//! Helper trait for interfacing with [`FullNodeComponents`]. + +use reth_node_api::FullNodeComponents; + +/// Helper trait to relax trait bounds on [`FullNodeComponents`]. +/// +/// Helpful when defining types that would otherwise have a generic `N: FullNodeComponents`. Using +/// `N: RpcNodeCore` instead, allows access to all the associated types on [`FullNodeComponents`] +/// that are used in RPC, but with more flexibility since they have no trait bounds (asides auto +/// traits). +pub trait RpcNodeCore: Clone { + /// The provider type used to interact with the node. + type Provider: Send + Sync + Clone + Unpin; + /// The transaction pool of the node. + type Pool: Send + Sync + Clone + Unpin; + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. + type Evm: Send + Sync + Clone + Unpin; + /// Network API. + type Network: Send + Sync + Clone; + + /// Returns the transaction pool of the node. + fn pool(&self) -> &Self::Pool; + + /// Returns the node's evm config. 
+ fn evm_config(&self) -> &Self::Evm; + + /// Returns the handle to the network + fn network(&self) -> &Self::Network; + + /// Returns the provider of the node. + fn provider(&self) -> &Self::Provider; +} + +impl RpcNodeCore for T +where + T: FullNodeComponents, +{ + type Provider = T::Provider; + type Pool = T::Pool; + type Network = ::Network; + type Evm = ::Evm; + + fn pool(&self) -> &Self::Pool { + FullNodeComponents::pool(self) + } + + fn evm_config(&self) -> &Self::Evm { + FullNodeComponents::evm_config(self) + } + + fn network(&self) -> &Self::Network { + FullNodeComponents::network(self) + } + + fn provider(&self) -> &Self::Provider { + FullNodeComponents::provider(self) + } +} diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index fe150e36eed..dab86ac2587 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -31,7 +31,6 @@ reth-network-peers = { workspace = true, features = ["secp256k1"] } reth-evm.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true -reth-node-api.workspace = true reth-network-types.workspace = true reth-trie.workspace = true diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 24058da1734..5ef224609c5 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -17,10 +17,11 @@ use alloy_rpc_types::{ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; -use reth_node_api::EthApiTypes; use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSignedEcRecovered}; use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider, ProviderError}; -use reth_rpc_eth_api::{EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat}; +use reth_rpc_eth_api::{ + EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat, +}; use reth_rpc_eth_types::{ logs_utils::{self, append_matching_block_logs, 
ProviderOrBlock}, EthApiError, EthFilterConfig, EthStateCache, EthSubscriptionIdProvider, diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 99919110da7..4d1833add3e 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -15,4 +15,4 @@ pub use pubsub::EthPubSub; pub use helpers::{signer::DevSigner, types::EthTxBuilder}; -pub use reth_rpc_eth_api::EthApiServer; +pub use reth_rpc_eth_api::{EthApiServer, EthApiTypes, FullEthApiServer, RpcNodeCore}; From a06c3af8320caaa95adeffbca38b9ed945019557 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 26 Oct 2024 21:03:22 +0800 Subject: [PATCH 174/970] chore(rpc): Remove provider and network trait methods from `EthApiSpec` (#12050) --- crates/optimism/rpc/src/eth/mod.rs | 41 ++++++++++++++++------ crates/rpc/rpc-builder/src/lib.rs | 5 +-- crates/rpc/rpc-eth-api/src/helpers/spec.rs | 20 +++++------ crates/rpc/rpc/src/eth/core.rs | 31 +++++++++++++++- crates/rpc/rpc/src/eth/helpers/spec.rs | 14 ++------ 5 files changed, 75 insertions(+), 36 deletions(-) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index a1a9f6e8f04..ccff477892f 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -114,24 +114,43 @@ where } } -impl EthApiSpec for OpEthApi +impl RpcNodeCore for OpEthApi where - Self: Send + Sync, - N: FullNodeComponents>, + Self: Clone, + N: RpcNodeCore, { - #[inline] - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader - { - self.inner.provider() + type Provider = N::Provider; + type Pool = N::Pool; + type Network = ::Network; + type Evm = ::Evm; + + fn pool(&self) -> &Self::Pool { + self.inner.pool() } - #[inline] - fn network(&self) -> impl NetworkInfo { + fn evm_config(&self) -> &Self::Evm { + self.inner.evm_config() + } + + fn network(&self) -> &Self::Network { self.inner.network() } + fn provider(&self) -> &Self::Provider { + 
self.inner.provider() + } +} + +impl EthApiSpec for OpEthApi +where + Self: Send + Sync, + N: RpcNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: NetworkInfo, + >, +{ #[inline] fn starting_block(&self) -> U256 { self.inner.starting_block() diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ceafe206531..fc98cd6ff4e 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -36,7 +36,7 @@ //! block_executor: BlockExecutor, //! ) where //! Provider: FullRpcProvider + AccountReader + ChangeSetReader, -//! Pool: TransactionPool + 'static, +//! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm

, @@ -85,6 +85,7 @@ //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::TransactionPool; //! use tokio::try_join; +//! //! pub async fn launch< //! Provider, //! Pool, @@ -104,7 +105,7 @@ //! block_executor: BlockExecutor, //! ) where //! Provider: FullRpcProvider + AccountReader + ChangeSetReader, -//! Pool: TransactionPool + 'static, +//! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs index 5976cf29c07..5aa0509e8bb 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/spec.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs @@ -8,21 +8,21 @@ use reth_errors::{RethError, RethResult}; use reth_network_api::NetworkInfo; use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader}; -use super::EthSigner; +use crate::{helpers::EthSigner, RpcNodeCore}; /// `Eth` API trait. /// /// Defines core functionality of the `eth` API implementation. #[auto_impl::auto_impl(&, Arc)] -pub trait EthApiSpec: Send + Sync { - /// Returns a handle for reading data from disk. - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader; - - /// Returns a handle for reading network data summary. - fn network(&self) -> impl NetworkInfo; - +pub trait EthApiSpec: + RpcNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: NetworkInfo, + > + Send + + Sync +{ /// Returns the block node is started on. 
fn starting_block(&self) -> U256; diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 21787873e96..3fca76e8b0c 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -10,7 +10,7 @@ use reth_primitives::BlockNumberOrTag; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, SpawnBlocking}, - EthApiTypes, + EthApiTypes, RpcNodeCore, }; use reth_rpc_eth_types::{ EthApiBuilderCtx, EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, @@ -140,6 +140,35 @@ where } } +impl RpcNodeCore for EthApi +where + Provider: Send + Sync + Clone + Unpin, + Pool: Send + Sync + Clone + Unpin, + Network: Send + Sync + Clone, + EvmConfig: Send + Sync + Clone + Unpin, +{ + type Provider = Provider; + type Pool = Pool; + type Network = Network; + type Evm = EvmConfig; + + fn pool(&self) -> &Self::Pool { + self.inner.pool() + } + + fn evm_config(&self) -> &Self::Evm { + self.inner.evm_config() + } + + fn network(&self) -> &Self::Network { + self.inner.network() + } + + fn provider(&self) -> &Self::Provider { + self.inner.provider() + } +} + impl std::fmt::Debug for EthApi { diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index 92445bf5ed1..c5c8d54c64b 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -2,13 +2,14 @@ use alloy_primitives::U256; use reth_chainspec::EthereumHardforks; use reth_network_api::NetworkInfo; use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader}; -use reth_rpc_eth_api::helpers::EthApiSpec; +use reth_rpc_eth_api::{helpers::EthApiSpec, RpcNodeCore}; use reth_transaction_pool::TransactionPool; use crate::EthApi; impl EthApiSpec for EthApi where + Self: RpcNodeCore, Pool: TransactionPool + 'static, Provider: ChainSpecProvider + BlockNumReader @@ -17,17 +18,6 @@ where Network: NetworkInfo + 'static, EvmConfig: 
Send + Sync, { - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader - { - self.inner.provider() - } - - fn network(&self) -> impl NetworkInfo { - self.inner.network() - } - fn starting_block(&self) -> U256 { self.inner.starting_block() } From 019f347385741d75638effee1fb0b640ae4a363b Mon Sep 17 00:00:00 2001 From: Yu Zeng Date: Sat, 26 Oct 2024 23:04:17 +0800 Subject: [PATCH 175/970] chore: move optimism execution types test to optimism crate (#12026) --- .../execution-types/src/execution_outcome.rs | 56 +-- crates/optimism/evm/src/lib.rs | 414 +++++++++++++++++- 2 files changed, 423 insertions(+), 47 deletions(-) diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 0fde01547f7..026e6b37c42 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -168,7 +168,7 @@ impl ExecutionOutcome { } /// Transform block number to the index of block. 
- fn block_number_to_index(&self, block_number: BlockNumber) -> Option { + pub fn block_number_to_index(&self, block_number: BlockNumber) -> Option { if self.first_block > block_number { return None } @@ -366,12 +366,15 @@ impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { #[cfg(test)] mod tests { use super::*; - use alloy_eips::eip7685::Requests; - use alloy_primitives::{bytes, Address, LogData, B256}; - use reth_primitives::{Receipts, TxType}; - use std::collections::HashMap; + #[cfg(not(feature = "optimism"))] + use alloy_primitives::bytes; + use alloy_primitives::{Address, B256}; + use reth_primitives::Receipts; + #[cfg(not(feature = "optimism"))] + use reth_primitives::{LogData, TxType}; #[test] + #[cfg(not(feature = "optimism"))] fn test_initialisation() { // Create a new BundleState object with initial data let bundle = BundleState::new( @@ -387,10 +390,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -444,6 +443,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_block_number_to_index() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { @@ -452,10 +452,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -482,6 +478,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_get_logs() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { @@ -490,10 +487,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -517,6 +510,7 @@ mod tests { } #[test] + 
#[cfg(not(feature = "optimism"))] fn test_receipts_by_block() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { @@ -525,10 +519,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -555,15 +545,12 @@ mod tests { cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })] ); } #[test] + #[cfg(not(feature = "optimism"))] fn test_receipts_len() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { @@ -572,10 +559,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -616,6 +599,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_revert_to() { // Create a random receipt object let receipt = Receipt { @@ -623,10 +607,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object with a vector of receipt vectors @@ -668,6 +648,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_extend_execution_outcome() { // Create a Receipt object with specific attributes. let receipt = Receipt { @@ -675,10 +656,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object containing the receipt. 
@@ -715,6 +692,7 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_split_at_execution_outcome() { // Create a random receipt object let receipt = Receipt { @@ -722,10 +700,6 @@ mod tests { cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object with a vector of receipt vectors diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 60aa9f7db08..bc46f3ea9c2 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -206,23 +206,30 @@ impl ConfigureEvm for OptimismEvmConfig { mod tests { use super::*; use alloy_consensus::constants::KECCAK_EMPTY; + use alloy_eips::eip7685::Requests; use alloy_genesis::Genesis; - use alloy_primitives::{B256, U256}; + use alloy_primitives::{bytes, Address, LogData, B256, U256}; use reth_chainspec::ChainSpec; use reth_evm::execute::ProviderError; - use reth_execution_types::{Chain, ExecutionOutcome}; + use reth_execution_types::{ + AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit, + }; use reth_optimism_chainspec::BASE_MAINNET; use reth_primitives::{ - revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Header, Receipt, Receipts, SealedBlockWithSenders, TxType, + revm_primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, + Account, Header, Log, Receipt, Receipts, SealedBlockWithSenders, TxType, }; + use reth_revm::{ - db::{CacheDB, EmptyDBTyped}, + db::{BundleState, CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, JournaledState, }; use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; - use std::{collections::HashSet, sync::Arc}; + use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + }; fn test_evm_config() -> OptimismEvmConfig { OptimismEvmConfig::new(BASE_MAINNET.clone()) @@ -620,4 +627,399 @@ mod tests { // Assert that the execution outcome at the tip block 
contains the whole execution outcome assert_eq!(chain.execution_outcome_at_block(11), Some(execution_outcome)); } + + #[test] + fn test_initialisation() { + // Create a new BundleState object with initial data + let bundle = BundleState::new( + vec![(Address::new([2; 20]), None, Some(AccountInfo::default()), HashMap::default())], + vec![vec![(Address::new([2; 20]), None, vec![])]], + vec![], + ); + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Create a Requests object with a vector of requests + let requests = vec![Requests::new(vec![bytes!("dead"), bytes!("beef"), bytes!("beebee")])]; + + // Define the first block number + let first_block = 123; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: bundle.clone(), + receipts: receipts.clone(), + requests: requests.clone(), + first_block, + }; + + // Assert that creating a new ExecutionOutcome using the constructor matches exec_res + assert_eq!( + ExecutionOutcome::new(bundle, receipts.clone(), first_block, requests.clone()), + exec_res + ); + + // Create a BundleStateInit object and insert initial data + let mut state_init: BundleStateInit = HashMap::default(); + state_init + .insert(Address::new([2; 20]), (None, Some(Account::default()), HashMap::default())); + + // Create a HashMap for account reverts and insert initial data + let mut revert_inner: HashMap = HashMap::default(); + revert_inner.insert(Address::new([2; 20]), (None, vec![])); + + // Create a RevertsInit object and insert the revert_inner data + let mut revert_init: RevertsInit = HashMap::default(); + revert_init.insert(123, revert_inner); + + // Assert that creating a new ExecutionOutcome using the 
new_init method matches + // exec_res + assert_eq!( + ExecutionOutcome::new_init( + state_init, + revert_init, + vec![], + receipts, + first_block, + requests, + ), + exec_res + ); + } + + #[test] + fn test_block_number_to_index() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Define the first block number + let first_block = 123; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests: vec![], + first_block, + }; + + // Test before the first block + assert_eq!(exec_res.block_number_to_index(12), None); + + // Test after after the first block but index larger than receipts length + assert_eq!(exec_res.block_number_to_index(133), None); + + // Test after the first block + assert_eq!(exec_res.block_number_to_index(123), Some(0)); + } + + #[test] + fn test_get_logs() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Define the first block number + let first_block = 123; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests: vec![], + first_block, + }; + + // Get logs for block number 123 + let logs: Vec<&Log> = exec_res.logs(123).unwrap().collect(); + + // Assert that the logs match the expected logs + assert_eq!(logs, vec![&Log::::default()]); + } + + #[test] + fn 
test_receipts_by_block() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Define the first block number + let first_block = 123; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), // Default value for bundle + receipts, // Include the created receipts + requests: vec![], // Empty vector for requests + first_block, // Set the first block number + }; + + // Get receipts for block number 123 and convert the result into a vector + let receipts_by_block: Vec<_> = exec_res.receipts_by_block(123).iter().collect(); + + // Assert that the receipts for block number 123 match the expected receipts + assert_eq!( + receipts_by_block, + vec![&Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })] + ); + } + + #[test] + fn test_receipts_len() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Create an empty Receipts object + let receipts_empty = Receipts { receipt_vec: vec![] }; + + // Define the first block number + let first_block = 123; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), // Default value for bundle + receipts, // Include the created receipts + requests: 
vec![], // Empty vector for requests + first_block, // Set the first block number + }; + + // Assert that the length of receipts in exec_res is 1 + assert_eq!(exec_res.len(), 1); + + // Assert that exec_res is not empty + assert!(!exec_res.is_empty()); + + // Create a ExecutionOutcome object with an empty Receipts object + let exec_res_empty_receipts = ExecutionOutcome { + bundle: Default::default(), // Default value for bundle + receipts: receipts_empty, // Include the empty receipts + requests: vec![], // Empty vector for requests + first_block, // Set the first block number + }; + + // Assert that the length of receipts in exec_res_empty_receipts is 0 + assert_eq!(exec_res_empty_receipts.len(), 0); + + // Assert that exec_res_empty_receipts is empty + assert!(exec_res_empty_receipts.is_empty()); + } + + #[test] + fn test_revert_to() { + // Create a random receipt object + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt.clone())]], + }; + + // Define the first block number + let first_block = 123; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = + vec![Requests::new(vec![request.clone()]), Requests::new(vec![request.clone()])]; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let mut exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Assert that the revert_to method returns true when reverting to the initial block number. + assert!(exec_res.revert_to(123)); + + // Assert that the receipts are properly cut after reverting to the initial block number. 
+ assert_eq!(exec_res.receipts, Receipts { receipt_vec: vec![vec![Some(receipt)]] }); + + // Assert that the requests are properly cut after reverting to the initial block number. + assert_eq!(exec_res.requests, vec![Requests::new(vec![request])]); + + // Assert that the revert_to method returns false when attempting to revert to a block + // number greater than the initial block number. + assert!(!exec_res.revert_to(133)); + + // Assert that the revert_to method returns false when attempting to revert to a block + // number less than the initial block number. + assert!(!exec_res.revert_to(10)); + } + + #[test] + fn test_extend_execution_outcome() { + // Create a Receipt object with specific attributes. + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object containing the receipt. + let receipts = Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = vec![Requests::new(vec![request.clone()])]; + + // Define the initial block number. + let first_block = 123; + + // Create an ExecutionOutcome object. + let mut exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Extend the ExecutionOutcome object by itself. + exec_res.extend(exec_res.clone()); + + // Assert the extended ExecutionOutcome matches the expected outcome. 
+ assert_eq!( + exec_res, + ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]] + }, + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], + first_block: 123, + } + ); + } + + #[test] + fn test_split_at_execution_outcome() { + // Create a random receipt object + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![ + vec![Some(receipt.clone())], + vec![Some(receipt.clone())], + vec![Some(receipt.clone())], + ], + }; + + // Define the first block number + let first_block = 123; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = vec![ + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + ]; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Split the ExecutionOutcome at block number 124 + let result = exec_res.clone().split_at(124); + + // Define the expected lower ExecutionOutcome after splitting + let lower_execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }, + requests: vec![Requests::new(vec![request.clone()])], + first_block, + }; + + // Define the expected higher ExecutionOutcome after splitting + let higher_execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]], + }, + requests: 
vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], + first_block: 124, + }; + + // Assert that the split result matches the expected lower and higher outcomes + assert_eq!(result.0, Some(lower_execution_outcome)); + assert_eq!(result.1, higher_execution_outcome); + + // Assert that splitting at the first block number returns None for the lower outcome + assert_eq!(exec_res.clone().split_at(123), (None, exec_res)); + } } From d5f5c0f11226879b3c7e46cf7226506871d689da Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 26 Oct 2024 23:40:46 +0800 Subject: [PATCH 176/970] chore(rpc): set `RpcNodeCore` as supertrait for `LoadState` (#12094) --- crates/optimism/rpc/src/eth/mod.rs | 19 +++-------- crates/rpc/rpc-eth-api/src/helpers/call.rs | 8 ++--- crates/rpc/rpc-eth-api/src/helpers/spec.rs | 11 +++--- crates/rpc/rpc-eth-api/src/helpers/state.rs | 34 ++++++++----------- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 8 ++--- .../rpc-eth-api/src/helpers/transaction.rs | 6 ++-- crates/rpc/rpc-eth-api/src/node.rs | 2 +- crates/rpc/rpc/src/debug.rs | 6 ++-- crates/rpc/rpc/src/eth/helpers/state.rs | 24 +++++-------- 9 files changed, 48 insertions(+), 70 deletions(-) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index ccff477892f..bc1692dff4e 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -143,7 +143,6 @@ where impl EthApiSpec for OpEthApi where - Self: Send + Sync, N: RpcNodeCore< Provider: ChainSpecProvider + BlockNumReader @@ -213,25 +212,15 @@ where impl LoadState for OpEthApi where - Self: Send + Sync + Clone, - N: FullNodeComponents>, + N: RpcNodeCore< + Provider: StateProviderFactory + ChainSpecProvider, + Pool: TransactionPool, + >, { - #[inline] - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider { - self.inner.provider() - } - #[inline] fn cache(&self) -> &EthStateCache { self.inner.cache() } - - #[inline] - fn pool(&self) -> impl 
TransactionPool { - self.inner.pool() - } } impl EthState for OpEthApi diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 1510233c505..89ae1c8ac96 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -486,7 +486,7 @@ pub trait Call: LoadState + SpawnBlocking { DB: Database, EthApiError: From, { - let mut evm = self.evm_config().evm_with_env(db, env); + let mut evm = Call::evm_config(self).evm_with_env(db, env); let res = evm.transact().map_err(Self::Error::from_evm_err)?; let (_, env) = evm.into_db_and_env_with_handler_cfg(); Ok((res, env)) @@ -504,7 +504,7 @@ pub trait Call: LoadState + SpawnBlocking { DB: Database, EthApiError: From, { - let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector); + let mut evm = Call::evm_config(self).evm_with_env_and_inspector(db, env, inspector); let res = evm.transact().map_err(Self::Error::from_evm_err)?; let (_, env) = evm.into_db_and_env_with_handler_cfg(); Ok((res, env)) @@ -669,7 +669,7 @@ pub trait Call: LoadState + SpawnBlocking { { let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); - let mut evm = self.evm_config().evm_with_env(db, env); + let mut evm = Call::evm_config(self).evm_with_env(db, env); let mut index = 0; for (sender, tx) in transactions { if tx.hash() == target_tx_hash { @@ -677,7 +677,7 @@ pub trait Call: LoadState + SpawnBlocking { break } - self.evm_config().fill_tx_env(evm.tx_mut(), tx, *sender); + Call::evm_config(self).fill_tx_env(evm.tx_mut(), tx, *sender); evm.transact_commit().map_err(Self::Error::from_evm_err)?; index += 1; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs index 5aa0509e8bb..a6213017af8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/spec.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs @@ -16,12 +16,11 @@ use crate::{helpers::EthSigner, RpcNodeCore}; 
#[auto_impl::auto_impl(&, Arc)] pub trait EthApiSpec: RpcNodeCore< - Provider: ChainSpecProvider - + BlockNumReader - + StageCheckpointReader, - Network: NetworkInfo, - > + Send - + Sync + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: NetworkInfo, +> { /// Returns the block node is started on. fn starting_block(&self) -> U256; diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 080d90dc3b0..2a15b194f13 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -18,7 +18,7 @@ use reth_rpc_types_compat::proof::from_primitive_account_proof; use reth_transaction_pool::TransactionPool; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; -use crate::{EthApiTypes, FromEthApiError}; +use crate::{EthApiTypes, FromEthApiError, RpcNodeCore}; use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; @@ -105,7 +105,8 @@ pub trait EthState: LoadState + SpawnBlocking { let block_id = block_id.unwrap_or_default(); // Check whether the distance to the block exceeds the maximum configured window. - let block_number = LoadState::provider(self) + let block_number = self + .provider() .block_number_for_id(block_id) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -138,9 +139,9 @@ pub trait EthState: LoadState + SpawnBlocking { let Some(account) = account else { return Ok(None) }; // Check whether the distance to the block exceeds the maximum configured proof window. - let chain_info = - LoadState::provider(&this).chain_info().map_err(Self::Error::from_eth_err)?; - let block_number = LoadState::provider(&this) + let chain_info = this.provider().chain_info().map_err(Self::Error::from_eth_err)?; + let block_number = this + .provider() .block_number_for_id(block_id) .map_err(Self::Error::from_eth_err)? 
.ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -167,24 +168,19 @@ pub trait EthState: LoadState + SpawnBlocking { /// Loads state from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods. -pub trait LoadState: EthApiTypes { - /// Returns a handle for reading state from database. - /// - /// Data access in default trait method implementations. - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider; - +pub trait LoadState: + EthApiTypes + + RpcNodeCore< + Provider: StateProviderFactory + + ChainSpecProvider, + Pool: TransactionPool, + > +{ /// Returns a handle for reading data from memory. /// /// Data access in default (L1) trait method implementations. fn cache(&self) -> &EthStateCache; - /// Returns a handle for reading data from transaction pool. - /// - /// Data access in default trait method implementations. - fn pool(&self) -> impl TransactionPool; - /// Returns the state at the given block number fn state_at_hash(&self, block_hash: B256) -> Result { self.provider().history_by_block_hash(block_hash).map_err(Self::Error::from_eth_err) @@ -266,7 +262,7 @@ pub trait LoadState: EthApiTypes { let (cfg, mut block_env, _) = self.evm_env_at(header.parent_hash.into()).await?; let after_merge = cfg.handler_cfg.spec_id >= SpecId::MERGE; - self.evm_config().fill_block_env(&mut block_env, header, after_merge); + LoadPendingBlock::evm_config(self).fill_block_env(&mut block_env, header, after_merge); Ok((cfg, block_env)) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 64056148cd3..4f11734849a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -2,7 +2,7 @@ use std::sync::Arc; -use crate::FromEvmError; +use crate::{FromEvmError, RpcNodeCore}; use alloy_primitives::B256; use alloy_rpc_types::{BlockId, TransactionInfo}; use futures::Future; @@ -60,7 +60,7 @@ pub trait Trace: 
LoadState { I: GetInspector, { - let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector); + let mut evm = Trace::evm_config(self).evm_with_env_and_inspector(db, env, inspector); let res = evm.transact().map_err(Self::Error::from_evm_err)?; let (db, env) = evm.into_db_and_env_with_handler_cfg(); Ok((res, env, db)) @@ -202,7 +202,7 @@ pub trait Trace: LoadState { // apply relevant system calls let mut system_caller = SystemCaller::new( Trace::evm_config(&this).clone(), - LoadState::provider(&this).chain_spec(), + RpcNodeCore::provider(&this).chain_spec(), ); system_caller .pre_block_beacon_root_contract_call( @@ -345,7 +345,7 @@ pub trait Trace: LoadState { // apply relevant system calls let mut system_caller = SystemCaller::new( Trace::evm_config(&this).clone(), - LoadState::provider(&this).chain_spec(), + RpcNodeCore::provider(&this).chain_spec(), ); system_caller .pre_block_beacon_root_contract_call( diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 0d16a5c9145..a91e4e6faef 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -21,7 +21,9 @@ use reth_rpc_types_compat::transaction::{from_recovered, from_recovered_with_blo use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use std::sync::Arc; -use crate::{FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcReceipt, RpcTransaction}; +use crate::{ + FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcReceipt, RpcTransaction, +}; use super::{ Call, EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking, @@ -235,7 +237,7 @@ pub trait EthTransactions: LoadTransaction { // Check the pool first if include_pending { if let Some(tx) = - LoadState::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) + RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) 
{ let transaction = tx.transaction.clone().into_consensus(); return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder()))); diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs index 8488677e32f..950271dfcb1 100644 --- a/crates/rpc/rpc-eth-api/src/node.rs +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -8,7 +8,7 @@ use reth_node_api::FullNodeComponents; /// `N: RpcNodeCore` instead, allows access to all the associated types on [`FullNodeComponents`] /// that are used in RPC, but with more flexibility since they have no trait bounds (asides auto /// traits). -pub trait RpcNodeCore: Clone { +pub trait RpcNodeCore: Clone + Send + Sync { /// The provider type used to interact with the node. type Provider: Send + Sync + Clone + Unpin; /// The transaction pool of the node. diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 5a20bee975f..eeab734a643 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -27,8 +27,8 @@ use reth_provider::{ use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ - helpers::{Call, EthApiSpec, EthTransactions, LoadState, TraceExt}, - EthApiTypes, FromEthApiError, + helpers::{Call, EthApiSpec, EthTransactions, TraceExt}, + EthApiTypes, FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; @@ -264,7 +264,7 @@ where // apply relevant system calls let mut system_caller = SystemCaller::new( Call::evm_config(this.eth_api()).clone(), - LoadState::provider(this.eth_api()).chain_spec(), + RpcNodeCore::provider(this.eth_api()).chain_spec(), ); system_caller diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 429a10333d1..8c958ea2ae2 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -4,7 +4,10 @@ use 
reth_chainspec::EthereumHardforks; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_transaction_pool::TransactionPool; -use reth_rpc_eth_api::helpers::{EthState, LoadState, SpawnBlocking}; +use reth_rpc_eth_api::{ + helpers::{EthState, LoadState, SpawnBlocking}, + RpcNodeCore, +}; use reth_rpc_eth_types::EthStateCache; use crate::EthApi; @@ -20,26 +23,15 @@ where impl LoadState for EthApi where - Self: Send + Sync, - Provider: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Self: RpcNodeCore< + Provider: StateProviderFactory + ChainSpecProvider, + Pool: TransactionPool, + >, { - #[inline] - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider { - self.inner.provider() - } - #[inline] fn cache(&self) -> &EthStateCache { self.inner.cache() } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } } #[cfg(test)] From 09ebecffc7ffafbe965f6f652032b95e6acac473 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 26 Oct 2024 18:11:57 +0200 Subject: [PATCH 177/970] prune: add unit tests for `PruneInput` `get_next_tx_num_range` (#12081) --- crates/prune/prune/src/segments/mod.rs | 209 ++++++++++++++++++++++++- crates/prune/prune/src/segments/set.rs | 4 +- 2 files changed, 208 insertions(+), 5 deletions(-) diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index d1b7819ac76..b3b40aab5b3 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -23,9 +23,9 @@ pub use user::{ /// A segment represents a pruning of some portion of the data. /// -/// Segments are called from [Pruner](crate::Pruner) with the following lifecycle: +/// Segments are called from [`Pruner`](crate::Pruner) with the following lifecycle: /// 1. Call [`Segment::prune`] with `delete_limit` of [`PruneInput`]. -/// 2. 
If [`Segment::prune`] returned a [Some] in `checkpoint` of [`SegmentOutput`], call +/// 2. If [`Segment::prune`] returned a [`Some`] in `checkpoint` of [`SegmentOutput`], call /// [`Segment::save_checkpoint`]. /// 3. Subtract `pruned` of [`SegmentOutput`] from `delete_limit` of next [`PruneInput`]. pub trait Segment: Debug + Send + Sync { @@ -88,7 +88,7 @@ impl PruneInput { }, }) // No checkpoint exists, prune from genesis - .unwrap_or(0); + .unwrap_or_default(); let to_tx_number = match provider.block_body_indices(self.to_block)? { Some(body) => { @@ -143,3 +143,206 @@ impl PruneInput { .unwrap_or(0) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + use reth_provider::{ + providers::BlockchainProvider2, + test_utils::{create_test_provider_factory, MockEthProvider}, + }; + use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; + + #[test] + fn test_prune_input_get_next_tx_num_range_no_to_block() { + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + // Default provider with no block corresponding to block 10 + let provider = MockEthProvider::default(); + + // No block body for block 10, expected None + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } + + #[test] + fn test_prune_input_get_next_tx_num_range_no_tx() { + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks with no transactions + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + 
.insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Since there are no transactions, expected None + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } + + #[test] + fn test_prune_input_get_next_tx_num_range_valid() { + // Create a new prune input + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks with some transactions + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Get the next tx number range + let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); + + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + + assert_eq!(range, 0..=num_txs - 1); + } + + #[test] + fn test_prune_input_get_next_tx_checkpoint_without_tx_number() { + // Create a prune input with a previous checkpoint without a tx number (unexpected) + let input = PruneInput { + previous_checkpoint: Some(PruneCheckpoint { + block_number: Some(5), + 
tx_number: None, + prune_mode: PruneMode::Full, + }), + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Fetch the range and check if it is correct + let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); + + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + + assert_eq!(range, 0..=num_txs - 1,); + } + + #[test] + fn test_prune_input_get_next_tx_empty_range() { + // Create a new provider via factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Get the 
last tx number + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + let max_range = num_txs - 1; + + // Create a prune input with a previous checkpoint that is the last tx number + let input = PruneInput { + previous_checkpoint: Some(PruneCheckpoint { + block_number: Some(5), + tx_number: Some(max_range), + prune_mode: PruneMode::Full, + }), + to_block: 10, + limiter: PruneLimiter::default(), + }; + + // We expect an empty range since the previous checkpoint is the last tx number + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } +} diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 710b2b721cd..23d03345b09 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -11,7 +11,7 @@ use reth_prune_types::PruneModes; use super::{StaticFileHeaders, StaticFileReceipts, StaticFileTransactions}; -/// Collection of [Segment]. Thread-safe, allocated on the heap. +/// Collection of [`Segment`]. Thread-safe, allocated on the heap. #[derive(Debug)] pub struct SegmentSet { inner: Vec>>, @@ -23,7 +23,7 @@ impl SegmentSet { Self::default() } - /// Adds new [Segment] to collection. + /// Adds new [`Segment`] to collection. 
pub fn segment + 'static>(mut self, segment: S) -> Self { self.inner.push(Box::new(segment)); self From f616de6d94670687aedfd46e43fb991041cd5936 Mon Sep 17 00:00:00 2001 From: Ryan Schneider Date: Sat, 26 Oct 2024 09:15:08 -0700 Subject: [PATCH 178/970] feat(rpc): Start to implement flashbots_validateBuilderSubmissionV3 (#12061) --- Cargo.lock | 1 + book/cli/reth/node.md | 4 +- crates/rpc/rpc-api/src/validation.rs | 9 ++- crates/rpc/rpc-builder/src/lib.rs | 10 ++- crates/rpc/rpc-server-types/src/module.rs | 3 + crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/lib.rs | 3 + crates/rpc/rpc/src/validation.rs | 94 +++++++++++++++++++++++ 8 files changed, 121 insertions(+), 4 deletions(-) create mode 100644 crates/rpc/rpc/src/validation.rs diff --git a/Cargo.lock b/Cargo.lock index f4dc7dae4e9..b1ca1c9259c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8598,6 +8598,7 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types", "alloy-rpc-types-admin", + "alloy-rpc-types-beacon", "alloy-rpc-types-debug", "alloy-rpc-types-eth", "alloy-rpc-types-mev", diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 4cd55db1fe0..a3ff8f6a57b 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -245,7 +245,7 @@ RPC: --http.api Rpc Modules to be configured for the HTTP server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots] --http.corsdomain Http Corsdomain to allow request from @@ -269,7 +269,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots] --ipcdisable Disable the IPC-RPC server diff --git a/crates/rpc/rpc-api/src/validation.rs b/crates/rpc/rpc-api/src/validation.rs index bbfa673d259..e1819dde440 100644 --- a/crates/rpc/rpc-api/src/validation.rs +++ 
b/crates/rpc/rpc-api/src/validation.rs @@ -1,7 +1,7 @@ //! API for block submission validation. use alloy_rpc_types_beacon::relay::{ - BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, + BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, }; use jsonrpsee::proc_macros::rpc; @@ -22,4 +22,11 @@ pub trait BlockSubmissionValidationApi { &self, request: BuilderBlockValidationRequestV2, ) -> jsonrpsee::core::RpcResult<()>; + + /// A Request to validate a block submission. + #[method(name = "validateBuilderSubmissionV3")] + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> jsonrpsee::core::RpcResult<()>; } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index fc98cd6ff4e..787dce08b8d 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -180,7 +180,7 @@ use reth_provider::{ }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, - TxPoolApi, Web3Api, + TxPoolApi, ValidationApi, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ @@ -1067,6 +1067,11 @@ where pub fn reth_api(&self) -> RethApi { RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) } + + /// Instantiates `ValidationApi` + pub fn validation_api(&self) -> ValidationApi { + ValidationApi::new(self.provider.clone()) + } } impl @@ -1223,6 +1228,9 @@ where .into_rpc() .into() } + RethRpcModule::Flashbots => { + ValidationApi::new(self.provider.clone()).into_rpc().into() + } }) .clone() }) diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index 56417dda701..9f96ff0cef3 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -258,6 +258,8 @@ pub enum RethRpcModule { Reth, /// `ots_` module Ots, + /// `flashbots_` module + Flashbots, } // === impl 
RethRpcModule === @@ -306,6 +308,7 @@ impl FromStr for RethRpcModule { "rpc" => Self::Rpc, "reth" => Self::Reth, "ots" => Self::Ots, + "flashbots" => Self::Flashbots, _ => return Err(ParseError::VariantNotFound), }) } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index dab86ac2587..00799d761d3 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -45,6 +45,7 @@ alloy-network.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-rpc-types.workspace = true +alloy-rpc-types-beacon.workspace = true alloy-rpc-types-eth = { workspace = true, features = ["jsonrpsee-types"] } alloy-rpc-types-debug.workspace = true alloy-rpc-types-trace.workspace = true diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index eec14981bf5..027edea3cc1 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -42,7 +42,9 @@ mod reth; mod rpc; mod trace; mod txpool; +mod validation; mod web3; + pub use admin::AdminApi; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; @@ -53,4 +55,5 @@ pub use reth::RethApi; pub use rpc::RPCApi; pub use trace::TraceApi; pub use txpool::TxPoolApi; +pub use validation::ValidationApi; pub use web3::Web3Api; diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs new file mode 100644 index 00000000000..c6419dc12c0 --- /dev/null +++ b/crates/rpc/rpc/src/validation.rs @@ -0,0 +1,94 @@ +use alloy_rpc_types_beacon::relay::{ + BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, +}; +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use reth_chainspec::ChainSpecProvider; +use reth_provider::{ + AccountReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, WithdrawalsProvider, +}; +use reth_rpc_api::BlockSubmissionValidationApiServer; +use reth_rpc_server_types::result::internal_rpc_err; +use std::sync::Arc; +use tracing::warn; + +/// The type that implements 
the `validation` rpc namespace trait +pub struct ValidationApi { + inner: Arc>, +} + +impl ValidationApi +where + Provider: BlockReaderIdExt + + ChainSpecProvider + + StateProviderFactory + + HeaderProvider + + AccountReader + + WithdrawalsProvider + + Clone + + 'static, +{ + /// The provider that can interact with the chain. + pub fn provider(&self) -> Provider { + self.inner.provider.clone() + } + + /// Create a new instance of the [`ValidationApi`] + pub fn new(provider: Provider) -> Self { + let inner = Arc::new(ValidationApiInner { provider }); + Self { inner } + } +} + +#[async_trait] +impl BlockSubmissionValidationApiServer for ValidationApi +where + Provider: BlockReaderIdExt + + ChainSpecProvider + + StateProviderFactory + + HeaderProvider + + AccountReader + + WithdrawalsProvider + + Clone + + 'static, +{ + async fn validate_builder_submission_v1( + &self, + _request: BuilderBlockValidationRequest, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + async fn validate_builder_submission_v2( + &self, + _request: BuilderBlockValidationRequestV2, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> RpcResult<()> { + warn!("flashbots_validateBuilderSubmissionV3: blindly accepting request without validation {:?}", request); + Ok(()) + } +} + +impl std::fmt::Debug for ValidationApi { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ValidationApi").finish_non_exhaustive() + } +} + +impl Clone for ValidationApi { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + +struct ValidationApiInner { + /// The provider that can interact with the chain. 
+ provider: Provider, +} From b2574080609f8fd2584c0c95aa2555b45ff91288 Mon Sep 17 00:00:00 2001 From: lazymio Date: Sun, 27 Oct 2024 00:17:21 +0800 Subject: [PATCH 179/970] Fix readonly check in libmdbx-rs (#12096) --- crates/cli/commands/src/common.rs | 2 +- crates/storage/libmdbx-rs/src/environment.rs | 32 ++++++++++++++----- crates/storage/libmdbx-rs/src/transaction.rs | 5 ++- .../storage/libmdbx-rs/tests/environment.rs | 12 +++++++ 4 files changed, 41 insertions(+), 10 deletions(-) diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 956a63a5aa0..49fee347ed4 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -126,7 +126,7 @@ impl> Environmen .static_file_provider() .check_consistency(&factory.provider()?, has_receipt_pruning)? { - if factory.db_ref().is_read_only() { + if factory.db_ref().is_read_only()? { warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal."); return Ok(factory) } diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index edf9321ace4..480f5aaab65 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -4,7 +4,7 @@ use crate::{ flags::EnvironmentFlags, transaction::{RO, RW}, txn_manager::{TxnManager, TxnManagerMessage, TxnPtr}, - Transaction, TransactionKind, + Mode, SyncMode, Transaction, TransactionKind, }; use byteorder::{ByteOrder, NativeEndian}; use mem::size_of; @@ -72,14 +72,14 @@ impl Environment { /// Returns true if the environment was opened in [`crate::Mode::ReadWrite`] mode. #[inline] - pub fn is_read_write(&self) -> bool { - self.inner.env_kind.is_write_map() + pub fn is_read_write(&self) -> Result { + Ok(!self.is_read_only()?) } /// Returns true if the environment was opened in [`crate::Mode::ReadOnly`] mode. 
#[inline] - pub fn is_read_only(&self) -> bool { - !self.inner.env_kind.is_write_map() + pub fn is_read_only(&self) -> Result { + Ok(matches!(self.info()?.mode(), Mode::ReadOnly)) } /// Returns the transaction manager. @@ -425,6 +425,23 @@ impl Info { fsync: self.0.mi_pgop_stat.fsync, } } + + /// Return the mode of the database + #[inline] + pub const fn mode(&self) -> Mode { + let mode = self.0.mi_mode; + if (mode & ffi::MDBX_RDONLY) != 0 { + Mode::ReadOnly + } else if (mode & ffi::MDBX_UTTERLY_NOSYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::UtterlyNoSync } + } else if (mode & ffi::MDBX_NOMETASYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::NoMetaSync } + } else if (mode & ffi::MDBX_SAFE_NOSYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::SafeNoSync } + } else { + Mode::ReadWrite { sync_mode: SyncMode::Durable } + } + } } impl fmt::Debug for Environment { @@ -781,15 +798,14 @@ impl EnvironmentBuilder { } /// Sets the interprocess/shared threshold to force flush the data buffers to disk, if - /// [`SyncMode::SafeNoSync`](crate::flags::SyncMode::SafeNoSync) is used. + /// [`SyncMode::SafeNoSync`] is used. pub fn set_sync_bytes(&mut self, v: usize) -> &mut Self { self.sync_bytes = Some(v as u64); self } /// Sets the interprocess/shared relative period since the last unsteady commit to force flush - /// the data buffers to disk, if [`SyncMode::SafeNoSync`](crate::flags::SyncMode::SafeNoSync) is - /// used. + /// the data buffers to disk, if [`SyncMode::SafeNoSync`] is used. pub fn set_sync_period(&mut self, v: Duration) -> &mut Self { // For this option, mdbx uses units of 1/65536 of a second. 
let as_mdbx_units = (v.as_secs_f64() * 65536f64) as u64; diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 88236ebe991..84b2dabc90a 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -6,7 +6,7 @@ use crate::{ txn_manager::{TxnManagerMessage, TxnPtr}, Cursor, Error, Stat, TableObject, }; -use ffi::{mdbx_txn_renew, MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE}; +use ffi::{MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE}; use indexmap::IndexSet; use parking_lot::{Mutex, MutexGuard}; use std::{ @@ -18,6 +18,9 @@ use std::{ time::Duration, }; +#[cfg(feature = "read-tx-timeouts")] +use ffi::mdbx_txn_renew; + mod private { use super::*; diff --git a/crates/storage/libmdbx-rs/tests/environment.rs b/crates/storage/libmdbx-rs/tests/environment.rs index 99453ef113a..007418f76bb 100644 --- a/crates/storage/libmdbx-rs/tests/environment.rs +++ b/crates/storage/libmdbx-rs/tests/environment.rs @@ -128,6 +128,18 @@ fn test_info() { // assert_eq!(info.last_pgno(), 1); // assert_eq!(info.last_txnid(), 0); assert_eq!(info.num_readers(), 0); + assert!(matches!(info.mode(), Mode::ReadWrite { sync_mode: SyncMode::Durable })); + assert!(env.is_read_write().unwrap()); + + drop(env); + let env = Environment::builder() + .set_geometry(Geometry { size: Some(map_size..), ..Default::default() }) + .set_flags(EnvironmentFlags { mode: Mode::ReadOnly, ..Default::default() }) + .open(dir.path()) + .unwrap(); + let info = env.info().unwrap(); + assert!(matches!(info.mode(), Mode::ReadOnly)); + assert!(env.is_read_only().unwrap()); } #[test] From 1bdf429af5092a0b737e900977c1418bc070ec59 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 27 Oct 2024 00:20:08 +0800 Subject: [PATCH 180/970] Remove trait method `Call::evm_config` (#12095) --- crates/optimism/rpc/src/eth/call.rs | 11 +++-------- crates/rpc/rpc-eth-api/src/helpers/call.rs | 20 ++++++++------------ 
crates/rpc/rpc-eth-api/src/helpers/trace.rs | 2 +- crates/rpc/rpc/src/debug.rs | 11 ++++++----- crates/rpc/rpc/src/eth/bundle.rs | 6 +++--- crates/rpc/rpc/src/eth/helpers/call.rs | 7 +------ crates/rpc/rpc/src/trace.rs | 7 ++----- 7 files changed, 24 insertions(+), 40 deletions(-) diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index f1c10e6f172..0402165f707 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -9,7 +9,7 @@ use reth_primitives::{ }; use reth_rpc_eth_api::{ helpers::{Call, EthCall, LoadState, SpawnBlocking}, - FromEthApiError, IntoEthApiError, + FromEthApiError, IntoEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{revm_utils::CallFees, RpcInvalidTransactionError}; @@ -24,9 +24,9 @@ where impl Call for OpEthApi where - Self: LoadState + SpawnBlocking, + N: RpcNodeCore, + Self: LoadState> + SpawnBlocking, Self::Error: From, - N: FullNodeComponents, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -38,11 +38,6 @@ where self.inner.max_simulate_blocks() } - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } - fn create_txn_env( &self, block_env: &BlockEnv, diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 89ae1c8ac96..f2662a86a74 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -3,6 +3,7 @@ use crate::{ AsEthApiError, FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcBlock, + RpcNodeCore, }; use alloy_eips::{eip1559::calc_next_block_base_fee, eip2930::AccessListResult}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; @@ -300,7 +301,7 @@ pub trait EthCall: Call + LoadPendingBlock { let env = EnvWithHandlerCfg::new_with_cfg_env( cfg.clone(), block_env.clone(), - Call::evm_config(&this).tx_env(tx, *signer), + RpcNodeCore::evm_config(&this).tx_env(tx, *signer), ); let (res, _) = this.transact(&mut db, env)?; db.commit(res.state); @@ -452,7 +453,7 @@ pub trait EthCall: Call + LoadPendingBlock { } /// Executes code on state. -pub trait Call: LoadState + SpawnBlocking { +pub trait Call: LoadState> + SpawnBlocking { /// Returns default gas limit to use for `eth_call` and tracing RPC methods. /// /// Data access in default trait method implementations. @@ -461,11 +462,6 @@ pub trait Call: LoadState + SpawnBlocking { /// Returns the maximum number of blocks accepted for `eth_simulateV1`. fn max_simulate_blocks(&self) -> u64; - /// Returns a handle for reading evm config. - /// - /// Data access in default (L1) trait method implementations. - fn evm_config(&self) -> &impl ConfigureEvm
; - /// Executes the closure with the state that corresponds to the given [`BlockId`]. fn with_state_at_block(&self, at: BlockId, f: F) -> Result where @@ -486,7 +482,7 @@ pub trait Call: LoadState + SpawnBlocking { DB: Database, EthApiError: From, { - let mut evm = Call::evm_config(self).evm_with_env(db, env); + let mut evm = self.evm_config().evm_with_env(db, env); let res = evm.transact().map_err(Self::Error::from_evm_err)?; let (_, env) = evm.into_db_and_env_with_handler_cfg(); Ok((res, env)) @@ -504,7 +500,7 @@ pub trait Call: LoadState + SpawnBlocking { DB: Database, EthApiError: From, { - let mut evm = Call::evm_config(self).evm_with_env_and_inspector(db, env, inspector); + let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector); let res = evm.transact().map_err(Self::Error::from_evm_err)?; let (_, env) = evm.into_db_and_env_with_handler_cfg(); Ok((res, env)) @@ -636,7 +632,7 @@ pub trait Call: LoadState + SpawnBlocking { let env = EnvWithHandlerCfg::new_with_cfg_env( cfg, block_env, - Call::evm_config(&this).tx_env(tx.as_signed(), tx.signer()), + RpcNodeCore::evm_config(&this).tx_env(tx.as_signed(), tx.signer()), ); let (res, _) = this.transact(&mut db, env)?; @@ -669,7 +665,7 @@ pub trait Call: LoadState + SpawnBlocking { { let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); - let mut evm = Call::evm_config(self).evm_with_env(db, env); + let mut evm = self.evm_config().evm_with_env(db, env); let mut index = 0; for (sender, tx) in transactions { if tx.hash() == target_tx_hash { @@ -677,7 +673,7 @@ pub trait Call: LoadState + SpawnBlocking { break } - Call::evm_config(self).fill_tx_env(evm.tx_mut(), tx, *sender); + self.evm_config().fill_tx_env(evm.tx_mut(), tx, *sender); evm.transact_commit().map_err(Self::Error::from_evm_err)?; index += 1; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 4f11734849a..6c7dd0f6f8d 100644 --- 
a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -229,7 +229,7 @@ pub trait Trace: LoadState { let env = EnvWithHandlerCfg::new_with_cfg_env( cfg, block_env, - Call::evm_config(&this).tx_env(tx.as_signed(), tx.signer()), + RpcNodeCore::evm_config(&this).tx_env(tx.as_signed(), tx.signer()), ); let (res, _) = this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?; diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index eeab734a643..dd1cd9739ed 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -27,7 +27,7 @@ use reth_provider::{ use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ - helpers::{Call, EthApiSpec, EthTransactions, TraceExt}, + helpers::{EthApiSpec, EthTransactions, TraceExt}, EthApiTypes, FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; @@ -120,7 +120,8 @@ where env: Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - Call::evm_config(this.eth_api()).tx_env(tx.as_signed(), tx.signer()), + RpcNodeCore::evm_config(this.eth_api()) + .tx_env(tx.as_signed(), tx.signer()), ), handler_cfg: cfg.handler_cfg, }; @@ -263,7 +264,7 @@ where // apply relevant system calls let mut system_caller = SystemCaller::new( - Call::evm_config(this.eth_api()).clone(), + RpcNodeCore::evm_config(this.eth_api()).clone(), RpcNodeCore::provider(this.eth_api()).chain_spec(), ); @@ -293,7 +294,7 @@ where env: Env::boxed( cfg.cfg_env.clone(), block_env, - Call::evm_config(this.eth_api()).tx_env(tx.as_signed(), tx.signer()), + RpcNodeCore::evm_config(this.eth_api()).tx_env(tx.as_signed(), tx.signer()), ), handler_cfg: cfg.handler_cfg, }; @@ -562,7 +563,7 @@ where env: Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - Call::evm_config(this.eth_api()).tx_env(tx, *signer), + RpcNodeCore::evm_config(this.eth_api()).tx_env(tx, *signer), ), handler_cfg: cfg.handler_cfg, }; diff --git 
a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index e97497786ed..9ce9ee1ebcb 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -12,7 +12,7 @@ use reth_primitives::{ PooledTransactionsElement, }; use reth_revm::database::StateProviderDatabase; -use reth_rpc_eth_api::{FromEthApiError, FromEvmError}; +use reth_rpc_eth_api::{FromEthApiError, FromEvmError, RpcNodeCore}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ db::CacheDB, @@ -166,7 +166,7 @@ where let mut total_gas_fess = U256::ZERO; let mut hasher = Keccak256::new(); - let mut evm = Call::evm_config(ð_api).evm_with_env(db, env); + let mut evm = RpcNodeCore::evm_config(ð_api).evm_with_env(db, env); let mut results = Vec::with_capacity(transactions.len()); let mut transactions = transactions.into_iter().peekable(); @@ -187,7 +187,7 @@ where .effective_tip_per_gas(basefee) .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) .map_err(Eth::Error::from_eth_err)?; - Call::evm_config(ð_api).fill_tx_env(evm.tx_mut(), &tx, signer); + RpcNodeCore::evm_config(ð_api).fill_tx_env(evm.tx_mut(), &tx, signer); let ResultAndState { result, state } = evm.transact().map_err(Eth::Error::from_evm_err)?; diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 396bf9bd08e..d1d33190a7c 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -13,7 +13,7 @@ impl EthCall for EthApi Call for EthApi where - Self: LoadState + SpawnBlocking, + Self: LoadState> + SpawnBlocking, EvmConfig: ConfigureEvm
, { #[inline] @@ -25,9 +25,4 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.max_simulate_blocks() } - - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } } diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index b9b15b5366d..2883818afd9 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -21,10 +21,7 @@ use reth_primitives::{BlockId, Header}; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; -use reth_rpc_eth_api::{ - helpers::{Call, TraceExt}, - FromEthApiError, -}; +use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError, RpcNodeCore}; use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ @@ -124,7 +121,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( cfg, block, - Call::evm_config(self.eth_api()).tx_env(tx.as_signed(), tx.signer()), + RpcNodeCore::evm_config(self.eth_api()).tx_env(tx.as_signed(), tx.signer()), ); let config = TracingInspectorConfig::from_parity_config(&trace_types); From ab07fcfb113cb0b579e1f9f55a5dd9511b576687 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Sun, 27 Oct 2024 00:02:14 +0700 Subject: [PATCH 181/970] chore(op): simplify blob fields in newly built block header (#12035) --- Cargo.lock | 1 - crates/optimism/payload/Cargo.toml | 2 -- crates/optimism/payload/src/builder.rs | 36 ++++++++------------------ 3 files changed, 11 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b1ca1c9259c..b483fd6641d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8279,7 +8279,6 @@ dependencies = [ "reth-transaction-pool", "reth-trie", "revm", - "revm-primitives", "sha2 0.10.8", "thiserror", "tracing", diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index de61def8350..ba0b105e832 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -39,7 +39,6 @@ alloy-eips.workspace = true 
alloy-primitives.workspace = true alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true -revm-primitives.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true @@ -56,5 +55,4 @@ optimism = [ "revm/optimism", "reth-execution-types/optimism", "reth-optimism-consensus/optimism", - "revm-primitives/optimism" ] diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 85f687aa803..5523770e09c 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -7,12 +7,12 @@ use alloy_eips::merge::BEACON_NONCE; use alloy_primitives::U256; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; -use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ proofs, @@ -30,7 +30,6 @@ use revm::{ primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, DatabaseCommit, }; -use revm_primitives::calc_excess_blob_gas; use tracing::{debug, trace, warn}; use crate::{ @@ -460,26 +459,16 @@ where // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); - // initialize empty blob sidecars. There are no blob transactions on L2. 
- let blob_sidecars = Vec::new(); - let mut excess_blob_gas = None; - let mut blob_gas_used = None; - - // only determine cancun fields when active - if chain_spec.is_cancun_active_at_timestamp(attributes.payload_attributes.timestamp) { - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); - Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) + // OP doesn't support blobs/EIP-4844. + // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // Need [Some] or [None] based on hardfork to match block hash. + let (excess_blob_gas, blob_gas_used) = + if chain_spec.is_ecotone_active_at_timestamp(attributes.payload_attributes.timestamp) { + (Some(0), Some(0)) } else { - // for the first post-fork block, both parent.blob_gas_used and - // parent.excess_blob_gas are evaluated as 0 - Some(calc_excess_blob_gas(0, 0)) + (None, None) }; - blob_gas_used = Some(0); - } - let header = Header { parent_hash: parent_block.hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, @@ -500,7 +489,7 @@ where extra_data, parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, blob_gas_used, - excess_blob_gas: excess_blob_gas.map(Into::into), + excess_blob_gas, requests_hash: None, }; @@ -524,7 +513,7 @@ where let no_tx_pool = attributes.no_tx_pool; - let mut payload = OptimismBuiltPayload::new( + let payload = OptimismBuiltPayload::new( attributes.payload_attributes.id, sealed_block, total_fees, @@ -533,9 +522,6 @@ where Some(executed), ); - // extend the payload with the blob sidecars from the executed txs - payload.extend_sidecars(blob_sidecars); - if no_tx_pool { // if `no_tx_pool` is set only transactions from the payload attributes will be included in // the payload. 
In other words, the payload is deterministic and we can freeze it once we've From 923f4ffa92c6fe387050f099773b61dd6983bff2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 26 Oct 2024 20:22:46 +0200 Subject: [PATCH 182/970] chore: only check for better payload if tx_pool (#12097) --- crates/optimism/payload/src/builder.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 5523770e09c..36b7d17a07d 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -413,8 +413,8 @@ where } } - // check if we have a better block - if !is_better_payload(best_payload.as_ref(), total_fees) { + // check if we have a better block, but only if we included transactions from the pool + if !attributes.no_tx_pool && !is_better_payload(best_payload.as_ref(), total_fees) { // can skip building the block return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads }) } From a98dc3973ff733feb11a7c8b50c60459dfaa9351 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 27 Oct 2024 02:56:29 +0800 Subject: [PATCH 183/970] chore(rpc): simplify trait bounds on `EthApiSpec` impl (#12101) --- crates/rpc/rpc/src/eth/helpers/spec.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index c5c8d54c64b..a44692e18a3 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -3,20 +3,17 @@ use reth_chainspec::EthereumHardforks; use reth_network_api::NetworkInfo; use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader}; use reth_rpc_eth_api::{helpers::EthApiSpec, RpcNodeCore}; -use reth_transaction_pool::TransactionPool; use crate::EthApi; impl EthApiSpec for EthApi where - Self: RpcNodeCore, - Pool: TransactionPool + 'static, - Provider: ChainSpecProvider - + BlockNumReader - + 
StageCheckpointReader - + 'static, - Network: NetworkInfo + 'static, - EvmConfig: Send + Sync, + Self: RpcNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: NetworkInfo, + >, { fn starting_block(&self) -> U256 { self.inner.starting_block() From 988c5ee4c5d4b9e1c71d81aaf54ff7e6c1855895 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 27 Oct 2024 07:11:53 +0800 Subject: [PATCH 184/970] chore(rpc): Add super trait `RpcNodeCore` to `LoadPendingBlock` (#12098) --- crates/optimism/rpc/src/eth/call.rs | 8 ++-- crates/optimism/rpc/src/eth/pending_block.rs | 34 +++++---------- crates/rpc/rpc-eth-api/src/helpers/block.rs | 6 +-- crates/rpc/rpc-eth-api/src/helpers/call.rs | 4 +- .../rpc-eth-api/src/helpers/pending_block.rs | 34 ++++++--------- crates/rpc/rpc-eth-api/src/helpers/state.rs | 4 +- crates/rpc/rpc/src/eth/bundle.rs | 4 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 43 ++++++------------- 8 files changed, 48 insertions(+), 89 deletions(-) diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 0402165f707..c8f8200bc69 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,14 +1,12 @@ use alloy_primitives::{Bytes, TxKind, U256}; use alloy_rpc_types_eth::transaction::TransactionRequest; -use reth_chainspec::EthereumHardforks; use reth_evm::ConfigureEvm; -use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_primitives::{ revm_primitives::{BlockEnv, OptimismFields, TxEnv}, Header, }; use reth_rpc_eth_api::{ - helpers::{Call, EthCall, LoadState, SpawnBlocking}, + helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, FromEthApiError, IntoEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{revm_utils::CallFees, RpcInvalidTransactionError}; @@ -17,8 +15,8 @@ use crate::{OpEthApi, OpEthApiError}; impl EthCall for OpEthApi where - Self: Call, - N: FullNodeComponents>, + Self: Call + LoadPendingBlock, + N: RpcNodeCore, { 
} diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 5b716f39320..3b3b7845cc1 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,9 +1,8 @@ //! Loads OP pending block for a RPC response. use alloy_primitives::{BlockNumber, B256}; -use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; use reth_primitives::{ revm_primitives::BlockEnv, BlockNumberOrTag, Header, Receipt, SealedBlockWithSenders, @@ -14,7 +13,7 @@ use reth_provider::{ }; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, - FromEthApiError, + FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_transaction_pool::TransactionPool; @@ -24,33 +23,20 @@ use crate::OpEthApi; impl LoadPendingBlock for OpEthApi where Self: SpawnBlocking, - N: FullNodeComponents>, + N: RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + Pool: TransactionPool, + Evm: ConfigureEvm
, + >, { - #[inline] - fn provider( - &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory { - self.inner.provider() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } - #[inline] fn pending_block(&self) -> &tokio::sync::Mutex> { self.inner.pending_block() } - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } - /// Returns the locally built pending block async fn local_pending_block( &self, diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index da5f275ef0c..7599cbb599d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -9,7 +9,7 @@ use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider use reth_rpc_eth_types::EthStateCache; use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; -use crate::{FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; +use crate::{FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, RpcReceipt}; use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; @@ -220,7 +220,7 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - if let Some(pending_block) = LoadPendingBlock::provider(self) + if let Some(pending_block) = RpcNodeCore::provider(self) .pending_block_with_senders() .map_err(Self::Error::from_eth_err)? { @@ -234,7 +234,7 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { }; } - let block_hash = match LoadPendingBlock::provider(self) + let block_hash = match RpcNodeCore::provider(self) .block_hash_for_id(block_id) .map_err(Self::Error::from_eth_err)? 
{ diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index f2662a86a74..c2b6524b3ef 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -97,7 +97,7 @@ pub trait EthCall: Call + LoadPendingBlock { let base_block = self.block_with_senders(block).await?.ok_or(EthApiError::HeaderNotFound(block))?; let mut parent_hash = base_block.header.hash(); - let total_difficulty = LoadPendingBlock::provider(self) + let total_difficulty = RpcNodeCore::provider(self) .header_td_by_number(block_env.number.to()) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(block))?; @@ -119,7 +119,7 @@ pub trait EthCall: Call + LoadPendingBlock { block_env.timestamp += U256::from(1); if validation { - let chain_spec = LoadPendingBlock::provider(&this).chain_spec(); + let chain_spec = RpcNodeCore::provider(&this).chain_spec(); let base_fee_params = chain_spec.base_fee_params_at_timestamp(block_env.timestamp.to()); let base_fee = if let Some(latest) = blocks.last() { diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 872f17ee910..2c04f3beb3b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -3,7 +3,7 @@ use std::time::{Duration, Instant}; -use crate::{EthApiTypes, FromEthApiError, FromEvmError}; +use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE}; @@ -43,32 +43,22 @@ use super::SpawnBlocking; /// Loads a pending block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. -pub trait LoadPendingBlock: EthApiTypes { - /// Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. 
- fn provider( - &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory; - - /// Returns a handle for reading data from transaction pool. - /// - /// Data access in default (L1) trait method implementations. - fn pool(&self) -> impl TransactionPool; - +pub trait LoadPendingBlock: + EthApiTypes + + RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + Pool: TransactionPool, + Evm: ConfigureEvm
, + > +{ /// Returns a handle to the pending block. /// /// Data access in default (L1) trait method implementations. fn pending_block(&self) -> &Mutex>; - /// Returns a handle for reading evm config. - /// - /// Data access in default (L1) trait method implementations. - fn evm_config(&self) -> &impl ConfigureEvm
; - /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 2a15b194f13..87e66cb7481 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -233,7 +233,7 @@ pub trait LoadState: Ok((cfg, block_env, origin.state_block_id())) } else { // Use cached values if there is no pending block - let block_hash = LoadPendingBlock::provider(self) + let block_hash = RpcNodeCore::provider(self) .block_hash_for_id(at) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(at))?; @@ -262,7 +262,7 @@ pub trait LoadState: let (cfg, mut block_env, _) = self.evm_env_at(header.parent_hash.into()).await?; let after_merge = cfg.handler_cfg.spec_id >= SpecId::MERGE; - LoadPendingBlock::evm_config(self).fill_block_env(&mut block_env, header, after_merge); + self.evm_config().fill_block_env(&mut block_env, header, after_merge); Ok((cfg, block_env)) } diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 9ce9ee1ebcb..ec1a43c7548 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -130,12 +130,12 @@ where } else if cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { let parent_block = block_env.number.saturating_to::(); // here we need to fetch the _next_ block's basefee based on the parent block - let parent = LoadPendingBlock::provider(self.eth_api()) + let parent = RpcNodeCore::provider(self.eth_api()) .header_by_number(parent_block) .map_err(Eth::Error::from_eth_err)? 
.ok_or(EthApiError::HeaderNotFound(parent_block.into()))?; if let Some(base_fee) = parent.next_block_base_fee( - LoadPendingBlock::provider(self.eth_api()) + RpcNodeCore::provider(self.eth_api()) .chain_spec() .base_fee_params_at_block(parent_block), ) { diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 69d55f58bfa..6b28947df35 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,10 +1,13 @@ //! Support for building a pending block with transactions from local view of mempool. -use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_primitives::Header; use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; -use reth_rpc_eth_api::helpers::{LoadPendingBlock, SpawnBlocking}; +use reth_rpc_eth_api::{ + helpers::{LoadPendingBlock, SpawnBlocking}, + RpcNodeCore, +}; use reth_rpc_eth_types::PendingBlock; use reth_transaction_pool::TransactionPool; @@ -13,36 +16,18 @@ use crate::EthApi; impl LoadPendingBlock for EthApi where - Self: SpawnBlocking, - Provider: BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory, - Pool: TransactionPool, - EvmConfig: ConfigureEvm
, + Self: SpawnBlocking + + RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + Pool: TransactionPool, + Evm: ConfigureEvm
, + >, { - #[inline] - fn provider( - &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory { - self.inner.provider() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } - #[inline] fn pending_block(&self) -> &tokio::sync::Mutex> { self.inner.pending_block() } - - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } } From 8eb1742284964de1a7a32df523a2d0a4b6df1b4f Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 27 Oct 2024 02:59:57 +0100 Subject: [PATCH 185/970] refactor(tx-pool): small refactor (#12107) --- crates/transaction-pool/src/maintain.rs | 2 +- crates/transaction-pool/src/pool/best.rs | 2 +- crates/transaction-pool/src/pool/blob.rs | 15 +++++++-------- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 23a8d0dc66a..09c042ae66d 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -575,7 +575,7 @@ where // Filter out errors ::try_from_consensus(tx.into()).ok() }) - .collect::>(); + .collect(); let outcome = pool.add_transactions(crate::TransactionOrigin::Local, pool_transactions).await; diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 77cd35d8a6b..268e3e262c6 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -196,7 +196,7 @@ impl Iterator for BestTransactions { } } -/// A[`BestTransactions`](crate::traits::BestTransactions) implementation that filters the +/// A [`BestTransactions`](crate::traits::BestTransactions) implementation that filters the /// transactions of iter with predicate. /// /// Filter out transactions are marked as invalid: diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index cb09e823409..ac39c6ab781 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -11,7 +11,7 @@ use std::{ /// A set of validated blob transactions in the pool that are __not pending__. 
/// -/// The purpose of this pool is keep track of blob transactions that are queued and to evict the +/// The purpose of this pool is to keep track of blob transactions that are queued and to evict the /// worst blob transactions once the sub-pool is full. /// /// This expects that certain constraints are met: @@ -198,14 +198,13 @@ impl BlobTransactions { &mut self, pending_fees: &PendingFees, ) -> Vec>> { - let to_remove = self.satisfy_pending_fee_ids(pending_fees); - - let mut removed = Vec::with_capacity(to_remove.len()); - for id in to_remove { - removed.push(self.remove_transaction(&id).expect("transaction exists")); - } + let removed = self + .satisfy_pending_fee_ids(pending_fees) + .into_iter() + .map(|id| self.remove_transaction(&id).expect("transaction exists")) + .collect(); - // set pending fees and reprioritize / resort + // Update pending fees and reprioritize self.pending_fees = pending_fees.clone(); self.reprioritize(); From fae36bd25ff1efa01f0c66d5c73606f63c1f0794 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 27 Oct 2024 03:00:32 +0100 Subject: [PATCH 186/970] refactor(storage): small refactor (#12106) --- crates/storage/db-api/src/cursor.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index 585aa4947a2..9297f738ab5 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -152,12 +152,7 @@ where impl> Iterator for Walker<'_, T, CURSOR> { type Item = Result, DatabaseError>; fn next(&mut self) -> Option { - let start = self.start.take(); - if start.is_some() { - return start - } - - self.cursor.next().transpose() + self.start.take().or_else(|| self.cursor.next().transpose()) } } From e158369a68102661fdfdb8ecf1ee205360e1104c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 27 Oct 2024 15:35:24 +0800 Subject: [PATCH 187/970] chore(rpc): remove redundant 
trait method `LoadBlock::provider` (#12100) --- crates/optimism/rpc/src/eth/block.rs | 7 +--- crates/optimism/rpc/src/eth/transaction.rs | 15 ++------- crates/rpc/rpc-eth-api/src/helpers/block.rs | 25 +++++++------- crates/rpc/rpc-eth-api/src/helpers/call.rs | 3 +- .../rpc-eth-api/src/helpers/transaction.rs | 33 ++++++++----------- crates/rpc/rpc/src/eth/helpers/block.rs | 5 --- crates/rpc/rpc/src/eth/helpers/transaction.rs | 20 +++-------- 7 files changed, 34 insertions(+), 74 deletions(-) diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index dfdd0960856..42c4789d44c 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -7,7 +7,7 @@ use reth_chainspec::ChainSpecProvider; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_chainspec::OpChainSpec; use reth_primitives::TransactionMeta; -use reth_provider::{BlockReaderIdExt, HeaderProvider}; +use reth_provider::HeaderProvider; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, RpcReceipt, @@ -87,11 +87,6 @@ where Self: LoadPendingBlock + SpawnBlocking, N: FullNodeComponents, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - #[inline] fn cache(&self) -> &EthStateCache { self.inner.cache() diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 4ac2d7e6b74..451c8a805fb 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -10,7 +10,7 @@ use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; use reth_rpc::eth::EthTxBuilder; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FromEthApiError, FullEthApiTypes, TransactionCompat, + FromEthApiError, FullEthApiTypes, RpcNodeCore, TransactionCompat, }; use reth_rpc_eth_types::{utils::recover_raw_transaction, 
EthStateCache}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; @@ -61,21 +61,12 @@ where impl LoadTransaction for OpEthApi where Self: SpawnBlocking + FullEthApiTypes, - N: FullNodeComponents, + N: RpcNodeCore, { - type Pool = N::Pool; - - fn provider(&self) -> impl TransactionsProvider { - self.inner.provider() - } - + #[inline] fn cache(&self) -> &EthStateCache { self.inner.cache() } - - fn pool(&self) -> &Self::Pool { - self.inner.pool() - } } impl OpEthApi diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 7599cbb599d..839b4891914 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -85,13 +85,13 @@ pub trait EthBlocks: LoadBlock { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - return Ok(LoadBlock::provider(self) + return Ok(RpcNodeCore::provider(self) .pending_block() .map_err(Self::Error::from_eth_err)? .map(|block| block.body.transactions.len())) } - let block_hash = match LoadBlock::provider(self) + let block_hash = match RpcNodeCore::provider(self) .block_hash_for_id(block_id) .map_err(Self::Error::from_eth_err)? { @@ -132,7 +132,7 @@ pub trait EthBlocks: LoadBlock { if block_id.is_pending() { // First, try to get the pending block from the provider, in case we already // received the actual pending block from the CL. - if let Some((block, receipts)) = LoadBlock::provider(self) + if let Some((block, receipts)) = RpcNodeCore::provider(self) .pending_block_and_receipts() .map_err(Self::Error::from_eth_err)? { @@ -145,7 +145,7 @@ pub trait EthBlocks: LoadBlock { } } - if let Some(block_hash) = LoadBlock::provider(self) + if let Some(block_hash) = RpcNodeCore::provider(self) .block_hash_for_id(block_id) .map_err(Self::Error::from_eth_err)? 
{ @@ -167,7 +167,7 @@ pub trait EthBlocks: LoadBlock { &self, block_id: BlockId, ) -> Result>, Self::Error> { - LoadBlock::provider(self).ommers_by_id(block_id).map_err(Self::Error::from_eth_err) + RpcNodeCore::provider(self).ommers_by_id(block_id).map_err(Self::Error::from_eth_err) } /// Returns uncle block at given index in given block. @@ -182,12 +182,12 @@ pub trait EthBlocks: LoadBlock { async move { let uncles = if block_id.is_pending() { // Pending block can be fetched directly without need for caching - LoadBlock::provider(self) + RpcNodeCore::provider(self) .pending_block() .map_err(Self::Error::from_eth_err)? .map(|block| block.body.ommers) } else { - LoadBlock::provider(self) + RpcNodeCore::provider(self) .ommers_by_id(block_id) .map_err(Self::Error::from_eth_err)? } @@ -202,11 +202,6 @@ pub trait EthBlocks: LoadBlock { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { - // Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider(&self) -> impl BlockReaderIdExt; - /// Returns a handle for reading data from memory. /// /// Data access in default (L1) trait method implementations. @@ -220,7 +215,8 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - if let Some(pending_block) = RpcNodeCore::provider(self) + if let Some(pending_block) = self + .provider() .pending_block_with_senders() .map_err(Self::Error::from_eth_err)? { @@ -234,7 +230,8 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { }; } - let block_hash = match RpcNodeCore::provider(self) + let block_hash = match self + .provider() .block_hash_for_id(block_id) .map_err(Self::Error::from_eth_err)? 
{ diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index c2b6524b3ef..b90577c1486 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -259,7 +259,8 @@ pub trait EthCall: Call + LoadPendingBlock { // if it's not pending, we should always use block_hash over block_number to ensure that // different provider calls query data related to the same block. if !is_block_target_pending { - target_block = LoadBlock::provider(self) + target_block = self + .provider() .block_hash_for_id(target_block) .map_err(|_| EthApiError::HeaderNotFound(target_block))? .ok_or_else(|| EthApiError::HeaderNotFound(target_block))? diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index a91e4e6faef..c693945caca 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -111,7 +111,7 @@ pub trait EthTransactions: LoadTransaction { } self.spawn_blocking_io(move |ref this| { - Ok(LoadTransaction::provider(this) + Ok(RpcNodeCore::provider(this) .transaction_by_hash(hash) .map_err(Self::Error::from_eth_err)? .map(|tx| tx.encoded_2718().into())) @@ -166,7 +166,7 @@ pub trait EthTransactions: LoadTransaction { { let this = self.clone(); self.spawn_blocking_io(move |_| { - let (tx, meta) = match LoadTransaction::provider(&this) + let (tx, meta) = match RpcNodeCore::provider(&this) .transaction_by_hash_with_meta(hash) .map_err(Self::Error::from_eth_err)? 
{ @@ -257,7 +257,7 @@ pub trait EthTransactions: LoadTransaction { return Ok(None); } - let Ok(high) = LoadBlock::provider(self).best_block_number() else { + let Ok(high) = RpcNodeCore::provider(self).best_block_number() else { return Err(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()).into()); }; @@ -383,10 +383,15 @@ pub trait EthTransactions: LoadTransaction { let transaction = self.sign_request(&from, request).await?.with_signer(from); - let pool_transaction = <::Pool as TransactionPool>::Transaction::try_from_consensus(transaction.into()).map_err(|_| EthApiError::TransactionConversionError)?; + let pool_transaction = + <::Pool as TransactionPool>::Transaction::try_from_consensus( + transaction.into(), + ) + .map_err(|_| EthApiError::TransactionConversionError)?; // submit the transaction to the pool with a `Local` origin - let hash = LoadTransaction::pool(self) + let hash = self + .pool() .add_transaction(TransactionOrigin::Local, pool_transaction) .await .map_err(Self::Error::from_eth_err)?; @@ -460,26 +465,14 @@ pub trait EthTransactions: LoadTransaction { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` transactions RPC /// methods. -pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes { - /// Transaction pool with pending transactions. [`TransactionPool::Transaction`] is the - /// supported transaction type. - type Pool: TransactionPool; - - /// Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider(&self) -> impl TransactionsProvider; - +pub trait LoadTransaction: + SpawnBlocking + FullEthApiTypes + RpcNodeCore +{ /// Returns a handle for reading data from memory. /// /// Data access in default (L1) trait method implementations. fn cache(&self) -> &EthStateCache; - /// Returns a handle for reading data from pool. - /// - /// Data access in default (L1) trait method implementations. 
- fn pool(&self) -> &Self::Pool; - /// Returns the transaction by hash. /// /// Checks the pool and state. diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index b29a24c38c4..22853f26357 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -73,11 +73,6 @@ where Self: LoadPendingBlock + SpawnBlocking, Provider: BlockReaderIdExt, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - #[inline] fn cache(&self) -> &EthStateCache { self.inner.cache() diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 24a13cb8062..c4505bef09d 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -3,7 +3,7 @@ use reth_provider::{BlockReaderIdExt, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FullEthApiTypes, + FullEthApiTypes, RpcNodeCore, }; use reth_rpc_eth_types::EthStateCache; use reth_transaction_pool::TransactionPool; @@ -31,26 +31,14 @@ where impl LoadTransaction for EthApi where - Self: SpawnBlocking + FullEthApiTypes, - Provider: TransactionsProvider, - Pool: TransactionPool, + Self: SpawnBlocking + + FullEthApiTypes + + RpcNodeCore, { - type Pool = Pool; - - #[inline] - fn provider(&self) -> impl TransactionsProvider { - self.inner.provider() - } - #[inline] fn cache(&self) -> &EthStateCache { self.inner.cache() } - - #[inline] - fn pool(&self) -> &Self::Pool { - self.inner.pool() - } } #[cfg(test)] From 768404c59e7b673d055faf1791bb376b97466ef4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 27 Oct 2024 16:30:02 +0800 Subject: [PATCH 188/970] chore(rpc): remove redundant trait bounds in eth api (#12105) --- crates/e2e-test-utils/src/node.rs | 7 ++----- crates/rpc/rpc-eth-api/src/helpers/transaction.rs | 2 +- crates/rpc/rpc/src/eth/filter.rs | 3 +-- 3 
files changed, 4 insertions(+), 8 deletions(-) diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 07df36a33e1..8b385115b3e 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -10,10 +10,7 @@ use reth::{ network::PeersHandleProvider, providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, rpc::{ - api::eth::{ - helpers::{EthApiSpec, EthTransactions, TraceExt}, - FullEthApiTypes, - }, + api::eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, types::engine::PayloadStatusEnum, }, }; @@ -97,7 +94,7 @@ where where Engine::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, Engine::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, - AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt + FullEthApiTypes, + AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt, { let mut chain = Vec::with_capacity(length as usize); for i in 0..length { diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index c693945caca..791cb2ae1eb 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -231,7 +231,7 @@ pub trait EthTransactions: LoadTransaction { include_pending: bool, ) -> impl Future>, Self::Error>> + Send where - Self: LoadBlock + LoadState + FullEthApiTypes, + Self: LoadBlock + LoadState, { async move { // Check the pool first diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 5ef224609c5..3d05cdc727f 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -145,8 +145,7 @@ where impl EthFilter where Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, - Pool: TransactionPool + 'static, - ::Transaction: 'static, + Pool: TransactionPool + 'static, Eth: FullEthApiTypes, { /// Returns all the filter changes for the given id, if any From 
131cc5175ebf8c5714e090b230b4926678ef7d97 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 27 Oct 2024 17:21:34 +0800 Subject: [PATCH 189/970] chore(rpc): remove redundant `EthBlocks::provider` (#12109) --- crates/optimism/rpc/src/eth/block.rs | 11 ++---- crates/rpc/rpc-eth-api/src/helpers/block.rs | 39 +++++++++------------ crates/rpc/rpc/src/eth/helpers/block.rs | 7 +--- 3 files changed, 21 insertions(+), 36 deletions(-) diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 42c4789d44c..ed31a750949 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -4,13 +4,13 @@ use alloy_rpc_types::BlockId; use op_alloy_network::Network; use op_alloy_rpc_types::OpTransactionReceipt; use reth_chainspec::ChainSpecProvider; -use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_api::FullNodeComponents; use reth_optimism_chainspec::OpChainSpec; use reth_primitives::TransactionMeta; use reth_provider::HeaderProvider; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - RpcReceipt, + RpcNodeCore, RpcReceipt, }; use reth_rpc_eth_types::EthStateCache; @@ -22,13 +22,8 @@ where Error = OpEthApiError, NetworkTypes: Network, >, - N: FullNodeComponents>, + N: RpcNodeCore + HeaderProvider>, { - #[inline] - fn provider(&self) -> impl HeaderProvider { - self.inner.provider() - } - async fn block_receipts( &self, block_id: BlockId, diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 839b4891914..861afb3ad26 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -9,7 +9,7 @@ use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider use reth_rpc_eth_types::EthStateCache; use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; -use crate::{FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, 
RpcReceipt}; +use crate::{FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; @@ -20,12 +20,7 @@ pub type BlockAndReceiptsResult = Result impl HeaderProvider; - +pub trait EthBlocks: LoadBlock { /// Returns the block header for the given block id. fn rpc_block_header( &self, @@ -52,15 +47,15 @@ pub trait EthBlocks: LoadBlock { async move { let Some(block) = self.block_with_senders(block_id).await? else { return Ok(None) }; let block_hash = block.hash(); - let mut total_difficulty = EthBlocks::provider(self) + let mut total_difficulty = self + .provider() .header_td_by_number(block.number) .map_err(Self::Error::from_eth_err)?; if total_difficulty.is_none() { // if we failed to find td after we successfully loaded the block, try again using // the hash this only matters if the chain is currently transitioning the merge block and there's a reorg: - total_difficulty = EthBlocks::provider(self) - .header_td(&block.hash()) - .map_err(Self::Error::from_eth_err)?; + total_difficulty = + self.provider().header_td(&block.hash()).map_err(Self::Error::from_eth_err)?; } let block = from_block( @@ -85,13 +80,15 @@ pub trait EthBlocks: LoadBlock { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - return Ok(RpcNodeCore::provider(self) + return Ok(self + .provider() .pending_block() .map_err(Self::Error::from_eth_err)? .map(|block| block.body.transactions.len())) } - let block_hash = match RpcNodeCore::provider(self) + let block_hash = match self + .provider() .block_hash_for_id(block_id) .map_err(Self::Error::from_eth_err)? { @@ -132,7 +129,8 @@ pub trait EthBlocks: LoadBlock { if block_id.is_pending() { // First, try to get the pending block from the provider, in case we already // received the actual pending block from the CL. 
- if let Some((block, receipts)) = RpcNodeCore::provider(self) + if let Some((block, receipts)) = self + .provider() .pending_block_and_receipts() .map_err(Self::Error::from_eth_err)? { @@ -145,9 +143,8 @@ pub trait EthBlocks: LoadBlock { } } - if let Some(block_hash) = RpcNodeCore::provider(self) - .block_hash_for_id(block_id) - .map_err(Self::Error::from_eth_err)? + if let Some(block_hash) = + self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)? { return LoadReceipt::cache(self) .get_block_and_receipts(block_hash) @@ -167,7 +164,7 @@ pub trait EthBlocks: LoadBlock { &self, block_id: BlockId, ) -> Result>, Self::Error> { - RpcNodeCore::provider(self).ommers_by_id(block_id).map_err(Self::Error::from_eth_err) + self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err) } /// Returns uncle block at given index in given block. @@ -182,14 +179,12 @@ pub trait EthBlocks: LoadBlock { async move { let uncles = if block_id.is_pending() { // Pending block can be fetched directly without need for caching - RpcNodeCore::provider(self) + self.provider() .pending_block() .map_err(Self::Error::from_eth_err)? .map(|block| block.body.ommers) } else { - RpcNodeCore::provider(self) - .ommers_by_id(block_id) - .map_err(Self::Error::from_eth_err)? + self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err)? 
} .unwrap_or_default(); diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 22853f26357..a869cbd5403 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -17,14 +17,9 @@ where Self: LoadBlock< Error = EthApiError, NetworkTypes: alloy_network::Network, + Provider: HeaderProvider, >, - Provider: HeaderProvider, { - #[inline] - fn provider(&self) -> impl HeaderProvider { - self.inner.provider() - } - async fn block_receipts( &self, block_id: BlockId, From b7b3f8149c8f91c1b075f5d610cf3210bc23bd2e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 27 Oct 2024 22:24:21 +0800 Subject: [PATCH 190/970] chore(rpc): remove redundant `Trace::evm_config` (#12102) --- crates/optimism/rpc/src/eth/mod.rs | 8 ++------ crates/rpc/rpc-eth-api/src/helpers/trace.rs | 15 +++++---------- crates/rpc/rpc/src/eth/helpers/trace.rs | 10 ++-------- 3 files changed, 9 insertions(+), 24 deletions(-) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index bc1692dff4e..d12a2dd02cd 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -243,13 +243,9 @@ where impl Trace for OpEthApi where - Self: LoadState, - N: FullNodeComponents, + Self: LoadState>, + N: RpcNodeCore, { - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } } impl AddDevSigners for OpEthApi diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 6c7dd0f6f8d..da1d1cdb919 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -21,12 +21,7 @@ use revm_primitives::{EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndSta use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; /// Executes CPU heavy tasks. -pub trait Trace: LoadState { - /// Returns a handle for reading evm config. - /// - /// Data access in default (L1) trait method implementations. - fn evm_config(&self) -> &impl ConfigureEvm
; - +pub trait Trace: LoadState> { /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state /// changes. fn inspect( @@ -60,7 +55,7 @@ pub trait Trace: LoadState { I: GetInspector, { - let mut evm = Trace::evm_config(self).evm_with_env_and_inspector(db, env, inspector); + let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector); let res = evm.transact().map_err(Self::Error::from_evm_err)?; let (db, env) = evm.into_db_and_env_with_handler_cfg(); Ok((res, env, db)) @@ -201,7 +196,7 @@ pub trait Trace: LoadState { // apply relevant system calls let mut system_caller = SystemCaller::new( - Trace::evm_config(&this).clone(), + this.evm_config().clone(), RpcNodeCore::provider(&this).chain_spec(), ); system_caller @@ -344,7 +339,7 @@ pub trait Trace: LoadState { // apply relevant system calls let mut system_caller = SystemCaller::new( - Trace::evm_config(&this).clone(), + this.evm_config().clone(), RpcNodeCore::provider(&this).chain_spec(), ); system_caller @@ -379,7 +374,7 @@ pub trait Trace: LoadState { block_number: Some(block_number), base_fee: Some(base_fee), }; - let tx_env = Trace::evm_config(&this).tx_env(tx, *signer); + let tx_env = this.evm_config().tx_env(tx, *signer); (tx_info, tx_env) }) .peekable(); diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index c40b7acf50d..b270ed1b2ad 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -6,13 +6,7 @@ use reth_rpc_eth_api::helpers::{LoadState, Trace}; use crate::EthApi; -impl Trace for EthApi -where - Self: LoadState, - EvmConfig: ConfigureEvm
, +impl Trace for EthApi where + Self: LoadState> { - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } } From 0c516091b8f45f07cf4d76879153d3cd90015363 Mon Sep 17 00:00:00 2001 From: Parikalp Bhardwaj <53660958+Parikalp-Bhardwaj@users.noreply.github.com> Date: Sun, 27 Oct 2024 19:11:03 +0400 Subject: [PATCH 191/970] TransactionsHandle propagation commands should not adhere to caching (#12079) Co-authored-by: Matthias Seitz --- crates/net/network/src/transactions/mod.rs | 113 +++++++++++++++++---- 1 file changed, 93 insertions(+), 20 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 439f92bada9..b6e589c6ff7 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -416,6 +416,7 @@ where fn propagate_all(&mut self, hashes: Vec) { let propagated = self.propagate_transactions( self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), + PropagationMode::Basic, ); // notify pool so events get fired @@ -431,6 +432,7 @@ where fn propagate_transactions( &mut self, to_propagate: Vec, + propagation_mode: PropagationMode, ) -> PropagatedTransactions { let mut propagated = PropagatedTransactions::default(); if self.network.tx_gossip_disabled() { @@ -449,14 +451,18 @@ where PropagateTransactionsBuilder::full(peer.version) }; - // Iterate through the transactions to propagate and fill the hashes and full - // transaction lists, before deciding whether or not to send full transactions to the - // peer. - for tx in &to_propagate { - // Only proceed if the transaction is not in the peer's list of seen transactions - if !peer.seen_transactions.contains(&tx.hash()) { - // add transaction to the list of hashes to propagate - builder.push(tx); + if propagation_mode.is_forced() { + builder.extend(to_propagate.iter()); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction lists, before deciding whether or not to send full transactions to + // the peer. 
+ for tx in &to_propagate { + // Only proceed if the transaction is not in the peer's list of seen + // transactions + if !peer.seen_transactions.contains(&tx.hash()) { + builder.push(tx); + } } } @@ -514,6 +520,7 @@ where &mut self, txs: Vec, peer_id: PeerId, + propagation_mode: PropagationMode, ) -> Option { trace!(target: "net::tx", ?peer_id, "Propagating transactions to peer"); @@ -525,10 +532,17 @@ where let to_propagate = self.pool.get_all(txs).into_iter().map(PropagateTransaction::new); - // Iterate through the transactions to propagate and fill the hashes and full transaction - for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { - full_transactions.push(&tx); + if propagation_mode.is_forced() { + // skip cache check if forced + full_transactions.extend(to_propagate); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction + for tx in to_propagate { + if !peer.seen_transactions.contains(&tx.hash()) { + // Only include if the peer hasn't seen the transaction + full_transactions.push(&tx); + } } } @@ -546,6 +560,7 @@ where // mark transaction as seen by peer peer.seen_transactions.insert(hash); } + // send hashes of transactions self.network.send_transactions_hashes(peer_id, new_pooled_hashes); } @@ -557,6 +572,7 @@ where // mark transaction as seen by peer peer.seen_transactions.insert(tx.hash()); } + // send full transactions self.network.send_transactions(peer_id, new_full_transactions); } @@ -570,7 +586,12 @@ where /// Propagate the transaction hashes to the given peer /// /// Note: This will only send the hashes for transactions that exist in the pool. 
- fn propagate_hashes_to(&mut self, hashes: Vec, peer_id: PeerId) { + fn propagate_hashes_to( + &mut self, + hashes: Vec, + peer_id: PeerId, + propagation_mode: PropagationMode, + ) { trace!(target: "net::tx", "Start propagating transactions as hashes"); // This fetches a transactions from the pool, including the blob transactions, which are @@ -589,9 +610,14 @@ where // check if transaction is known to peer let mut hashes = PooledTransactionsHashesBuilder::new(peer.version); - for tx in to_propagate { - if !peer.seen_transactions.insert(tx.hash()) { - hashes.push(&tx); + if propagation_mode.is_forced() { + hashes.extend(to_propagate) + } else { + for tx in to_propagate { + if !peer.seen_transactions.contains(&tx.hash()) { + // Include if the peer hasn't seen it + hashes.push(&tx); + } } } @@ -880,14 +906,16 @@ where self.on_new_pending_transactions(vec![hash]) } TransactionsCommand::PropagateHashesTo(hashes, peer) => { - self.propagate_hashes_to(hashes, peer) + self.propagate_hashes_to(hashes, peer, PropagationMode::Forced) } TransactionsCommand::GetActivePeers(tx) => { let peers = self.peers.keys().copied().collect::>(); tx.send(peers).ok(); } TransactionsCommand::PropagateTransactionsTo(txs, peer) => { - if let Some(propagated) = self.propagate_full_transactions_to_peer(txs, peer) { + if let Some(propagated) = + self.propagate_full_transactions_to_peer(txs, peer, PropagationMode::Forced) + { self.pool.on_propagated(propagated); } } @@ -1395,6 +1423,29 @@ where } } +/// Represents the different modes of transaction propagation. +/// +/// This enum is used to determine how transactions are propagated to peers in the network. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum PropagationMode { + /// Default propagation mode. + /// + /// Transactions are only sent to peers that haven't seen them yet. + Basic, + /// Forced propagation mode. + /// + /// Transactions are sent to all peers regardless of whether they have been sent or received + /// before. 
+ Forced, +} + +impl PropagationMode { + /// Returns `true` if the propagation kind is `Forced`. + const fn is_forced(self) -> bool { + matches!(self, Self::Forced) + } +} + /// A transaction that's about to be propagated to multiple peers. #[derive(Debug, Clone)] struct PropagateTransaction { @@ -1441,6 +1492,13 @@ impl PropagateTransactionsBuilder { Self::Full(FullTransactionsBuilder::new(version)) } + /// Appends all transactions + fn extend<'a>(&mut self, txs: impl IntoIterator) { + for tx in txs { + self.push(tx); + } + } + /// Appends a transaction to the list. fn push(&mut self, transaction: &PropagateTransaction) { match self { @@ -1502,6 +1560,13 @@ impl FullTransactionsBuilder { } } + /// Appends all transactions. + fn extend(&mut self, txs: impl IntoIterator) { + for tx in txs { + self.push(&tx) + } + } + /// Append a transaction to the list of full transaction if the total message bytes size doesn't /// exceed the soft maximum target byte size. The limit is soft, meaning if one single /// transaction goes over the limit, it will be broadcasted in its own [`Transactions`] @@ -1581,6 +1646,13 @@ impl PooledTransactionsHashesBuilder { } } + /// Appends all hashes + fn extend(&mut self, txs: impl IntoIterator) { + for tx in txs { + self.push(&tx); + } + } + fn push(&mut self, tx: &PropagateTransaction) { match self { Self::Eth66(msg) => msg.0.push(tx.hash()), @@ -2388,7 +2460,8 @@ mod tests { let eip4844_tx = Arc::new(factory.create_eip4844()); propagate.push(PropagateTransaction::new(eip4844_tx.clone())); - let propagated = tx_manager.propagate_transactions(propagate.clone()); + let propagated = + tx_manager.propagate_transactions(propagate.clone(), PropagationMode::Basic); assert_eq!(propagated.0.len(), 2); let prop_txs = propagated.0.get(eip1559_tx.transaction.hash()).unwrap(); assert_eq!(prop_txs.len(), 1); @@ -2404,7 +2477,7 @@ mod tests { peer.seen_transactions.contains(eip4844_tx.transaction.hash()); // propagate again - let propagated = 
tx_manager.propagate_transactions(propagate); + let propagated = tx_manager.propagate_transactions(propagate, PropagationMode::Basic); assert!(propagated.0.is_empty()); } } From 1c36b7161216a76382a082cd91cdf8c006f88609 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 28 Oct 2024 08:13:47 +0100 Subject: [PATCH 192/970] docs: small fix in payload doc (#12116) --- crates/payload/basic/src/lib.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index b8eab3c0fea..d0bb29502ea 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -95,10 +95,11 @@ impl BasicPayloadJobGenerator Client software SHOULD stop the updating process when either a call to engine_getPayload - // > with the build process's payloadId is made or SECONDS_PER_SLOT (12s in the Mainnet - // > configuration) have passed since the point in time identified by the timestamp parameter. - // See also + /// > Client software SHOULD stop the updating process when either a call to engine_getPayload + /// > with the build process's payloadId is made or SECONDS_PER_SLOT (12s in the Mainnet + /// > configuration) have passed since the point in time identified by the timestamp parameter. 
+ /// + /// See also #[inline] fn max_job_duration(&self, unix_timestamp: u64) -> Duration { let duration_until_timestamp = duration_until(unix_timestamp); From 8605d04a09679904ef25594729ac6b83dfcacfcb Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 28 Oct 2024 09:30:06 +0100 Subject: [PATCH 193/970] refactor: rm re-exports of alloy eip 4844 constants (#12120) --- Cargo.lock | 2 +- crates/consensus/common/src/validation.rs | 6 ++---- crates/ethereum/payload/src/lib.rs | 3 +-- crates/node/events/Cargo.toml | 2 +- crates/node/events/src/node.rs | 5 ++--- crates/primitives/src/constants/eip4844.rs | 7 ------- crates/primitives/src/constants/mod.rs | 3 --- crates/primitives/src/transaction/mod.rs | 2 +- crates/primitives/src/transaction/pooled.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/fee.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/pending_block.rs | 5 +++-- crates/rpc/rpc-eth-types/src/fee_history.rs | 2 +- crates/transaction-pool/src/pool/txpool.rs | 6 ++++-- crates/transaction-pool/src/test_utils/mock.rs | 8 ++++---- crates/transaction-pool/src/traits.rs | 3 ++- crates/transaction-pool/src/validate/eth.rs | 5 ++--- 16 files changed, 26 insertions(+), 37 deletions(-) delete mode 100644 crates/primitives/src/constants/eip4844.rs diff --git a/Cargo.lock b/Cargo.lock index b483fd6641d..6762da237cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8033,6 +8033,7 @@ name = "reth-node-events" version = "1.1.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "futures", @@ -8041,7 +8042,6 @@ dependencies = [ "reth-beacon-consensus", "reth-network", "reth-network-api", - "reth-primitives", "reth-primitives-traits", "reth-provider", "reth-prune", diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 1070bbdbc0f..c6539cdcf71 100644 --- a/crates/consensus/common/src/validation.rs +++ 
b/crates/consensus/common/src/validation.rs @@ -1,12 +1,10 @@ //! Collection of methods for block validation. use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; +use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{ - constants::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, - EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader, -}; +use reth_primitives::{EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index f14c145889c..73b22efac40 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -10,7 +10,7 @@ #![allow(clippy::useless_let_if_seq)] use alloy_consensus::EMPTY_OMMER_ROOT_HASH; -use alloy_eips::{eip7685::Requests, merge::BEACON_NONCE}; +use alloy_eips::{eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::Requests, merge::BEACON_NONCE}; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, @@ -25,7 +25,6 @@ use reth_execution_types::ExecutionOutcome; use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ - constants::eip4844::MAX_DATA_GAS_PER_BLOCK, proofs::{self}, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, Block, BlockBody, EthereumHardforks, Header, Receipt, diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 3b515b8ab5e..6af3d8cbeb4 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -19,13 +19,13 @@ reth-network-api.workspace = true reth-stages.workspace = true 
reth-prune.workspace = true reth-static-file.workspace = true -reth-primitives.workspace = true reth-primitives-traits.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true # async tokio.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 92f8cb5e0fe..fb0f4d48d77 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -10,7 +10,6 @@ use reth_beacon_consensus::{ }; use reth_network::NetworkEvent; use reth_network_api::PeersInfo; -use reth_primitives::constants; use reth_primitives_traits::{format_gas, format_gas_throughput}; use reth_prune::PrunerEvent; use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId}; @@ -265,8 +264,8 @@ impl NodeState { gas_throughput=%format_gas_throughput(block.header.gas_used, elapsed), full=%format!("{:.1}%", block.header.gas_used as f64 * 100.0 / block.header.gas_limit as f64), base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas.unwrap_or(0) as f64 / GWEI_TO_WEI as f64), - blobs=block.header.blob_gas_used.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB, - excess_blobs=block.header.excess_blob_gas.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB, + blobs=block.header.blob_gas_used.unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, + excess_blobs=block.header.excess_blob_gas.unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, ?elapsed, "Block added to canonical chain" ); diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs deleted file mode 100644 index 14e892adfbe..00000000000 --- a/crates/primitives/src/constants/eip4844.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants and utils for shard Blob Transactions. 
- -pub use alloy_eips::eip4844::{ - BLOB_GASPRICE_UPDATE_FRACTION, BLOB_TX_MIN_BLOB_GASPRICE, DATA_GAS_PER_BLOB, - FIELD_ELEMENTS_PER_BLOB, FIELD_ELEMENT_BYTES, MAX_BLOBS_PER_BLOCK, MAX_DATA_GAS_PER_BLOCK, - TARGET_BLOBS_PER_BLOCK, TARGET_DATA_GAS_PER_BLOCK, VERSIONED_HASH_VERSION_KZG, -}; diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index fd1dc158624..09c488cc25a 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -1,6 +1,3 @@ //! Ethereum protocol-related constants pub use reth_primitives_traits::constants::*; - -/// [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) constants. -pub mod eip4844; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index b09fff9e2b6..3a5c3674166 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -297,7 +297,7 @@ impl Transaction { /// transaction. /// /// This is the number of blobs times the - /// [`DATA_GAS_PER_BLOB`](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. + /// [`DATA_GAS_PER_BLOB`](alloy_eips::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. pub fn blob_gas_used(&self) -> Option { self.as_eip4844().map(TxEip4844::blob_gas) } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 32d4da65980..000ff41fe52 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -264,7 +264,7 @@ impl PooledTransactionsElement { /// transaction. /// /// This is the number of blobs times the - /// [`DATA_GAS_PER_BLOB`](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. + /// [`DATA_GAS_PER_BLOB`](alloy_eips::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. 
pub fn blob_gas_used(&self) -> Option { self.as_eip4844().map(TxEip4844::blob_gas) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 34ba6dc7e4e..20e847a8cc9 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -167,7 +167,7 @@ pub trait EthFees: LoadFee { base_fee_per_blob_gas.push(header.blob_fee().unwrap_or_default()); blob_gas_used_ratio.push( header.blob_gas_used.unwrap_or_default() as f64 - / reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, + / alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, ); // Percentiles were specified, so we need to collect reward percentile ino diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 2c04f3beb3b..f2d1416139b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -6,7 +6,9 @@ use std::time::{Duration, Instant}; use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; use alloy_consensus::EMPTY_OMMER_ROOT_HASH; -use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE}; +use alloy_eips::{ + eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, +}; use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rpc_types::BlockNumberOrTag; use futures::Future; @@ -17,7 +19,6 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - constants::eip4844::MAX_DATA_GAS_PER_BLOCK, proofs::calculate_transaction_root, revm_primitives::{ BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index c845d968387..7692d47de99 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -366,7 +366,7 @@ 
impl FeeHistoryEntry { gas_used_ratio: block.gas_used as f64 / block.gas_limit as f64, base_fee_per_blob_gas: block.blob_fee(), blob_gas_used_ratio: block.blob_gas_used() as f64 / - reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, + alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, excess_blob_gas: block.excess_blob_gas, blob_gas_used: block.blob_gas_used, gas_used: block.gas_used, diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 42de860db79..c6369c98a7f 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -22,9 +22,11 @@ use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; -use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; +use alloy_eips::{ + eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, + eip4844::BLOB_TX_MIN_BLOB_GASPRICE, +}; use alloy_primitives::{Address, TxHash, B256}; -use reth_primitives::constants::eip4844::BLOB_TX_MIN_BLOB_GASPRICE; use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 99b0caaf48a..a3cddaf0a71 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -11,7 +11,7 @@ use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID}, TxEip1559, TxEip2930, TxEip4844, TxLegacy, }; -use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2930::AccessList}; +use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2930::AccessList, eip4844::DATA_GAS_PER_BLOB}; use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; use paste::paste; use rand::{ @@ -19,9 +19,9 @@ use rand::{ prelude::Distribution, }; use reth_primitives::{ - 
constants::eip4844::DATA_GAS_PER_BLOB, transaction::TryFromRecoveredTransactionError, - BlobTransactionSidecar, BlobTransactionValidationError, PooledTransactionsElementEcRecovered, - Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, + transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, + BlobTransactionValidationError, PooledTransactionsElementEcRecovered, Signature, Transaction, + TransactionSigned, TransactionSignedEcRecovered, TxType, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index c21a7a4ea75..9db9c53d387 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1484,7 +1484,8 @@ impl Stream for NewSubpoolTransactionStream { mod tests { use super::*; use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; - use reth_primitives::{constants::eip4844::DATA_GAS_PER_BLOB, Signature, TransactionSigned}; + use alloy_eips::eip4844::DATA_GAS_PER_BLOB; + use reth_primitives::{Signature, TransactionSigned}; #[test] fn test_pool_size_invariants() { diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index bf7749fb85c..62e9f3f2917 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -15,10 +15,9 @@ use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; +use alloy_eips::eip4844::MAX_BLOBS_PER_BLOCK; use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_primitives::{ - constants::eip4844::MAX_BLOBS_PER_BLOCK, GotExpected, InvalidTransactionError, SealedBlock, -}; +use reth_primitives::{GotExpected, InvalidTransactionError, SealedBlock}; use reth_storage_api::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; use revm::{ From 
fbdebe08e02214eb5df7a13d6277878a337c1b86 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 28 Oct 2024 11:16:33 +0100 Subject: [PATCH 194/970] chain-state: fix typo (#12112) --- crates/chain-state/src/in_memory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index a850e66521a..6bef197bea9 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -777,7 +777,7 @@ pub struct ExecutedBlock { pub senders: Arc>, /// Block's execution outcome. pub execution_output: Arc, - /// Block's hashedst state. + /// Block's hashed state. pub hashed_state: Arc, /// Trie updates that result of applying the block. pub trie: Arc, From 0f86287b65ac11113c47a6a0627a737ff9719106 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 28 Oct 2024 10:09:47 +0000 Subject: [PATCH 195/970] fix(trie): sparse trie walk should be done in a sorted manner (#12087) --- crates/trie/sparse/src/trie.rs | 49 ++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 8d65378f614..8b214d5f7ba 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -11,7 +11,7 @@ use reth_trie_common::{ EMPTY_ROOT_HASH, }; use smallvec::SmallVec; -use std::{collections::HashSet, fmt}; +use std::fmt; /// Inner representation of the sparse trie. /// Sparse trie is blind by default until nodes are revealed. @@ -579,19 +579,19 @@ impl RevealedSparseTrie { /// Returns a list of paths to the nodes that are located at the provided depth when counting /// from the root node. If there's a leaf at a depth less than the provided depth, it will be /// included in the result. 
- fn get_nodes_at_depth(&self, depth: usize) -> HashSet { + fn get_nodes_at_depth(&self, depth: usize) -> Vec { let mut paths = Vec::from([(Nibbles::default(), 0)]); - let mut targets = HashSet::::default(); + let mut targets = Vec::new(); while let Some((mut path, level)) = paths.pop() { match self.nodes.get(&path).unwrap() { SparseNode::Empty | SparseNode::Hash(_) => {} SparseNode::Leaf { .. } => { - targets.insert(path); + targets.push(path); } SparseNode::Extension { key, .. } => { if level >= depth { - targets.insert(path); + targets.push(path); } else { path.extend_from_slice_unchecked(key); paths.push((path, level + 1)); @@ -599,9 +599,9 @@ impl RevealedSparseTrie { } SparseNode::Branch { state_mask, .. } => { if level >= depth { - targets.insert(path); + targets.push(path); } else { - for bit in CHILD_INDEX_RANGE { + for bit in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(bit) { let mut child_path = path.clone(); child_path.push_unchecked(bit); @@ -666,7 +666,9 @@ impl RevealedSparseTrie { } branch_child_buf.clear(); - for bit in CHILD_INDEX_RANGE { + // Walk children in a reverse order from `f` to `0`, so we pop the `0` first + // from the stack. + for bit in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(bit) { let mut child = path.clone(); child.push_unchecked(bit); @@ -674,13 +676,17 @@ impl RevealedSparseTrie { } } - branch_value_stack_buf.clear(); - for child_path in &branch_child_buf { + branch_value_stack_buf.resize(branch_child_buf.len(), Default::default()); + let mut added_children = false; + for (i, child_path) in branch_child_buf.iter().enumerate() { if rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) { let (_, child) = rlp_node_stack.pop().unwrap(); - branch_value_stack_buf.push(child); + // Insert children in the resulting buffer in a normal order, because + // initially we iterated in reverse. 
+ branch_value_stack_buf[branch_child_buf.len() - i - 1] = child; + added_children = true; } else { - debug_assert!(branch_value_stack_buf.is_empty()); + debug_assert!(!added_children); path_stack.push(path); path_stack.extend(branch_child_buf.drain(..)); continue 'main @@ -1568,38 +1574,35 @@ mod tests { .unwrap(); sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); - assert_eq!(sparse.get_nodes_at_depth(0), HashSet::from([Nibbles::default()])); - assert_eq!( - sparse.get_nodes_at_depth(1), - HashSet::from([Nibbles::from_nibbles_unchecked([0x5])]) - ); + assert_eq!(sparse.get_nodes_at_depth(0), vec![Nibbles::default()]); + assert_eq!(sparse.get_nodes_at_depth(1), vec![Nibbles::from_nibbles_unchecked([0x5])]); assert_eq!( sparse.get_nodes_at_depth(2), - HashSet::from([ + vec![ Nibbles::from_nibbles_unchecked([0x5, 0x0]), Nibbles::from_nibbles_unchecked([0x5, 0x2]), Nibbles::from_nibbles_unchecked([0x5, 0x3]) - ]) + ] ); assert_eq!( sparse.get_nodes_at_depth(3), - HashSet::from([ + vec![ Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3]), Nibbles::from_nibbles_unchecked([0x5, 0x2]), Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]), Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3]) - ]) + ] ); assert_eq!( sparse.get_nodes_at_depth(4), - HashSet::from([ + vec![ Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x1]), Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x3]), Nibbles::from_nibbles_unchecked([0x5, 0x2]), Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]), Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x0]), Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x2]) - ]) + ] ); } } From e4bd13534df447e5da190c7216ffcd7232ff8e8f Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 28 Oct 2024 19:13:43 +0900 Subject: [PATCH 196/970] fix(ci): remove import path from type names on `compact-codec` (#12125) --- .github/workflows/compact.yml | 4 +++- 
crates/cli/commands/src/test_vectors/compact.rs | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/compact.yml b/.github/workflows/compact.yml index 63f6f282fa2..c5d39f72aec 100644 --- a/.github/workflows/compact.yml +++ b/.github/workflows/compact.yml @@ -36,7 +36,9 @@ jobs: ref: ${{ github.base_ref || 'main' }} # On `main` branch, generates test vectors and serializes them to disk using `Compact`. - name: Generate compact vectors - run: ${{ matrix.bin }} -- test-vectors compact --write + run: | + ${{ matrix.bin }} -- test-vectors compact --write && + for f in ./testdata/micro/compact/*; do mv "$f" "$(dirname "$f")/$(basename "$f" | awk -F '__' '{print $NF}')"; done - name: Checkout PR uses: actions/checkout@v4 with: diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index 162ee1ceaa4..94552d5c215 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -267,5 +267,5 @@ where } pub fn type_name() -> String { - std::any::type_name::().replace("::", "__") + std::any::type_name::().split("::").last().unwrap_or(std::any::type_name::()).to_string() } From 77e5748124ffd68c646bf775fe086227f7603129 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 28 Oct 2024 18:14:11 +0800 Subject: [PATCH 197/970] chore(rpc): remove redundant `LoadFee::provider` (#12122) --- crates/optimism/rpc/src/eth/mod.rs | 24 ++++++++++----------- crates/rpc/rpc-eth-api/src/helpers/block.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/fee.rs | 23 +++++++------------- crates/rpc/rpc/src/eth/helpers/fees.rs | 21 +++++++----------- 4 files changed, 28 insertions(+), 42 deletions(-) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index d12a2dd02cd..9b04e1c730a 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -14,15 +14,15 @@ use std::{fmt, sync::Arc}; use 
alloy_primitives::U256; use derive_more::Deref; use op_alloy_network::Optimism; -use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_node_builder::EthApiBuilderCtx; use reth_primitives::Header; use reth_provider::{ - BlockIdReader, BlockNumReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, - HeaderProvider, StageCheckpointReader, StateProviderFactory, + BlockNumReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, + StageCheckpointReader, StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -184,23 +184,21 @@ where impl LoadFee for OpEthApi where - Self: LoadBlock, - N: FullNodeComponents>, + Self: LoadBlock, + N: RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + >, { - #[inline] - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider { - self.inner.provider() - } - #[inline] fn cache(&self) -> &EthStateCache { self.inner.cache() } #[inline] - fn gas_oracle(&self) -> &GasPriceOracle { + fn gas_oracle(&self) -> &GasPriceOracle { self.inner.gas_oracle() } diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 861afb3ad26..217e84a4754 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -20,7 +20,7 @@ pub type BlockAndReceiptsResult = Result { +pub trait EthBlocks: LoadBlock { /// Returns the block header for the given block id. 
fn rpc_block_header( &self, diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 20e847a8cc9..dcde2214a5d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -3,8 +3,8 @@ use alloy_primitives::U256; use alloy_rpc_types::{BlockNumberOrTag, FeeHistory}; use futures::Future; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; +use reth_chainspec::EthChainSpec; +use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_rpc_eth_types::{ fee_history::calculate_reward_percentiles_for_block, EthApiError, EthStateCache, FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, @@ -82,7 +82,8 @@ pub trait EthFees: LoadFee { block_count = block_count.saturating_sub(1); } - let end_block = LoadFee::provider(self) + let end_block = self + .provider() .block_number_for_id(newest_block.into()) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(newest_block.into()))?; @@ -147,13 +148,12 @@ pub trait EthFees: LoadFee { // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the // next block base_fee_per_gas - .push(last_entry.next_block_base_fee(LoadFee::provider(self).chain_spec()) - as u128); + .push(last_entry.next_block_base_fee(self.provider().chain_spec()) as u128); base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default()); } else { // read the requested header range - let headers = LoadFee::provider(self) + let headers = self.provider() .sealed_headers_range(start_block..=end_block) .map_err(Self::Error::from_eth_err)?; if headers.len() != block_count as usize { @@ -197,7 +197,7 @@ pub trait EthFees: LoadFee { // The unwrap is safe since we checked earlier that we got at least 1 header. 
let last_header = headers.last().expect("is present"); base_fee_per_gas.push( - LoadFee::provider(self) + self.provider() .chain_spec() .base_fee_params_at_timestamp(last_header.timestamp) .next_block_base_fee( @@ -242,13 +242,6 @@ pub trait EthFees: LoadFee { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` fees RPC methods. pub trait LoadFee: LoadBlock { - // Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider; - /// Returns a handle for reading data from memory. /// /// Data access in default (L1) trait method implementations. @@ -257,7 +250,7 @@ pub trait LoadFee: LoadBlock { /// Returns a handle for reading gas price. /// /// Data access in default (L1) trait method implementations. - fn gas_oracle(&self) -> &GasPriceOracle; + fn gas_oracle(&self) -> &GasPriceOracle; /// Returns a handle for reading fee history data from memory. /// diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index a792f728951..2c5db5bacba 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -1,8 +1,7 @@ //! Contains RPC handler implementations for fee history. 
-use reth_chainspec::EthereumHardforks; -use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; - +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; @@ -15,23 +14,19 @@ impl EthFees for EthApi LoadFee for EthApi where - Self: LoadBlock, - Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider, + Self: LoadBlock, + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, { - #[inline] - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider { - self.inner.provider() - } - #[inline] fn cache(&self) -> &EthStateCache { self.inner.cache() } #[inline] - fn gas_oracle(&self) -> &GasPriceOracle { + fn gas_oracle(&self) -> &GasPriceOracle { self.inner.gas_oracle() } From 8f5fd1d70c1670e576b3103a12b9027fd91fac70 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 28 Oct 2024 18:14:40 +0800 Subject: [PATCH 198/970] chore(rpc): remove redundant `EthTransactions::provider` (#12121) --- crates/optimism/rpc/src/eth/transaction.rs | 10 +++---- .../rpc-eth-api/src/helpers/transaction.rs | 27 ++++++++----------- crates/rpc/rpc/src/eth/helpers/transaction.rs | 9 +------ 3 files changed, 15 insertions(+), 31 deletions(-) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 451c8a805fb..5135b13a2de 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -19,13 +19,9 @@ use crate::{OpEthApi, SequencerClient}; impl EthTransactions for OpEthApi where - Self: LoadTransaction, - N: FullNodeComponents, + Self: LoadTransaction, + N: RpcNodeCore, { - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - fn signers(&self) -> 
&parking_lot::RwLock>> { self.inner.signers() } @@ -71,7 +67,7 @@ where impl OpEthApi where - N: FullNodeComponents, + N: RpcNodeCore, { /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 791cb2ae1eb..af647fedf2c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -52,12 +52,7 @@ use super::{ /// See also /// /// This implementation follows the behaviour of Geth and disables the basefee check for tracing. -pub trait EthTransactions: LoadTransaction { - /// Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider(&self) -> impl BlockReaderIdExt; - +pub trait EthTransactions: LoadTransaction { /// Returns a handle for signing data. /// /// Singer access in default (L1) trait method implementations. @@ -111,7 +106,8 @@ pub trait EthTransactions: LoadTransaction { } self.spawn_blocking_io(move |ref this| { - Ok(RpcNodeCore::provider(this) + Ok(this + .provider() .transaction_by_hash(hash) .map_err(Self::Error::from_eth_err)? .map(|tx| tx.encoded_2718().into())) @@ -166,7 +162,8 @@ pub trait EthTransactions: LoadTransaction { { let this = self.clone(); self.spawn_blocking_io(move |_| { - let (tx, meta) = match RpcNodeCore::provider(&this) + let (tx, meta) = match this + .provider() .transaction_by_hash_with_meta(hash) .map_err(Self::Error::from_eth_err)? { @@ -174,13 +171,11 @@ pub trait EthTransactions: LoadTransaction { None => return Ok(None), }; - let receipt = match EthTransactions::provider(&this) - .receipt_by_hash(hash) - .map_err(Self::Error::from_eth_err)? - { - Some(recpt) => recpt, - None => return Ok(None), - }; + let receipt = + match this.provider().receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? 
{ + Some(recpt) => recpt, + None => return Ok(None), + }; Ok(Some((tx, meta, receipt))) }) @@ -257,7 +252,7 @@ pub trait EthTransactions: LoadTransaction { return Ok(None); } - let Ok(high) = RpcNodeCore::provider(self).best_block_number() else { + let Ok(high) = self.provider().best_block_number() else { return Err(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()).into()); }; diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index c4505bef09d..623db35e5ad 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -13,15 +13,8 @@ use crate::EthApi; impl EthTransactions for EthApi where - Self: LoadTransaction, - Pool: TransactionPool + 'static, - Provider: BlockReaderIdExt, + Self: LoadTransaction, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - #[inline] fn signers(&self) -> &parking_lot::RwLock>> { self.inner.signers() From 268090e879f40bf388e3b0f1dac9983b76c2255b Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 28 Oct 2024 10:44:36 +0000 Subject: [PATCH 199/970] bench(trie): `RevealedSparseTrie::update_rlp_node_level` (#12046) --- crates/trie/sparse/Cargo.toml | 4 ++ crates/trie/sparse/benches/rlp_node.rs | 78 ++++++++++++++++++++++++++ crates/trie/sparse/benches/root.rs | 1 + crates/trie/sparse/src/trie.rs | 2 +- 4 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 crates/trie/sparse/benches/rlp_node.rs diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 26d036f57ff..1c5bb7d8a33 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -41,3 +41,7 @@ rand.workspace = true [[bench]] name = "root" harness = false + +[[bench]] +name = "rlp_node" +harness = false diff --git a/crates/trie/sparse/benches/rlp_node.rs b/crates/trie/sparse/benches/rlp_node.rs new file mode 100644 index 00000000000..57ab52978b6 --- /dev/null +++ 
b/crates/trie/sparse/benches/rlp_node.rs @@ -0,0 +1,78 @@ +#![allow(missing_docs, unreachable_pub)] + +use std::time::{Duration, Instant}; + +use alloy_primitives::{B256, U256}; +use criterion::{criterion_group, criterion_main, Criterion}; +use prop::strategy::ValueTree; +use proptest::{prelude::*, test_runner::TestRunner}; +use rand::seq::IteratorRandom; +use reth_testing_utils::generators; +use reth_trie::Nibbles; +use reth_trie_sparse::RevealedSparseTrie; + +pub fn update_rlp_node_level(c: &mut Criterion) { + let mut rng = generators::rng(); + + let mut group = c.benchmark_group("update rlp node level"); + group.sample_size(20); + + for size in [100_000] { + let mut runner = TestRunner::new(ProptestConfig::default()); + let state = proptest::collection::hash_map(any::(), any::(), size) + .new_tree(&mut runner) + .unwrap() + .current(); + + // Create a sparse trie with `size` leaves + let mut sparse = RevealedSparseTrie::default(); + for (key, value) in &state { + sparse + .update_leaf(Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec()) + .unwrap(); + } + sparse.root(); + + for updated_leaves in [0.1, 1.0] { + for key in state + .keys() + .choose_multiple(&mut rng, (size as f64 * (updated_leaves / 100.0)) as usize) + { + sparse + .update_leaf( + Nibbles::unpack(key), + alloy_rlp::encode_fixed_size(&rng.gen::()).to_vec(), + ) + .unwrap(); + } + + // Calculate the maximum depth of the trie for the given number of leaves + let max_depth = (size as f64).log(16.0).ceil() as usize; + + for depth in 0..=max_depth { + group.bench_function( + format!("size {size} | updated {updated_leaves}% | depth {depth}"), + |b| { + // Use `iter_custom` to avoid measuring clones and drops + b.iter_custom(|iters| { + let mut elapsed = Duration::ZERO; + + let mut cloned = sparse.clone(); + for _ in 0..iters { + let start = Instant::now(); + cloned.update_rlp_node_level(depth); + elapsed += start.elapsed(); + cloned = sparse.clone(); + } + + elapsed + }) + }, + ); + } + } 
+ } +} + +criterion_group!(rlp_node, update_rlp_node_level); +criterion_main!(rlp_node); diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index bc221a8f831..30ce566fb5f 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -1,4 +1,5 @@ #![allow(missing_docs, unreachable_pub)] + use alloy_primitives::{map::HashMap, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use itertools::Itertools; diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 8b214d5f7ba..e7ee66c5400 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -77,7 +77,7 @@ impl SparseTrie { /// - Each leaf entry in `nodes` collection must have a corresponding entry in `values` collection. /// The opposite is also true. /// - All keys in `values` collection are full leaf paths. -#[derive(PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq)] pub struct RevealedSparseTrie { /// All trie nodes. nodes: HashMap, From 1b0f625f1d315473b86fc38153a7c5ed27d27ffa Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 28 Oct 2024 11:06:26 +0000 Subject: [PATCH 200/970] perf(trie): collect only changed sparse nodes at a depth (#12093) --- crates/trie/sparse/src/trie.rs | 47 ++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index e7ee66c5400..91362eed527 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -569,27 +569,36 @@ impl RevealedSparseTrie { /// Update hashes of the nodes that are located at a level deeper than or equal to the provided /// depth. Root node has a level of 0. 
pub fn update_rlp_node_level(&mut self, depth: usize) { - let targets = self.get_nodes_at_depth(depth); let mut prefix_set = self.prefix_set.clone().freeze(); + + let targets = self.get_changed_nodes_at_depth(&mut prefix_set, depth); for target in targets { self.rlp_node(target, &mut prefix_set); } } - /// Returns a list of paths to the nodes that are located at the provided depth when counting - /// from the root node. If there's a leaf at a depth less than the provided depth, it will be - /// included in the result. - fn get_nodes_at_depth(&self, depth: usize) -> Vec { + /// Returns a list of paths to the nodes that were changed according to the prefix set and are + /// located at the provided depth when counting from the root node. If there's a leaf at a + /// depth less than the provided depth, it will be included in the result. + fn get_changed_nodes_at_depth(&self, prefix_set: &mut PrefixSet, depth: usize) -> Vec { let mut paths = Vec::from([(Nibbles::default(), 0)]); let mut targets = Vec::new(); while let Some((mut path, level)) = paths.pop() { match self.nodes.get(&path).unwrap() { SparseNode::Empty | SparseNode::Hash(_) => {} - SparseNode::Leaf { .. } => { + SparseNode::Leaf { hash, .. } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + targets.push(path); } - SparseNode::Extension { key, .. } => { + SparseNode::Extension { key, hash } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + if level >= depth { targets.push(path); } else { @@ -597,7 +606,11 @@ impl RevealedSparseTrie { paths.push((path, level + 1)); } } - SparseNode::Branch { state_mask, .. 
} => { + SparseNode::Branch { state_mask, hash } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + if level >= depth { targets.push(path); } else { @@ -1540,7 +1553,7 @@ mod tests { } #[test] - fn sparse_trie_get_nodes_at_depth() { + fn sparse_trie_get_changed_nodes_at_depth() { let mut sparse = RevealedSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -1574,10 +1587,16 @@ mod tests { .unwrap(); sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); - assert_eq!(sparse.get_nodes_at_depth(0), vec![Nibbles::default()]); - assert_eq!(sparse.get_nodes_at_depth(1), vec![Nibbles::from_nibbles_unchecked([0x5])]); assert_eq!( - sparse.get_nodes_at_depth(2), + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 0), + vec![Nibbles::default()] + ); + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 1), + vec![Nibbles::from_nibbles_unchecked([0x5])] + ); + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 2), vec![ Nibbles::from_nibbles_unchecked([0x5, 0x0]), Nibbles::from_nibbles_unchecked([0x5, 0x2]), @@ -1585,7 +1604,7 @@ mod tests { ] ); assert_eq!( - sparse.get_nodes_at_depth(3), + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 3), vec![ Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3]), Nibbles::from_nibbles_unchecked([0x5, 0x2]), @@ -1594,7 +1613,7 @@ mod tests { ] ); assert_eq!( - sparse.get_nodes_at_depth(4), + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 4), vec![ Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x1]), Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x3]), From 0d07d27f3c5eea7b3ab9dedecf786fcc78954b1f Mon Sep 17 00:00:00 2001 From: 0xOsiris Date: Mon, 28 Oct 2024 05:00:36 -0700 Subject: [PATCH 201/970] feat: Add version to `BeaconEngineMessage` FCU (#12089) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- 
.../src/commands/debug_cmd/replay_engine.rs | 13 ++++++++--- crates/consensus/auto-seal/src/task.rs | 3 ++- crates/consensus/beacon/src/engine/handle.rs | 7 ++++-- crates/consensus/beacon/src/engine/message.rs | 4 +++- crates/consensus/beacon/src/engine/mod.rs | 17 +++++++++++--- .../consensus/beacon/src/engine/test_utils.rs | 10 ++++++-- crates/engine/local/src/miner.rs | 4 +++- crates/engine/tree/src/tree/mod.rs | 23 ++++++++++++++----- crates/engine/util/src/engine_store.rs | 7 +++++- crates/engine/util/src/reorg.rs | 20 +++++++++++++--- crates/engine/util/src/skip_fcu.rs | 14 +++++++++-- crates/payload/primitives/src/lib.rs | 11 +++++---- crates/payload/primitives/src/traits.rs | 2 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 5 ++-- 14 files changed, 107 insertions(+), 33 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index e7b3de6b6c1..9314a439265 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -18,7 +18,9 @@ use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage} use reth_fs_util as fs; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_node_api::{ + EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine, +}; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ @@ -166,8 +168,13 @@ impl> Command { debug!(target: "reth::cli", filepath = %filepath.display(), ?message, "Forwarding Engine API message"); match message { StoredEngineApiMessage::ForkchoiceUpdated { state, payload_attrs } => { - let response = - beacon_engine_handle.fork_choice_updated(state, payload_attrs).await?; + let response = 
beacon_engine_handle + .fork_choice_updated( + state, + payload_attrs, + EngineApiMessageVersion::default(), + ) + .await?; debug!(target: "reth::cli", ?response, "Received for forkchoice updated"); } StoredEngineApiMessage::NewPayload { payload, sidecar } => { diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index cb0586d4440..75ddda90861 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -3,7 +3,7 @@ use alloy_rpc_types_engine::ForkchoiceState; use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_evm::execute::BlockExecutorProvider; use reth_provider::{CanonChainTracker, StateProviderFactory}; use reth_stages_api::PipelineEvent; @@ -155,6 +155,7 @@ where state, payload_attrs: None, tx, + version: EngineApiMessageVersion::default(), }); debug!(target: "consensus::auto", ?state, "Sent fork choice update"); diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 4aafc6e07c1..f8840cf78ab 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use futures::TryFutureExt; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::RethResult; use reth_tokio_util::{EventSender, EventStream}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; @@ -60,9 +60,10 @@ where &self, state: ForkchoiceState, payload_attrs: Option, + version: EngineApiMessageVersion, ) -> Result { Ok(self - .send_fork_choice_updated(state, payload_attrs) + 
.send_fork_choice_updated(state, payload_attrs, version) .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable) .await?? .await?) @@ -74,12 +75,14 @@ where &self, state: ForkchoiceState, payload_attrs: Option, + version: EngineApiMessageVersion, ) -> oneshot::Receiver> { let (tx, rx) = oneshot::channel(); let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx, + version, }); rx } diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index e33decbd848..fa7457c1225 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -4,7 +4,7 @@ use alloy_rpc_types_engine::{ ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; use futures::{future::Either, FutureExt}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::RethResult; use reth_payload_primitives::PayloadBuilderError; use std::{ @@ -156,6 +156,8 @@ pub enum BeaconEngineMessage { state: ForkchoiceState, /// The payload attributes for block building. payload_attrs: Option, + /// The Engine API Version. + version: EngineApiMessageVersion, /// The sender for returning forkchoice updated result. 
tx: oneshot::Sender>, }, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 2363b907840..770821de749 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -10,7 +10,7 @@ use reth_blockchain_tree_api::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }; -use reth_engine_primitives::{EngineTypes, PayloadTypes}; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes, PayloadTypes}; use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, @@ -428,7 +428,12 @@ where } else if let Some(attrs) = attrs { // the CL requested to build a new payload on top of this new VALID head let head = outcome.into_header().unseal(); - self.process_payload_attributes(attrs, head, state) + self.process_payload_attributes( + attrs, + head, + state, + EngineApiMessageVersion::default(), + ) } else { OnForkChoiceUpdated::valid(PayloadStatus::new( PayloadStatusEnum::Valid, @@ -1160,6 +1165,7 @@ where attrs: ::PayloadAttributes, head: Header, state: ForkchoiceState, + _version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held @@ -1855,7 +1861,12 @@ where // sensitive, hence they are polled first. 
if let Poll::Ready(Some(msg)) = this.engine_message_stream.poll_next_unpin(cx) { match msg { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version: _version, + } => { this.on_forkchoice_updated(state, payload_attrs, tx); } BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 912f0a871bf..6e03aebfa8d 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -19,6 +19,7 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_engine_primitives::EngineApiMessageVersion; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::{either::Either, test_utils::MockExecutorProvider}; use reth_evm_ethereum::execute::EthExecutorProvider; @@ -93,7 +94,9 @@ impl TestEnv { &self, state: ForkchoiceState, ) -> Result { - self.engine_handle.fork_choice_updated(state, None).await + self.engine_handle + .fork_choice_updated(state, None, EngineApiMessageVersion::default()) + .await } /// Sends the `ForkchoiceUpdated` message to the consensus engine and retries if the engine @@ -103,7 +106,10 @@ impl TestEnv { state: ForkchoiceState, ) -> Result { loop { - let result = self.engine_handle.fork_choice_updated(state, None).await?; + let result = self + .engine_handle + .fork_choice_updated(state, None, EngineApiMessageVersion::default()) + .await?; if !result.is_syncing() { return Ok(result) } diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 706ddc43de3..7cebd306309 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -6,7 +6,7 @@ use eyre::OptionExt; use futures_util::{stream::Fuse, StreamExt}; use 
reth_beacon_consensus::BeaconEngineMessage; use reth_chainspec::EthereumHardforks; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{ BuiltPayload, PayloadAttributesBuilder, PayloadBuilder, PayloadKind, PayloadTypes, @@ -167,6 +167,7 @@ where state: self.forkchoice_state(), payload_attrs: None, tx, + version: EngineApiMessageVersion::default(), })?; let res = rx.await??; @@ -193,6 +194,7 @@ where state: self.forkchoice_state(), payload_attrs: Some(self.payload_attributes_builder.build(timestamp)), tx, + version: EngineApiMessageVersion::default(), })?; let res = rx.await??.await?; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 555cf89164f..dd2f67916af 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -26,7 +26,7 @@ use reth_chain_state::{ }; use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, PostExecutionInput}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; @@ -969,6 +969,7 @@ where &mut self, state: ForkchoiceState, attrs: Option, + version: EngineApiMessageVersion, ) -> ProviderResult> { trace!(target: "engine::tree", ?attrs, "invoked forkchoice update"); self.metrics.engine.forkchoice_updated_messages.increment(1); @@ -1018,7 +1019,7 @@ where // to return an error ProviderError::HeaderNotFound(state.head_block_hash.into()) })?; - let updated = self.process_payload_attributes(attr, &tip, state); + let updated = self.process_payload_attributes(attr, &tip, state, version); return Ok(TreeOutcome::new(updated)) } @@ -1038,7 +1039,7 @@ where } if let Some(attr) = attrs { - let updated = 
self.process_payload_attributes(attr, &tip, state); + let updated = self.process_payload_attributes(attr, &tip, state, version); return Ok(TreeOutcome::new(updated)) } @@ -1054,7 +1055,8 @@ where if self.engine_kind.is_opstack() { if let Some(attr) = attrs { debug!(target: "engine::tree", head = canonical_header.number, "handling payload attributes for canonical head"); - let updated = self.process_payload_attributes(attr, &canonical_header, state); + let updated = + self.process_payload_attributes(attr, &canonical_header, state, version); return Ok(TreeOutcome::new(updated)) } } @@ -1206,8 +1208,14 @@ where } EngineApiRequest::Beacon(request) => { match request { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - let mut output = self.on_forkchoice_updated(state, payload_attrs); + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + } => { + let mut output = + self.on_forkchoice_updated(state, payload_attrs, version); if let Ok(res) = &mut output { // track last received forkchoice state @@ -2484,6 +2492,7 @@ where attrs: T::PayloadAttributes, head: &Header, state: ForkchoiceState, + _version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp // of a block referenced by forkchoiceState.headBlockHash. 
If this condition isn't held @@ -2808,6 +2817,7 @@ mod tests { state: fcu_state, payload_attrs: None, tx, + version: EngineApiMessageVersion::default(), } .into(), )) @@ -3097,6 +3107,7 @@ mod tests { }, payload_attrs: None, tx, + version: EngineApiMessageVersion::default(), } .into(), )) diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs index 85c5e126fa4..6b584f0c1f5 100644 --- a/crates/engine/util/src/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -64,7 +64,12 @@ impl EngineMessageStore { fs::create_dir_all(&self.path)?; // ensure that store path had been created let timestamp = received_at.duration_since(SystemTime::UNIX_EPOCH).unwrap().as_millis(); match msg { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx: _tx } => { + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx: _tx, + version: _version, + } => { let filename = format!("{}-fcu-{}.json", timestamp, state.head_block_hash); fs::write( self.path.join(filename), diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index d109fb9e94a..0d51d2dfab6 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_engine::{ use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use itertools::Either; use reth_beacon_consensus::{BeaconEngineMessage, BeaconOnNewPayloadError, OnForkChoiceUpdated}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult}; use reth_ethereum_forks::EthereumHardforks; use reth_evm::{ @@ -211,18 +211,32 @@ where state: reorg_forkchoice_state, payload_attrs: None, tx: reorg_fcu_tx, + version: EngineApiMessageVersion::default(), }, ]); *this.state = EngineReorgState::Reorg { queue }; continue } - (Some(BeaconEngineMessage::ForkchoiceUpdated { state, 
payload_attrs, tx }), _) => { + ( + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }), + _, + ) => { // Record last forkchoice state forwarded to the engine. // We do not care if it's valid since engine should be able to handle // reorgs that rely on invalid forkchoice state. *this.last_forkchoice_state = Some(state); *this.forkchoice_states_forwarded += 1; - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) } (item, _) => item, }; diff --git a/crates/engine/util/src/skip_fcu.rs b/crates/engine/util/src/skip_fcu.rs index e110cecedc8..adadfb595f8 100644 --- a/crates/engine/util/src/skip_fcu.rs +++ b/crates/engine/util/src/skip_fcu.rs @@ -45,7 +45,12 @@ where loop { let next = ready!(this.stream.poll_next_unpin(cx)); let item = match next { - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) => { + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) => { if this.skipped < this.threshold { *this.skipped += 1; tracing::warn!(target: "engine::stream::skip_fcu", ?state, ?payload_attrs, threshold=this.threshold, skipped=this.skipped, "Skipping FCU"); @@ -53,7 +58,12 @@ where continue } *this.skipped = 0; - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) } next => next, }; diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 08aa428000e..7013d9fd913 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -324,22 +324,23 @@ where } /// The version of Engine API message. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)] pub enum EngineApiMessageVersion { /// Version 1 - V1, + V1 = 1, /// Version 2 /// /// Added in the Shanghai hardfork. - V2, + V2 = 2, /// Version 3 /// /// Added in the Cancun hardfork. - V3, + #[default] + V3 = 3, /// Version 4 /// /// Added in the Prague hardfork. - V4, + V4 = 4, } /// Determines how we should choose the payload to return. diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index df76149028a..f6a04375536 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -84,7 +84,7 @@ pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug { /// Creates a new payload builder for the given parent block and the attributes. /// - /// Derives the unique [`PayloadId`] for the given parent and attributes + /// Derives the unique [`PayloadId`] for the given parent, attributes and version. fn try_new( parent: B256, rpc_payload_attributes: Self::RpcPayloadAttributes, diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index eb280408ecd..cca9f5d6b64 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -616,7 +616,8 @@ where // To do this, we set the payload attrs to `None` if attribute validation failed, but // we still apply the forkchoice update. 
if let Err(err) = attr_validation_res { - let fcu_res = self.inner.beacon_consensus.fork_choice_updated(state, None).await?; + let fcu_res = + self.inner.beacon_consensus.fork_choice_updated(state, None, version).await?; // TODO: decide if we want this branch - the FCU INVALID response might be more // useful than the payload attributes INVALID response if fcu_res.is_invalid() { @@ -626,7 +627,7 @@ where } } - Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) + Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs, version).await?) } } From b5c0a46363ad4d32c5f6fbb704c4da3e98e0495b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Mon, 28 Oct 2024 19:03:20 +0700 Subject: [PATCH 202/970] feat: add `pending|queued` txs pool helpers (#12128) --- crates/transaction-pool/src/lib.rs | 14 +++++++++++++ crates/transaction-pool/src/noop.rs | 14 +++++++++++++ crates/transaction-pool/src/pool/mod.rs | 18 +++++++++++++++++ crates/transaction-pool/src/pool/txpool.rs | 23 ++++++++++++++++++++++ crates/transaction-pool/src/traits.rs | 14 ++++++++++++- 5 files changed, 82 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 2cffcd33fa8..609ab987f50 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -503,6 +503,20 @@ where self.pool.get_transactions_by_sender(sender) } + fn get_pending_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + self.pool.get_pending_transactions_by_sender(sender) + } + + fn get_queued_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + self.pool.get_queued_transactions_by_sender(sender) + } + fn get_highest_transaction_by_sender( &self, sender: Address, diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 11c5e7eea29..817ea7bad7a 100644 --- 
a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -220,6 +220,20 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn get_pending_transactions_by_sender( + &self, + _sender: Address, + ) -> Vec>> { + vec![] + } + + fn get_queued_transactions_by_sender( + &self, + _sender: Address, + ) -> Vec>> { + vec![] + } + fn get_highest_transaction_by_sender( &self, _sender: Address, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 600a8da934e..a408c768410 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -776,6 +776,24 @@ where self.get_pool_data().get_transactions_by_sender(sender_id) } + /// Returns all queued transactions of the address by sender + pub(crate) fn get_queued_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + let sender_id = self.get_sender_id(sender); + self.get_pool_data().pending_txs_by_sender(sender_id) + } + + /// Returns all pending transactions of the address by sender + pub(crate) fn get_pending_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + let sender_id = self.get_sender_id(sender); + self.get_pool_data().queued_txs_by_sender(sender_id) + } + /// Returns the highest transaction of the address pub(crate) fn get_highest_transaction_by_sender( &self, diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index c6369c98a7f..b11815fc4b5 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -364,11 +364,26 @@ impl TxPool { self.pending_pool.all() } + /// Returns all pending transactions for the specified sender + pub(crate) fn pending_txs_by_sender( + &self, + sender: SenderId, + ) -> Vec>> { + self.pending_transactions_iter().filter(|tx| tx.sender_id() == sender).collect() + } + /// Returns all transactions from parked pools pub(crate) fn queued_transactions(&self) -> 
Vec>> { self.basefee_pool.all().chain(self.queued_pool.all()).collect() } + /// Returns an iterator over all transactions from parked pools + pub(crate) fn queued_transactions_iter( + &self, + ) -> impl Iterator>> + '_ { + self.basefee_pool.all().chain(self.queued_pool.all()) + } + /// Returns queued and pending transactions for the specified sender pub fn queued_and_pending_txs_by_sender( &self, @@ -377,6 +392,14 @@ impl TxPool { (self.queued_pool.get_txs_by_sender(sender), self.pending_pool.get_txs_by_sender(sender)) } + /// Returns all queued transactions for the specified sender + pub(crate) fn queued_txs_by_sender( + &self, + sender: SenderId, + ) -> Vec>> { + self.queued_transactions_iter().filter(|tx| tx.sender_id() == sender).collect() + } + /// Returns `true` if the transaction with the given hash is already included in this pool. pub(crate) fn contains(&self, tx_hash: &TxHash) -> bool { self.all_transactions.contains(tx_hash) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 9db9c53d387..ff6e8855efa 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -352,6 +352,18 @@ pub trait TransactionPool: Send + Sync + Clone { sender: Address, ) -> Vec>>; + /// Returns all pending transactions sent by a given user + fn get_pending_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>>; + + /// Returns all queued transactions sent by a given user + fn get_queued_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>>; + /// Returns the highest transaction sent by a given user fn get_highest_transaction_by_sender( &self, @@ -1332,7 +1344,7 @@ impl TryFrom for EthPooledTransaction { } EIP4844_TX_TYPE_ID => { // doesn't have a blob sidecar - return Err(TryFromRecoveredTransactionError::BlobSidecarMissing) + return Err(TryFromRecoveredTransactionError::BlobSidecarMissing); } unsupported => { // unsupported transaction type From 
87a615fe265137fab0831fb9047268addbbe4d2c Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 28 Oct 2024 21:28:52 +0900 Subject: [PATCH 203/970] fix(ci): remove renaming from `compact-codec` (#12133) --- .github/workflows/compact.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/compact.yml b/.github/workflows/compact.yml index c5d39f72aec..484b27c820d 100644 --- a/.github/workflows/compact.yml +++ b/.github/workflows/compact.yml @@ -37,8 +37,7 @@ jobs: # On `main` branch, generates test vectors and serializes them to disk using `Compact`. - name: Generate compact vectors run: | - ${{ matrix.bin }} -- test-vectors compact --write && - for f in ./testdata/micro/compact/*; do mv "$f" "$(dirname "$f")/$(basename "$f" | awk -F '__' '{print $NF}')"; done + ${{ matrix.bin }} -- test-vectors compact --write - name: Checkout PR uses: actions/checkout@v4 with: From d74730af3b8fbbf67d8db25ae43916e67a4b89b6 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 28 Oct 2024 16:31:08 +0400 Subject: [PATCH 204/970] feat: add a wrapper for `BestTransactions` prioritizing given senders (#12123) --- crates/transaction-pool/src/pool/best.rs | 86 +++++++++++++++++++++++- crates/transaction-pool/src/pool/mod.rs | 2 +- crates/transaction-pool/src/traits.rs | 4 +- 3 files changed, 88 insertions(+), 4 deletions(-) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 268e3e262c6..763572e7e82 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -2,10 +2,10 @@ use crate::{ identifier::TransactionId, pool::pending::PendingTransaction, PoolTransaction, TransactionOrdering, ValidPoolTransaction, }; -use alloy_primitives::B256 as TxHash; +use alloy_primitives::{Address, B256 as TxHash}; use core::fmt; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet, HashSet, 
VecDeque}, sync::Arc, }; @@ -259,6 +259,88 @@ impl fmt::Debug for BestTransactionFilter { } } +/// Wrapper over [`crate::traits::BestTransactions`] that prioritizes transactions of certain +/// senders capping total gas used by such transactions. +#[derive(Debug)] +pub struct BestTransactionsWithPrioritizedSenders { + /// Inner iterator + inner: I, + /// A set of senders which transactions should be prioritized + prioritized_senders: HashSet
, + /// Maximum total gas limit of prioritized transactions + max_prioritized_gas: u64, + /// Buffer with transactions that are not being prioritized. Those will be the first to be + /// included after the prioritized transactions + buffer: VecDeque, + /// Tracker of total gas limit of prioritized transactions. Once it reaches + /// `max_prioritized_gas` no more transactions will be prioritized + prioritized_gas: u64, +} + +impl BestTransactionsWithPrioritizedSenders { + /// Constructs a new [`BestTransactionsWithPrioritizedSenders`]. + pub fn new(prioritized_senders: HashSet
, max_prioritized_gas: u64, inner: I) -> Self { + Self { + inner, + prioritized_senders, + max_prioritized_gas, + buffer: Default::default(), + prioritized_gas: Default::default(), + } + } +} + +impl Iterator for BestTransactionsWithPrioritizedSenders +where + I: crate::traits::BestTransactions>>, + T: PoolTransaction, +{ + type Item = ::Item; + + fn next(&mut self) -> Option { + // If we have space, try prioritizing transactions + if self.prioritized_gas < self.max_prioritized_gas { + for item in &mut self.inner { + if self.prioritized_senders.contains(&item.transaction.sender()) && + self.prioritized_gas + item.transaction.gas_limit() <= + self.max_prioritized_gas + { + self.prioritized_gas += item.transaction.gas_limit(); + return Some(item) + } + self.buffer.push_back(item); + } + } + + if let Some(item) = self.buffer.pop_front() { + Some(item) + } else { + self.inner.next() + } + } +} + +impl crate::traits::BestTransactions for BestTransactionsWithPrioritizedSenders +where + I: crate::traits::BestTransactions>>, + T: PoolTransaction, +{ + fn mark_invalid(&mut self, tx: &Self::Item) { + self.inner.mark_invalid(tx) + } + + fn no_updates(&mut self) { + self.inner.no_updates() + } + + fn set_skip_blobs(&mut self, skip_blobs: bool) { + if skip_blobs { + self.buffer.retain(|tx| !tx.transaction.is_eip4844()) + } + self.inner.set_skip_blobs(skip_blobs) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index a408c768410..69f17504f79 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -106,7 +106,7 @@ use crate::{ traits::{GetPooledTransactionLimit, NewBlobSidecar, TransactionListenerKind}, validate::ValidTransaction, }; -pub use best::BestTransactionFilter; +pub use best::{BestTransactionFilter, BestTransactionsWithPrioritizedSenders}; pub use blob::{blob_tx_priority, fee_delta}; pub use events::{FullTransactionEvent, 
TransactionEvent}; pub use listener::{AllTransactionsEvents, TransactionEvents}; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index ff6e8855efa..fbbddb98f43 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -776,7 +776,9 @@ pub trait BestTransactions: Iterator + Send { /// If called then the iterator will no longer yield blob transactions. /// /// Note: this will also exclude any transactions that depend on blob transactions. - fn skip_blobs(&mut self); + fn skip_blobs(&mut self) { + self.set_skip_blobs(true); + } /// Controls whether the iterator skips blob transactions or not. /// From 719ca3a68280947dce04de3fea6a58bf246e923a Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 28 Oct 2024 12:56:28 +0100 Subject: [PATCH 205/970] chain-spec: use alloy `MAINNET_DEPOSIT_CONTRACT_ADDRESS` constant (#12113) --- crates/chainspec/src/constants.rs | 5 +++-- crates/chainspec/src/spec.rs | 7 +++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/crates/chainspec/src/constants.rs b/crates/chainspec/src/constants.rs index 2e22b2299a4..3f46fb6b746 100644 --- a/crates/chainspec/src/constants.rs +++ b/crates/chainspec/src/constants.rs @@ -1,11 +1,12 @@ use crate::spec::DepositContract; -use alloy_primitives::{address, b256}; +use alloy_eips::eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS; +use alloy_primitives::b256; /// Gas per transaction not creating a contract. 
pub const MIN_TRANSACTION_GAS: u64 = 21_000u64; /// Deposit contract address: `0x00000000219ab540356cbb839cbe05303d7705fa` pub(crate) const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::new( - address!("00000000219ab540356cbb839cbe05303d7705fa"), + MAINNET_DEPOSIT_CONTRACT_ADDRESS, 11052984, b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), ); diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 02f4b5ca983..b0958d4fbb5 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -3,7 +3,10 @@ pub use alloy_eips::eip1559::BaseFeeParams; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; use alloy_consensus::constants::EMPTY_WITHDRAWALS; -use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7685::EMPTY_REQUESTS_HASH}; +use alloy_eips::{ + eip1559::INITIAL_BASE_FEE, eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS, + eip7685::EMPTY_REQUESTS_HASH, +}; use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use derive_more::From; @@ -39,7 +42,7 @@ pub static MAINNET: LazyLock> = LazyLock::new(|| { hardforks: EthereumHardfork::mainnet().into(), // https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 deposit_contract: Some(DepositContract::new( - address!("00000000219ab540356cbb839cbe05303d7705fa"), + MAINNET_DEPOSIT_CONTRACT_ADDRESS, 11052984, b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), )), From 72096221dfecd611128848f3d4dea0970542e056 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 28 Oct 2024 13:08:57 +0100 Subject: [PATCH 206/970] refactor(chainspec): refac and improved doc for `last_block_fork_before_merge_or_timestamp` (#12114) --- crates/chainspec/src/spec.rs | 44 +++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/crates/chainspec/src/spec.rs 
b/crates/chainspec/src/spec.rs index b0958d4fbb5..779eb8a3757 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -512,34 +512,36 @@ impl ChainSpec { } } - /// An internal helper function that returns the block number of the last block-based - /// fork that occurs before any existing TTD (merge)/timestamp based forks. + /// This internal helper function retrieves the block number of the last block-based fork + /// that occurs before: + /// - Any existing Total Terminal Difficulty (TTD) or + /// - Timestamp-based forks in the current [`ChainSpec`]. /// - /// Note: this returns None if the `ChainSpec` is not configured with a TTD/Timestamp fork. + /// The function operates by examining the configured hard forks in the chain. It iterates + /// through the fork conditions and identifies the most recent block-based fork that + /// precedes any TTD or timestamp-based conditions. + /// + /// If there are no block-based forks found before these conditions, or if the [`ChainSpec`] + /// is not configured with a TTD or timestamp fork, this function will return `None`. pub(crate) fn last_block_fork_before_merge_or_timestamp(&self) -> Option { let mut hardforks_iter = self.hardforks.forks_iter().peekable(); while let Some((_, curr_cond)) = hardforks_iter.next() { if let Some((_, next_cond)) = hardforks_iter.peek() { - // peek and find the first occurrence of ForkCondition::TTD (merge) , or in - // custom ChainSpecs, the first occurrence of - // ForkCondition::Timestamp. If curr_cond is ForkCondition::Block at - // this point, which it should be in most "normal" ChainSpecs, - // return its block_num + // Match against the `next_cond` to see if it represents: + // - A TTD (merge) + // - A timestamp-based fork match next_cond { - ForkCondition::TTD { fork_block, .. 
} => { - // handle Sepolia merge netsplit case - if fork_block.is_some() { - return *fork_block - } - // ensure curr_cond is indeed ForkCondition::Block and return block_num - if let ForkCondition::Block(block_num) = curr_cond { - return Some(block_num) - } - } - ForkCondition::Timestamp(_) => { - // ensure curr_cond is indeed ForkCondition::Block and return block_num + // If the next fork is TTD and specifies a specific block, return that block + // number + ForkCondition::TTD { fork_block: Some(block), .. } => return Some(*block), + + // If the next fork is TTD without a specific block or is timestamp-based, + // return the block number of the current condition if it is block-based. + ForkCondition::TTD { .. } | ForkCondition::Timestamp(_) => { + // Check if `curr_cond` is a block-based fork and return its block number if + // true. if let ForkCondition::Block(block_num) = curr_cond { - return Some(block_num) + return Some(block_num); } } ForkCondition::Block(_) | ForkCondition::Never => continue, From 1f1c68d65e5534e2b351a3af8fc56711d56ee129 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 28 Oct 2024 12:36:12 +0000 Subject: [PATCH 207/970] perf(trie): cache prefix set lookups in sparse trie (#12088) --- crates/trie/sparse/src/trie.rs | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 91362eed527..035eeaf73e8 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -631,7 +631,7 @@ impl RevealedSparseTrie { fn rlp_node(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { // stack of paths we need rlp nodes for - let mut path_stack = Vec::from([path]); + let mut path_stack = Vec::from([(path, None)]); // stack of rlp nodes let mut rlp_node_stack = Vec::<(Nibbles, RlpNode)>::new(); // reusable branch child path @@ -639,7 +639,13 @@ impl RevealedSparseTrie { // reusable branch value stack let mut 
branch_value_stack_buf = SmallVec::<[RlpNode; 16]>::new_const(); - 'main: while let Some(path) = path_stack.pop() { + 'main: while let Some((path, mut is_in_prefix_set)) = path_stack.pop() { + // Check if the path is in the prefix set. + // First, check the cached value. If it's `None`, then check the prefix set, and update + // the cached value. + let mut prefix_set_contains = + |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); + let rlp_node = match self.nodes.get_mut(&path).unwrap() { SparseNode::Empty => RlpNode::word_rlp(&EMPTY_ROOT_HASH), SparseNode::Hash(hash) => RlpNode::word_rlp(hash), @@ -647,7 +653,7 @@ impl RevealedSparseTrie { self.rlp_buf.clear(); let mut path = path.clone(); path.extend_from_slice_unchecked(key); - if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { RlpNode::word_rlp(&hash) } else { let value = self.values.get(&path).unwrap(); @@ -659,7 +665,7 @@ impl RevealedSparseTrie { SparseNode::Extension { key, hash } => { let mut child_path = path.clone(); child_path.extend_from_slice_unchecked(key); - if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { RlpNode::word_rlp(&hash) } else if rlp_node_stack.last().map_or(false, |e| e.0 == child_path) { let (_, child) = rlp_node_stack.pop().unwrap(); @@ -668,12 +674,13 @@ impl RevealedSparseTrie { *hash = rlp_node.as_hash(); rlp_node } else { - path_stack.extend([path, child_path]); // need to get rlp node for child first + // need to get rlp node for child first + path_stack.extend([(path, is_in_prefix_set), (child_path, None)]); continue } } SparseNode::Branch { state_mask, hash } => { - if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { rlp_node_stack.push((path, RlpNode::word_rlp(&hash))); continue } @@ -700,8 
+707,8 @@ impl RevealedSparseTrie { added_children = true; } else { debug_assert!(!added_children); - path_stack.push(path); - path_stack.extend(branch_child_buf.drain(..)); + path_stack.push((path, is_in_prefix_set)); + path_stack.extend(branch_child_buf.drain(..).map(|p| (p, None))); continue 'main } } From e446feb116049c74a9ff21701fe14141e4f8e675 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 12:44:11 +0000 Subject: [PATCH 208/970] chore(deps): weekly `cargo update` (#12108) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- Cargo.lock | 319 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 6 +- 2 files changed, 159 insertions(+), 166 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6762da237cd..c7a47e8f81f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,9 +97,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.40" +version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4932d790c723181807738cf1ac68198ab581cd699545b155601332541ee47bd" +checksum = "dca4a1469a3e572e9ba362920ff145f5d0a00a3e71a64ddcb4a3659cf64c76a7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -150,9 +150,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6228abfc751a29cde117b0879b805a3e0b3b641358f063272c83ca459a56886" +checksum = "5647fce5a168f9630f935bf7821c4207b1755184edaeba783cb4e11d35058484" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -229,9 +229,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d46eb5871592c216d39192499c95a99f7175cb94104f88c307e6dc960676d9f1" +checksum = "4b5671117c38b1c2306891f97ad3828d85487087f54ebe2c7591a055ea5bcea7" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -276,9 +276,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a968c063fcfcb937736665c865a71fc2242b68916156f5ffa41fee7b44bb695" +checksum = "514f70ee2a953db21631cd817b13a1571474ec77ddc03d47616d5e8203489fde" dependencies = [ "alloy-consensus", "alloy-eips", @@ -398,9 +398,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" +checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -409,13 +409,13 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" +checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -639,7 +639,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -655,7 +655,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", "syn-solidity", "tiny-keccak", ] @@ -671,15 +671,15 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f631f0bd9a9d79619b27c91b6b1ab2c4ef4e606a65192369a1ee05d40dcf81cc" +checksum = "45d1fbee9e698f3ba176b6e7a145f4aefe6d2b746b611e8bb246fe11a0e9f6c4" dependencies = [ "serde", "winnow", @@ -687,9 +687,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2841af22d99e2c0f82a78fe107b6481be3dd20b89bfb067290092794734343a" +checksum = "086f41bc6ebcd8cb15f38ba20e47be38dd03692149681ce8061c35d960dbf850" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -813,9 +813,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" dependencies = [ "anstyle", "anstyle-parse", @@ -828,43 +828,43 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 
0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.90" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95" +checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" [[package]] name = "aquamarine" @@ -877,7 +877,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -1053,9 +1053,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "103db485efc3e41214fe4fda9f3dbeae2eb9082f48fd236e6095627a9422066e" +checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857" dependencies = [ "brotli", "flate2", @@ -1100,7 +1100,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -1111,7 +1111,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -1149,7 +1149,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -1255,7 +1255,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -1437,7 +1437,7 @@ checksum = 
"240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", "synstructure", ] @@ -1559,7 +1559,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -1570,9 +1570,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" dependencies = [ "serde", ] @@ -1771,7 +1771,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -1833,9 +1833,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "combine" @@ -2228,7 +2228,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -2252,7 +2252,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -2263,7 +2263,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -2385,7 +2385,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -2396,7 +2396,7 @@ checksum = 
"5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -2417,7 +2417,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", "unicode-xid", ] @@ -2531,7 +2531,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -2679,7 +2679,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -2690,7 +2690,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -2747,7 +2747,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -3302,7 +3302,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -3828,7 +3828,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -3978,7 +3978,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -4146,7 +4146,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" dependencies = [ "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -4394,7 +4394,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -4570,9 +4570,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" 
+checksum = "a00419de735aac21d53b0de5ce2c03bd3627277cf471300f27ebc89f7d828047" [[package]] name = "libp2p-identity" @@ -4795,9 +4795,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884adb57038347dfbaf2d5065887b6cf4312330dc8e94bc30a1a839bd79d3261" +checksum = "8ae428771d17306715c5091d446327d1cfdedc82185c65ba8423ab404e45bf10" dependencies = [ "ahash", "portable-atomic", @@ -4812,14 +4812,14 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] name = "metrics-exporter-prometheus" -version = "0.15.3" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6" +checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", "indexmap 2.6.0", @@ -4831,9 +4831,9 @@ dependencies = [ [[package]] name = "metrics-process" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69e6ced169644e186e060ddc15f3923fdf06862c811a867bb1e5e7c7824f4d0" +checksum = "57ca8ecd85575fbb143b2678cb123bb818779391ec0f745b1c4a9dbabadde407" dependencies = [ "libc", "libproc", @@ -4847,15 +4847,14 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828" +checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "metrics", - "num_cpus", "quanta", "sketches-ddsketch", ] @@ -4959,7 +4958,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -5004,7 +5003,7 @@ dependencies = [ 
"percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.8.0", + "unsigned-varint", "url", ] @@ -5021,12 +5020,12 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" +checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" dependencies = [ "core2", - "unsigned-varint 0.7.2", + "unsigned-varint", ] [[package]] @@ -5207,7 +5206,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -5260,9 +5259,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d49163f952491820088dd0e66f3a35d63337c3066eceff0a931bf83a8e2101" +checksum = "ba7c98055fd048073738df0cc6d6537e992a0d8828f39d99a469e870db126dbd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5278,9 +5277,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e46c2ab105f679f0cbfbc3fb762f3456d4b8556c841e667fc8f3c2226eb6c1e" +checksum = "d631e8113cf88d30e621022677209caa148a9ca3ccb590fd34bbd1c731e3aff3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5292,9 +5291,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75ff1ea317441b9eb6317b24d13f9088e3b14ef48b15bfb6a125ca404df036d8" +checksum = "1eabe7683d7e19c7cc5171d664e49fc449176cf1334ffff82808e2a7eea5933a" dependencies = [ "alloy-consensus", "alloy-network", @@ -5306,9 +5305,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.5.0" +version = 
"0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c439457b2a1791325603fc18a94cc175e0b4b1127f11ff8a45071f05d044dcb" +checksum = "9b39574acb1873315e6bd89df174f6223e897188fb87eeea2ad1eda04f7d28eb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5323,9 +5322,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9556293835232b019ec9c6fd84e4265a3151111af60ea09b5b513e3dbed41c" +checksum = "919e9b69212d61f3c8932bfb717c7ad458ea3fc52072b3433d99994f8223d555" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5341,9 +5340,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a42a5ac4e07ed226b6a2aeefaad9b2cc7ec160e372ba626a4214d681a355fc2" +checksum = "0e3a47ea24cee189b4351be247fd138c68571704ee57060cf5a722502f44412c" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -5561,7 +5560,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -5575,29 +5574,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] name = "pin-project-lite" 
-version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -5757,12 +5756,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904afd36257cdb6ce0bee88b7981847bd7b955e5e216bb32f466b302923ad446" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -5813,14 +5812,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] name = "proc-macro2" -version = "1.0.88" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c3a7fc5db1e57d5a779a352c8cdb57b29aa4c40cc69c3a68a7fedc815fbf2f9" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] @@ -5911,7 +5910,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -6175,9 +6174,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", @@ -6731,7 +6730,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -9303,9 +9302,9 @@ dependencies = [ [[package]] name = "revm" -version = "17.0.0" +version = "17.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eab16cb0a8cd5ac88b11230b20df588b7e8aae7dfab4b3f830e98aebeb4b365" +checksum = "055bee6a81aaeee8c2389ae31f0d4de87f44df24f4444a1116f9755fd87a76ad" dependencies = [ "auto_impl", "cfg-if", @@ -9578,9 +9577,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" dependencies = [ "bitflags 2.6.0", "errno", @@ -9724,9 +9723,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.2.2" +version = "2.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c1f7fc6deb21665a9060dfc7d271be784669295a31babdcd4dd2c79ae8cbfb" +checksum = "d8d25269dd3a12467afe2e510f69fb0b46b698e5afb296b59f2145259deaf8e8" dependencies = [ "sdd", ] @@ -9868,22 +9867,22 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.213" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.213" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -9918,7 +9917,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -9969,7 
+9968,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -9992,7 +9991,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -10155,9 +10154,9 @@ checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "sketches-ddsketch" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" +checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" [[package]] name = "slab" @@ -10278,7 +10277,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -10336,9 +10335,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.80" +version = "2.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6e185e337f816bc8da115b8afcb3324006ccc82eeaddf35113888d3bd8e44ac" +checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" dependencies = [ "proc-macro2", "quote", @@ -10354,7 +10353,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -10380,7 +10379,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -10457,7 +10456,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -10481,22 +10480,22 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = 
"5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -10649,9 +10648,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" dependencies = [ "backtrace", "bytes", @@ -10673,7 +10672,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -10874,7 +10873,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -11174,12 +11173,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "unsigned-varint" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" - [[package]] name = "unsigned-varint" version = "0.8.0" @@ -11271,7 +11264,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -11342,7 +11335,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", "wasm-bindgen-shared", ] @@ -11376,7 +11369,7 @@ checksum = 
"26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11389,9 +11382,9 @@ checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "wasm-streams" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e072d4e72f700fb3443d8fe94a39315df013eef1104903cdb0a2abd322bbecd" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -11532,7 +11525,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -11543,7 +11536,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -11554,7 +11547,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -11565,7 +11558,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -11840,7 +11833,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", "synstructure", ] @@ -11862,7 +11855,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -11882,7 +11875,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", "synstructure", ] @@ -11903,7 +11896,7 @@ checksum = 
"ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] @@ -11925,7 +11918,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.85", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d01ee01ce5c..c83d76318e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -527,11 +527,11 @@ url = "2.3" zstd = "0.13" # metrics -metrics = "0.23.0" +metrics = "0.24.0" metrics-derive = "0.1" -metrics-exporter-prometheus = { version = "0.15.0", default-features = false } +metrics-exporter-prometheus = { version = "0.16.0", default-features = false } metrics-process = "2.1.0" -metrics-util = { default-features = false, version = "0.17.0" } +metrics-util = { default-features = false, version = "0.18.0" } # proc-macros proc-macro2 = "1.0" From 380e237257e4b6005f2d4146bc73d0f4907a5769 Mon Sep 17 00:00:00 2001 From: Hoa Nguyen Date: Mon, 28 Oct 2024 20:48:32 +0700 Subject: [PATCH 209/970] refactor: replace receipt envelope encoded with trait (#11742) Co-authored-by: Tuan Tran Co-authored-by: Matthias Seitz Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- crates/primitives/src/receipt.rs | 82 ++++++++++++++++++++++++++++---- crates/rpc/rpc/src/debug.rs | 2 +- 2 files changed, 73 insertions(+), 11 deletions(-) diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 940b491e335..21443f482c9 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -5,7 +5,8 @@ use alloc::{vec, vec::Vec}; use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, }; -use alloy_primitives::{Bloom, Bytes, Log, B256}; +use alloy_eips::eip2718::Encodable2718; +use alloy_primitives::{Bloom, Log, B256}; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, 
RlpEncodable}; use bytes::{Buf, BufMut}; use core::{cmp::Ordering, ops::Deref}; @@ -204,14 +205,20 @@ impl<'a> arbitrary::Arbitrary<'a> for Receipt { } } -impl ReceiptWithBloom { - /// Returns the enveloped encoded receipt. - /// - /// See also [`ReceiptWithBloom::encode_enveloped`] - pub fn envelope_encoded(&self) -> Bytes { - let mut buf = Vec::new(); - self.encode_enveloped(&mut buf); - buf.into() +impl Encodable2718 for ReceiptWithBloom { + fn type_flag(&self) -> Option { + match self.receipt.tx_type { + TxType::Legacy => None, + tx_type => Some(tx_type as u8), + } + } + + fn encode_2718_len(&self) -> usize { + let encoder = self.as_encoder(); + match self.receipt.tx_type { + TxType::Legacy => encoder.receipt_length(), + _ => 1 + encoder.receipt_length(), // 1 byte for the type prefix + } } /// Encodes the receipt into its "raw" format. @@ -223,10 +230,18 @@ impl ReceiptWithBloom { /// of the receipt: /// - EIP-1559, 2930 and 4844 transactions: `tx-type || rlp([status, cumulativeGasUsed, /// logsBloom, logs])` - pub fn encode_enveloped(&self, out: &mut dyn bytes::BufMut) { + fn encode_2718(&self, out: &mut dyn BufMut) { self.encode_inner(out, false) } + fn encoded_2718(&self) -> Vec { + let mut out = vec![]; + self.encode_2718(&mut out); + out + } +} + +impl ReceiptWithBloom { /// Encode receipt with or without the header data. 
pub fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { self.as_encoder().encode_inner(out, with_header) @@ -501,6 +516,7 @@ impl Encodable for ReceiptWithBloomEncoder<'_> { #[cfg(test)] mod tests { use super::*; + use crate::revm_primitives::Bytes; use alloy_primitives::{address, b256, bytes, hex_literal::hex}; #[test] @@ -661,4 +677,50 @@ mod tests { let (decoded, _) = Receipt::from_compact(&data[..], data.len()); assert_eq!(decoded, receipt); } + + #[test] + fn test_encode_2718_length() { + let receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 21000, + logs: vec![], + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + }, + bloom: Bloom::default(), + }; + + let encoded = receipt.encoded_2718(); + assert_eq!( + encoded.len(), + receipt.encode_2718_len(), + "Encoded length should match the actual encoded data length" + ); + + // Test for legacy receipt as well + let legacy_receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 21000, + logs: vec![], + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + }, + bloom: Bloom::default(), + }; + + let legacy_encoded = legacy_receipt.encoded_2718(); + assert_eq!( + legacy_encoded.len(), + legacy_receipt.encode_2718_len(), + "Encoded length for legacy receipt should match the actual encoded data length" + ); + } } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index dd1cd9739ed..6da03b04675 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -940,7 +940,7 @@ where .to_rpc_result()? 
.unwrap_or_default() .into_iter() - .map(|receipt| receipt.with_bloom().envelope_encoded()) + .map(|receipt| receipt.with_bloom().encoded_2718().into()) .collect()) } From af5ae5a792762d2b02abc31a99807ed185c3b617 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 28 Oct 2024 13:55:31 +0000 Subject: [PATCH 210/970] perf(trie): reduce allocations in sparse trie rlp node calculation (#12092) --- crates/trie/sparse/src/trie.rs | 86 ++++++++++++++++++++++------------ 1 file changed, 57 insertions(+), 29 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 035eeaf73e8..11ca100791e 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -558,7 +558,7 @@ impl RevealedSparseTrie { pub fn root(&mut self) -> B256 { // take the current prefix set. let mut prefix_set = std::mem::take(&mut self.prefix_set).freeze(); - let root_rlp = self.rlp_node(Nibbles::default(), &mut prefix_set); + let root_rlp = self.rlp_node_allocate(Nibbles::default(), &mut prefix_set); if let Some(root_hash) = root_rlp.as_hash() { root_hash } else { @@ -570,10 +570,12 @@ impl RevealedSparseTrie { /// depth. Root node has a level of 0. 
pub fn update_rlp_node_level(&mut self, depth: usize) { let mut prefix_set = self.prefix_set.clone().freeze(); + let mut buffers = RlpNodeBuffers::default(); let targets = self.get_changed_nodes_at_depth(&mut prefix_set, depth); for target in targets { - self.rlp_node(target, &mut prefix_set); + buffers.path_stack.push((target, Some(true))); + self.rlp_node(&mut prefix_set, &mut buffers); } } @@ -629,17 +631,13 @@ impl RevealedSparseTrie { targets } - fn rlp_node(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { - // stack of paths we need rlp nodes for - let mut path_stack = Vec::from([(path, None)]); - // stack of rlp nodes - let mut rlp_node_stack = Vec::<(Nibbles, RlpNode)>::new(); - // reusable branch child path - let mut branch_child_buf = SmallVec::<[Nibbles; 16]>::new_const(); - // reusable branch value stack - let mut branch_value_stack_buf = SmallVec::<[RlpNode; 16]>::new_const(); - - 'main: while let Some((path, mut is_in_prefix_set)) = path_stack.pop() { + fn rlp_node_allocate(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { + let mut buffers = RlpNodeBuffers::new_with_path(path); + self.rlp_node(prefix_set, &mut buffers) + } + + fn rlp_node(&mut self, prefix_set: &mut PrefixSet, buffers: &mut RlpNodeBuffers) -> RlpNode { + 'main: while let Some((path, mut is_in_prefix_set)) = buffers.path_stack.pop() { // Check if the path is in the prefix set. // First, check the cached value. If it's `None`, then check the prefix set, and update // the cached value. 
@@ -667,63 +665,68 @@ impl RevealedSparseTrie { child_path.extend_from_slice_unchecked(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { RlpNode::word_rlp(&hash) - } else if rlp_node_stack.last().map_or(false, |e| e.0 == child_path) { - let (_, child) = rlp_node_stack.pop().unwrap(); + } else if buffers.rlp_node_stack.last().map_or(false, |e| e.0 == child_path) { + let (_, child) = buffers.rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); rlp_node } else { // need to get rlp node for child first - path_stack.extend([(path, is_in_prefix_set), (child_path, None)]); + buffers.path_stack.extend([(path, is_in_prefix_set), (child_path, None)]); continue } } SparseNode::Branch { state_mask, hash } => { if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - rlp_node_stack.push((path, RlpNode::word_rlp(&hash))); + buffers.rlp_node_stack.push((path, RlpNode::word_rlp(&hash))); continue } - branch_child_buf.clear(); + buffers.branch_child_buf.clear(); // Walk children in a reverse order from `f` to `0`, so we pop the `0` first // from the stack. 
for bit in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(bit) { let mut child = path.clone(); child.push_unchecked(bit); - branch_child_buf.push(child); + buffers.branch_child_buf.push(child); } } - branch_value_stack_buf.resize(branch_child_buf.len(), Default::default()); + buffers + .branch_value_stack_buf + .resize(buffers.branch_child_buf.len(), Default::default()); let mut added_children = false; - for (i, child_path) in branch_child_buf.iter().enumerate() { - if rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) { - let (_, child) = rlp_node_stack.pop().unwrap(); + for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { + if buffers.rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) { + let (_, child) = buffers.rlp_node_stack.pop().unwrap(); // Insert children in the resulting buffer in a normal order, because // initially we iterated in reverse. - branch_value_stack_buf[branch_child_buf.len() - i - 1] = child; + buffers.branch_value_stack_buf + [buffers.branch_child_buf.len() - i - 1] = child; added_children = true; } else { debug_assert!(!added_children); - path_stack.push((path, is_in_prefix_set)); - path_stack.extend(branch_child_buf.drain(..).map(|p| (p, None))); + buffers.path_stack.push((path, is_in_prefix_set)); + buffers + .path_stack + .extend(buffers.branch_child_buf.drain(..).map(|p| (p, None))); continue 'main } } self.rlp_buf.clear(); - let rlp_node = BranchNodeRef::new(&branch_value_stack_buf, *state_mask) + let rlp_node = BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask) .rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); rlp_node } }; - rlp_node_stack.push((path, rlp_node)); + buffers.rlp_node_stack.push((path, rlp_node)); } - rlp_node_stack.pop().unwrap().1 + buffers.rlp_node_stack.pop().unwrap().1 } } @@ -803,6 +806,31 @@ struct RemovedSparseNode { unset_branch_nibble: Option, } +/// Collection of reusable buffers for [`RevealedSparseTrie::rlp_node`]. 
+#[derive(Debug, Default)] +struct RlpNodeBuffers { + /// Stack of paths we need rlp nodes for and whether the path is in the prefix set. + path_stack: Vec<(Nibbles, Option)>, + /// Stack of rlp nodes + rlp_node_stack: Vec<(Nibbles, RlpNode)>, + /// Reusable branch child path + branch_child_buf: SmallVec<[Nibbles; 16]>, + /// Reusable branch value stack + branch_value_stack_buf: SmallVec<[RlpNode; 16]>, +} + +impl RlpNodeBuffers { + /// Creates a new instance of buffers with the given path on the stack. + fn new_with_path(path: Nibbles) -> Self { + Self { + path_stack: vec![(path, None)], + rlp_node_stack: Vec::new(), + branch_child_buf: SmallVec::<[Nibbles; 16]>::new_const(), + branch_value_stack_buf: SmallVec::<[RlpNode; 16]>::new_const(), + } + } +} + #[cfg(test)] mod tests { use std::collections::BTreeMap; From 3f4634ccbc4cbf64720627872ead3155259ce9cc Mon Sep 17 00:00:00 2001 From: 0xOsiris Date: Mon, 28 Oct 2024 07:37:36 -0700 Subject: [PATCH 211/970] chore: add version to PayloadBuilderAttributes::try_new (#12137) --- bin/reth/src/commands/debug_cmd/build_block.rs | 5 ++++- crates/consensus/beacon/src/engine/mod.rs | 3 ++- crates/engine/tree/src/tree/mod.rs | 3 ++- crates/ethereum/engine-primitives/src/payload.rs | 6 +++++- crates/optimism/payload/src/payload.rs | 6 +++++- crates/payload/primitives/src/traits.rs | 1 + examples/custom-engine-types/src/main.rs | 6 +++++- 7 files changed, 24 insertions(+), 6 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 455d8356aff..272f107d3c4 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -22,7 +22,9 @@ use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes}; +use reth_node_api::{ + 
EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes, +}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::database::CachedReads; use reth_primitives::{ @@ -227,6 +229,7 @@ impl> Command { reth_payload_builder::EthPayloadBuilderAttributes::try_new( best_block.hash(), payload_attrs, + EngineApiMessageVersion::default() as u8, )?, ); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 770821de749..a00f507dbd9 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1165,7 +1165,7 @@ where attrs: ::PayloadAttributes, head: Header, state: ForkchoiceState, - _version: EngineApiMessageVersion, + version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held @@ -1183,6 +1183,7 @@ where match <::PayloadBuilderAttributes as PayloadBuilderAttributes>::try_new( state.head_block_hash, attrs, + version as u8 ) { Ok(attributes) => { // send the payload to the builder and return the receiver for the pending payload diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index dd2f67916af..bc070d87345 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2492,7 +2492,7 @@ where attrs: T::PayloadAttributes, head: &Header, state: ForkchoiceState, - _version: EngineApiMessageVersion, + version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp // of a block referenced by forkchoiceState.headBlockHash. 
If this condition isn't held @@ -2510,6 +2510,7 @@ where match ::try_new( state.head_block_hash, attrs, + version as u8, ) { Ok(attributes) => { // send the payload to the builder and return the receiver for the pending payload diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 420352cf2b9..2d162ef1505 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -237,7 +237,11 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { /// Creates a new payload builder for the given parent block and the attributes. /// /// Derives the unique [`PayloadId`] for the given parent and attributes - fn try_new(parent: B256, attributes: PayloadAttributes) -> Result { + fn try_new( + parent: B256, + attributes: PayloadAttributes, + _version: u8, + ) -> Result { Ok(Self::new(parent, attributes)) } diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 98b0e41b0f5..f46891fdc44 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -43,7 +43,11 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { /// Creates a new payload builder for the given parent block and the attributes. 
/// /// Derives the unique [`PayloadId`] for the given parent and attributes - fn try_new(parent: B256, attributes: OpPayloadAttributes) -> Result { + fn try_new( + parent: B256, + attributes: OpPayloadAttributes, + _version: u8, + ) -> Result { let id = payload_id_optimism(&parent, &attributes); let transactions = attributes diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index f6a04375536..a78dc8c1322 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -88,6 +88,7 @@ pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug { fn try_new( parent: B256, rpc_payload_attributes: Self::RpcPayloadAttributes, + version: u8, ) -> Result where Self: Sized; diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 46a7d7d9af9..0fa2b1658f8 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -110,7 +110,11 @@ impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes { type RpcPayloadAttributes = CustomPayloadAttributes; type Error = Infallible; - fn try_new(parent: B256, attributes: CustomPayloadAttributes) -> Result { + fn try_new( + parent: B256, + attributes: CustomPayloadAttributes, + _version: u8, + ) -> Result { Ok(Self(EthPayloadBuilderAttributes::new(parent, attributes.inner))) } From 12762775686c6429d13107b69a2708acdcece21f Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 28 Oct 2024 15:57:58 +0000 Subject: [PATCH 212/970] test(trie): use proptest to generate random values (#12140) --- crates/trie/sparse/src/trie.rs | 126 ++++++++++++++++++--------------- 1 file changed, 69 insertions(+), 57 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 11ca100791e..9db1dff5313 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -836,13 +836,12 @@ mod tests { use 
std::collections::BTreeMap; use super::*; - use alloy_primitives::U256; + use alloy_primitives::{map::HashSet, U256}; use assert_matches::assert_matches; use itertools::Itertools; use prop::sample::SizeRange; use proptest::prelude::*; use rand::seq::IteratorRandom; - use reth_testing_utils::generators; use reth_trie::{BranchNode, ExtensionNode, LeafNode}; use reth_trie_common::{ proof::{ProofNodes, ProofRetainer}, @@ -1304,6 +1303,7 @@ mod tests { ); } + #[allow(clippy::type_complexity)] #[test] fn sparse_trie_fuzz() { // Having only the first 3 nibbles set, we narrow down the range of keys @@ -1311,63 +1311,51 @@ mod tests { // to test the sparse trie updates. const KEY_NIBBLES_LEN: usize = 3; - fn test(updates: I) - where - I: IntoIterator, - T: IntoIterator)> + Clone, - { - let mut rng = generators::rng(); + fn test(updates: Vec<(HashMap>, HashSet)>) { + { + let mut state = BTreeMap::default(); + let mut sparse = RevealedSparseTrie::default(); - let mut state = BTreeMap::default(); - let mut sparse = RevealedSparseTrie::default(); + for (update, keys_to_delete) in updates { + // Insert state updates into the sparse trie and calculate the root + for (key, value) in update.clone() { + sparse.update_leaf(key, value).unwrap(); + } + let sparse_root = sparse.root(); + + // Insert state updates into the hash builder and calculate the root + state.extend(update); + let (hash_builder_root, hash_builder_proof_nodes) = + hash_builder_root_with_proofs( + state.clone(), + state.keys().cloned().collect::>(), + ); - for update in updates { - let mut count = 0; - // Insert state updates into the sparse trie and calculate the root - for (key, value) in update.clone() { - sparse.update_leaf(key, value).unwrap(); - count += 1; - } - let keys_to_delete_len = count / 2; - let sparse_root = sparse.root(); - - // Insert state updates into the hash builder and calculate the root - state.extend(update); - let (hash_builder_root, hash_builder_proof_nodes) = 
hash_builder_root_with_proofs( - state.clone(), - state.keys().cloned().collect::>(), - ); - - // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder_root); - // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); - - // Delete some keys from both the hash builder and the sparse trie and check - // that the sparse trie root still matches the hash builder root - - let keys_to_delete = state - .keys() - .choose_multiple(&mut rng, keys_to_delete_len) - .into_iter() - .cloned() - .collect::>(); - for key in keys_to_delete { - state.remove(&key).unwrap(); - sparse.remove_leaf(&key).unwrap(); - } + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + + // Delete some keys from both the hash builder and the sparse trie and check + // that the sparse trie root still matches the hash builder root + for key in keys_to_delete { + state.remove(&key).unwrap(); + sparse.remove_leaf(&key).unwrap(); + } - let sparse_root = sparse.root(); + let sparse_root = sparse.root(); - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - state.clone(), - state.keys().cloned().collect::>(), - ); + let (hash_builder_root, hash_builder_proof_nodes) = + hash_builder_root_with_proofs( + state.clone(), + state.keys().cloned().collect::>(), + ); - // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder_root); - // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that 
the sparse trie nodes match the hash builder proof nodes + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + } } } @@ -1379,17 +1367,41 @@ mod tests { base } + fn transform_updates( + updates: Vec>>, + mut rng: impl Rng, + ) -> Vec<(HashMap>, HashSet)> { + let mut keys = HashSet::new(); + updates + .into_iter() + .map(|update| { + keys.extend(update.keys().cloned()); + + let keys_to_delete_len = update.len() / 2; + let keys_to_delete = (0..keys_to_delete_len) + .map(|_| { + let key = keys.iter().choose(&mut rng).unwrap().clone(); + keys.take(&key).unwrap() + }) + .collect(); + + (update, keys_to_delete) + }) + .collect::>() + } + proptest!(ProptestConfig::with_cases(10), |( updates in proptest::collection::vec( proptest::collection::hash_map( any_with::(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles), any::>(), 1..100, - ), + ).prop_map(HashMap::from_iter), 1..100, - ) + ).prop_perturb(transform_updates) )| { - test(updates) }); + test(updates) + }); } /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. 
Hash builder trie has From b36b021aa27ee8fa931e1833d39806c6773ac63d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 28 Oct 2024 23:59:26 +0800 Subject: [PATCH 213/970] chore(rpc): define trait `RpcNodeCoreExt` and replace `LoadBlock::cache` (#12141) --- crates/optimism/rpc/src/eth/block.rs | 8 +------- crates/optimism/rpc/src/eth/mod.rs | 13 +++++++++++-- crates/rpc/rpc-eth-api/src/helpers/block.rs | 10 ++-------- crates/rpc/rpc-eth-api/src/lib.rs | 2 +- crates/rpc/rpc-eth-api/src/node.rs | 8 ++++++++ crates/rpc/rpc/src/eth/core.rs | 12 ++++++++++++ crates/rpc/rpc/src/eth/helpers/block.rs | 6 +----- 7 files changed, 36 insertions(+), 23 deletions(-) diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index ed31a750949..85f36570f2e 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -4,7 +4,6 @@ use alloy_rpc_types::BlockId; use op_alloy_network::Network; use op_alloy_rpc_types::OpTransactionReceipt; use reth_chainspec::ChainSpecProvider; -use reth_node_api::FullNodeComponents; use reth_optimism_chainspec::OpChainSpec; use reth_primitives::TransactionMeta; use reth_provider::HeaderProvider; @@ -12,7 +11,6 @@ use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, RpcNodeCore, RpcReceipt, }; -use reth_rpc_eth_types::EthStateCache; use crate::{OpEthApi, OpEthApiError, OpReceiptBuilder}; @@ -80,10 +78,6 @@ where impl LoadBlock for OpEthApi where Self: LoadPendingBlock + SpawnBlocking, - N: FullNodeComponents, + N: RpcNodeCore, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 9b04e1c730a..b69b5ef2821 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -30,7 +30,7 @@ use reth_rpc_eth_api::{ AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState, 
SpawnBlocking, Trace, }, - EthApiTypes, RpcNodeCore, + EthApiTypes, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_tasks::{ @@ -116,7 +116,6 @@ where impl RpcNodeCore for OpEthApi where - Self: Clone, N: RpcNodeCore, { type Provider = N::Provider; @@ -141,6 +140,16 @@ where } } +impl RpcNodeCoreExt for OpEthApi +where + N: RpcNodeCore, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} + impl EthApiSpec for OpEthApi where N: RpcNodeCore< diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 217e84a4754..c777c64d420 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -6,10 +6,9 @@ use alloy_rpc_types::{Header, Index}; use futures::Future; use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; -use reth_rpc_eth_types::EthStateCache; use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; -use crate::{FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; +use crate::{node::RpcNodeCoreExt, FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; @@ -196,12 +195,7 @@ pub trait EthBlocks: LoadBlock { /// Loads a block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. -pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - +pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt { /// Returns the block object for the given block id. 
fn block_with_senders( &self, diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index bc46d526c6f..fa9737f84f0 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -26,7 +26,7 @@ pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; pub use core::{EthApiServer, FullEthApiServer}; pub use filter::EthFilterApiServer; pub use helpers::error::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; -pub use node::RpcNodeCore; +pub use node::{RpcNodeCore, RpcNodeCoreExt}; pub use pubsub::EthPubSubApiServer; pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs index 950271dfcb1..463f508f7ef 100644 --- a/crates/rpc/rpc-eth-api/src/node.rs +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -1,6 +1,7 @@ //! Helper trait for interfacing with [`FullNodeComponents`]. use reth_node_api::FullNodeComponents; +use reth_rpc_eth_types::EthStateCache; /// Helper trait to relax trait bounds on [`FullNodeComponents`]. /// @@ -56,3 +57,10 @@ where FullNodeComponents::provider(self) } } + +/// Additional components, asides the core node components, needed to run `eth_` namespace API +/// server. +pub trait RpcNodeCoreExt: RpcNodeCore { + /// Returns handle to RPC cache service. 
+ fn cache(&self) -> &EthStateCache; +} diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 3fca76e8b0c..339f2200c67 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -10,6 +10,7 @@ use reth_primitives::BlockNumberOrTag; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, SpawnBlocking}, + node::RpcNodeCoreExt, EthApiTypes, RpcNodeCore, }; use reth_rpc_eth_types::{ @@ -169,6 +170,17 @@ where } } +impl RpcNodeCoreExt + for EthApi +where + Self: RpcNodeCore, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} + impl std::fmt::Debug for EthApi { diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index a869cbd5403..d5341d0b22b 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -8,7 +8,7 @@ use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, RpcReceipt, }; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, ReceiptBuilder}; use crate::EthApi; @@ -68,8 +68,4 @@ where Self: LoadPendingBlock + SpawnBlocking, Provider: BlockReaderIdExt, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } From 06d73eec8aa4068e039b2c3ec28fe0b7682fd9e2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 29 Oct 2024 00:35:26 +0800 Subject: [PATCH 214/970] chore(rpc): inline trait methods of `RpcNodeCore` impl (#12144) --- crates/optimism/rpc/src/eth/mod.rs | 4 ++++ crates/rpc/rpc-eth-api/src/node.rs | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index b69b5ef2821..ae463d7158b 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -123,18 +123,22 @@ where type Network = ::Network; type 
Evm = ::Evm; + #[inline] fn pool(&self) -> &Self::Pool { self.inner.pool() } + #[inline] fn evm_config(&self) -> &Self::Evm { self.inner.evm_config() } + #[inline] fn network(&self) -> &Self::Network { self.inner.network() } + #[inline] fn provider(&self) -> &Self::Provider { self.inner.provider() } diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs index 463f508f7ef..851b26b72b9 100644 --- a/crates/rpc/rpc-eth-api/src/node.rs +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -41,18 +41,22 @@ where type Network = ::Network; type Evm = ::Evm; + #[inline] fn pool(&self) -> &Self::Pool { FullNodeComponents::pool(self) } + #[inline] fn evm_config(&self) -> &Self::Evm { FullNodeComponents::evm_config(self) } + #[inline] fn network(&self) -> &Self::Network { FullNodeComponents::network(self) } + #[inline] fn provider(&self) -> &Self::Provider { FullNodeComponents::provider(self) } From 0733da9e12d1daa8febbc6e9fc4131c2acfe6798 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 29 Oct 2024 00:37:11 +0800 Subject: [PATCH 215/970] chore(rpc): relax `FullNodeComponents` trait bound on `OpEthApi` to `RpcNodeCore` (#12142) --- crates/optimism/rpc/src/eth/call.rs | 2 +- crates/optimism/rpc/src/eth/mod.rs | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index c8f8200bc69..9ddf7b3855b 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -22,9 +22,9 @@ where impl Call for OpEthApi where - N: RpcNodeCore, Self: LoadState> + SpawnBlocking, Self::Error: From, + N: RpcNodeCore, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index ae463d7158b..83e79078c25 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -17,7 +17,6 @@ use op_alloy_network::Optimism; use reth_chainspec::{EthChainSpec, 
EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_node_builder::EthApiBuilderCtx; use reth_primitives::Header; use reth_provider::{ @@ -177,7 +176,7 @@ where impl SpawnBlocking for OpEthApi where Self: Send + Sync + Clone + 'static, - N: FullNodeComponents, + N: RpcNodeCore, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -237,7 +236,7 @@ where impl EthState for OpEthApi where Self: LoadState + SpawnBlocking, - N: FullNodeComponents, + N: RpcNodeCore, { #[inline] fn max_proof_window(&self) -> u64 { @@ -248,7 +247,7 @@ where impl EthFees for OpEthApi where Self: LoadFee, - N: FullNodeComponents, + N: RpcNodeCore, { } @@ -261,10 +260,10 @@ where impl AddDevSigners for OpEthApi where - N: FullNodeComponents>, + N: RpcNodeCore, { fn with_dev_accounts(&self) { - *self.signers().write() = DevSigner::random_signers(20) + *self.inner.signers().write() = DevSigner::random_signers(20) } } From 473026f40a665cbdfb1125d50012d6403601f2af Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 29 Oct 2024 00:51:45 +0800 Subject: [PATCH 216/970] chore(rpc): remove redundant `LoadFee::cache` (#12146) --- crates/optimism/rpc/src/eth/mod.rs | 5 ----- crates/rpc/rpc-eth-api/src/helpers/fee.rs | 11 +++-------- crates/rpc/rpc/src/eth/helpers/fees.rs | 7 +------ 3 files changed, 4 insertions(+), 19 deletions(-) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 83e79078c25..52acf424035 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -204,11 +204,6 @@ where + StateProviderFactory, >, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - #[inline] fn gas_oracle(&self) -> &GasPriceOracle { self.inner.gas_oracle() diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index dcde2214a5d..18d2d631148 100644 --- 
a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -6,8 +6,8 @@ use futures::Future; use reth_chainspec::EthChainSpec; use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_rpc_eth_types::{ - fee_history::calculate_reward_percentiles_for_block, EthApiError, EthStateCache, - FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, + fee_history::calculate_reward_percentiles_for_block, EthApiError, FeeHistoryCache, + FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, }; use tracing::debug; @@ -172,7 +172,7 @@ pub trait EthFees: LoadFee { // Percentiles were specified, so we need to collect reward percentile ino if let Some(percentiles) = &reward_percentiles { - let (block, receipts) = LoadFee::cache(self) + let (block, receipts) = self.cache() .get_block_and_receipts(header.hash()) .await .map_err(Self::Error::from_eth_err)? @@ -242,11 +242,6 @@ pub trait EthFees: LoadFee { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` fees RPC methods. pub trait LoadFee: LoadBlock { - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - /// Returns a handle for reading gas price. /// /// Data access in default (L1) trait method implementations. 
diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index 2c5db5bacba..e1a17ef647c 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -3,7 +3,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; -use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; +use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; use crate::EthApi; @@ -20,11 +20,6 @@ where + ChainSpecProvider + StateProviderFactory, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - #[inline] fn gas_oracle(&self) -> &GasPriceOracle { self.inner.gas_oracle() From 3d62bfde14c469f3fea089b90dc0559ef4e096ae Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 29 Oct 2024 02:09:58 +0800 Subject: [PATCH 217/970] chore(rpc): add super trait `RpcNodeCoreExt` to `LoadReceipt` (#12149) --- crates/optimism/rpc/src/eth/receipt.rs | 10 +++------- crates/rpc/rpc-eth-api/src/helpers/block.rs | 3 ++- crates/rpc/rpc-eth-api/src/helpers/receipt.rs | 10 ++-------- crates/rpc/rpc/src/eth/helpers/receipt.rs | 11 +++-------- 4 files changed, 10 insertions(+), 24 deletions(-) diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index f2f09cdc7ff..3f2a81573e2 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -11,7 +11,7 @@ use reth_optimism_forks::OptimismHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use reth_provider::ChainSpecProvider; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, ReceiptBuilder}; use crate::{OpEthApi, OpEthApiError}; @@ -20,18 +20,14 @@ where 
Self: Send + Sync, N: FullNodeComponents>, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - async fn build_transaction_receipt( &self, tx: TransactionSigned, meta: TransactionMeta, receipt: Receipt, ) -> Result, Self::Error> { - let (block, receipts) = LoadReceipt::cache(self) + let (block, receipts) = self + .cache() .get_block_and_receipts(meta.block_hash) .await .map_err(Self::Error::from_eth_err)? diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index c777c64d420..fa397db35e0 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -145,7 +145,8 @@ pub trait EthBlocks: LoadBlock { if let Some(block_hash) = self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)? { - return LoadReceipt::cache(self) + return self + .cache() .get_block_and_receipts(block_hash) .await .map_err(Self::Error::from_eth_err) diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index eae99bbe45d..48394f1cd6b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -3,19 +3,13 @@ use futures::Future; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_types::EthStateCache; -use crate::{EthApiTypes, RpcReceipt}; +use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. -pub trait LoadReceipt: EthApiTypes + Send + Sync { - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - +pub trait LoadReceipt: EthApiTypes + RpcNodeCoreExt + Send + Sync { /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. 
fn build_transaction_receipt( &self, diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 570ec4fa3c0..d0cb5867eac 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -2,20 +2,15 @@ use alloy_serde::WithOtherFields; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; +use reth_rpc_eth_types::{EthApiError, ReceiptBuilder}; use crate::EthApi; impl LoadReceipt for EthApi where - Self: Send + Sync, + Self: RpcNodeCoreExt, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - async fn build_transaction_receipt( &self, tx: TransactionSigned, From 28f8c47dc0642e0bbd5f2abd162e003c22349eec Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 29 Oct 2024 02:10:18 +0800 Subject: [PATCH 218/970] chore(rpc): remove redundant `LoadTransaction::cache` (#12148) --- crates/optimism/rpc/src/eth/transaction.rs | 6 +----- crates/rpc/rpc-eth-api/src/helpers/transaction.rs | 14 ++++++-------- crates/rpc/rpc/src/eth/helpers/transaction.rs | 5 ----- 3 files changed, 7 insertions(+), 18 deletions(-) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 5135b13a2de..3345ac5d452 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -12,7 +12,7 @@ use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, FromEthApiError, FullEthApiTypes, RpcNodeCore, TransactionCompat, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthStateCache}; +use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, 
TransactionPool}; use crate::{OpEthApi, SequencerClient}; @@ -59,10 +59,6 @@ where Self: SpawnBlocking + FullEthApiTypes, N: RpcNodeCore, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } impl OpEthApi diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index af647fedf2c..3c526cbb025 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -15,14 +15,15 @@ use reth_primitives::{ use reth_provider::{BlockNumReader, BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_types::{ utils::{binary_search, recover_raw_transaction}, - EthApiError, EthStateCache, SignError, TransactionSource, + EthApiError, SignError, TransactionSource, }; use reth_rpc_types_compat::transaction::{from_recovered, from_recovered_with_block_context}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use std::sync::Arc; use crate::{ - FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcReceipt, RpcTransaction, + FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, + RpcTransaction, }; use super::{ @@ -461,13 +462,10 @@ pub trait EthTransactions: LoadTransaction { /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` transactions RPC /// methods. pub trait LoadTransaction: - SpawnBlocking + FullEthApiTypes + RpcNodeCore + SpawnBlocking + + FullEthApiTypes + + RpcNodeCoreExt { - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - /// Returns the transaction by hash. /// /// Checks the pool and state. 
diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 623db35e5ad..8ac0785b262 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -5,7 +5,6 @@ use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, FullEthApiTypes, RpcNodeCore, }; -use reth_rpc_eth_types::EthStateCache; use reth_transaction_pool::TransactionPool; use crate::EthApi; @@ -28,10 +27,6 @@ where + FullEthApiTypes + RpcNodeCore, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } #[cfg(test)] From 37d96436073134136f625cd8f33862b39480af0f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 29 Oct 2024 02:10:30 +0800 Subject: [PATCH 219/970] chore(rpc): remove redundant `LoadState::cache` (#12147) --- crates/optimism/rpc/src/eth/mod.rs | 9 ++------- crates/rpc/rpc-eth-api/src/helpers/state.rs | 11 +++-------- crates/rpc/rpc/src/eth/helpers/state.rs | 10 ++-------- 3 files changed, 7 insertions(+), 23 deletions(-) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 52acf424035..7b427467b2c 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -215,17 +215,12 @@ where } } -impl LoadState for OpEthApi -where +impl LoadState for OpEthApi where N: RpcNodeCore< Provider: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, - >, + > { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } impl EthState for OpEthApi diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 87e66cb7481..97c94b94932 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -13,12 +13,12 @@ use reth_provider::{ BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, }; -use 
reth_rpc_eth_types::{EthApiError, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError}; +use reth_rpc_eth_types::{EthApiError, PendingBlockEnv, RpcInvalidTransactionError}; use reth_rpc_types_compat::proof::from_primitive_account_proof; use reth_transaction_pool::TransactionPool; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; -use crate::{EthApiTypes, FromEthApiError, RpcNodeCore}; +use crate::{EthApiTypes, FromEthApiError, RpcNodeCore, RpcNodeCoreExt}; use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; @@ -170,17 +170,12 @@ pub trait EthState: LoadState + SpawnBlocking { /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods. pub trait LoadState: EthApiTypes - + RpcNodeCore< + + RpcNodeCoreExt< Provider: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, > { - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - /// Returns the state at the given block number fn state_at_hash(&self, block_hash: B256) -> Result { self.provider().history_by_block_hash(block_hash).map_err(Self::Error::from_eth_err) diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 8c958ea2ae2..a3e909cf6f6 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -8,7 +8,6 @@ use reth_rpc_eth_api::{ helpers::{EthState, LoadState, SpawnBlocking}, RpcNodeCore, }; -use reth_rpc_eth_types::EthStateCache; use crate::EthApi; @@ -21,17 +20,12 @@ where } } -impl LoadState for EthApi -where +impl LoadState for EthApi where Self: RpcNodeCore< Provider: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, - >, + > { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } #[cfg(test)] From ddc9bda315c0815369794f51bc232dff0ac9f43e Mon Sep 17 00:00:00 2001 From: Dan Cline 
<6798349+Rjected@users.noreply.github.com> Date: Mon, 28 Oct 2024 16:27:56 -0400 Subject: [PATCH 220/970] fix(op): fix payload id calculation (#11730) Co-authored-by: Matthias Seitz --- crates/optimism/payload/src/payload.rs | 76 +++++++++++++++++++++++--- 1 file changed, 68 insertions(+), 8 deletions(-) diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index f46891fdc44..7f95d04ad9f 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -3,7 +3,7 @@ //! Optimism builder support use alloy_eips::{eip2718::Decodable2718, eip7685::Requests}; -use alloy_primitives::{Address, B256, U256}; +use alloy_primitives::{keccak256, Address, B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; /// Re-export for use in downstream arguments. @@ -46,9 +46,9 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { fn try_new( parent: B256, attributes: OpPayloadAttributes, - _version: u8, + version: u8, ) -> Result { - let id = payload_id_optimism(&parent, &attributes); + let id = payload_id_optimism(&parent, &attributes, version); let transactions = attributes .transactions @@ -281,7 +281,11 @@ impl From for OpExecutionPayloadEnvelopeV4 { /// Generates the payload id for the configured payload from the [`OpPayloadAttributes`]. /// /// Returns an 8-byte identifier by hashing the payload components with sha256 hash. 
-pub(crate) fn payload_id_optimism(parent: &B256, attributes: &OpPayloadAttributes) -> PayloadId { +pub(crate) fn payload_id_optimism( + parent: &B256, + attributes: &OpPayloadAttributes, + payload_version: u8, +) -> PayloadId { use sha2::Digest; let mut hasher = sha2::Sha256::new(); hasher.update(parent.as_slice()); @@ -299,15 +303,71 @@ pub(crate) fn payload_id_optimism(parent: &B256, attributes: &OpPayloadAttribute } let no_tx_pool = attributes.no_tx_pool.unwrap_or_default(); - hasher.update([no_tx_pool as u8]); - if let Some(txs) = &attributes.transactions { - txs.iter().for_each(|tx| hasher.update(tx)); + if no_tx_pool || attributes.transactions.as_ref().is_some_and(|txs| !txs.is_empty()) { + hasher.update([no_tx_pool as u8]); + let txs_len = attributes.transactions.as_ref().map(|txs| txs.len()).unwrap_or_default(); + hasher.update(&txs_len.to_be_bytes()[..]); + if let Some(txs) = &attributes.transactions { + for tx in txs { + // we have to just hash the bytes here because otherwise we would need to decode + // the transactions here which really isn't ideal + let tx_hash = keccak256(tx); + // maybe we can try just taking the hash and not decoding + hasher.update(tx_hash) + } + } } if let Some(gas_limit) = attributes.gas_limit { hasher.update(gas_limit.to_be_bytes()); } - let out = hasher.finalize(); + if let Some(eip_1559_params) = attributes.eip_1559_params { + hasher.update(eip_1559_params.as_slice()); + } + + let mut out = hasher.finalize(); + out[0] = payload_version; PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length")) } + +#[cfg(test)] +mod tests { + use super::*; + use crate::OpPayloadAttributes; + use alloy_primitives::{address, b256, bytes, FixedBytes}; + use alloy_rpc_types_engine::PayloadAttributes; + use reth_payload_primitives::EngineApiMessageVersion; + use std::str::FromStr; + + #[test] + fn test_payload_id_parity_op_geth() { + // INFO rollup_boost::server:received fork_choice_updated_v3 from builder and l2_client + // 
payload_id_builder="0x6ef26ca02318dcf9" payload_id_l2="0x03d2dae446d2a86a" + let expected = + PayloadId::new(FixedBytes::<8>::from_str("0x03d2dae446d2a86a").unwrap().into()); + let attrs = OpPayloadAttributes { + payload_attributes: PayloadAttributes { + timestamp: 1728933301, + prev_randao: b256!("9158595abbdab2c90635087619aa7042bbebe47642dfab3c9bfb934f6b082765"), + suggested_fee_recipient: address!("4200000000000000000000000000000000000011"), + withdrawals: Some([].into()), + parent_beacon_block_root: b256!("8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into() + }, + transactions: Some([bytes!("7ef8f8a0dc19cfa777d90980e4875d0a548a881baaa3f83f14d1bc0d3038bc329350e54194deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f424000000000000000000000000300000000670d6d890000000000000125000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000014bf9181db6e381d4384bbf69c48b0ee0eed23c6ca26143c6d2544f9d39997a590000000000000000000000007f83d659683caf2767fd3c720981d51f5bc365bc")].into()), + no_tx_pool: None, + gas_limit: Some(30000000), + eip_1559_params: None, + }; + + // Reth's `PayloadId` should match op-geth's `PayloadId`. 
This fails + assert_eq!( + expected, + payload_id_optimism( + &b256!("3533bf30edaf9505d0810bf475cbe4e5f4b9889904b9845e83efdeab4e92eb1e"), + &attrs, + EngineApiMessageVersion::V3 as u8 + ) + ); + } +} From 05ee75f32c95600f0010455ccaf66ed6187b2c59 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 28 Oct 2024 23:31:10 +0100 Subject: [PATCH 221/970] fix: restrict concurrent incoming connections (#12150) --- crates/net/network-types/src/peers/state.rs | 6 ++ crates/net/network/src/peers.rs | 62 +++++++++++++++++++-- 2 files changed, 63 insertions(+), 5 deletions(-) diff --git a/crates/net/network-types/src/peers/state.rs b/crates/net/network-types/src/peers/state.rs index f6ab1a39f85..1e2466c805a 100644 --- a/crates/net/network-types/src/peers/state.rs +++ b/crates/net/network-types/src/peers/state.rs @@ -31,6 +31,12 @@ impl PeerConnectionState { } } + /// Returns true if this is the idle state. + #[inline] + pub const fn is_idle(&self) -> bool { + matches!(self, Self::Idle) + } + /// Returns true if this is an active incoming connection. #[inline] pub const fn is_incoming(&self) -> bool { diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index 3d5ff7a0d43..016e7fa8d92 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -218,6 +218,11 @@ impl PeersManager { self.backed_off_peers.len() } + /// Returns the number of idle trusted peers. + fn num_idle_trusted_peers(&self) -> usize { + self.peers.iter().filter(|(_, peer)| peer.kind.is_trusted() && peer.state.is_idle()).count() + } + /// Invoked when a new _incoming_ tcp connection is accepted. 
/// /// returns an error if the inbound ip address is on the ban list @@ -229,9 +234,34 @@ impl PeersManager { return Err(InboundConnectionError::IpBanned) } - if !self.connection_info.has_in_capacity() && self.trusted_peer_ids.is_empty() { - // if we don't have any inbound slots and no trusted peers, we don't accept any new - // connections + // check if we even have slots for a new incoming connection + if !self.connection_info.has_in_capacity() { + if self.trusted_peer_ids.is_empty() { + // if we don't have any incoming slots and no trusted peers, we don't accept any new + // connections + return Err(InboundConnectionError::ExceedsCapacity) + } + + // there's an edge case here where no incoming connections besides from trusted peers + // are allowed (max_inbound == 0), in which case we still need to allow new pending + // incoming connections until all trusted peers are connected. + let num_idle_trusted_peers = self.num_idle_trusted_peers(); + if num_idle_trusted_peers <= self.trusted_peer_ids.len() { + // we still want to limit concurrent pending connections + let max_inbound = + self.trusted_peer_ids.len().max(self.connection_info.config.max_inbound); + if self.connection_info.num_pending_in <= max_inbound { + self.connection_info.inc_pending_in(); + } + return Ok(()) + } + + // all trusted peers are either connected or connecting + return Err(InboundConnectionError::ExceedsCapacity) + } + + // also cap the incoming connections we can process at once + if !self.connection_info.has_in_pending_capacity() { return Err(InboundConnectionError::ExceedsCapacity) } @@ -968,17 +998,22 @@ impl ConnectionInfo { Self { config, num_outbound: 0, num_pending_out: 0, num_inbound: 0, num_pending_in: 0 } } - /// Returns `true` if there's still capacity for a new outgoing connection. + /// Returns `true` if there's still capacity to perform an outgoing connection. 
const fn has_out_capacity(&self) -> bool { self.num_pending_out < self.config.max_concurrent_outbound_dials && self.num_outbound < self.config.max_outbound } - /// Returns `true` if there's still capacity for a new incoming connection. + /// Returns `true` if there's still capacity to accept a new incoming connection. const fn has_in_capacity(&self) -> bool { self.num_inbound < self.config.max_inbound } + /// Returns `true` if we can handle an additional incoming pending connection. + const fn has_in_pending_capacity(&self) -> bool { + self.num_pending_in < self.config.max_inbound + } + fn decr_state(&mut self, state: PeerConnectionState) { match state { PeerConnectionState::Idle => {} @@ -1597,6 +1632,23 @@ mod tests { assert_eq!(peers.connection_info.num_pending_in, 0); } + #[tokio::test] + async fn test_reject_incoming_at_pending_capacity() { + let mut peers = PeersManager::default(); + + for count in 1..=peers.connection_info.config.max_inbound { + let socket_addr = + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, count as u8)), 8008); + assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_ok()); + assert_eq!(peers.connection_info.num_pending_in, count); + } + assert!(peers.connection_info.has_in_capacity()); + assert!(!peers.connection_info.has_in_pending_capacity()); + + let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 100)), 8008); + assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_err()); + } + #[tokio::test] async fn test_closed_incoming() { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); From 367eb44177b3cba8dae4c98acf0cdc93273e745e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 29 Oct 2024 00:18:37 +0100 Subject: [PATCH 222/970] chore: remove one unwrap (#12152) --- crates/net/ecies/src/algorithm.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 
83dcc657bce..6bf9717fe52 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -650,6 +650,7 @@ impl ECIES { out.extend_from_slice(tag.as_slice()); } + /// Extracts the header from slice and returns the body size. pub fn read_header(&mut self, data: &mut [u8]) -> Result { // If the data is not large enough to fit the header and mac bytes, return an error // @@ -677,7 +678,7 @@ impl ECIES { self.body_size = Some(body_size); - Ok(self.body_size.unwrap()) + Ok(body_size) } pub const fn header_len() -> usize { From 462157880c5cf27408c304d9ce8dd32e10201800 Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Tue, 29 Oct 2024 00:51:20 +0100 Subject: [PATCH 223/970] dev: track invalid transactions by sender in pool (#12138) --- crates/transaction-pool/src/pool/best.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 763572e7e82..6ade15be7d8 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,14 +1,14 @@ use crate::{ - identifier::TransactionId, pool::pending::PendingTransaction, PoolTransaction, - TransactionOrdering, ValidPoolTransaction, + identifier::{SenderId, TransactionId}, + pool::pending::PendingTransaction, + PoolTransaction, TransactionOrdering, ValidPoolTransaction, }; -use alloy_primitives::{Address, B256 as TxHash}; +use alloy_primitives::Address; use core::fmt; use std::{ collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, sync::Arc, }; - use tokio::sync::broadcast::{error::TryRecvError, Receiver}; use tracing::debug; @@ -80,7 +80,7 @@ pub(crate) struct BestTransactions { /// then can be moved from the `all` set to the `independent` set. pub(crate) independent: BTreeSet>, /// There might be the case where a yielded transactions is invalid, this will track it. 
- pub(crate) invalid: HashSet, + pub(crate) invalid: HashSet, /// Used to receive any new pending transactions that have been added to the pool after this /// iterator was static fileted /// @@ -94,7 +94,7 @@ pub(crate) struct BestTransactions { impl BestTransactions { /// Mark the transaction and it's descendants as invalid. pub(crate) fn mark_invalid(&mut self, tx: &Arc>) { - self.invalid.insert(*tx.hash()); + self.invalid.insert(tx.sender_id()); } /// Returns the ancestor the given transaction, the transaction with `nonce - 1`. @@ -168,14 +168,14 @@ impl Iterator for BestTransactions { self.add_new_transactions(); // Remove the next independent tx with the highest priority let best = self.independent.pop_last()?; - let hash = best.transaction.hash(); + let sender_id = best.transaction.sender_id(); - // skip transactions that were marked as invalid - if self.invalid.contains(hash) { + // skip transactions for which sender was marked as invalid + if self.invalid.contains(&sender_id) { debug!( target: "txpool", "[{:?}] skipping invalid transaction", - hash + best.transaction.hash() ); continue } @@ -186,7 +186,7 @@ impl Iterator for BestTransactions { } if self.skip_blobs && best.transaction.transaction.is_eip4844() { - // blobs should be skipped, marking the as invalid will ensure that no dependent + // blobs should be skipped, marking them as invalid will ensure that no dependent // transactions are returned self.mark_invalid(&best.transaction) } else { From 0297b8f6949d2354c1d68df5360cc375dc889879 Mon Sep 17 00:00:00 2001 From: zilayo <84344709+zilayo@users.noreply.github.com> Date: Mon, 28 Oct 2024 23:59:52 +0000 Subject: [PATCH 224/970] fix: use net::discv5 for reth's discv5 tracing target namespace (reverts #12045) (#12151) --- crates/net/discv5/src/config.rs | 4 ++-- crates/net/discv5/src/lib.rs | 30 +++++++++++++++--------------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs 
index 0684c263b8c..203ef76134b 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -412,7 +412,7 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0)); if let Some(discv5_addr) = discv5_addr_ipv4 { - warn!(target: "discv5", + warn!(target: "net::discv5", %discv5_addr, %rlpx_addr, "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" @@ -429,7 +429,7 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4)); if let Some(discv5_addr) = discv5_addr_ipv6 { - warn!(target: "discv5", + warn!(target: "net::discv5", %discv5_addr, %rlpx_addr, "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index a8154b7bd19..da54d0b5266 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -95,14 +95,14 @@ impl Discv5 { /// CAUTION: The value **must** be rlp encoded pub fn set_eip868_in_local_enr(&self, key: Vec, rlp: Bytes) { let Ok(key_str) = std::str::from_utf8(&key) else { - error!(target: "discv5", + error!(target: "net::discv5", err="key not utf-8", "failed to update local enr" ); return }; if let Err(err) = self.discv5.enr_insert(key_str, &rlp) { - error!(target: "discv5", + error!(target: "net::discv5", %err, "failed to update local enr" ); @@ -131,7 +131,7 @@ impl Discv5 { self.discv5.ban_node(&node_id, None); self.ban_ip(ip); } - Err(err) => error!(target: "discv5", + Err(err) => error!(target: "net::discv5", %err, "failed to ban peer" ), @@ -167,7 +167,7 @@ impl Discv5 { // let (enr, bc_enr, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config); - trace!(target: "discv5", + trace!(target: "net::discv5", ?enr, "local ENR" ); @@ -271,7 +271,7 @@ impl Discv5 { // to them over RLPx, to be compatible with EL discv5 
implementations that don't // enforce this security measure. - trace!(target: "discv5", + trace!(target: "net::discv5", ?enr, %socket, "discovered unverifiable enr, source socket doesn't match socket advertised in ENR" @@ -296,7 +296,7 @@ impl Discv5 { let node_record = match self.try_into_reachable(enr, socket) { Ok(enr_bc) => enr_bc, Err(err) => { - trace!(target: "discv5", + trace!(target: "net::discv5", %err, ?enr, "discovered peer is unreachable" @@ -308,7 +308,7 @@ impl Discv5 { } }; if let FilterOutcome::Ignore { reason } = self.filter_discovered_peer(enr) { - trace!(target: "discv5", + trace!(target: "net::discv5", ?enr, reason, "filtered out discovered peer" @@ -324,7 +324,7 @@ impl Discv5 { .then(|| self.get_fork_id(enr).ok()) .flatten(); - trace!(target: "discv5", + trace!(target: "net::discv5", ?fork_id, ?enr, "discovered peer" @@ -491,7 +491,7 @@ pub async fn bootstrap( bootstrap_nodes: HashSet, discv5: &Arc, ) -> Result<(), Error> { - trace!(target: "discv5", + trace!(target: "net::discv5", ?bootstrap_nodes, "adding bootstrap nodes .." 
); @@ -508,7 +508,7 @@ pub async fn bootstrap( let discv5 = discv5.clone(); enr_requests.push(async move { if let Err(err) = discv5.request_enr(enode.to_string()).await { - debug!(target: "discv5", + debug!(target: "net::discv5", ?enode, %err, "failed adding boot node" @@ -545,7 +545,7 @@ pub fn spawn_populate_kbuckets_bg( for i in (0..bootstrap_lookup_countdown).rev() { let target = discv5::enr::NodeId::random(); - trace!(target: "discv5", + trace!(target: "net::discv5", %target, bootstrap_boost_runs_countdown=i, lookup_interval=format!("{:#?}", pulse_lookup_interval), @@ -563,7 +563,7 @@ pub fn spawn_populate_kbuckets_bg( // selection (ref kademlia) let target = get_lookup_target(kbucket_index, local_node_id); - trace!(target: "discv5", + trace!(target: "net::discv5", %target, lookup_interval=format!("{:#?}", lookup_interval), "starting periodic lookup query" @@ -628,11 +628,11 @@ pub async fn lookup( ); match discv5.find_node(target).await { - Err(err) => trace!(target: "discv5", + Err(err) => trace!(target: "net::discv5", %err, "lookup query failed" ), - Ok(peers) => trace!(target: "discv5", + Ok(peers) => trace!(target: "net::discv5", target=format!("{:#?}", target), peers_count=peers.len(), peers=format!("[{:#}]", peers.iter() @@ -645,7 +645,7 @@ pub async fn lookup( // `Discv5::connected_peers` can be subset of sessions, not all peers make it // into kbuckets, e.g. 
incoming sessions from peers with // unreachable enrs - debug!(target: "discv5", + debug!(target: "net::discv5", connected_peers=discv5.connected_peers(), "connected peers in routing table" ); From fbe04625b9c488e76d41b82cc28bbce268ebc4c1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 29 Oct 2024 01:04:24 +0100 Subject: [PATCH 225/970] test: use port0 in tests (#12154) --- crates/net/network/tests/it/startup.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/net/network/tests/it/startup.rs b/crates/net/network/tests/it/startup.rs index 89889a86946..d84ff492e5e 100644 --- a/crates/net/network/tests/it/startup.rs +++ b/crates/net/network/tests/it/startup.rs @@ -113,6 +113,7 @@ async fn test_node_record_address_with_nat() { .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discv4_discovery() .disable_dns_discovery() + .listener_port(0) .build_with_noop_provider(MAINNET.clone()); let network = NetworkManager::new(config).await.unwrap(); @@ -127,6 +128,7 @@ async fn test_node_record_address_with_nat_disable_discovery() { let config = NetworkConfigBuilder::new(secret_key) .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discovery() + .listener_port(0) .build_with_noop_provider(MAINNET.clone()); let network = NetworkManager::new(config).await.unwrap(); From cc2a33cfc0c5f06b0d44fef72ffa987e1c97f025 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 29 Oct 2024 03:09:11 +0100 Subject: [PATCH 226/970] feat: rate limit incoming ips (#12153) --- crates/net/network-types/src/peers/config.rs | 9 +++ crates/net/network/src/peers.rs | 63 +++++++++++++++++--- 2 files changed, 63 insertions(+), 9 deletions(-) diff --git a/crates/net/network-types/src/peers/config.rs b/crates/net/network-types/src/peers/config.rs index 97a8bb3cac3..890679f5d34 100644 --- a/crates/net/network-types/src/peers/config.rs +++ b/crates/net/network-types/src/peers/config.rs @@ -24,6 +24,9 @@ pub const 
DEFAULT_MAX_COUNT_PEERS_INBOUND: u32 = 30; /// This restricts how many outbound dials can be performed concurrently. pub const DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS: usize = 15; +/// A temporary timeout for ips on incoming connection attempts. +pub const INBOUND_IP_THROTTLE_DURATION: Duration = Duration::from_secs(30); + /// The durations to use when a backoff should be applied to a peer. /// /// See also [`BackoffKind`]. @@ -155,6 +158,11 @@ pub struct PeersConfig { /// /// The backoff duration increases with number of backoff attempts. pub backoff_durations: PeerBackoffDurations, + /// How long to temporarily ban ips on incoming connection attempts. + /// + /// This acts as an IP based rate limit. + #[cfg_attr(feature = "serde", serde(default, with = "humantime_serde"))] + pub incoming_ip_throttle_duration: Duration, } impl Default for PeersConfig { @@ -171,6 +179,7 @@ impl Default for PeersConfig { trusted_nodes_only: false, basic_nodes: Default::default(), max_backoff_count: 5, + incoming_ip_throttle_duration: INBOUND_IP_THROTTLE_DURATION, } } } diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index 016e7fa8d92..4855ff5e743 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -84,6 +84,8 @@ pub struct PeersManager { max_backoff_count: u8, /// Tracks the connection state of the node net_connection_state: NetworkConnectionState, + /// How long to temporarily ban ip on an incoming connection attempt. 
+ incoming_ip_throttle_duration: Duration, } impl PeersManager { @@ -100,6 +102,7 @@ impl PeersManager { trusted_nodes_only, basic_nodes, max_backoff_count, + incoming_ip_throttle_duration, } = config; let (manager_tx, handle_rx) = mpsc::unbounded_channel(); let now = Instant::now(); @@ -148,6 +151,7 @@ impl PeersManager { last_tick: Instant::now(), max_backoff_count, net_connection_state: NetworkConnectionState::default(), + incoming_ip_throttle_duration, } } @@ -265,6 +269,9 @@ impl PeersManager { return Err(InboundConnectionError::ExceedsCapacity) } + // apply the rate limit + self.throttle_incoming_ip(addr); + self.connection_info.inc_pending_in(); Ok(()) } @@ -383,6 +390,12 @@ impl PeersManager { self.ban_list.ban_ip_until(ip, std::time::Instant::now() + self.ban_duration); } + /// Bans the IP temporarily to rate limit inbound connection attempts per IP. + fn throttle_incoming_ip(&mut self, ip: IpAddr) { + self.ban_list + .ban_ip_until(ip, std::time::Instant::now() + self.incoming_ip_throttle_duration); + } + /// Temporarily puts the peer in timeout by inserting it into the backedoff peers set fn backoff_peer_until(&mut self, peer_id: PeerId, until: std::time::Instant) { trace!(target: "net::peers", ?peer_id, "backing off"); @@ -1129,15 +1142,6 @@ impl Display for InboundConnectionError { #[cfg(test)] mod tests { - use std::{ - future::{poll_fn, Future}, - io, - net::{IpAddr, Ipv4Addr, SocketAddr}, - pin::Pin, - task::{Context, Poll}, - time::Duration, - }; - use alloy_primitives::B512; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PHandshakeError, P2PStreamError}, @@ -1149,6 +1153,14 @@ mod tests { use reth_network_types::{ peers::reputation::DEFAULT_REPUTATION, BackoffKind, ReputationChangeKind, }; + use std::{ + future::{poll_fn, Future}, + io, + net::{IpAddr, Ipv4Addr, SocketAddr}, + pin::Pin, + task::{Context, Poll}, + time::Duration, + }; use url::Host; use super::PeersManager; @@ -2330,6 +2342,39 @@ mod tests { ); } + #[tokio::test] 
+ async fn test_incoming_rate_limit() { + let config = PeersConfig { + incoming_ip_throttle_duration: Duration::from_millis(100), + ..PeersConfig::test() + }; + let mut peers = PeersManager::new(config); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(168, 0, 1, 2)), 8009); + assert!(peers.on_incoming_pending_session(addr.ip()).is_ok()); + assert_eq!( + peers.on_incoming_pending_session(addr.ip()).unwrap_err(), + InboundConnectionError::IpBanned + ); + + peers.release_interval.reset_immediately(); + tokio::time::sleep(peers.incoming_ip_throttle_duration).await; + + // await unban + poll_fn(|cx| loop { + if peers.poll(cx).is_pending() { + return Poll::Ready(()); + } + }) + .await; + + assert!(peers.on_incoming_pending_session(addr.ip()).is_ok()); + assert_eq!( + peers.on_incoming_pending_session(addr.ip()).unwrap_err(), + InboundConnectionError::IpBanned + ); + } + #[tokio::test] async fn test_tick() { let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)); From b48fa68f65a606b53ec19eb1dd2cbc090c3cd777 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 29 Oct 2024 01:14:34 -0400 Subject: [PATCH 227/970] fix(ecies): ecies typo (#12155) --- crates/net/network/src/session/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 74f303df7b8..3522aa6a75b 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -868,7 +868,7 @@ async fn authenticate( extra_handlers: RlpxSubProtocolHandlers, ) { let local_addr = stream.local_addr().ok(); - let stream = match get_eciess_stream(stream, secret_key, direction).await { + let stream = match get_ecies_stream(stream, secret_key, direction).await { Ok(stream) => stream, Err(error) => { let _ = events @@ -917,7 +917,7 @@ async fn authenticate( /// Returns an [`ECIESStream`] if it can be built. 
If not, send a /// [`PendingSessionEvent::EciesAuthError`] and returns `None` -async fn get_eciess_stream( +async fn get_ecies_stream( stream: Io, secret_key: SecretKey, direction: Direction, From 7880d4ddb016c9befc16c5a8f2c70601d9dd0fea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Tue, 29 Oct 2024 16:52:00 +0700 Subject: [PATCH 228/970] refactor: change `PayloadConfig` to use parent header instead of parent block (#12159) --- .../src/commands/debug_cmd/build_block.rs | 5 ++-- crates/ethereum/payload/src/lib.rs | 28 +++++++++---------- crates/optimism/payload/src/builder.rs | 18 ++++++------ crates/payload/basic/src/lib.rs | 20 +++++++------ examples/custom-engine-types/src/main.rs | 8 +++--- .../custom-payload-builder/src/generator.rs | 7 +++-- 6 files changed, 47 insertions(+), 39 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 272f107d3c4..a2dfb5ab3ea 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -29,7 +29,8 @@ use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::database::CachedReads; use reth_primitives::{ revm_primitives::KzgSettings, BlobTransaction, BlobTransactionSidecar, - PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, + PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + TransactionSigned, }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, @@ -224,7 +225,7 @@ impl> Command { withdrawals: None, }; let payload_config = PayloadConfig::new( - Arc::clone(&best_block), + Arc::new(SealedHeader::new(best_block.header().clone(), best_block.hash())), Bytes::default(), reth_payload_builder::EthPayloadBuilderAttributes::try_new( best_block.hash(), diff --git 
a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 73b22efac40..8e188f890fd 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -97,7 +97,7 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_header); let pool = args.pool.clone(); default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env, |attributes| { @@ -120,7 +120,7 @@ where None, ); - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_header); let pool = args.pool.clone(); @@ -154,13 +154,13 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let chain_spec = client.chain_spec(); - let state_provider = client.state_by_block_hash(config.parent_block.hash())?; + let state_provider = client.state_by_block_hash(config.parent_header.hash())?; let state = StateProviderDatabase::new(state_provider); let mut db = State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let PayloadConfig { parent_block, extra_data, attributes } = config; + let PayloadConfig { parent_header, extra_data, attributes } = config; - debug!(target: "payload_builder", id=%attributes.id, parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; let block_gas_limit: u64 = initialized_block_env.gas_limit.to::(); @@ -189,7 +189,7 @@ where ) .map_err(|err| { warn!(target: "payload_builder", - 
parent_hash=%parent_block.hash(), + parent_hash=%parent_header.hash(), %err, "failed to apply beacon root contract call for payload" ); @@ -201,10 +201,10 @@ where &mut db, &initialized_cfg, &initialized_block_env, - parent_block.hash(), + parent_header.hash(), ) .map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to update blockhashes for payload"); + warn!(target: "payload_builder", parent_hash=%parent_header.hash(), %err, "failed to update parent header blockhashes for payload"); PayloadBuilderError::Internal(err.into()) })?; @@ -371,7 +371,7 @@ where let state_provider = db.database.0.inner.borrow_mut(); state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_hash=%parent_header.hash(), %err, "failed to calculate state root for payload" ); @@ -393,9 +393,9 @@ where executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), )?; - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); + excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_header.timestamp) { + let parent_excess_blob_gas = parent_header.excess_blob_gas.unwrap_or_default(); + let parent_blob_gas_used = parent_header.blob_gas_used.unwrap_or_default(); Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) } else { // for the first post-fork block, both parent.blob_gas_used and @@ -407,7 +407,7 @@ where } let header = Header { - parent_hash: parent_block.hash(), + parent_hash: parent_header.hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: initialized_block_env.coinbase, state_root, @@ -419,7 +419,7 @@ where mix_hash: attributes.prev_randao, nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(base_fee), 
- number: parent_block.number + 1, + number: parent_header.number + 1, gas_limit: block_gas_limit, difficulty: U256::ZERO, gas_used: cumulative_gas_used, diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 36b7d17a07d..a1536ccf8fa 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -103,7 +103,7 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_header); optimism_payload(&self.evm_config, args, cfg_env, block_env, self.compute_pending_block) } @@ -132,7 +132,7 @@ where cancel: Default::default(), best_payload: None, }; - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_header); optimism_payload(&self.evm_config, args, cfg_env, block_env, false)? 
.into_payload() .ok_or_else(|| PayloadBuilderError::MissingPayload) @@ -163,13 +163,13 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let chain_spec = client.chain_spec(); - let state_provider = client.state_by_block_hash(config.parent_block.hash())?; + let state_provider = client.state_by_block_hash(config.parent_header.hash())?; let state = StateProviderDatabase::new(state_provider); let mut db = State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let PayloadConfig { parent_block, attributes, extra_data } = config; + let PayloadConfig { parent_header, attributes, extra_data } = config; - debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; let block_gas_limit: u64 = attributes.gas_limit.unwrap_or_else(|| { @@ -206,7 +206,7 @@ where ) .map_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_header=%parent_header.hash(), %err, "failed to apply beacon root contract call for payload" ); @@ -449,7 +449,7 @@ where let state_provider = db.database.0.inner.borrow_mut(); state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_header=%parent_header.hash(), %err, "failed to calculate state root for payload" ); @@ -470,7 +470,7 @@ where }; let header = Header { - parent_hash: parent_block.hash(), + parent_hash: parent_header.hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: initialized_block_env.coinbase, state_root, @@ -482,7 +482,7 @@ where mix_hash: 
attributes.payload_attributes.prev_randao, nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, + number: parent_header.number + 1, gas_limit: block_gas_limit, difficulty: U256::ZERO, gas_used: cumulative_gas_used, diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index d0bb29502ea..bb8dc0ef66a 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -23,7 +23,7 @@ use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; use reth_primitives::{ - constants::RETH_CLIENT_VERSION, proofs, BlockNumberOrTag, SealedBlock, Withdrawals, + constants::RETH_CLIENT_VERSION, proofs, BlockNumberOrTag, SealedHeader, Withdrawals, }; use reth_provider::{ BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, @@ -122,7 +122,7 @@ impl BasicPayloadJobGenerator Option { self.pre_cached.as_ref().filter(|pc| pc.block == parent).map(|pc| pc.cached.clone()) @@ -163,13 +163,17 @@ where block.seal(attributes.parent()) }; + let hash = parent_block.hash(); + let parent_header = parent_block.header(); + let header = SealedHeader::new(parent_header.clone(), hash); + let config = - PayloadConfig::new(Arc::new(parent_block), self.config.extradata.clone(), attributes); + PayloadConfig::new(Arc::new(header), self.config.extradata.clone(), attributes); let until = self.job_deadline(config.attributes.timestamp()); let deadline = Box::pin(tokio::time::sleep_until(until)); - let cached_reads = self.maybe_pre_cached(config.parent_block.hash()); + let cached_reads = self.maybe_pre_cached(hash); let mut job = BasicPayloadJob { config, @@ -706,8 +710,8 @@ impl Drop for Cancelled { /// Static config for how to build a payload. #[derive(Clone, Debug)] pub struct PayloadConfig { - /// The parent block. - pub parent_block: Arc, + /// The parent header. + pub parent_header: Arc, /// Block extra data. 
pub extra_data: Bytes, /// Requested attributes for the payload. @@ -727,11 +731,11 @@ where { /// Create new payload config. pub const fn new( - parent_block: Arc, + parent_header: Arc, extra_data: Bytes, attributes: Attributes, ) -> Self { - Self { parent_block, extra_data, attributes } + Self { parent_header, extra_data, attributes } } /// Returns the payload id. diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 0fa2b1658f8..30f89a0b9d7 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -341,7 +341,7 @@ where args: BuildArguments, ) -> Result, PayloadBuilderError> { let BuildArguments { client, pool, cached_reads, config, cancel, best_payload } = args; - let PayloadConfig { parent_block, extra_data, attributes } = config; + let PayloadConfig { parent_header, extra_data, attributes } = config; let chain_spec = client.chain_spec(); @@ -354,7 +354,7 @@ where client, pool, cached_reads, - config: PayloadConfig { parent_block, extra_data, attributes: attributes.0 }, + config: PayloadConfig { parent_header, extra_data, attributes: attributes.0 }, cancel, best_payload, }) @@ -365,10 +365,10 @@ where client: &Client, config: PayloadConfig, ) -> Result { - let PayloadConfig { parent_block, extra_data, attributes } = config; + let PayloadConfig { parent_header, extra_data, attributes } = config; let chain_spec = client.chain_spec(); >::build_empty_payload(&reth_ethereum_payload_builder::EthereumPayloadBuilder::new(EthEvmConfig::new(chain_spec.clone())),client, - PayloadConfig { parent_block, extra_data, attributes: attributes.0}) + PayloadConfig { parent_header, extra_data, attributes: attributes.0}) } } diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index f5d64e41cd0..7341428872f 100644 --- a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -8,7 
+8,7 @@ use reth::{ use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig}; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator}; -use reth_primitives::BlockNumberOrTag; +use reth_primitives::{BlockNumberOrTag, SealedHeader}; use std::sync::Arc; /// The generator type that creates new jobs that builds empty blocks. @@ -77,7 +77,10 @@ where // we already know the hash, so we can seal it block.seal(attributes.parent()) }; - let config = PayloadConfig::new(Arc::new(parent_block), Bytes::default(), attributes); + let hash = parent_block.hash(); + let header = SealedHeader::new(parent_block.header().clone(), hash); + + let config = PayloadConfig::new(Arc::new(header), Bytes::default(), attributes); Ok(EmptyBlockPayloadJob { client: self.client.clone(), _pool: self.pool.clone(), From 2dbbd152cbc77f12e0056b8ded5bbe7141d1f6f7 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 29 Oct 2024 20:00:22 +0900 Subject: [PATCH 229/970] chore(provider): remove unused `BlockExecutionReader` trait (#12156) --- .../src/providers/database/provider.rs | 180 +----------------- .../storage/provider/src/test_utils/mock.rs | 20 +- crates/storage/provider/src/traits/block.rs | 11 -- 3 files changed, 13 insertions(+), 198 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 308fa364a3d..e59a4f5635f 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -6,15 +6,14 @@ use crate::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, writer::UnifiedStorageWriter, - AccountReader, BlockExecutionReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, - BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, 
ChainStateBlockWriter, - DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, - HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, - LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, - PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, - StateChangeWriter, StateProviderBox, StateReader, StateWriter, StaticFileProviderFactory, - StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, + BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, EvmEnvProvider, + HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, + HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, + StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, + StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, + TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; @@ -684,152 +683,6 @@ impl DatabaseProvider { }) } - /// Get requested blocks transaction with senders - pub(crate) fn get_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult)>> { - // Raad range of block bodies to get all transactions id's of this range. 
- let block_bodies = self.get::(range)?; - - if block_bodies.is_empty() { - return Ok(Vec::new()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(block_bodies.into_iter().map(|(n, _)| (n, Vec::new())).collect()) - } - - // Get transactions and senders - let transactions = self - .get::(first_transaction..=last_transaction)? - .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - let mut senders = - self.get::(first_transaction..=last_transaction)?; - - recover_block_senders(&mut senders, &transactions, first_transaction, last_transaction)?; - - // Merge transaction into blocks - let mut block_tx = Vec::with_capacity(block_bodies.len()); - let mut senders = senders.into_iter(); - let mut transactions = transactions.into_iter(); - for (block_number, block_body) in block_bodies { - let mut one_block_tx = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - let tx = transactions.next(); - let sender = senders.next(); - - let recovered = match (tx, sender) { - (Some((tx_id, tx)), Some((sender_tx_id, sender))) => { - if tx_id == sender_tx_id { - Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender)) - } else { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - } - (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - (None, None) => Err(ProviderError::BlockBodyTransactionCount), - }?; - one_block_tx.push(recovered) - } - block_tx.push((block_number, one_block_tx)); - } - - Ok(block_tx) - } - - /// Get the given range of blocks. 
- pub fn get_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult> - where - Spec: EthereumHardforks, - { - // For blocks we need: - // - // - Headers - // - Bodies (transactions) - // - Uncles/ommers - // - Withdrawals - // - Signers - - let block_headers = self.get::(range.clone())?; - if block_headers.is_empty() { - return Ok(Vec::new()) - } - - let block_header_hashes = self.get::(range.clone())?; - let block_ommers = self.get::(range.clone())?; - let block_withdrawals = self.get::(range.clone())?; - - let block_tx = self.get_block_transaction_range(range)?; - let mut blocks = Vec::with_capacity(block_headers.len()); - - // merge all into block - let block_header_iter = block_headers.into_iter(); - let block_header_hashes_iter = block_header_hashes.into_iter(); - let block_tx_iter = block_tx.into_iter(); - - // Ommers can be empty for some blocks - let mut block_ommers_iter = block_ommers.into_iter(); - let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_ommers = block_ommers_iter.next(); - let mut block_withdrawals = block_withdrawals_iter.next(); - - for ((main_block_number, header), (_, header_hash), (_, tx)) in - izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) - { - let header = SealedHeader::new(header, header_hash); - - let (transactions, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip(); - - // Ommers can be missing - let mut ommers = Vec::new(); - if let Some((block_number, _)) = block_ommers.as_ref() { - if *block_number == main_block_number { - ommers = block_ommers.take().unwrap().1.ommers; - block_ommers = block_ommers_iter.next(); - } - }; - - // withdrawal can be missing - let shanghai_is_active = - self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp); - let mut withdrawals = Some(Withdrawals::default()); - if shanghai_is_active { - if let Some((block_number, _)) = block_withdrawals.as_ref() { - if *block_number == main_block_number { - withdrawals = 
Some(block_withdrawals.take().unwrap().1.withdrawals); - block_withdrawals = block_withdrawals_iter.next(); - } - } - } else { - withdrawals = None - } - - blocks.push(SealedBlockWithSenders { - block: SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals }, - }, - senders, - }) - } - - Ok(blocks) - } - /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. /// /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the @@ -3107,23 +2960,6 @@ impl HistoryWriter for DatabaseProvider BlockExecutionReader - for DatabaseProvider -{ - fn get_block_and_execution_range( - &self, - range: RangeInclusive, - ) -> ProviderResult { - // get blocks - let blocks = self.get_block_range(range.clone())?; - - // get execution res - let execution_state = self.get_state(range)?.unwrap_or_default(); - - Ok(Chain::new(blocks, execution_state, None)) - } -} - impl StateReader for DatabaseProvider { fn get_state(&self, block: BlockNumber) -> ProviderResult> { self.get_state(block..=block) diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 08530acf0a7..ed861f5f182 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,10 +1,9 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, - AccountReader, BlockExecutionReader, BlockHashReader, BlockIdReader, BlockNumReader, - BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProvider, - EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, StateProvider, StateProviderBox, - StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, TransactionsProvider, - WithdrawalsProvider, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + ChainSpecProvider, ChangeSetReader, DatabaseProvider, EvmEnvProvider, HeaderProvider, + ReceiptProviderIdExt, StateProvider, 
StateProviderBox, StateProviderFactory, StateReader, + StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_consensus::constants::EMPTY_ROOT_HASH; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; @@ -19,7 +18,7 @@ use reth_chainspec::{ChainInfo, ChainSpec}; use reth_db::mock::{DatabaseMock, TxMock}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::ExecutionOutcome; use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, @@ -818,15 +817,6 @@ impl ChangeSetReader for MockEthProvider { } } -impl BlockExecutionReader for MockEthProvider { - fn get_block_and_execution_range( - &self, - _range: RangeInclusive, - ) -> ProviderResult { - Ok(Chain::default()) - } -} - impl StateReader for MockEthProvider { fn get_state(&self, _block: BlockNumber) -> ProviderResult> { Ok(None) diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 8e3a54d86b9..7202c405f06 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -2,7 +2,6 @@ use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::SealedBlockWithSenders; -use reth_storage_api::BlockReader; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; use std::ops::RangeInclusive; @@ -23,16 +22,6 @@ pub trait BlockExecutionWriter: BlockWriter + Send + Sync { ) -> ProviderResult<()>; } -/// BlockExecution Reader -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait BlockExecutionReader: BlockReader + Send + Sync { - /// Get range of blocks and its execution result - fn 
get_block_and_execution_range( - &self, - range: RangeInclusive, - ) -> ProviderResult; -} - /// This just receives state, or [`ExecutionOutcome`], from the provider #[auto_impl::auto_impl(&, Arc, Box)] pub trait StateReader: Send + Sync { From dd18af1f1617ed855a889fe208ea4ba772409fb5 Mon Sep 17 00:00:00 2001 From: Debjit Bhowal Date: Tue, 29 Oct 2024 16:45:20 +0530 Subject: [PATCH 230/970] feat: without-evm cli option in reth (#12134) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + book/cli/reth/init-state.md | 16 +++ crates/cli/commands/Cargo.toml | 1 + crates/cli/commands/src/init_state.rs | 79 ----------- crates/cli/commands/src/init_state/mod.rs | 132 ++++++++++++++++++ .../commands/src/init_state/without_evm.rs} | 65 +++++---- .../{init_state/mod.rs => init_state.rs} | 12 +- 7 files changed, 194 insertions(+), 112 deletions(-) delete mode 100644 crates/cli/commands/src/init_state.rs create mode 100644 crates/cli/commands/src/init_state/mod.rs rename crates/{optimism/cli/src/commands/init_state/bedrock.rs => cli/commands/src/init_state/without_evm.rs} (70%) rename crates/optimism/cli/src/commands/{init_state/mod.rs => init_state.rs} (87%) diff --git a/Cargo.lock b/Cargo.lock index c7a47e8f81f..8febcffa52c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6616,6 +6616,7 @@ dependencies = [ "ahash", "alloy-eips", "alloy-primitives", + "alloy-rlp", "arbitrary", "backon", "clap", diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index ddcd3cece37..3e073516778 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -72,6 +72,22 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --without-evm + Specifies whether to initialize the state without relying on EVM historical data. + + When enabled, and before inserting the state, it creates a dummy chain up to the last EVM block specified. It then, appends the first block provided block. 
+ + - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be ignored. + + --header + Header file containing the header in an RLP encoded format. + + --total-difficulty + Total difficulty of the header. + + --header-hash + Hash of the header. + JSONL file with state dump. diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index ef66a99410f..a0bc5147700 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -51,6 +51,7 @@ reth-trie-common = { workspace = true, optional = true } # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-rlp.workspace = true itertools.workspace = true futures.workspace = true diff --git a/crates/cli/commands/src/init_state.rs b/crates/cli/commands/src/init_state.rs deleted file mode 100644 index 16e99f8fe97..00000000000 --- a/crates/cli/commands/src/init_state.rs +++ /dev/null @@ -1,79 +0,0 @@ -//! Command that initializes the node from a genesis file. - -use crate::common::{AccessRights, Environment, EnvironmentArgs}; -use alloy_primitives::B256; -use clap::Parser; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_cli::chainspec::ChainSpecParser; -use reth_config::config::EtlConfig; -use reth_db_common::init::init_from_state_dump; -use reth_node_builder::NodeTypesWithEngine; -use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; - -use std::{fs::File, io::BufReader, path::PathBuf}; -use tracing::info; - -/// Initializes the database with the genesis block. -#[derive(Debug, Parser)] -pub struct InitStateCommand { - #[command(flatten)] - pub env: EnvironmentArgs, - - /// JSONL file with state dump. - /// - /// Must contain accounts in following format, additional account fields are ignored. Must - /// also contain { "root": \ } as first line. - /// { - /// "balance": "\", - /// "nonce": \, - /// "code": "\", - /// "storage": { - /// "\": "\", - /// .. 
- /// }, - /// "address": "\", - /// } - /// - /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until - /// and including the non-genesis block to init chain at. See 'import' command. - #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)] - pub state: PathBuf, -} - -impl> InitStateCommand { - /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { - info!(target: "reth::cli", "Reth init-state starting"); - - let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?; - - info!(target: "reth::cli", "Initiating state dump"); - - let hash = init_at_state(self.state, provider_factory, config.stages.etl)?; - - info!(target: "reth::cli", hash = ?hash, "Genesis block written"); - Ok(()) - } -} - -/// Initialize chain with state at specific block, from a file with state dump. -pub fn init_at_state( - state_dump_path: PathBuf, - factory: ProviderFactory, - etl_config: EtlConfig, -) -> eyre::Result { - info!(target: "reth::cli", - path=?state_dump_path, - "Opening state dump"); - - let file = File::open(state_dump_path)?; - let reader = BufReader::new(file); - - let provider_rw = factory.provider_rw()?; - let hash = init_from_state_dump(reader, &provider_rw.0, etl_config)?; - provider_rw.commit()?; - - Ok(hash) -} diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs new file mode 100644 index 00000000000..adaec3e8be3 --- /dev/null +++ b/crates/cli/commands/src/init_state/mod.rs @@ -0,0 +1,132 @@ +//! Command that initializes the node from a genesis file. 
+ +use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use alloy_primitives::{B256, U256}; +use clap::Parser; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_cli::chainspec::ChainSpecParser; +use reth_db_common::init::init_from_state_dump; +use reth_node_builder::NodeTypesWithEngine; +use reth_primitives::SealedHeader; +use reth_provider::{ + BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, +}; + +use std::{fs::File, io::BufReader, path::PathBuf, str::FromStr}; +use tracing::info; + +pub mod without_evm; + +/// Initializes the database with the genesis block. +#[derive(Debug, Parser)] +pub struct InitStateCommand { + #[command(flatten)] + pub env: EnvironmentArgs, + + /// JSONL file with state dump. + /// + /// Must contain accounts in following format, additional account fields are ignored. Must + /// also contain { "root": \ } as first line. + /// { + /// "balance": "\", + /// "nonce": \, + /// "code": "\", + /// "storage": { + /// "\": "\", + /// .. + /// }, + /// "address": "\", + /// } + /// + /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until + /// and including the non-genesis block to init chain at. See 'import' command. + #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)] + pub state: PathBuf, + + /// Specifies whether to initialize the state without relying on EVM historical data. + /// + /// When enabled, and before inserting the state, it creates a dummy chain up to the last EVM + /// block specified. It then, appends the first block provided block. + /// + /// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be + /// ignored. + #[arg(long, default_value = "false")] + pub without_evm: bool, + + /// Header file containing the header in an RLP encoded format. + #[arg(long, value_name = "HEADER_FILE", verbatim_doc_comment)] + pub header: Option, + + /// Total difficulty of the header. 
+ #[arg(long, value_name = "TOTAL_DIFFICULTY", verbatim_doc_comment)] + pub total_difficulty: Option, + + /// Hash of the header. + #[arg(long, value_name = "HEADER_HASH", verbatim_doc_comment)] + pub header_hash: Option, +} + +impl> InitStateCommand { + /// Execute the `init` command + pub async fn execute>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "Reth init-state starting"); + + let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?; + + let static_file_provider = provider_factory.static_file_provider(); + let provider_rw = provider_factory.database_provider_rw()?; + + if self.without_evm { + // ensure header, total difficulty and header hash are provided + let header = self.header.ok_or_else(|| eyre::eyre!("Header file must be provided"))?; + let header = without_evm::read_header_from_file(header)?; + + let header_hash = + self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?; + let header_hash = B256::from_str(&header_hash)?; + + let total_difficulty = self + .total_difficulty + .ok_or_else(|| eyre::eyre!("Total difficulty must be provided"))?; + let total_difficulty = U256::from_str(&total_difficulty)?; + + let last_block_number = provider_rw.last_block_number()?; + + if last_block_number == 0 { + without_evm::setup_without_evm( + &provider_rw, + &static_file_provider, + // &header, + // header_hash, + SealedHeader::new(header, header_hash), + total_difficulty, + )?; + + // SAFETY: it's safe to commit static files, since in the event of a crash, they + // will be unwinded according to database checkpoints. + // + // Necessary to commit, so the header is accessible to provider_rw and + // init_state_dump + static_file_provider.commit()?; + } else if last_block_number > 0 && last_block_number < header.number { + return Err(eyre::eyre!( + "Data directory should be empty when calling init-state with --without-evm-history." 
+ )); + } + } + + info!(target: "reth::cli", "Initiating state dump"); + + let file = File::open(self.state)?; + let reader = BufReader::new(file); + + let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; + + provider_rw.commit()?; + + info!(target: "reth::cli", hash = ?hash, "Genesis block written"); + Ok(()) + } +} diff --git a/crates/optimism/cli/src/commands/init_state/bedrock.rs b/crates/cli/commands/src/init_state/without_evm.rs similarity index 70% rename from crates/optimism/cli/src/commands/init_state/bedrock.rs rename to crates/cli/commands/src/init_state/without_evm.rs index efff065e505..187996653c3 100644 --- a/crates/optimism/cli/src/commands/init_state/bedrock.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,5 +1,6 @@ use alloy_primitives::{BlockNumber, B256, U256}; -use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}; +use alloy_rlp::Decodable; + use reth_primitives::{ BlockBody, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, }; @@ -7,28 +8,42 @@ use reth_provider::{ providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileWriter, }; use reth_stages::{StageCheckpoint, StageId}; + +use std::{fs::File, io::Read, path::PathBuf}; use tracing::info; -/// Creates a dummy chain (with no transactions) up to the last OVM block and appends the -/// first valid Bedrock block. -pub(crate) fn setup_op_mainnet_without_ovm( +/// Reads the header RLP from a file and returns the Header. +pub(crate) fn read_header_from_file(path: PathBuf) -> Result { + let mut file = File::open(path)?; + let mut buf = Vec::new(); + file.read_to_end(&mut buf)?; + + let header = Header::decode(&mut &buf[..])?; + Ok(header) +} + +/// Creates a dummy chain (with no transactions) up to the last EVM block and appends the +/// first valid block. 
+pub fn setup_without_evm( provider_rw: &Provider, static_file_provider: &StaticFileProvider, + header: SealedHeader, + total_difficulty: U256, ) -> Result<(), eyre::Error> where Provider: StageCheckpointWriter + BlockWriter, { - info!(target: "reth::cli", "Setting up dummy OVM chain before importing state."); + info!(target: "reth::cli", "Setting up dummy EVM chain before importing state."); - // Write OVM dummy data up to `BEDROCK_HEADER - 1` block - append_dummy_chain(static_file_provider, BEDROCK_HEADER.number - 1)?; + // Write EVM dummy data up to `header - 1` block + append_dummy_chain(static_file_provider, header.number - 1)?; - info!(target: "reth::cli", "Appending Bedrock block."); + info!(target: "reth::cli", "Appending first valid block."); - append_bedrock_block(provider_rw, static_file_provider)?; + append_first_block(provider_rw, static_file_provider, &header, total_difficulty)?; for stage in StageId::ALL { - provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(BEDROCK_HEADER.number))?; + provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number))?; } info!(target: "reth::cli", "Set up finished."); @@ -36,38 +51,30 @@ where Ok(()) } -/// Appends the first bedrock block. +/// Appends the first block. /// /// By appending it, static file writer also verifies that all segments are at the same /// height. 
-fn append_bedrock_block( +fn append_first_block( provider_rw: impl BlockWriter, sf_provider: &StaticFileProvider, + header: &SealedHeader, + total_difficulty: U256, ) -> Result<(), eyre::Error> { provider_rw.insert_block( - SealedBlockWithSenders::new( - SealedBlock::new( - SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), - BlockBody::default(), - ), - vec![], - ) - .expect("no senders or txes"), + SealedBlockWithSenders::new(SealedBlock::new(header.clone(), BlockBody::default()), vec![]) + .expect("no senders or txes"), )?; sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header( - &BEDROCK_HEADER, - BEDROCK_HEADER_TTD, - &BEDROCK_HEADER_HASH, + header, + total_difficulty, + &header.hash(), )?; - sf_provider - .latest_writer(StaticFileSegment::Receipts)? - .increment_block(BEDROCK_HEADER.number)?; + sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number)?; - sf_provider - .latest_writer(StaticFileSegment::Transactions)? - .increment_block(BEDROCK_HEADER.number)?; + sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number)?; Ok(()) } diff --git a/crates/optimism/cli/src/commands/init_state/mod.rs b/crates/optimism/cli/src/commands/init_state.rs similarity index 87% rename from crates/optimism/cli/src/commands/init_state/mod.rs rename to crates/optimism/cli/src/commands/init_state.rs index 3537f89e751..68f5d9a585f 100644 --- a/crates/optimism/cli/src/commands/init_state/mod.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -6,7 +6,8 @@ use reth_cli_commands::common::{AccessRights, Environment}; use reth_db_common::init::init_from_state_dump; use reth_node_builder::NodeTypesWithEngine; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_primitives::bedrock::BEDROCK_HEADER; +use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}; +use reth_primitives::SealedHeader; use reth_provider::{ BlockNumReader, ChainSpecProvider, 
DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, @@ -14,8 +15,6 @@ use reth_provider::{ use std::{fs::File, io::BufReader}; use tracing::info; -mod bedrock; - /// Initializes the database with the genesis block. #[derive(Debug, Parser)] pub struct InitStateCommandOp { @@ -53,7 +52,12 @@ impl> InitStateCommandOp { let last_block_number = provider_rw.last_block_number()?; if last_block_number == 0 { - bedrock::setup_op_mainnet_without_ovm(&provider_rw, &static_file_provider)?; + reth_cli_commands::init_state::without_evm::setup_without_evm( + &provider_rw, + &static_file_provider, + SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), + BEDROCK_HEADER_TTD, + )?; // SAFETY: it's safe to commit static files, since in the event of a crash, they // will be unwinded according to database checkpoints. From 3c6077812678e1a63896162db0bab225a809771d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 29 Oct 2024 13:01:59 +0100 Subject: [PATCH 231/970] storage: `into_iter` with `self` by value (#12115) --- crates/storage/libmdbx-rs/src/cursor.rs | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 3deff0c249b..26cfef54d8d 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -59,19 +59,18 @@ where } /// Returns an iterator over the raw key value slices. - #[allow(clippy::needless_lifetimes)] - pub fn iter_slices<'a>(&'a self) -> IntoIter<'a, K, Cow<'a, [u8]>, Cow<'a, [u8]>> { + pub fn iter_slices<'a>(self) -> IntoIter, Cow<'a, [u8]>> { self.into_iter() } /// Returns an iterator over database items. 
#[allow(clippy::should_implement_trait)] - pub fn into_iter(&self) -> IntoIter<'_, K, Key, Value> + pub fn into_iter(self) -> IntoIter where Key: TableObject, Value: TableObject, { - IntoIter::new(self.clone(), MDBX_NEXT, MDBX_NEXT) + IntoIter::new(self, MDBX_NEXT, MDBX_NEXT) } /// Retrieves a key/data pair from the cursor. Depending on the cursor op, @@ -508,7 +507,7 @@ unsafe impl Sync for Cursor where K: TransactionKind {} /// An iterator over the key/value pairs in an MDBX database. #[derive(Debug)] -pub enum IntoIter<'cur, K, Key, Value> +pub enum IntoIter where K: TransactionKind, Key: TableObject, @@ -535,11 +534,11 @@ where /// The next and subsequent operations to perform. next_op: ffi::MDBX_cursor_op, - _marker: PhantomData<(&'cur (), Key, Value)>, + _marker: PhantomData<(Key, Value)>, }, } -impl IntoIter<'_, K, Key, Value> +impl IntoIter where K: TransactionKind, Key: TableObject, @@ -547,11 +546,11 @@ where { /// Creates a new iterator backed by the given cursor. fn new(cursor: Cursor, op: ffi::MDBX_cursor_op, next_op: ffi::MDBX_cursor_op) -> Self { - IntoIter::Ok { cursor, op, next_op, _marker: Default::default() } + Self::Ok { cursor, op, next_op, _marker: Default::default() } } } -impl Iterator for IntoIter<'_, K, Key, Value> +impl Iterator for IntoIter where K: TransactionKind, Key: TableObject, @@ -747,13 +746,13 @@ where } } -impl<'cur, K, Key, Value> Iterator for IterDup<'cur, K, Key, Value> +impl Iterator for IterDup<'_, K, Key, Value> where K: TransactionKind, Key: TableObject, Value: TableObject, { - type Item = IntoIter<'cur, K, Key, Value>; + type Item = IntoIter; fn next(&mut self) -> Option { match self { From 1653877ed50b7a3d8d31104e18fd39f1e0ba66c9 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 29 Oct 2024 21:14:41 +0900 Subject: [PATCH 232/970] chore(ci): try to read all vectors on `compact-codec` before exiting in error (#12160) --- .../cli/commands/src/test_vectors/compact.rs | 20 
+++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index 94552d5c215..8def25fa39b 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -167,9 +167,22 @@ pub fn generate_vectors_with(gen: &[fn(&mut TestRunner) -> eyre::Result<()>]) -> /// re-encoding. pub fn read_vectors_with(read: &[fn() -> eyre::Result<()>]) -> Result<()> { fs::create_dir_all(VECTORS_FOLDER)?; + let mut errors = None; for read_fn in read { - read_fn()?; + if let Err(err) = read_fn() { + errors.get_or_insert_with(Vec::new).push(err); + } + } + + if let Some(err_list) = errors { + for error in err_list { + eprintln!("{:?}", error); + } + return Err(eyre::eyre!( + "If there are missing types, make sure to run `reth test-vectors compact --write` first.\n + If it happened during CI, ignore IF it's a new proposed type that `main` branch does not have." + )); } Ok(()) @@ -238,9 +251,8 @@ where // Read the file where the vectors are stored let file_path = format!("{VECTORS_FOLDER}/{}.json", &type_name); - let file = File::open(&file_path).wrap_err_with(|| { - "Failed to open vector. Make sure to run `reth test-vectors compact --write` first." 
- })?; + let file = + File::open(&file_path).wrap_err_with(|| format!("Failed to open vector {type_name}."))?; let reader = BufReader::new(file); let stored_values: Vec = serde_json::from_reader(reader)?; From 0f9ba64e954136a5507419ced37d1de96ff5b262 Mon Sep 17 00:00:00 2001 From: Evan Chipman <42247026+evchip@users.noreply.github.com> Date: Tue, 29 Oct 2024 19:17:04 +0700 Subject: [PATCH 233/970] feat: add geometry to database args (#11828) Co-authored-by: Matthias Seitz --- book/cli/reth/db.md | 6 + book/cli/reth/db/diff.md | 6 + book/cli/reth/debug/build-block.md | 6 + book/cli/reth/debug/execution.md | 6 + book/cli/reth/debug/in-memory-merkle.md | 6 + book/cli/reth/debug/merkle.md | 6 + book/cli/reth/debug/replay-engine.md | 6 + book/cli/reth/import.md | 6 + book/cli/reth/init-state.md | 6 + book/cli/reth/init.md | 6 + book/cli/reth/node.md | 6 + book/cli/reth/p2p.md | 6 + book/cli/reth/prune.md | 6 + book/cli/reth/recover/storage-tries.md | 6 + book/cli/reth/stage/drop.md | 6 + book/cli/reth/stage/dump.md | 6 + book/cli/reth/stage/run.md | 6 + book/cli/reth/stage/unwind.md | 6 + crates/node/core/src/args/database.rs | 183 +++++++++++++++++- .../storage/db/src/implementation/mdbx/mod.rs | 48 +++-- 20 files changed, 323 insertions(+), 16 deletions(-) diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index f9a8a158adc..17a6de4e607 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -81,6 +81,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index f57c6ac364f..efb9e7d32e3 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -45,6 +45,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database 
growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/build-block.md b/book/cli/reth/debug/build-block.md index 2e6d637d52c..7bceb62b940 100644 --- a/book/cli/reth/debug/build-block.md +++ b/book/cli/reth/debug/build-block.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/execution.md b/book/cli/reth/debug/execution.md index 9ca74897c5e..b8e1ce05d17 100644 --- a/book/cli/reth/debug/execution.md +++ b/book/cli/reth/debug/execution.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/in-memory-merkle.md b/book/cli/reth/debug/in-memory-merkle.md index 3e322a6913d..a183db997e9 100644 --- a/book/cli/reth/debug/in-memory-merkle.md +++ b/book/cli/reth/debug/in-memory-merkle.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/merkle.md b/book/cli/reth/debug/merkle.md index d701803b81c..d9a72794ef2 100644 --- a/book/cli/reth/debug/merkle.md +++ b/book/cli/reth/debug/merkle.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff 
--git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md index dd587620a86..b7a1266d399 100644 --- a/book/cli/reth/debug/replay-engine.md +++ b/book/cli/reth/debug/replay-engine.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 28e085bda71..82a521ac0ab 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index 3e073516778..533c0f8f888 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index cd01accc047..ebe2a8386cf 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index a3ff8f6a57b..52f597279f0 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -590,6 +590,12 @@ Database: [possible values: true, false] + --db.max-size + 
Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 603b451d940..33639042a1d 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -247,6 +247,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/prune.md b/book/cli/reth/prune.md index ed16197a76c..41684ecd9e0 100644 --- a/book/cli/reth/prune.md +++ b/book/cli/reth/prune.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index ecdaabe7781..1afe94f55db 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index 399b3818c28..c22d6be6680 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/dump.md 
b/book/cli/reth/stage/dump.md index 4b3de3fb1cb..e3df5bf2df7 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -76,6 +76,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index 9da3ce0deb6..204efc9685b 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index 700ab3d7e7c..cb72b9313c0 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -74,6 +74,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 0eec6639a11..16ba6193552 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -1,6 +1,6 @@ //! clap [Args](clap::Args) for database configuration -use std::time::Duration; +use std::{fmt, str::FromStr, time::Duration}; use crate::version::default_client_version; use clap::{ @@ -22,6 +22,12 @@ pub struct DatabaseArgs { /// NFS volume. 
#[arg(long = "db.exclusive")] pub exclusive: Option, + /// Maximum database size (e.g., 4TB, 8MB) + #[arg(long = "db.max-size", value_parser = parse_byte_size)] + pub max_size: Option, + /// Database growth step (e.g., 4GB, 4KB) + #[arg(long = "db.growth-step", value_parser = parse_byte_size)] + pub growth_step: Option, /// Read transaction timeout in seconds, 0 means no timeout. #[arg(long = "db.read-transaction-timeout")] pub read_transaction_timeout: Option, @@ -33,8 +39,9 @@ impl DatabaseArgs { self.get_database_args(default_client_version()) } - /// Returns the database arguments with configured log level and given client version. - pub const fn get_database_args( + /// Returns the database arguments with configured log level, client version, + /// max read transaction duration, and geometry. + pub fn get_database_args( &self, client_version: ClientVersion, ) -> reth_db::mdbx::DatabaseArguments { @@ -48,6 +55,7 @@ impl DatabaseArgs { .with_log_level(self.log_level) .with_exclusive(self.exclusive) .with_max_read_transaction_duration(max_read_transaction_duration) + .with_geometry(self.max_size, self.growth_step) } } @@ -89,10 +97,84 @@ impl TypedValueParser for LogLevelValueParser { Some(Box::new(values)) } } + +/// Size in bytes. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +pub struct ByteSize(pub usize); + +impl From for usize { + fn from(s: ByteSize) -> Self { + s.0 + } +} + +impl FromStr for ByteSize { + type Err = String; + + fn from_str(s: &str) -> Result { + let s = s.trim().to_uppercase(); + let parts: Vec<&str> = s.split_whitespace().collect(); + + let (num_str, unit) = match parts.len() { + 1 => { + let (num, unit) = + s.split_at(s.find(|c: char| c.is_alphabetic()).unwrap_or(s.len())); + (num, unit) + } + 2 => (parts[0], parts[1]), + _ => { + return Err("Invalid format. 
Use '' or ' '.".to_string()) + } + }; + + let num: usize = num_str.parse().map_err(|_| "Invalid number".to_string())?; + + let multiplier = match unit { + "B" | "" => 1, // Assume bytes if no unit is specified + "KB" => 1024, + "MB" => 1024 * 1024, + "GB" => 1024 * 1024 * 1024, + "TB" => 1024 * 1024 * 1024 * 1024, + _ => return Err(format!("Invalid unit: {}. Use B, KB, MB, GB, or TB.", unit)), + }; + + Ok(Self(num * multiplier)) + } +} + +impl fmt::Display for ByteSize { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + const KB: usize = 1024; + const MB: usize = KB * 1024; + const GB: usize = MB * 1024; + const TB: usize = GB * 1024; + + let (size, unit) = if self.0 >= TB { + (self.0 as f64 / TB as f64, "TB") + } else if self.0 >= GB { + (self.0 as f64 / GB as f64, "GB") + } else if self.0 >= MB { + (self.0 as f64 / MB as f64, "MB") + } else if self.0 >= KB { + (self.0 as f64 / KB as f64, "KB") + } else { + (self.0 as f64, "B") + }; + + write!(f, "{:.2}{}", size, unit) + } +} + +/// Value parser function that supports various formats. 
+fn parse_byte_size(s: &str) -> Result { + s.parse::().map(Into::into) +} + #[cfg(test)] mod tests { use super::*; use clap::Parser; + use reth_db::mdbx::{GIGABYTE, KILOBYTE, MEGABYTE, TERABYTE}; /// A helper type to parse Args more easily #[derive(Parser)] @@ -108,6 +190,101 @@ mod tests { assert_eq!(args, default_args); } + #[test] + fn test_command_parser_with_valid_max_size() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "4398046511104", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(TERABYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_max_size() { + let result = + CommandParser::::try_parse_from(["reth", "--db.max-size", "invalid"]); + assert!(result.is_err()); + } + + #[test] + fn test_command_parser_with_valid_growth_step() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.growth-step", + "4294967296", + ]) + .unwrap(); + assert_eq!(cmd.args.growth_step, Some(GIGABYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_growth_step() { + let result = + CommandParser::::try_parse_from(["reth", "--db.growth-step", "invalid"]); + assert!(result.is_err()); + } + + #[test] + fn test_command_parser_with_valid_max_size_and_growth_step_from_str() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "2TB", + "--db.growth-step", + "1GB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(TERABYTE * 2)); + assert_eq!(cmd.args.growth_step, Some(GIGABYTE)); + + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "12MB", + "--db.growth-step", + "2KB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); + assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); + + // with spaces + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "12 MB", + "--db.growth-step", + "2 KB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); + assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); + 
+ let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "1073741824", + "--db.growth-step", + "1048576", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(GIGABYTE)); + assert_eq!(cmd.args.growth_step, Some(MEGABYTE)); + } + + #[test] + fn test_command_parser_max_size_and_growth_step_from_str_invalid_unit() { + let result = + CommandParser::::try_parse_from(["reth", "--db.growth-step", "1 PB"]); + assert!(result.is_err()); + + let result = + CommandParser::::try_parse_from(["reth", "--db.max-size", "2PB"]); + assert!(result.is_err()); + } + #[test] fn test_possible_values() { // Initialize the LogLevelValueParser diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 65b804e6a58..92ad2027276 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -23,7 +23,7 @@ use reth_libmdbx::{ use reth_storage_errors::db::LogLevel; use reth_tracing::tracing::error; use std::{ - ops::Deref, + ops::{Deref, Range}, path::Path, sync::Arc, time::{SystemTime, UNIX_EPOCH}, @@ -33,8 +33,14 @@ use tx::Tx; pub mod cursor; pub mod tx; -const GIGABYTE: usize = 1024 * 1024 * 1024; -const TERABYTE: usize = GIGABYTE * 1024; +/// 1 KB in bytes +pub const KILOBYTE: usize = 1024; +/// 1 MB in bytes +pub const MEGABYTE: usize = KILOBYTE * 1024; +/// 1 GB in bytes +pub const GIGABYTE: usize = MEGABYTE * 1024; +/// 1 TB in bytes +pub const TERABYTE: usize = GIGABYTE * 1024; /// MDBX allows up to 32767 readers (`MDBX_READERS_LIMIT`), but we limit it to slightly below that const DEFAULT_MAX_READERS: u64 = 32_000; @@ -64,6 +70,8 @@ impl DatabaseEnvKind { pub struct DatabaseArguments { /// Client version that accesses the database. client_version: ClientVersion, + /// Database geometry settings. + geometry: Geometry>, /// Database log level. If [None], the default value is used. log_level: Option, /// Maximum duration of a read transaction. 
If [None], the default value is used. @@ -93,15 +101,37 @@ pub struct DatabaseArguments { impl DatabaseArguments { /// Create new database arguments with given client version. - pub const fn new(client_version: ClientVersion) -> Self { + pub fn new(client_version: ClientVersion) -> Self { Self { client_version, + geometry: Geometry { + size: Some(0..(4 * TERABYTE)), + growth_step: Some(4 * GIGABYTE as isize), + shrink_threshold: Some(0), + page_size: Some(PageSize::Set(default_page_size())), + }, log_level: None, max_read_transaction_duration: None, exclusive: None, } } + /// Set the geometry. + /// + /// # Arguments + /// + /// * `max_size` - Maximum database size in bytes + /// * `growth_step` - Database growth step in bytes + pub fn with_geometry(mut self, max_size: Option, growth_step: Option) -> Self { + self.geometry = Geometry { + size: max_size.map(|size| 0..size), + growth_step: growth_step.map(|growth_step| growth_step as isize), + shrink_threshold: Some(0), + page_size: Some(PageSize::Set(default_page_size())), + }; + self + } + /// Set the log level. pub const fn with_log_level(mut self, log_level: Option) -> Self { self.log_level = log_level; @@ -278,15 +308,7 @@ impl DatabaseEnv { // environment creation. 
debug_assert!(Tables::ALL.len() <= 256, "number of tables exceed max dbs"); inner_env.set_max_dbs(256); - inner_env.set_geometry(Geometry { - // Maximum database size of 4 terabytes - size: Some(0..(4 * TERABYTE)), - // We grow the database in increments of 4 gigabytes - growth_step: Some(4 * GIGABYTE as isize), - // The database never shrinks - shrink_threshold: Some(0), - page_size: Some(PageSize::Set(default_page_size())), - }); + inner_env.set_geometry(args.geometry); fn is_current_process(id: u32) -> bool { #[cfg(unix)] From 4a8799f98bba7aa80a8c333f9b32f23221cb0cb8 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 29 Oct 2024 13:17:48 +0100 Subject: [PATCH 234/970] consensus: add unit tests for `ForkchoiceStateTracker` and `ForkchoiceStateHash` (#12077) --- .../consensus/beacon/src/engine/forkchoice.rs | 269 ++++++++++++++++-- 1 file changed, 243 insertions(+), 26 deletions(-) diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index 7e49714ba37..a9d9301738f 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -8,7 +8,6 @@ pub struct ForkchoiceStateTracker { /// /// Caution: this can be invalid. latest: Option, - /// Tracks the latest forkchoice state that we received to which we need to sync. last_syncing: Option, /// The latest valid forkchoice state that we received and processed as valid. 
@@ -48,19 +47,19 @@ impl ForkchoiceStateTracker { /// Returns whether the latest received FCU is valid: [`ForkchoiceStatus::Valid`] #[allow(dead_code)] pub(crate) fn is_latest_valid(&self) -> bool { - self.latest_status().map(|s| s.is_valid()).unwrap_or(false) + self.latest_status().map_or(false, |s| s.is_valid()) } /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Syncing`] #[allow(dead_code)] pub(crate) fn is_latest_syncing(&self) -> bool { - self.latest_status().map(|s| s.is_syncing()).unwrap_or(false) + self.latest_status().map_or(false, |s| s.is_syncing()) } /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Invalid`] #[allow(dead_code)] pub(crate) fn is_latest_invalid(&self) -> bool { - self.latest_status().map(|s| s.is_invalid()).unwrap_or(false) + self.latest_status().map_or(false, |s| s.is_invalid()) } /// Returns the last valid head hash. @@ -75,32 +74,28 @@ impl ForkchoiceStateTracker { self.last_syncing.as_ref().map(|s| s.head_block_hash) } - /// Returns the latest received `ForkchoiceState`. + /// Returns the latest received [`ForkchoiceState`]. /// /// Caution: this can be invalid. pub const fn latest_state(&self) -> Option { self.last_valid } - /// Returns the last valid `ForkchoiceState`. + /// Returns the last valid [`ForkchoiceState`]. pub const fn last_valid_state(&self) -> Option { self.last_valid } /// Returns the last valid finalized hash. /// - /// This will return [`None`], if either there is no valid finalized forkchoice state, or the - /// finalized hash for the latest valid forkchoice state is zero. + /// This will return [`None`]: + /// - If either there is no valid finalized forkchoice state, + /// - Or the finalized hash for the latest valid forkchoice state is zero. 
#[inline] pub fn last_valid_finalized(&self) -> Option { - self.last_valid.and_then(|state| { - // if the hash is zero then we should act like there is no finalized hash - if state.finalized_block_hash.is_zero() { - None - } else { - Some(state.finalized_block_hash) - } - }) + self.last_valid + .filter(|state| !state.finalized_block_hash.is_zero()) + .map(|state| state.finalized_block_hash) } /// Returns the last received `ForkchoiceState` to which we need to sync. @@ -110,18 +105,14 @@ impl ForkchoiceStateTracker { /// Returns the sync target finalized hash. /// - /// This will return [`None`], if either there is no sync target forkchoice state, or the - /// finalized hash for the sync target forkchoice state is zero. + /// This will return [`None`]: + /// - If either there is no sync target forkchoice state, + /// - Or the finalized hash for the sync target forkchoice state is zero. #[inline] pub fn sync_target_finalized(&self) -> Option { - self.last_syncing.and_then(|state| { - // if the hash is zero then we should act like there is no finalized hash - if state.finalized_block_hash.is_zero() { - None - } else { - Some(state.finalized_block_hash) - } - }) + self.last_syncing + .filter(|state| !state.finalized_block_hash.is_zero()) + .map(|state| state.finalized_block_hash) } /// Returns true if no forkchoice state has been received yet. 
@@ -222,3 +213,229 @@ impl AsRef for ForkchoiceStateHash { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_forkchoice_state_tracker_set_latest_valid() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Latest state is None + assert!(tracker.latest_status().is_none()); + + // Create a valid ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + let status = ForkchoiceStatus::Valid; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is updated + assert!(tracker.last_valid.is_some()); + assert_eq!(tracker.last_valid.as_ref().unwrap(), &state); + + // Assert that last syncing state is None + assert!(tracker.last_syncing.is_none()); + + // Test when there is a latest status and it is valid + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Valid)); + } + + #[test] + fn test_forkchoice_state_tracker_set_latest_syncing() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Create a syncing ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[0; 32]), // Zero to simulate not finalized + }; + let status = ForkchoiceStatus::Syncing; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is None since the status is syncing + assert!(tracker.last_valid.is_none()); + + // Assert that last syncing state is updated + assert!(tracker.last_syncing.is_some()); + assert_eq!(tracker.last_syncing.as_ref().unwrap(), &state); + + // Test 
when there is a latest status and it is syncing + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Syncing)); + } + + #[test] + fn test_forkchoice_state_tracker_set_latest_invalid() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Create an invalid ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + let status = ForkchoiceStatus::Invalid; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is None since the status is invalid + assert!(tracker.last_valid.is_none()); + + // Assert that last syncing state is None since the status is invalid + assert!(tracker.last_syncing.is_none()); + + // Test when there is a latest status and it is invalid + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Invalid)); + } + + #[test] + fn test_forkchoice_state_tracker_sync_target() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Test when there is no last syncing state (should return None) + assert!(tracker.sync_target().is_none()); + + // Set a last syncing forkchoice state + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + tracker.last_syncing = Some(state); + + // Test when the last syncing state is set (should return the head block hash) + assert_eq!(tracker.sync_target(), Some(B256::from_slice(&[1; 32]))); + } + + #[test] + fn test_forkchoice_state_tracker_last_valid_finalized() { + let mut tracker = ForkchoiceStateTracker::default(); + + // No valid finalized state (should return None) + assert!(tracker.last_valid_finalized().is_none()); + + // Valid finalized state, but finalized hash is 
zero (should return None) + let zero_finalized_state = ForkchoiceState { + head_block_hash: B256::ZERO, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, // Zero finalized hash + }; + tracker.last_valid = Some(zero_finalized_state); + assert!(tracker.last_valid_finalized().is_none()); + + // Valid finalized state with non-zero finalized hash (should return finalized hash) + let valid_finalized_state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[123; 32]), // Non-zero finalized hash + }; + tracker.last_valid = Some(valid_finalized_state); + assert_eq!(tracker.last_valid_finalized(), Some(B256::from_slice(&[123; 32]))); + + // Reset the last valid state to None + tracker.last_valid = None; + assert!(tracker.last_valid_finalized().is_none()); + } + + #[test] + fn test_forkchoice_state_tracker_sync_target_finalized() { + let mut tracker = ForkchoiceStateTracker::default(); + + // No sync target state (should return None) + assert!(tracker.sync_target_finalized().is_none()); + + // Sync target state with finalized hash as zero (should return None) + let zero_finalized_sync_target = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::ZERO, // Zero finalized hash + }; + tracker.last_syncing = Some(zero_finalized_sync_target); + assert!(tracker.sync_target_finalized().is_none()); + + // Sync target state with non-zero finalized hash (should return the hash) + let valid_sync_target = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[22; 32]), // Non-zero finalized hash + }; + tracker.last_syncing = Some(valid_sync_target); + assert_eq!(tracker.sync_target_finalized(), Some(B256::from_slice(&[22; 32]))); + + // Reset the last sync target state to None + 
tracker.last_syncing = None; + assert!(tracker.sync_target_finalized().is_none()); + } + + #[test] + fn test_forkchoice_state_tracker_is_empty() { + let mut forkchoice = ForkchoiceStateTracker::default(); + + // Initially, no forkchoice state has been received, so it should be empty. + assert!(forkchoice.is_empty()); + + // After setting a forkchoice state, it should no longer be empty. + forkchoice.set_latest(ForkchoiceState::default(), ForkchoiceStatus::Valid); + assert!(!forkchoice.is_empty()); + + // Reset the forkchoice latest, it should be empty again. + forkchoice.latest = None; + assert!(forkchoice.is_empty()); + } + + #[test] + fn test_forkchoice_state_hash_find() { + // Define example hashes + let head_hash = B256::random(); + let safe_hash = B256::random(); + let finalized_hash = B256::random(); + let non_matching_hash = B256::random(); + + // Create a ForkchoiceState with specific hashes + let state = ForkchoiceState { + head_block_hash: head_hash, + safe_block_hash: safe_hash, + finalized_block_hash: finalized_hash, + }; + + // Test finding the head hash + assert_eq!( + ForkchoiceStateHash::find(&state, head_hash), + Some(ForkchoiceStateHash::Head(head_hash)) + ); + + // Test finding the safe hash + assert_eq!( + ForkchoiceStateHash::find(&state, safe_hash), + Some(ForkchoiceStateHash::Safe(safe_hash)) + ); + + // Test finding the finalized hash + assert_eq!( + ForkchoiceStateHash::find(&state, finalized_hash), + Some(ForkchoiceStateHash::Finalized(finalized_hash)) + ); + + // Test with a hash that doesn't match any of the hashes in ForkchoiceState + assert_eq!(ForkchoiceStateHash::find(&state, non_matching_hash), None); + } +} From 6f3600dc38f7548a40999cc4c479f84d69326b76 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 29 Oct 2024 13:58:57 +0100 Subject: [PATCH 235/970] Revert "feat: add geometry to database args" (#12164) --- book/cli/reth/db.md | 6 - book/cli/reth/db/diff.md | 6 - book/cli/reth/debug/build-block.md | 6 - 
book/cli/reth/debug/execution.md | 6 - book/cli/reth/debug/in-memory-merkle.md | 6 - book/cli/reth/debug/merkle.md | 6 - book/cli/reth/debug/replay-engine.md | 6 - book/cli/reth/import.md | 6 - book/cli/reth/init-state.md | 6 - book/cli/reth/init.md | 6 - book/cli/reth/node.md | 6 - book/cli/reth/p2p.md | 6 - book/cli/reth/prune.md | 6 - book/cli/reth/recover/storage-tries.md | 6 - book/cli/reth/stage/drop.md | 6 - book/cli/reth/stage/dump.md | 6 - book/cli/reth/stage/run.md | 6 - book/cli/reth/stage/unwind.md | 6 - crates/node/core/src/args/database.rs | 183 +----------------- .../storage/db/src/implementation/mdbx/mod.rs | 48 ++--- 20 files changed, 16 insertions(+), 323 deletions(-) diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index 17a6de4e607..f9a8a158adc 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -81,12 +81,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index efb9e7d32e3..f57c6ac364f 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -45,12 +45,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/build-block.md b/book/cli/reth/debug/build-block.md index 7bceb62b940..2e6d637d52c 100644 --- a/book/cli/reth/debug/build-block.md +++ b/book/cli/reth/debug/build-block.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no 
timeout diff --git a/book/cli/reth/debug/execution.md b/book/cli/reth/debug/execution.md index b8e1ce05d17..9ca74897c5e 100644 --- a/book/cli/reth/debug/execution.md +++ b/book/cli/reth/debug/execution.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/in-memory-merkle.md b/book/cli/reth/debug/in-memory-merkle.md index a183db997e9..3e322a6913d 100644 --- a/book/cli/reth/debug/in-memory-merkle.md +++ b/book/cli/reth/debug/in-memory-merkle.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/merkle.md b/book/cli/reth/debug/merkle.md index d9a72794ef2..d701803b81c 100644 --- a/book/cli/reth/debug/merkle.md +++ b/book/cli/reth/debug/merkle.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md index b7a1266d399..dd587620a86 100644 --- a/book/cli/reth/debug/replay-engine.md +++ b/book/cli/reth/debug/replay-engine.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 82a521ac0ab..28e085bda71 100644 --- 
a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index 533c0f8f888..3e073516778 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index ebe2a8386cf..cd01accc047 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 52f597279f0..a3ff8f6a57b 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -590,12 +590,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 33639042a1d..603b451d940 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -247,12 +247,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in 
seconds, 0 means no timeout diff --git a/book/cli/reth/prune.md b/book/cli/reth/prune.md index 41684ecd9e0..ed16197a76c 100644 --- a/book/cli/reth/prune.md +++ b/book/cli/reth/prune.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index 1afe94f55db..ecdaabe7781 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index c22d6be6680..399b3818c28 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index e3df5bf2df7..4b3de3fb1cb 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -76,12 +76,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index 204efc9685b..9da3ce0deb6 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ 
-69,12 +69,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index cb72b9313c0..700ab3d7e7c 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -74,12 +74,6 @@ Database: [possible values: true, false] - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 16ba6193552..0eec6639a11 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -1,6 +1,6 @@ //! clap [Args](clap::Args) for database configuration -use std::{fmt, str::FromStr, time::Duration}; +use std::time::Duration; use crate::version::default_client_version; use clap::{ @@ -22,12 +22,6 @@ pub struct DatabaseArgs { /// NFS volume. #[arg(long = "db.exclusive")] pub exclusive: Option, - /// Maximum database size (e.g., 4TB, 8MB) - #[arg(long = "db.max-size", value_parser = parse_byte_size)] - pub max_size: Option, - /// Database growth step (e.g., 4GB, 4KB) - #[arg(long = "db.growth-step", value_parser = parse_byte_size)] - pub growth_step: Option, /// Read transaction timeout in seconds, 0 means no timeout. #[arg(long = "db.read-transaction-timeout")] pub read_transaction_timeout: Option, @@ -39,9 +33,8 @@ impl DatabaseArgs { self.get_database_args(default_client_version()) } - /// Returns the database arguments with configured log level, client version, - /// max read transaction duration, and geometry. - pub fn get_database_args( + /// Returns the database arguments with configured log level and given client version. 
+ pub const fn get_database_args( &self, client_version: ClientVersion, ) -> reth_db::mdbx::DatabaseArguments { @@ -55,7 +48,6 @@ impl DatabaseArgs { .with_log_level(self.log_level) .with_exclusive(self.exclusive) .with_max_read_transaction_duration(max_read_transaction_duration) - .with_geometry(self.max_size, self.growth_step) } } @@ -97,84 +89,10 @@ impl TypedValueParser for LogLevelValueParser { Some(Box::new(values)) } } - -/// Size in bytes. -#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] -pub struct ByteSize(pub usize); - -impl From for usize { - fn from(s: ByteSize) -> Self { - s.0 - } -} - -impl FromStr for ByteSize { - type Err = String; - - fn from_str(s: &str) -> Result { - let s = s.trim().to_uppercase(); - let parts: Vec<&str> = s.split_whitespace().collect(); - - let (num_str, unit) = match parts.len() { - 1 => { - let (num, unit) = - s.split_at(s.find(|c: char| c.is_alphabetic()).unwrap_or(s.len())); - (num, unit) - } - 2 => (parts[0], parts[1]), - _ => { - return Err("Invalid format. Use '' or ' '.".to_string()) - } - }; - - let num: usize = num_str.parse().map_err(|_| "Invalid number".to_string())?; - - let multiplier = match unit { - "B" | "" => 1, // Assume bytes if no unit is specified - "KB" => 1024, - "MB" => 1024 * 1024, - "GB" => 1024 * 1024 * 1024, - "TB" => 1024 * 1024 * 1024 * 1024, - _ => return Err(format!("Invalid unit: {}. 
Use B, KB, MB, GB, or TB.", unit)), - }; - - Ok(Self(num * multiplier)) - } -} - -impl fmt::Display for ByteSize { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - const KB: usize = 1024; - const MB: usize = KB * 1024; - const GB: usize = MB * 1024; - const TB: usize = GB * 1024; - - let (size, unit) = if self.0 >= TB { - (self.0 as f64 / TB as f64, "TB") - } else if self.0 >= GB { - (self.0 as f64 / GB as f64, "GB") - } else if self.0 >= MB { - (self.0 as f64 / MB as f64, "MB") - } else if self.0 >= KB { - (self.0 as f64 / KB as f64, "KB") - } else { - (self.0 as f64, "B") - }; - - write!(f, "{:.2}{}", size, unit) - } -} - -/// Value parser function that supports various formats. -fn parse_byte_size(s: &str) -> Result { - s.parse::().map(Into::into) -} - #[cfg(test)] mod tests { use super::*; use clap::Parser; - use reth_db::mdbx::{GIGABYTE, KILOBYTE, MEGABYTE, TERABYTE}; /// A helper type to parse Args more easily #[derive(Parser)] @@ -190,101 +108,6 @@ mod tests { assert_eq!(args, default_args); } - #[test] - fn test_command_parser_with_valid_max_size() { - let cmd = CommandParser::::try_parse_from([ - "reth", - "--db.max-size", - "4398046511104", - ]) - .unwrap(); - assert_eq!(cmd.args.max_size, Some(TERABYTE * 4)); - } - - #[test] - fn test_command_parser_with_invalid_max_size() { - let result = - CommandParser::::try_parse_from(["reth", "--db.max-size", "invalid"]); - assert!(result.is_err()); - } - - #[test] - fn test_command_parser_with_valid_growth_step() { - let cmd = CommandParser::::try_parse_from([ - "reth", - "--db.growth-step", - "4294967296", - ]) - .unwrap(); - assert_eq!(cmd.args.growth_step, Some(GIGABYTE * 4)); - } - - #[test] - fn test_command_parser_with_invalid_growth_step() { - let result = - CommandParser::::try_parse_from(["reth", "--db.growth-step", "invalid"]); - assert!(result.is_err()); - } - - #[test] - fn test_command_parser_with_valid_max_size_and_growth_step_from_str() { - let cmd = CommandParser::::try_parse_from([ - 
"reth", - "--db.max-size", - "2TB", - "--db.growth-step", - "1GB", - ]) - .unwrap(); - assert_eq!(cmd.args.max_size, Some(TERABYTE * 2)); - assert_eq!(cmd.args.growth_step, Some(GIGABYTE)); - - let cmd = CommandParser::::try_parse_from([ - "reth", - "--db.max-size", - "12MB", - "--db.growth-step", - "2KB", - ]) - .unwrap(); - assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); - assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); - - // with spaces - let cmd = CommandParser::::try_parse_from([ - "reth", - "--db.max-size", - "12 MB", - "--db.growth-step", - "2 KB", - ]) - .unwrap(); - assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); - assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); - - let cmd = CommandParser::::try_parse_from([ - "reth", - "--db.max-size", - "1073741824", - "--db.growth-step", - "1048576", - ]) - .unwrap(); - assert_eq!(cmd.args.max_size, Some(GIGABYTE)); - assert_eq!(cmd.args.growth_step, Some(MEGABYTE)); - } - - #[test] - fn test_command_parser_max_size_and_growth_step_from_str_invalid_unit() { - let result = - CommandParser::::try_parse_from(["reth", "--db.growth-step", "1 PB"]); - assert!(result.is_err()); - - let result = - CommandParser::::try_parse_from(["reth", "--db.max-size", "2PB"]); - assert!(result.is_err()); - } - #[test] fn test_possible_values() { // Initialize the LogLevelValueParser diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 92ad2027276..65b804e6a58 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -23,7 +23,7 @@ use reth_libmdbx::{ use reth_storage_errors::db::LogLevel; use reth_tracing::tracing::error; use std::{ - ops::{Deref, Range}, + ops::Deref, path::Path, sync::Arc, time::{SystemTime, UNIX_EPOCH}, @@ -33,14 +33,8 @@ use tx::Tx; pub mod cursor; pub mod tx; -/// 1 KB in bytes -pub const KILOBYTE: usize = 1024; -/// 1 MB in bytes -pub const MEGABYTE: usize = KILOBYTE * 
1024; -/// 1 GB in bytes -pub const GIGABYTE: usize = MEGABYTE * 1024; -/// 1 TB in bytes -pub const TERABYTE: usize = GIGABYTE * 1024; +const GIGABYTE: usize = 1024 * 1024 * 1024; +const TERABYTE: usize = GIGABYTE * 1024; /// MDBX allows up to 32767 readers (`MDBX_READERS_LIMIT`), but we limit it to slightly below that const DEFAULT_MAX_READERS: u64 = 32_000; @@ -70,8 +64,6 @@ impl DatabaseEnvKind { pub struct DatabaseArguments { /// Client version that accesses the database. client_version: ClientVersion, - /// Database geometry settings. - geometry: Geometry>, /// Database log level. If [None], the default value is used. log_level: Option, /// Maximum duration of a read transaction. If [None], the default value is used. @@ -101,37 +93,15 @@ pub struct DatabaseArguments { impl DatabaseArguments { /// Create new database arguments with given client version. - pub fn new(client_version: ClientVersion) -> Self { + pub const fn new(client_version: ClientVersion) -> Self { Self { client_version, - geometry: Geometry { - size: Some(0..(4 * TERABYTE)), - growth_step: Some(4 * GIGABYTE as isize), - shrink_threshold: Some(0), - page_size: Some(PageSize::Set(default_page_size())), - }, log_level: None, max_read_transaction_duration: None, exclusive: None, } } - /// Set the geometry. - /// - /// # Arguments - /// - /// * `max_size` - Maximum database size in bytes - /// * `growth_step` - Database growth step in bytes - pub fn with_geometry(mut self, max_size: Option, growth_step: Option) -> Self { - self.geometry = Geometry { - size: max_size.map(|size| 0..size), - growth_step: growth_step.map(|growth_step| growth_step as isize), - shrink_threshold: Some(0), - page_size: Some(PageSize::Set(default_page_size())), - }; - self - } - /// Set the log level. pub const fn with_log_level(mut self, log_level: Option) -> Self { self.log_level = log_level; @@ -308,7 +278,15 @@ impl DatabaseEnv { // environment creation. 
debug_assert!(Tables::ALL.len() <= 256, "number of tables exceed max dbs"); inner_env.set_max_dbs(256); - inner_env.set_geometry(args.geometry); + inner_env.set_geometry(Geometry { + // Maximum database size of 4 terabytes + size: Some(0..(4 * TERABYTE)), + // We grow the database in increments of 4 gigabytes + growth_step: Some(4 * GIGABYTE as isize), + // The database never shrinks + shrink_threshold: Some(0), + page_size: Some(PageSize::Set(default_page_size())), + }); fn is_current_process(id: u32) -> bool { #[cfg(unix)] From 52328422aad608705e68e118e911362c9adf761d Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 29 Oct 2024 13:18:12 +0000 Subject: [PATCH 236/970] feat(storage): pass changesets to unwind methods (#7879) --- .../stages/src/stages/hashing_account.rs | 2 +- .../stages/src/stages/hashing_storage.rs | 2 +- .../src/stages/index_account_history.rs | 2 +- .../src/stages/index_storage_history.rs | 2 +- .../src/providers/database/provider.rs | 149 ++++++++++++------ crates/storage/provider/src/traits/hashing.rs | 30 +++- crates/storage/provider/src/traits/history.rs | 27 +++- 7 files changed, 155 insertions(+), 59 deletions(-) diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 14afb37d81d..5b4f720972f 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -234,7 +234,7 @@ where input.unwind_block_range_with_threshold(self.commit_threshold); // Aggregate all transition changesets and make a list of accounts that have been changed. 
- provider.unwind_account_hashing(range)?; + provider.unwind_account_hashing_range(range)?; let mut stage_checkpoint = input.checkpoint.account_hashing_stage_checkpoint().unwrap_or_default(); diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index ef070d30c6d..dcabbe83ee6 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -169,7 +169,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_storage_hashing(BlockNumberAddress::range(range))?; + provider.unwind_storage_hashing_range(BlockNumberAddress::range(range))?; let mut stage_checkpoint = input.checkpoint.storage_hashing_stage_checkpoint().unwrap_or_default(); diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index 8b10283fb4b..38c238e5d98 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -134,7 +134,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_account_history_indices(range)?; + provider.unwind_account_history_indices_range(range)?; // from HistoryIndex higher than that number. 
Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) }) diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index ac645b8dd75..ba61e631230 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -140,7 +140,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_storage_history_indices(BlockNumberAddress::range(range))?; + provider.unwind_storage_history_indices_range(BlockNumberAddress::range(range))?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) }) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index e59a4f5635f..2dcc3f92d70 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -2639,19 +2639,17 @@ impl StorageTrieWriter for DatabaseProvid } impl HashingWriter for DatabaseProvider { - fn unwind_account_hashing( + fn unwind_account_hashing<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, ) -> ProviderResult>> { // Aggregate all block changesets and make a list of accounts that have been changed. // Note that collecting and then reversing the order is necessary to ensure that the // changes are applied in the correct order. - let hashed_accounts = self - .tx - .cursor_read::()? - .walk_range(range)? - .map(|entry| entry.map(|(_, e)| (keccak256(e.address), e.info))) - .collect::, _>>()? + let hashed_accounts = changesets + .into_iter() + .map(|(_, e)| (keccak256(e.address), e.info)) + .collect::>() .into_iter() .rev() .collect::>(); @@ -2669,13 +2667,25 @@ impl HashingWriter for DatabaseProvider, + ) -> ProviderResult>> { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? 
+ .collect::, _>>()?; + self.unwind_account_hashing(changesets.iter()) + } + fn insert_account_for_hashing( &self, - accounts: impl IntoIterator)>, + changesets: impl IntoIterator)>, ) -> ProviderResult>> { let mut hashed_accounts_cursor = self.tx.cursor_write::()?; let hashed_accounts = - accounts.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); + changesets.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); for (hashed_address, account) in &hashed_accounts { if let Some(account) = account { hashed_accounts_cursor.upsert(*hashed_address, *account)?; @@ -2688,18 +2698,15 @@ impl HashingWriter for DatabaseProvider, + changesets: impl Iterator, ) -> ProviderResult>> { // Aggregate all block changesets and make list of accounts that have been changed. - let mut changesets = self.tx.cursor_read::()?; let mut hashed_storages = changesets - .walk_range(range)? - .map(|entry| { - entry.map(|(BlockNumberAddress((_, address)), storage_entry)| { - (keccak256(address), keccak256(storage_entry.key), storage_entry.value) - }) + .into_iter() + .map(|(BlockNumberAddress((_, address)), storage_entry)| { + (keccak256(address), keccak256(storage_entry.key), storage_entry.value) }) - .collect::, _>>()?; + .collect::>(); hashed_storages.sort_by_key(|(ha, hk, _)| (*ha, *hk)); // Apply values to HashedState, and remove the account if it's None. @@ -2724,6 +2731,18 @@ impl HashingWriter for DatabaseProvider, + ) -> ProviderResult>> { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_storage_hashing(changesets.into_iter()) + } + fn insert_storage_for_hashing( &self, storages: impl IntoIterator)>, @@ -2845,16 +2864,14 @@ impl HashingWriter for DatabaseProvider HistoryWriter for DatabaseProvider { - fn unwind_account_history_indices( + fn unwind_account_history_indices<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, ) -> ProviderResult { - let mut last_indices = self - .tx - .cursor_read::()? 
- .walk_range(range)? - .map(|entry| entry.map(|(index, account)| (account.address, index))) - .collect::, _>>()?; + let mut last_indices = changesets + .into_iter() + .map(|(index, account)| (account.address, *index)) + .collect::>(); last_indices.sort_by_key(|(a, _)| *a); // Unwind the account history index. @@ -2881,6 +2898,18 @@ impl HistoryWriter for DatabaseProvider, + ) -> ProviderResult { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_account_history_indices(changesets.iter()) + } + fn insert_account_history_index( &self, account_transitions: impl IntoIterator)>, @@ -2893,16 +2922,12 @@ impl HistoryWriter for DatabaseProvider, + changesets: impl Iterator, ) -> ProviderResult { - let mut storage_changesets = self - .tx - .cursor_read::()? - .walk_range(range)? - .map(|entry| { - entry.map(|(BlockNumberAddress((bn, address)), storage)| (address, storage.key, bn)) - }) - .collect::, _>>()?; + let mut storage_changesets = changesets + .into_iter() + .map(|(BlockNumberAddress((bn, address)), storage)| (address, storage.key, bn)) + .collect::>(); storage_changesets.sort_by_key(|(address, key, _)| (*address, *key)); let mut cursor = self.tx.cursor_write::()?; @@ -2931,6 +2956,18 @@ impl HistoryWriter for DatabaseProvider, + ) -> ProviderResult { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_storage_history_indices(changesets.into_iter()) + } + fn insert_storage_history_index( &self, storage_transitions: impl IntoIterator)>, @@ -2973,10 +3010,14 @@ impl, ) -> ProviderResult { - let storage_range = BlockNumberAddress::range(range.clone()); + let changed_accounts = self + .tx + .cursor_read::()? + .walk_range(range.clone())? + .collect::, _>>()?; // Unwind account hashes. Add changed accounts to account prefix set. 
- let hashed_addresses = self.unwind_account_hashing(range.clone())?; + let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); let mut destroyed_accounts = HashSet::default(); for (hashed_address, account) in hashed_addresses { @@ -2987,12 +3028,19 @@ impl()? + .walk_range(storage_range)? + .collect::, _>>()?; // Unwind storage hashes. Add changed account and storage keys to corresponding prefix // sets. let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(storage_range.clone())?; + let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; for (hashed_address, hashed_slots) in storage_entries { account_prefix_set.insert(Nibbles::unpack(hashed_address)); let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); @@ -3003,7 +3051,7 @@ impl, ) -> ProviderResult<()> { - let storage_range = BlockNumberAddress::range(range.clone()); + let changed_accounts = self + .tx + .cursor_read::()? + .walk_range(range.clone())? + .collect::, _>>()?; // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(range.clone())?; + let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); let mut destroyed_accounts = HashSet::default(); for (hashed_address, account) in hashed_addresses { @@ -3075,12 +3127,19 @@ impl()? + .walk_range(storage_range)? + .collect::, _>>()?; // Unwind storage hashes. Add changed account and storage keys to corresponding prefix // sets. 
let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(storage_range.clone())?; + let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; for (hashed_address, hashed_slots) in storage_entries { account_prefix_set.insert(Nibbles::unpack(hashed_address)); let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); @@ -3091,7 +3150,7 @@ impl( &self, - range: RangeInclusive, + changesets: impl Iterator, + ) -> ProviderResult>>; + + /// Unwind and clear account hashing in a given block range. + /// + /// # Returns + /// + /// Set of hashed keys of updated accounts. + fn unwind_account_hashing_range( + &self, + range: impl RangeBounds, ) -> ProviderResult>>; /// Inserts all accounts into [reth_db::tables::AccountsHistory] table. @@ -38,7 +48,17 @@ pub trait HashingWriter: Send + Sync { /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. fn unwind_storage_hashing( &self, - range: Range, + changesets: impl Iterator, + ) -> ProviderResult>>; + + /// Unwind and clear storage hashing in a given block range. + /// + /// # Returns + /// + /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. + fn unwind_storage_hashing_range( + &self, + range: impl RangeBounds, ) -> ProviderResult>>; /// Iterates over storages and inserts them to hashing table. 
diff --git a/crates/storage/provider/src/traits/history.rs b/crates/storage/provider/src/traits/history.rs index cbf9bece4b9..4eadd6031c3 100644 --- a/crates/storage/provider/src/traits/history.rs +++ b/crates/storage/provider/src/traits/history.rs @@ -1,8 +1,9 @@ use alloy_primitives::{Address, BlockNumber, B256}; use auto_impl::auto_impl; -use reth_db_api::models::BlockNumberAddress; +use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; +use reth_primitives::StorageEntry; use reth_storage_errors::provider::ProviderResult; -use std::ops::{Range, RangeInclusive}; +use std::ops::{RangeBounds, RangeInclusive}; /// History Writer #[auto_impl(&, Arc, Box)] @@ -10,9 +11,17 @@ pub trait HistoryWriter: Send + Sync { /// Unwind and clear account history indices. /// /// Returns number of changesets walked. - fn unwind_account_history_indices( + fn unwind_account_history_indices<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, + ) -> ProviderResult; + + /// Unwind and clear account history indices in a given block range. + /// + /// Returns number of changesets walked. + fn unwind_account_history_indices_range( + &self, + range: impl RangeBounds, ) -> ProviderResult; /// Insert account change index to database. Used inside AccountHistoryIndex stage @@ -26,7 +35,15 @@ pub trait HistoryWriter: Send + Sync { /// Returns number of changesets walked. fn unwind_storage_history_indices( &self, - range: Range, + changesets: impl Iterator, + ) -> ProviderResult; + + /// Unwind and clear storage history indices in a given block range. + /// + /// Returns number of changesets walked. + fn unwind_storage_history_indices_range( + &self, + range: impl RangeBounds, ) -> ProviderResult; /// Insert storage change index to database. 
Used inside StorageHistoryIndex stage From f545877bb82724f60092ec87dbd71b668ab0132e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 29 Oct 2024 14:23:54 +0100 Subject: [PATCH 237/970] Revert "Revert "feat: add geometry to database args"" (#12165) --- book/cli/reth/db.md | 6 + book/cli/reth/db/diff.md | 6 + book/cli/reth/debug/build-block.md | 6 + book/cli/reth/debug/execution.md | 6 + book/cli/reth/debug/in-memory-merkle.md | 6 + book/cli/reth/debug/merkle.md | 6 + book/cli/reth/debug/replay-engine.md | 6 + book/cli/reth/import.md | 6 + book/cli/reth/init-state.md | 6 + book/cli/reth/init.md | 6 + book/cli/reth/node.md | 6 + book/cli/reth/p2p.md | 6 + book/cli/reth/prune.md | 6 + book/cli/reth/recover/storage-tries.md | 6 + book/cli/reth/stage/drop.md | 6 + book/cli/reth/stage/dump.md | 6 + book/cli/reth/stage/run.md | 6 + book/cli/reth/stage/unwind.md | 6 + crates/node/core/src/args/database.rs | 184 +++++++++++++++++- .../storage/db/src/implementation/mdbx/mod.rs | 48 +++-- crates/storage/libmdbx-rs/src/environment.rs | 2 + 21 files changed, 326 insertions(+), 16 deletions(-) diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index f9a8a158adc..17a6de4e607 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -81,6 +81,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index f57c6ac364f..efb9e7d32e3 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -45,6 +45,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/build-block.md 
b/book/cli/reth/debug/build-block.md index 2e6d637d52c..7bceb62b940 100644 --- a/book/cli/reth/debug/build-block.md +++ b/book/cli/reth/debug/build-block.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/execution.md b/book/cli/reth/debug/execution.md index 9ca74897c5e..b8e1ce05d17 100644 --- a/book/cli/reth/debug/execution.md +++ b/book/cli/reth/debug/execution.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/in-memory-merkle.md b/book/cli/reth/debug/in-memory-merkle.md index 3e322a6913d..a183db997e9 100644 --- a/book/cli/reth/debug/in-memory-merkle.md +++ b/book/cli/reth/debug/in-memory-merkle.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/merkle.md b/book/cli/reth/debug/merkle.md index d701803b81c..d9a72794ef2 100644 --- a/book/cli/reth/debug/merkle.md +++ b/book/cli/reth/debug/merkle.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md index dd587620a86..b7a1266d399 100644 --- a/book/cli/reth/debug/replay-engine.md 
+++ b/book/cli/reth/debug/replay-engine.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 28e085bda71..82a521ac0ab 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index 3e073516778..533c0f8f888 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index cd01accc047..ebe2a8386cf 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index a3ff8f6a57b..52f597279f0 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -590,6 +590,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 
0 means no timeout diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 603b451d940..33639042a1d 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -247,6 +247,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/prune.md b/book/cli/reth/prune.md index ed16197a76c..41684ecd9e0 100644 --- a/book/cli/reth/prune.md +++ b/book/cli/reth/prune.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index ecdaabe7781..1afe94f55db 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index 399b3818c28..c22d6be6680 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index 4b3de3fb1cb..e3df5bf2df7 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -76,6 +76,12 @@ Database: 
[possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index 9da3ce0deb6..204efc9685b 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index 700ab3d7e7c..cb72b9313c0 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -74,6 +74,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 0eec6639a11..5b9d6ae61e2 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -1,6 +1,6 @@ //! clap [Args](clap::Args) for database configuration -use std::time::Duration; +use std::{fmt, str::FromStr, time::Duration}; use crate::version::default_client_version; use clap::{ @@ -22,6 +22,12 @@ pub struct DatabaseArgs { /// NFS volume. #[arg(long = "db.exclusive")] pub exclusive: Option, + /// Maximum database size (e.g., 4TB, 8MB) + #[arg(long = "db.max-size", value_parser = parse_byte_size)] + pub max_size: Option, + /// Database growth step (e.g., 4GB, 4KB) + #[arg(long = "db.growth-step", value_parser = parse_byte_size)] + pub growth_step: Option, /// Read transaction timeout in seconds, 0 means no timeout. 
#[arg(long = "db.read-transaction-timeout")] pub read_transaction_timeout: Option, @@ -33,8 +39,9 @@ impl DatabaseArgs { self.get_database_args(default_client_version()) } - /// Returns the database arguments with configured log level and given client version. - pub const fn get_database_args( + /// Returns the database arguments with configured log level, client version, + /// max read transaction duration, and geometry. + pub fn get_database_args( &self, client_version: ClientVersion, ) -> reth_db::mdbx::DatabaseArguments { @@ -48,6 +55,8 @@ impl DatabaseArgs { .with_log_level(self.log_level) .with_exclusive(self.exclusive) .with_max_read_transaction_duration(max_read_transaction_duration) + .with_geometry_max_size(self.max_size) + .with_growth_step(self.growth_step) } } @@ -89,10 +98,84 @@ impl TypedValueParser for LogLevelValueParser { Some(Box::new(values)) } } + +/// Size in bytes. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +pub struct ByteSize(pub usize); + +impl From for usize { + fn from(s: ByteSize) -> Self { + s.0 + } +} + +impl FromStr for ByteSize { + type Err = String; + + fn from_str(s: &str) -> Result { + let s = s.trim().to_uppercase(); + let parts: Vec<&str> = s.split_whitespace().collect(); + + let (num_str, unit) = match parts.len() { + 1 => { + let (num, unit) = + s.split_at(s.find(|c: char| c.is_alphabetic()).unwrap_or(s.len())); + (num, unit) + } + 2 => (parts[0], parts[1]), + _ => { + return Err("Invalid format. Use '' or ' '.".to_string()) + } + }; + + let num: usize = num_str.parse().map_err(|_| "Invalid number".to_string())?; + + let multiplier = match unit { + "B" | "" => 1, // Assume bytes if no unit is specified + "KB" => 1024, + "MB" => 1024 * 1024, + "GB" => 1024 * 1024 * 1024, + "TB" => 1024 * 1024 * 1024 * 1024, + _ => return Err(format!("Invalid unit: {}. 
Use B, KB, MB, GB, or TB.", unit)), + }; + + Ok(Self(num * multiplier)) + } +} + +impl fmt::Display for ByteSize { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + const KB: usize = 1024; + const MB: usize = KB * 1024; + const GB: usize = MB * 1024; + const TB: usize = GB * 1024; + + let (size, unit) = if self.0 >= TB { + (self.0 as f64 / TB as f64, "TB") + } else if self.0 >= GB { + (self.0 as f64 / GB as f64, "GB") + } else if self.0 >= MB { + (self.0 as f64 / MB as f64, "MB") + } else if self.0 >= KB { + (self.0 as f64 / KB as f64, "KB") + } else { + (self.0 as f64, "B") + }; + + write!(f, "{:.2}{}", size, unit) + } +} + +/// Value parser function that supports various formats. +fn parse_byte_size(s: &str) -> Result { + s.parse::().map(Into::into) +} + #[cfg(test)] mod tests { use super::*; use clap::Parser; + use reth_db::mdbx::{GIGABYTE, KILOBYTE, MEGABYTE, TERABYTE}; /// A helper type to parse Args more easily #[derive(Parser)] @@ -108,6 +191,101 @@ mod tests { assert_eq!(args, default_args); } + #[test] + fn test_command_parser_with_valid_max_size() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "4398046511104", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(TERABYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_max_size() { + let result = + CommandParser::::try_parse_from(["reth", "--db.max-size", "invalid"]); + assert!(result.is_err()); + } + + #[test] + fn test_command_parser_with_valid_growth_step() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.growth-step", + "4294967296", + ]) + .unwrap(); + assert_eq!(cmd.args.growth_step, Some(GIGABYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_growth_step() { + let result = + CommandParser::::try_parse_from(["reth", "--db.growth-step", "invalid"]); + assert!(result.is_err()); + } + + #[test] + fn test_command_parser_with_valid_max_size_and_growth_step_from_str() { + let cmd = CommandParser::::try_parse_from([ + 
"reth", + "--db.max-size", + "2TB", + "--db.growth-step", + "1GB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(TERABYTE * 2)); + assert_eq!(cmd.args.growth_step, Some(GIGABYTE)); + + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "12MB", + "--db.growth-step", + "2KB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); + assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); + + // with spaces + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "12 MB", + "--db.growth-step", + "2 KB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); + assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); + + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "1073741824", + "--db.growth-step", + "1048576", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(GIGABYTE)); + assert_eq!(cmd.args.growth_step, Some(MEGABYTE)); + } + + #[test] + fn test_command_parser_max_size_and_growth_step_from_str_invalid_unit() { + let result = + CommandParser::::try_parse_from(["reth", "--db.growth-step", "1 PB"]); + assert!(result.is_err()); + + let result = + CommandParser::::try_parse_from(["reth", "--db.max-size", "2PB"]); + assert!(result.is_err()); + } + #[test] fn test_possible_values() { // Initialize the LogLevelValueParser diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 65b804e6a58..78a3f7971da 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -23,7 +23,7 @@ use reth_libmdbx::{ use reth_storage_errors::db::LogLevel; use reth_tracing::tracing::error; use std::{ - ops::Deref, + ops::{Deref, Range}, path::Path, sync::Arc, time::{SystemTime, UNIX_EPOCH}, @@ -33,8 +33,14 @@ use tx::Tx; pub mod cursor; pub mod tx; -const GIGABYTE: usize = 1024 * 1024 * 1024; -const TERABYTE: usize = GIGABYTE * 1024; +/// 1 KB in bytes +pub 
const KILOBYTE: usize = 1024; +/// 1 MB in bytes +pub const MEGABYTE: usize = KILOBYTE * 1024; +/// 1 GB in bytes +pub const GIGABYTE: usize = MEGABYTE * 1024; +/// 1 TB in bytes +pub const TERABYTE: usize = GIGABYTE * 1024; /// MDBX allows up to 32767 readers (`MDBX_READERS_LIMIT`), but we limit it to slightly below that const DEFAULT_MAX_READERS: u64 = 32_000; @@ -64,6 +70,8 @@ impl DatabaseEnvKind { pub struct DatabaseArguments { /// Client version that accesses the database. client_version: ClientVersion, + /// Database geometry settings. + geometry: Geometry>, /// Database log level. If [None], the default value is used. log_level: Option, /// Maximum duration of a read transaction. If [None], the default value is used. @@ -93,15 +101,37 @@ pub struct DatabaseArguments { impl DatabaseArguments { /// Create new database arguments with given client version. - pub const fn new(client_version: ClientVersion) -> Self { + pub fn new(client_version: ClientVersion) -> Self { Self { client_version, + geometry: Geometry { + size: Some(0..(4 * TERABYTE)), + growth_step: Some(4 * GIGABYTE as isize), + shrink_threshold: Some(0), + page_size: Some(PageSize::Set(default_page_size())), + }, log_level: None, max_read_transaction_duration: None, exclusive: None, } } + /// Sets the upper size limit of the db environment, the maximum database size in bytes. + pub const fn with_geometry_max_size(mut self, max_size: Option) -> Self { + if let Some(max_size) = max_size { + self.geometry.size = Some(0..max_size); + } + self + } + + /// Configures the database growth step in bytes. + pub const fn with_growth_step(mut self, growth_step: Option) -> Self { + if let Some(growth_step) = growth_step { + self.geometry.growth_step = Some(growth_step as isize); + } + self + } + /// Set the log level. pub const fn with_log_level(mut self, log_level: Option) -> Self { self.log_level = log_level; @@ -278,15 +308,7 @@ impl DatabaseEnv { // environment creation. 
debug_assert!(Tables::ALL.len() <= 256, "number of tables exceed max dbs"); inner_env.set_max_dbs(256); - inner_env.set_geometry(Geometry { - // Maximum database size of 4 terabytes - size: Some(0..(4 * TERABYTE)), - // We grow the database in increments of 4 gigabytes - growth_step: Some(4 * GIGABYTE as isize), - // The database never shrinks - shrink_threshold: Some(0), - page_size: Some(PageSize::Set(default_page_size())), - }); + inner_env.set_geometry(args.geometry); fn is_current_process(id: u32) -> bool { #[cfg(unix)] diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 480f5aaab65..6a0b210401e 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -489,8 +489,10 @@ pub struct PageOps { pub mincore: u64, } +/// Represents the geometry settings for the database environment #[derive(Clone, Debug, PartialEq, Eq)] pub struct Geometry { + /// The size range in bytes. pub size: Option, pub growth_step: Option, pub shrink_threshold: Option, From e92ecfbc220929f6bddb283b32896c8257a82748 Mon Sep 17 00:00:00 2001 From: Hoa Nguyen Date: Tue, 29 Oct 2024 21:34:12 +0700 Subject: [PATCH 238/970] feat: Introduce trait for OpTransaction (#11745) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + Cargo.toml | 2 +- crates/optimism/evm/src/execute.rs | 1 + crates/optimism/payload/Cargo.toml | 5 +- crates/optimism/payload/src/builder.rs | 1 + crates/optimism/rpc/src/eth/receipt.rs | 4 +- crates/optimism/rpc/src/eth/transaction.rs | 1 + crates/primitives/src/transaction/compat.rs | 2 + crates/primitives/src/transaction/mod.rs | 63 +++++++++------------ 9 files changed, 40 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8febcffa52c..5b62244576e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8260,6 +8260,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-rpc-types-engine", + "op-alloy-consensus", "op-alloy-rpc-types-engine", 
"reth-basic-payload-builder", "reth-chain-state", diff --git a/Cargo.toml b/Cargo.toml index c83d76318e1..8c5cf7b0cd5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -633,4 +633,4 @@ tracy-client = "0.17.3" #op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } #op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } #op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } +#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } \ No newline at end of file diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 1cd92409847..9c5db9d4b61 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -5,6 +5,7 @@ use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; use alloy_eips::eip7685::Requests; use core::fmt::Display; +use op_alloy_consensus::DepositTransaction; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_evm::{ diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index ba0b105e832..4b8e64f2dba 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -39,6 +39,7 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true +op-alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true @@ -54,5 +55,5 @@ optimism = [ "reth-optimism-evm/optimism", "revm/optimism", "reth-execution-types/optimism", - "reth-optimism-consensus/optimism", -] + "reth-optimism-consensus/optimism" +] \ No newline at end of file diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 
a1536ccf8fa..0550adeaa8d 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -36,6 +36,7 @@ use crate::{ error::OptimismPayloadBuilderError, payload::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}, }; +use op_alloy_consensus::DepositTransaction; /// Optimism's payload builder #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 3f2a81573e2..2734fb5458c 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -2,7 +2,9 @@ use alloy_eips::eip2718::Encodable2718; use alloy_rpc_types::{AnyReceiptEnvelope, Log, TransactionReceipt}; -use op_alloy_consensus::{OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope}; +use op_alloy_consensus::{ + DepositTransaction, OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope, +}; use op_alloy_rpc_types::{receipt::L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_chainspec::OpChainSpec; diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 3345ac5d452..3994afe1984 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -3,6 +3,7 @@ use alloy_consensus::Transaction as _; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types::TransactionInfo; +use op_alloy_consensus::DepositTransaction; use op_alloy_rpc_types::Transaction; use reth_node_api::FullNodeComponents; use reth_primitives::TransactionSignedEcRecovered; diff --git a/crates/primitives/src/transaction/compat.rs b/crates/primitives/src/transaction/compat.rs index 81281186f64..883c89c45f5 100644 --- a/crates/primitives/src/transaction/compat.rs +++ b/crates/primitives/src/transaction/compat.rs @@ -1,5 +1,7 @@ use crate::{Transaction, TransactionSigned}; use alloy_primitives::{Address, TxKind, 
U256}; +#[cfg(feature = "optimism")] +use op_alloy_consensus::DepositTransaction; use revm_primitives::{AuthorizationList, TxEnv}; /// Implements behaviour to fill a [`TxEnv`] from another transaction. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 3a5c3674166..e81c5ad332c 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -18,6 +18,8 @@ use derive_more::{AsRef, Deref}; use once_cell as _; #[cfg(not(feature = "std"))] use once_cell::sync::Lazy as LazyLock; +#[cfg(feature = "optimism")] +use op_alloy_consensus::DepositTransaction; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; use signature::{decode_with_eip155_chain_id, with_eip155_parity}; @@ -136,6 +138,31 @@ pub enum Transaction { Deposit(TxDeposit), } +#[cfg(feature = "optimism")] +impl DepositTransaction for Transaction { + fn source_hash(&self) -> Option { + match self { + Self::Deposit(tx) => tx.source_hash(), + _ => None, + } + } + fn mint(&self) -> Option { + match self { + Self::Deposit(tx) => tx.mint(), + _ => None, + } + } + fn is_system_transaction(&self) -> bool { + match self { + Self::Deposit(tx) => tx.is_system_transaction(), + _ => false, + } + } + fn is_deposit(&self) -> bool { + matches!(self, Self::Deposit(_)) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Transaction { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -361,42 +388,6 @@ impl Transaction { } } - /// Returns the source hash of the transaction, which uniquely identifies its source. - /// If not a deposit transaction, this will always return `None`. - #[cfg(feature = "optimism")] - pub const fn source_hash(&self) -> Option { - match self { - Self::Deposit(TxDeposit { source_hash, .. }) => Some(*source_hash), - _ => None, - } - } - - /// Returns the amount of ETH locked up on L1 that will be minted on L2. 
If the transaction - /// is not a deposit transaction, this will always return `None`. - #[cfg(feature = "optimism")] - pub const fn mint(&self) -> Option { - match self { - Self::Deposit(TxDeposit { mint, .. }) => *mint, - _ => None, - } - } - - /// Returns whether or not the transaction is a system transaction. If the transaction - /// is not a deposit transaction, this will always return `false`. - #[cfg(feature = "optimism")] - pub const fn is_system_transaction(&self) -> bool { - match self { - Self::Deposit(TxDeposit { is_system_transaction, .. }) => *is_system_transaction, - _ => false, - } - } - - /// Returns whether or not the transaction is an Optimism Deposited transaction. - #[cfg(feature = "optimism")] - pub const fn is_deposit(&self) -> bool { - matches!(self, Self::Deposit(_)) - } - /// This encodes the transaction _without_ the signature, and is only suitable for creating a /// hash intended for signing. pub fn encode_without_signature(&self, out: &mut dyn bytes::BufMut) { From 58f24e0056b8113b4762796c005b2a73a75a42c9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 29 Oct 2024 15:35:42 +0100 Subject: [PATCH 239/970] chore: remove generate sidecar fn (#12167) --- crates/primitives/src/transaction/mod.rs | 2 -- crates/primitives/src/transaction/sidecar.rs | 37 +++----------------- crates/transaction-pool/src/pool/mod.rs | 5 +-- 3 files changed, 7 insertions(+), 37 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index e81c5ad332c..2e0a786fc1a 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -31,8 +31,6 @@ pub use error::{ }; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; -#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] -pub use sidecar::generate_blob_sidecar; #[cfg(feature = "c-kzg")] pub use sidecar::BlobTransactionValidationError; pub use 
sidecar::{BlobTransaction, BlobTransactionSidecar}; diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index e901cbfc08d..1e6560e152b 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -191,35 +191,6 @@ impl BlobTransaction { } } -/// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs. -#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] -pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { - use alloc::vec::Vec; - use alloy_eips::eip4844::env_settings::EnvKzgSettings; - use c_kzg::{KzgCommitment, KzgProof}; - - let kzg_settings = EnvKzgSettings::Default; - - let commitments: Vec = blobs - .iter() - .map(|blob| { - KzgCommitment::blob_to_kzg_commitment(&blob.clone(), kzg_settings.get()).unwrap() - }) - .map(|commitment| commitment.to_bytes()) - .collect(); - - let proofs: Vec = blobs - .iter() - .zip(commitments.iter()) - .map(|(blob, commitment)| { - KzgProof::compute_blob_kzg_proof(blob, commitment, kzg_settings.get()).unwrap() - }) - .map(|proof| proof.to_bytes()) - .collect(); - - BlobTransactionSidecar::from_kzg(blobs, commitments, proofs) -} - #[cfg(all(test, feature = "c-kzg"))] mod tests { use super::*; @@ -251,7 +222,7 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Assert commitment equality assert_eq!( @@ -300,7 +271,7 @@ mod tests { } // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs.clone()); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Assert sidecar size assert_eq!(sidecar.size(), 524672); @@ -325,7 +296,7 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs); + let sidecar = 
BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Create a vector to store the encoded RLP let mut encoded_rlp = Vec::new(); @@ -356,7 +327,7 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Create a vector to store the encoded RLP let mut encoded_rlp = Vec::new(); diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 69f17504f79..fef0fd0eea0 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -1236,7 +1236,8 @@ mod tests { validate::ValidTransaction, BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionValidationOutcome, U256, }; - use reth_primitives::{kzg::Blob, transaction::generate_blob_sidecar}; + use alloy_eips::eip4844::BlobTransactionSidecar; + use reth_primitives::kzg::Blob; use std::{fs, path::PathBuf}; #[test] @@ -1271,7 +1272,7 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs. - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Create an in-memory blob store. 
let blob_store = InMemoryBlobStore::default(); From 1006ce78c530088fd103bf9ffe14cc3ae6123ce9 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 29 Oct 2024 23:37:41 +0900 Subject: [PATCH 240/970] feat(provider): use `NodeTypes` on `DatabaseProvider` instead (#12166) --- crates/stages/stages/benches/criterion.rs | 6 +- crates/stages/stages/benches/setup/mod.rs | 11 +- .../stages/src/stages/hashing_account.rs | 7 +- crates/stages/stages/src/test_utils/runner.rs | 5 +- .../provider/src/providers/consistent.rs | 8 +- .../provider/src/providers/database/mod.rs | 8 +- .../src/providers/database/provider.rs | 151 +++++++++--------- .../storage/provider/src/test_utils/blocks.rs | 5 +- .../storage/provider/src/test_utils/mock.rs | 14 +- crates/trie/db/tests/trie.rs | 11 +- 10 files changed, 118 insertions(+), 108 deletions(-) diff --git a/crates/stages/stages/benches/criterion.rs b/crates/stages/stages/benches/criterion.rs index 7519d81a362..0f876dd7011 100644 --- a/crates/stages/stages/benches/criterion.rs +++ b/crates/stages/stages/benches/criterion.rs @@ -2,12 +2,11 @@ use criterion::{criterion_main, measurement::WallTime, BenchmarkGroup, Criterion}; #[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; -use reth_chainspec::ChainSpec; use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; use alloy_primitives::BlockNumber; -use reth_provider::{DatabaseProvider, DatabaseProviderFactory}; +use reth_provider::{test_utils::MockNodeTypesWithDB, DatabaseProvider, DatabaseProviderFactory}; use reth_stages::{ stages::{MerkleStage, SenderRecoveryStage, TransactionLookupStage}, test_utils::TestStageDB, @@ -148,7 +147,8 @@ fn measure_stage( block_interval: RangeInclusive, label: String, ) where - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage as Database>::TXMut, MockNodeTypesWithDB>>, F: Fn(S, &TestStageDB, 
StageRange), { let stage_range = ( diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index 4812fb13c39..e6ae33f9c29 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -1,14 +1,15 @@ #![allow(unreachable_pub)] use alloy_primitives::{Address, Sealable, B256, U256}; use itertools::concat; -use reth_chainspec::ChainSpec; use reth_db::{tables, test_utils::TempDatabase, Database, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, SealedBlock, SealedHeader}; -use reth_provider::{DatabaseProvider, DatabaseProviderFactory, TrieWriter}; +use reth_provider::{ + test_utils::MockNodeTypesWithDB, DatabaseProvider, DatabaseProviderFactory, TrieWriter, +}; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, test_utils::{StorageKind, TestStageDB}, @@ -31,7 +32,8 @@ use reth_trie_db::DatabaseStateRoot; pub(crate) type StageRange = (ExecInput, UnwindInput); pub(crate) fn stage_unwind< - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage as Database>::TXMut, MockNodeTypesWithDB>>, >( stage: S, db: &TestStageDB, @@ -63,7 +65,8 @@ pub(crate) fn stage_unwind< pub(crate) fn unwind_hashes(stage: S, db: &TestStageDB, range: StageRange) where - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage as Database>::TXMut, MockNodeTypesWithDB>>, { let (input, unwind) = range; diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 5b4f720972f..1ca0e1aa132 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -58,11 +58,8 @@ impl AccountHashingStage { /// /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the /// account state in the `AccountChangeSets` table. 
- pub fn seed< - Tx: DbTx + DbTxMut + 'static, - Spec: Send + Sync + 'static + reth_chainspec::EthereumHardforks, - >( - provider: &reth_provider::DatabaseProvider, + pub fn seed( + provider: &reth_provider::DatabaseProvider, opts: SeedOpts, ) -> Result, StageError> { use alloy_primitives::U256; diff --git a/crates/stages/stages/src/test_utils/runner.rs b/crates/stages/stages/src/test_utils/runner.rs index 26f245c1304..c3d25b99536 100644 --- a/crates/stages/stages/src/test_utils/runner.rs +++ b/crates/stages/stages/src/test_utils/runner.rs @@ -1,7 +1,6 @@ use super::TestStageDB; -use reth_chainspec::ChainSpec; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; -use reth_provider::{DatabaseProvider, ProviderError}; +use reth_provider::{test_utils::MockNodeTypesWithDB, DatabaseProvider, ProviderError}; use reth_stages_api::{ ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput, }; @@ -20,7 +19,7 @@ pub(crate) enum TestRunnerError { /// A generic test runner for stages. pub(crate) trait StageTestRunner { - type S: Stage as Database>::TXMut, ChainSpec>> + type S: Stage as Database>::TXMut, MockNodeTypesWithDB>> + 'static; /// Return a reference to the database. 
diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index d6847fa1b8f..e6ca1a91932 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -296,7 +296,7 @@ impl ConsistentProvider { ) -> ProviderResult> where F: FnOnce( - &DatabaseProviderRO, + &DatabaseProviderRO, RangeInclusive, &mut P, ) -> ProviderResult>, @@ -413,7 +413,7 @@ impl ConsistentProvider { ) -> ProviderResult> where S: FnOnce( - &DatabaseProviderRO, + &DatabaseProviderRO, RangeInclusive, ) -> ProviderResult>, M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, @@ -511,7 +511,7 @@ impl ConsistentProvider { fetch_from_block_state: M, ) -> ProviderResult> where - S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, + S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, { let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); @@ -578,7 +578,7 @@ impl ConsistentProvider { fetch_from_block_state: M, ) -> ProviderResult where - S: FnOnce(&DatabaseProviderRO) -> ProviderResult, + S: FnOnce(&DatabaseProviderRO) -> ProviderResult, M: Fn(&BlockState) -> ProviderResult, { if let Some(Some(block_state)) = self.head_block.as_ref().map(|b| b.block_on_chain(id)) { diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 54186dca6f6..04a30ce90aa 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -130,7 +130,7 @@ impl ProviderFactory { /// This sets the [`PruneModes`] to [`None`], because they should only be relevant for writing /// data. 
#[track_caller] - pub fn provider(&self) -> ProviderResult> { + pub fn provider(&self) -> ProviderResult> { Ok(DatabaseProvider::new( self.db.tx()?, self.chain_spec.clone(), @@ -144,7 +144,7 @@ impl ProviderFactory { /// [`BlockHashReader`]. This may fail if the inner read/write database transaction fails to /// open. #[track_caller] - pub fn provider_rw(&self) -> ProviderResult> { + pub fn provider_rw(&self) -> ProviderResult> { Ok(DatabaseProviderRW(DatabaseProvider::new_rw( self.db.tx_mut()?, self.chain_spec.clone(), @@ -186,8 +186,8 @@ impl ProviderFactory { impl DatabaseProviderFactory for ProviderFactory { type DB = N::DB; - type Provider = DatabaseProvider<::TX, N::ChainSpec>; - type ProviderRW = DatabaseProvider<::TXMut, N::ChainSpec>; + type Provider = DatabaseProvider<::TX, N>; + type ProviderRW = DatabaseProvider<::TXMut, N>; fn database_provider_ro(&self) -> ProviderResult { self.provider() diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 2dcc3f92d70..81affa0d804 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -38,6 +38,7 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; +use reth_node_types::NodeTypes; use reth_primitives::{ Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, @@ -70,40 +71,40 @@ use tokio::sync::watch; use tracing::{debug, error, trace, warn}; /// A [`DatabaseProvider`] that holds a read-only database transaction. -pub type DatabaseProviderRO = DatabaseProvider<::TX, Spec>; +pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; /// A [`DatabaseProvider`] that holds a read-write database transaction. 
/// /// Ideally this would be an alias type. However, there's some weird compiler error (), that forces us to wrap this in a struct instead. /// Once that issue is solved, we can probably revert back to being an alias type. #[derive(Debug)] -pub struct DatabaseProviderRW( - pub DatabaseProvider<::TXMut, Spec>, +pub struct DatabaseProviderRW( + pub DatabaseProvider<::TXMut, N>, ); -impl Deref for DatabaseProviderRW { - type Target = DatabaseProvider<::TXMut, Spec>; +impl Deref for DatabaseProviderRW { + type Target = DatabaseProvider<::TXMut, N>; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for DatabaseProviderRW { +impl DerefMut for DatabaseProviderRW { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl AsRef::TXMut, Spec>> - for DatabaseProviderRW +impl AsRef::TXMut, N>> + for DatabaseProviderRW { - fn as_ref(&self) -> &DatabaseProvider<::TXMut, Spec> { + fn as_ref(&self) -> &DatabaseProvider<::TXMut, N> { &self.0 } } -impl DatabaseProviderRW { +impl DatabaseProviderRW { /// Commit database transaction and static file if it exists. pub fn commit(self) -> ProviderResult { self.0.commit() @@ -115,10 +116,10 @@ impl DatabaseProviderRW { } } -impl From> - for DatabaseProvider<::TXMut, Spec> +impl From> + for DatabaseProvider<::TXMut, N> { - fn from(provider: DatabaseProviderRW) -> Self { + fn from(provider: DatabaseProviderRW) -> Self { provider.0 } } @@ -126,25 +127,25 @@ impl From> /// A provider struct that fetches data from the database. /// Wrapper around [`DbTx`] and [`DbTxMut`]. Example: [`HeaderProvider`] [`BlockHashReader`] #[derive(Debug)] -pub struct DatabaseProvider { +pub struct DatabaseProvider { /// Database transaction. tx: TX, /// Chain spec - chain_spec: Arc, + chain_spec: Arc, /// Static File provider static_file_provider: StaticFileProvider, /// Pruning configuration prune_modes: PruneModes, } -impl DatabaseProvider { +impl DatabaseProvider { /// Returns reference to prune modes. 
pub const fn prune_modes_ref(&self) -> &PruneModes { &self.prune_modes } } -impl DatabaseProvider { +impl DatabaseProvider { /// State provider for latest block pub fn latest<'a>(&'a self) -> ProviderResult> { trace!(target: "providers::db", "Returning latest state provider"); @@ -202,28 +203,28 @@ impl DatabaseProvider { } } -impl StaticFileProviderFactory for DatabaseProvider { +impl StaticFileProviderFactory for DatabaseProvider { /// Returns a static file provider fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } -impl ChainSpecProvider - for DatabaseProvider +impl> ChainSpecProvider + for DatabaseProvider { - type ChainSpec = Spec; + type ChainSpec = N::ChainSpec; fn chain_spec(&self) -> Arc { self.chain_spec.clone() } } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-write transaction. pub const fn new_rw( tx: TX, - chain_spec: Arc, + chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { @@ -231,15 +232,13 @@ impl DatabaseProvider { } } -impl AsRef for DatabaseProvider { +impl AsRef for DatabaseProvider { fn as_ref(&self) -> &Self { self } } -impl TryIntoHistoricalStateProvider - for DatabaseProvider -{ +impl TryIntoHistoricalStateProvider for DatabaseProvider { fn try_into_history_at_block( self, mut block_number: BlockNumber, @@ -282,8 +281,8 @@ impl TryIntoHistoricalStateProvider } } -impl - DatabaseProvider +impl + 'static> + DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] @@ -365,11 +364,11 @@ where Ok(Vec::new()) } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. 
pub const fn new( tx: TX, - chain_spec: Arc, + chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { @@ -392,7 +391,7 @@ impl DatabaseProvider { } /// Returns a reference to the chain specification. - pub fn chain_spec(&self) -> &Spec { + pub fn chain_spec(&self) -> &N::ChainSpec { &self.chain_spec } @@ -490,7 +489,7 @@ impl DatabaseProvider { construct_block: BF, ) -> ProviderResult> where - Spec: EthereumHardforks, + N::ChainSpec: EthereumHardforks, H: AsRef
, HF: FnOnce(BlockNumber) -> ProviderResult>, BF: FnOnce( @@ -556,7 +555,7 @@ impl DatabaseProvider { mut assemble_block: F, ) -> ProviderResult> where - Spec: EthereumHardforks, + N::ChainSpec: EthereumHardforks, H: AsRef
, HF: FnOnce(RangeInclusive) -> ProviderResult>, F: FnMut(H, Range, Vec
, Option) -> ProviderResult, @@ -633,7 +632,7 @@ impl DatabaseProvider { assemble_block: BF, ) -> ProviderResult> where - Spec: EthereumHardforks, + N::ChainSpec: EthereumHardforks, H: AsRef
, HF: Fn(RangeInclusive) -> ProviderResult>, BF: Fn( @@ -853,7 +852,7 @@ impl DatabaseProvider { } } -impl DatabaseProvider { +impl DatabaseProvider { /// Commit database transaction. pub fn commit(self) -> ProviderResult { Ok(self.tx.commit()?) @@ -1079,7 +1078,7 @@ impl DatabaseProvider { range: impl RangeBounds + Clone, ) -> ProviderResult> where - Spec: EthereumHardforks, + N::ChainSpec: EthereumHardforks, { // For blocks we need: // @@ -1218,13 +1217,13 @@ impl DatabaseProvider { } } -impl AccountReader for DatabaseProvider { +impl AccountReader for DatabaseProvider { fn basic_account(&self, address: Address) -> ProviderResult> { Ok(self.tx.get::(address)?) } } -impl AccountExtReader for DatabaseProvider { +impl AccountExtReader for DatabaseProvider { fn changed_accounts_with_range( &self, range: impl RangeBounds, @@ -1268,7 +1267,7 @@ impl AccountExtReader for DatabaseProvider StorageChangeSetReader for DatabaseProvider { +impl StorageChangeSetReader for DatabaseProvider { fn storage_changeset( &self, block_number: BlockNumber, @@ -1283,7 +1282,7 @@ impl StorageChangeSetReader for DatabaseProvider ChangeSetReader for DatabaseProvider { +impl ChangeSetReader for DatabaseProvider { fn account_block_changeset( &self, block_number: BlockNumber, @@ -1300,7 +1299,7 @@ impl ChangeSetReader for DatabaseProvider } } -impl HeaderSyncGapProvider for DatabaseProvider { +impl HeaderSyncGapProvider for DatabaseProvider { fn sync_gap( &self, tip: watch::Receiver, @@ -1344,8 +1343,8 @@ impl HeaderSyncGapProvider for DatabaseProvider HeaderProvider - for DatabaseProvider +impl> HeaderProvider + for DatabaseProvider { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? 
{ @@ -1444,7 +1443,7 @@ impl HeaderProvider } } -impl BlockHashReader for DatabaseProvider { +impl BlockHashReader for DatabaseProvider { fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, @@ -1471,7 +1470,7 @@ impl BlockHashReader for DatabaseProvider } } -impl BlockNumReader for DatabaseProvider { +impl BlockNumReader for DatabaseProvider { fn chain_info(&self) -> ProviderResult { let best_number = self.best_block_number()?; let best_hash = self.block_hash(best_number)?.unwrap_or_default(); @@ -1502,7 +1501,7 @@ impl BlockNumReader for DatabaseProvider } } -impl BlockReader for DatabaseProvider { +impl> BlockReader for DatabaseProvider { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) @@ -1677,8 +1676,8 @@ impl BlockReader for DatabasePr } } -impl TransactionsProviderExt - for DatabaseProvider +impl> TransactionsProviderExt + for DatabaseProvider { /// Recovers transaction hashes by walking through `Transactions` table and /// calculating them in a parallel manner. Returned unsorted. @@ -1747,8 +1746,8 @@ impl TransactionsProviderExt } // Calculates the hash of the given transaction -impl TransactionsProvider - for DatabaseProvider +impl> TransactionsProvider + for DatabaseProvider { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { Ok(self.tx.get::(tx_hash)?) 
@@ -1907,8 +1906,8 @@ impl TransactionsProvider } } -impl ReceiptProvider - for DatabaseProvider +impl> ReceiptProvider + for DatabaseProvider { fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( @@ -1955,8 +1954,8 @@ impl ReceiptProvider } } -impl WithdrawalsProvider - for DatabaseProvider +impl> WithdrawalsProvider + for DatabaseProvider { fn withdrawals_by_block( &self, @@ -1985,8 +1984,8 @@ impl WithdrawalsProvider } } -impl EvmEnvProvider - for DatabaseProvider +impl> EvmEnvProvider + for DatabaseProvider { fn fill_env_at( &self, @@ -2051,7 +2050,7 @@ impl EvmEnvProvider } } -impl StageCheckpointReader for DatabaseProvider { +impl StageCheckpointReader for DatabaseProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { Ok(self.tx.get::(id.to_string())?) } @@ -2070,7 +2069,7 @@ impl StageCheckpointReader for DatabaseProvider StageCheckpointWriter for DatabaseProvider { +impl StageCheckpointWriter for DatabaseProvider { /// Save stage checkpoint. fn save_stage_checkpoint( &self, @@ -2111,7 +2110,7 @@ impl StageCheckpointWriter for DatabaseProvider< } } -impl StorageReader for DatabaseProvider { +impl StorageReader for DatabaseProvider { fn plain_state_storages( &self, addresses_with_keys: impl IntoIterator)>, @@ -2174,7 +2173,7 @@ impl StorageReader for DatabaseProvider { } } -impl StateChangeWriter for DatabaseProvider { +impl StateChangeWriter for DatabaseProvider { fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2551,7 +2550,7 @@ impl StateChangeWriter for DatabaseProvid } } -impl TrieWriter for DatabaseProvider { +impl TrieWriter for DatabaseProvider { /// Writes trie updates. Returns the number of entries modified. 
fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { if trie_updates.is_empty() { @@ -2601,7 +2600,7 @@ impl TrieWriter for DatabaseProvider StorageTrieWriter for DatabaseProvider { +impl StorageTrieWriter for DatabaseProvider { /// Writes storage trie updates from the given storage trie map. First sorts the storage trie /// updates by the hashed address, writing in sorted order. fn write_storage_trie_updates( @@ -2638,7 +2637,7 @@ impl StorageTrieWriter for DatabaseProvid } } -impl HashingWriter for DatabaseProvider { +impl HashingWriter for DatabaseProvider { fn unwind_account_hashing<'a>( &self, changesets: impl Iterator, @@ -2863,7 +2862,7 @@ impl HashingWriter for DatabaseProvider HistoryWriter for DatabaseProvider { +impl HistoryWriter for DatabaseProvider { fn unwind_account_history_indices<'a>( &self, changesets: impl Iterator, @@ -2997,14 +2996,14 @@ impl HistoryWriter for DatabaseProvider StateReader for DatabaseProvider { +impl StateReader for DatabaseProvider { fn get_state(&self, block: BlockNumber) -> ProviderResult> { self.get_state(block..=block) } } -impl - BlockExecutionWriter for DatabaseProvider +impl + 'static> + BlockExecutionWriter for DatabaseProvider { fn take_block_and_execution_range( &self, @@ -3205,8 +3204,8 @@ impl BlockWriter - for DatabaseProvider +impl + 'static> BlockWriter + for DatabaseProvider { /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) @@ -3418,7 +3417,7 @@ impl PruneCheckpointReader for DatabaseProvider { +impl PruneCheckpointReader for DatabaseProvider { fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -3435,7 +3434,7 @@ impl PruneCheckpointReader for DatabaseProvider PruneCheckpointWriter for DatabaseProvider { +impl PruneCheckpointWriter for DatabaseProvider { fn save_prune_checkpoint( &self, segment: PruneSegment, @@ -3445,7 +3444,7 @@ impl PruneCheckpointWriter for DatabaseProvider< } } -impl 
StatsReader for DatabaseProvider { +impl StatsReader for DatabaseProvider { fn count_entries(&self) -> ProviderResult { let db_entries = self.tx.entries::()?; let static_file_entries = match self.static_file_provider.count_entries::() { @@ -3458,7 +3457,7 @@ impl StatsReader for DatabaseProvider { } } -impl ChainStateBlockReader for DatabaseProvider { +impl ChainStateBlockReader for DatabaseProvider { fn last_finalized_block_number(&self) -> ProviderResult> { let mut finalized_blocks = self .tx @@ -3484,7 +3483,7 @@ impl ChainStateBlockReader for DatabaseProvider ChainStateBlockWriter for DatabaseProvider { +impl ChainStateBlockWriter for DatabaseProvider { fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()> { Ok(self .tx @@ -3498,7 +3497,7 @@ impl ChainStateBlockWriter for DatabaseProvider< } } -impl DBProvider for DatabaseProvider { +impl DBProvider for DatabaseProvider { type Tx = TX; fn tx_ref(&self) -> &Self::Tx { diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 07486f5557c..cacb71b351d 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -8,6 +8,7 @@ use alloy_primitives::{ use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; +use reth_node_types::NodeTypes; use reth_primitives::{ Account, BlockBody, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, Signature, Transaction, TransactionSigned, TxType, Withdrawal, Withdrawals, @@ -17,8 +18,8 @@ use revm::{db::BundleState, primitives::AccountInfo}; use std::{str::FromStr, sync::LazyLock}; /// Assert genesis block -pub fn assert_genesis_block( - provider: &DatabaseProviderRW, +pub fn assert_genesis_block( + provider: &DatabaseProviderRW, g: SealedBlock, ) { let n = g.number; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 
ed861f5f182..1053b4778fd 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -19,6 +19,7 @@ use reth_db::mock::{DatabaseMock, TxMock}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; +use reth_node_types::NodeTypes; use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, @@ -149,10 +150,19 @@ impl MockEthProvider { } } +/// Mock node. +#[derive(Debug)] +pub struct MockNode; + +impl NodeTypes for MockNode { + type Primitives = (); + type ChainSpec = ChainSpec; +} + impl DatabaseProviderFactory for MockEthProvider { type DB = DatabaseMock; - type Provider = DatabaseProvider; - type ProviderRW = DatabaseProvider; + type Provider = DatabaseProvider; + type ProviderRW = DatabaseProvider; fn database_provider_ro(&self) -> ProviderResult { Err(ConsistentViewError::Syncing { best_block: GotExpected::new(0, 0) }.into()) diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index f5823404c89..aee26436479 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -11,7 +11,8 @@ use reth_db_api::{ }; use reth_primitives::{Account, StorageEntry}; use reth_provider::{ - test_utils::create_test_provider_factory, DatabaseProviderRW, StorageTrieWriter, TrieWriter, + providers::ProviderNodeTypes, test_utils::create_test_provider_factory, DatabaseProviderRW, + StorageTrieWriter, TrieWriter, }; use reth_trie::{ prefix_set::PrefixSetMut, @@ -693,8 +694,8 @@ fn storage_trie_around_extension_node() { assert_trie_updates(updates.storage_nodes_ref()); } -fn extension_node_storage_trie( - tx: &DatabaseProviderRW>, Spec>, +fn extension_node_storage_trie( + tx: &DatabaseProviderRW>, N>, hashed_address: B256, ) -> (B256, StorageTrieUpdates) { let value = U256::from(1); @@ 
-721,8 +722,8 @@ fn extension_node_storage_trie( (root, trie_updates) } -fn extension_node_trie( - tx: &DatabaseProviderRW>, Spec>, +fn extension_node_trie( + tx: &DatabaseProviderRW>, N>, ) -> B256 { let a = Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) }; let val = encode_account(a, None); From 2e750f0ca03043cd2370036cc4ed7c9e1c12f173 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 29 Oct 2024 15:57:35 +0100 Subject: [PATCH 241/970] test(prune): add unit tests for `ReceiptsLogPruneConfig` (#11916) --- crates/prune/types/src/lib.rs | 230 ++++++++++++++++++++++++++++++++- crates/prune/types/src/mode.rs | 5 + 2 files changed, 231 insertions(+), 4 deletions(-) diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 6e06d6fc5dc..8483b7b7370 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -27,6 +27,7 @@ use std::collections::BTreeMap; pub use target::{PruneModes, MINIMUM_PRUNING_DISTANCE}; use alloy_primitives::{Address, BlockNumber}; +use std::ops::Deref; /// Configuration for pruning receipts not associated with logs emitted by the specified contracts. #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] @@ -59,7 +60,7 @@ impl ReceiptsLogPruneConfig { pruned_block: Option, ) -> Result>, PruneSegmentError> { let mut map = BTreeMap::new(); - let pruned_block = pruned_block.unwrap_or_default(); + let base_block = pruned_block.unwrap_or_default() + 1; for (address, mode) in &self.0 { // Getting `None`, means that there is nothing to prune yet, so we need it to include in @@ -69,7 +70,7 @@ impl ReceiptsLogPruneConfig { // // Reminder, that we increment because the [`BlockNumber`] key of the new map should be // viewed as `PruneMode::Before(block)` - let block = (pruned_block + 1).max( + let block = base_block.max( mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? 
.map(|(block, _)| block) .unwrap_or_default() + @@ -90,8 +91,8 @@ impl ReceiptsLogPruneConfig { let pruned_block = pruned_block.unwrap_or_default(); let mut lowest = None; - for mode in self.0.values() { - if let PruneMode::Distance(_) = mode { + for mode in self.values() { + if mode.is_distance() { if let Some((block, _)) = mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? { @@ -103,3 +104,224 @@ impl ReceiptsLogPruneConfig { Ok(lowest.map(|lowest| lowest.max(pruned_block))) } } + +impl Deref for ReceiptsLogPruneConfig { + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_group_by_block_empty_config() { + let config = ReceiptsLogPruneConfig(BTreeMap::new()); + let tip = 1000; + let pruned_block = None; + + let result = config.group_by_block(tip, pruned_block).unwrap(); + assert!(result.is_empty(), "The result should be empty when the config is empty"); + } + + #[test] + fn test_group_by_block_single_entry() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Before(500); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + // Big tip to have something to prune for the target block + let tip = 3000000; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect one entry with block 500 and the corresponding address + assert_eq!(result.len(), 1); + assert_eq!(result[&500], vec![&address], "Address should be grouped under block 500"); + + // Tip smaller than the target block, so that we have nothing to prune for the block + let tip = 300; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect one entry with block 400 and the corresponding address + assert_eq!(result.len(), 1); + assert_eq!(result[&401], vec![&address], "Address 
should be grouped under block 400"); + } + + #[test] + fn test_group_by_block_multiple_entries() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Before(600); + let prune_mode2 = PruneMode::Before(800); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 900000; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect two entries: one for block 600 and another for block 800 + assert_eq!(result.len(), 2); + assert_eq!(result[&600], vec![&address1], "Address1 should be grouped under block 600"); + assert_eq!(result[&800], vec![&address2], "Address2 should be grouped under block 800"); + } + + #[test] + fn test_group_by_block_with_distance_prune_mode() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Distance(100000); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 100100; + // Pruned block is smaller than the target block + let pruned_block = Some(50); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect the entry to be grouped under block 100 (tip - distance) + assert_eq!(result.len(), 1); + assert_eq!(result[&101], vec![&address], "Address should be grouped under block 100"); + + let tip = 100100; + // Pruned block is larger than the target block + let pruned_block = Some(800); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect the entry to be grouped under block 800 which is larger than tip - distance + assert_eq!(result.len(), 1); + assert_eq!(result[&801], vec![&address], "Address should be grouped under block 800"); + } + + #[test] + fn test_lowest_block_with_distance_empty_config() { + let config = 
ReceiptsLogPruneConfig(BTreeMap::new()); + let tip = 1000; + let pruned_block = None; + + let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); + assert_eq!(result, None, "The result should be None when the config is empty"); + } + + #[test] + fn test_lowest_block_with_distance_no_distance_mode() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Before(500); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 1000; + let pruned_block = None; + + let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); + assert_eq!(result, None, "The result should be None when there are no Distance modes"); + } + + #[test] + fn test_lowest_block_with_distance_single_entry() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Distance(100000); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + + let tip = 100100; + let pruned_block = Some(400); + + // Expect the lowest block to be 400 as 400 > 100100 - 100000 (tip - distance) + assert_eq!( + config.lowest_block_with_distance(tip, pruned_block).unwrap(), + Some(400), + "The lowest block should be 400" + ); + + let tip = 100100; + let pruned_block = Some(50); + + // Expect the lowest block to be 100 as 100 > 50 (pruned block) + assert_eq!( + config.lowest_block_with_distance(tip, pruned_block).unwrap(), + Some(100), + "The lowest block should be 100" + ); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_last() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100100); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = 
ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100); + + // The lowest block should be 200300 - 100300 = 100000: + // - First iteration will return 100200 => 200300 - 100100 = 100200 + // - Second iteration will return 100000 => 200300 - 100300 = 100000 < 100200 + // - Final result is 100000 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_first() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100400); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100); + + // The lowest block should be 200300 - 100400 = 99900: + // - First iteration, lowest block is 200300 - 100400 = 99900 + // - Second iteration, lowest block is still 99900 < 200300 - 100300 = 100000 + // - Final result is 99900 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(99900)); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_pruned_block() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100400); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100000); + + // The lowest block should be 100000 because: + // - Lowest is 200300 - 100400 = 99900 < 200300 - 100300 = 100000 + // - Lowest is compared to the pruned block 100000: 100000 > 99900 + // - Finally the lowest block is 100000 + 
assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); + } +} diff --git a/crates/prune/types/src/mode.rs b/crates/prune/types/src/mode.rs index 9a8e55bb383..de9b9e6dc08 100644 --- a/crates/prune/types/src/mode.rs +++ b/crates/prune/types/src/mode.rs @@ -74,6 +74,11 @@ impl PruneMode { pub const fn is_full(&self) -> bool { matches!(self, Self::Full) } + + /// Returns true if the prune mode is [`PruneMode::Distance`]. + pub const fn is_distance(&self) -> bool { + matches!(self, Self::Distance(_)) + } } #[cfg(test)] From 82784183e74deb78556eb01da5b015904eb8d194 Mon Sep 17 00:00:00 2001 From: tedison <76473430+edisontim@users.noreply.github.com> Date: Tue, 29 Oct 2024 12:47:12 -0400 Subject: [PATCH 242/970] feat: transaction trait (#11877) --- crates/primitives-traits/Cargo.toml | 1 - crates/primitives-traits/src/lib.rs | 3 +- .../primitives-traits/src/transaction/mod.rs | 55 +++++++++++++++---- 3 files changed, 46 insertions(+), 13 deletions(-) diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 4319232f824..6cafe8b8b1e 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -44,7 +44,6 @@ reth-testing-utils.workspace = true alloy-primitives = { workspace = true, features = ["arbitrary"] } alloy-consensus = { workspace = true, features = ["arbitrary"] } -arbitrary = { workspace = true, features = ["derive"] } bincode.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 57d1119b035..0489a250bbd 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -14,6 +14,7 @@ extern crate alloc; /// Common constants. 
pub mod constants; + pub use constants::gas_units::{format_gas, format_gas_throughput}; /// Minimal account @@ -24,7 +25,7 @@ pub mod receipt; pub use receipt::Receipt; pub mod transaction; -pub use transaction::{signed::SignedTransaction, Transaction}; +pub use transaction::{signed::SignedTransaction, FullTransaction, Transaction}; mod integer_list; pub use integer_list::{IntegerList, IntegerListError}; diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index a306c5f76ed..a1ad81ab327 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -1,28 +1,61 @@ //! Transaction abstraction -pub mod signed; +use core::{fmt::Debug, hash::Hash}; -use alloc::fmt; +use alloy_primitives::{TxKind, B256}; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; -/// Helper trait that unifies all behaviour required by transaction to support full node operations. -pub trait FullTransaction: Transaction + Compact {} - -impl FullTransaction for T where T: Transaction + Compact {} +pub mod signed; +#[allow(dead_code)] /// Abstraction of a transaction. pub trait Transaction: - alloy_consensus::Transaction + Debug + + Default + Clone - + fmt::Debug - + PartialEq + Eq - + Default + + PartialEq + + Hash + + Serialize + alloy_rlp::Encodable + alloy_rlp::Decodable - + Serialize + for<'de> Deserialize<'de> + + alloy_consensus::Transaction + + MaybeArbitrary { + /// Heavy operation that return signature hash over rlp encoded transaction. + /// It is only for signature signing or signer recovery. + fn signature_hash(&self) -> B256; + + /// Gets the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + fn kind(&self) -> TxKind; + + /// Returns true if the tx supports dynamic fees + fn is_dynamic_fee(&self) -> bool; + + /// Returns the effective gas price for the given base fee. 
+ fn effective_gas_price(&self, base_fee: Option) -> u128; + + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. + fn encode_without_signature(&self, out: &mut dyn bytes::BufMut); + + /// Calculates a heuristic for the in-memory size of the [Transaction]. + fn size(&self) -> usize; } + +#[cfg(not(feature = "arbitrary"))] +/// Helper trait that requires arbitrary implementation if the feature is enabled. +pub trait MaybeArbitrary {} + +#[cfg(feature = "arbitrary")] +/// Helper trait that requires arbitrary implementation if the feature is enabled. +pub trait MaybeArbitrary: for<'a> arbitrary::Arbitrary<'a> {} + +/// Helper trait that unifies all behaviour required by transaction to support full node operations. +pub trait FullTransaction: Transaction + Compact {} + +impl FullTransaction for T where T: Transaction + Compact {} From 734c78fdfb46cc5a97971450ed74c6cbdf62d5af Mon Sep 17 00:00:00 2001 From: Kufre Samuel Date: Tue, 29 Oct 2024 22:24:35 +0100 Subject: [PATCH 243/970] feat(discv4): neighbors packet logging (#12042) Co-authored-by: Oliver Nordbjerg --- Cargo.lock | 1 + crates/net/discv4/Cargo.toml | 1 + crates/net/discv4/src/lib.rs | 13 ++++++++++++- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 5b62244576e..6545563d95c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6922,6 +6922,7 @@ dependencies = [ "discv5", "enr", "generic-array", + "itertools 0.13.0", "parking_lot", "rand 0.8.5", "reth-ethereum-forks", diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 6fb66938885..f1c8410eeba 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -42,6 +42,7 @@ parking_lot.workspace = true rand = { workspace = true, optional = true } generic-array.workspace = true serde = { workspace = true, optional = true } +itertools.workspace = true [dev-dependencies] assert_matches.workspace = true diff --git 
a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index a99906bdf09..788e93048f1 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -38,6 +38,7 @@ use discv5::{ ConnectionDirection, ConnectionState, }; use enr::Enr; +use itertools::Itertools; use parking_lot::Mutex; use proto::{EnrRequest, EnrResponse}; use reth_ethereum_forks::ForkId; @@ -861,7 +862,7 @@ impl Discv4Service { let Some(bucket) = self.kbuckets.get_bucket(&key) else { return false }; if bucket.num_entries() < MAX_NODES_PER_BUCKET / 2 { // skip half empty bucket - return false; + return false } self.remove_key(node_id, key) } @@ -1406,6 +1407,16 @@ impl Discv4Service { } }; + // log the peers we discovered + trace!(target: "discv4", + target=format!("{:#?}", node_id), + peers_count=msg.nodes.len(), + peers=format!("[{:#}]", msg.nodes.iter() + .map(|node_rec| node_rec.id + ).format(", ")), + "Received peers from Neighbours packet" + ); + // This is the recursive lookup step where we initiate new FindNode requests for new nodes // that were discovered. 
for node in msg.nodes.into_iter().map(NodeRecord::into_ipv4_mapped) { From 129f3ba9116da0fcbfafb78e66b7a0163e5464d3 Mon Sep 17 00:00:00 2001 From: frisitano <35734660+frisitano@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:48:43 +0800 Subject: [PATCH 244/970] feat: introduce StateCommitment type (#11842) --- Cargo.lock | 5 ++ crates/ethereum/node/Cargo.toml | 2 + crates/ethereum/node/src/node.rs | 2 + crates/exex/test-utils/Cargo.toml | 1 + crates/exex/test-utils/src/lib.rs | 1 + crates/node/builder/src/node.rs | 2 + crates/node/types/Cargo.toml | 1 + crates/node/types/src/lib.rs | 49 +++++++++++++------ crates/optimism/node/Cargo.toml | 2 + crates/optimism/node/src/node.rs | 2 + .../storage/provider/src/test_utils/mock.rs | 2 + crates/storage/provider/src/test_utils/mod.rs | 1 + crates/trie/common/src/key.rs | 18 +++++++ crates/trie/common/src/lib.rs | 3 ++ crates/trie/db/src/commitment.rs | 39 +++++++++++++++ crates/trie/db/src/lib.rs | 2 + examples/custom-engine-types/Cargo.toml | 1 + examples/custom-engine-types/src/main.rs | 2 + 18 files changed, 120 insertions(+), 15 deletions(-) create mode 100644 crates/trie/common/src/key.rs create mode 100644 crates/trie/db/src/commitment.rs diff --git a/Cargo.lock b/Cargo.lock index 6545563d95c..27581ba3a80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2856,6 +2856,7 @@ dependencies = [ "reth-payload-builder", "reth-primitives", "reth-tracing", + "reth-trie-db", "serde", "thiserror", "tokio", @@ -7564,6 +7565,7 @@ dependencies = [ "reth-provider", "reth-tasks", "reth-transaction-pool", + "reth-trie-db", "tempfile", "thiserror", "tokio", @@ -8024,6 +8026,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", "revm", "serde_json", "tokio", @@ -8086,6 +8089,7 @@ dependencies = [ "reth-engine-primitives", "reth-primitives", "reth-primitives-traits", + "reth-trie-db", ] [[package]] @@ -8246,6 +8250,7 @@ dependencies = [ "reth-revm", "reth-tracing", "reth-transaction-pool", + 
"reth-trie-db", "revm", "serde", "serde_json", diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 98224b99e26..83b62034740 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -31,6 +31,7 @@ reth-node-api.workspace = true reth-chainspec.workspace = true reth-primitives.workspace = true reth-revm = { workspace = true, features = ["std"] } +reth-trie-db.workspace = true # revm with required ethereum features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -72,6 +73,7 @@ test-utils = [ "reth-db/test-utils", "reth-provider/test-utils", "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", "revm/test-utils", "reth-evm/test-utils" ] diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index dbd6ce0a134..1942f8a9e56 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -34,6 +34,7 @@ use reth_transaction_pool::{ blobstore::DiskFileBlobStore, EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; @@ -81,6 +82,7 @@ impl EthereumNode { impl NodeTypes for EthereumNode { type Primitives = EthPrimitives; type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; } impl NodeTypesWithEngine for EthereumNode { diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index b850295a332..cd0e0831b49 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -31,6 +31,7 @@ reth-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } +reth-trie-db.workspace = true ## async futures-util.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 1f6ea75ce6d..06aa8c81c7c 
100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -119,6 +119,7 @@ pub struct TestNode; impl NodeTypes for TestNode { type Primitives = (); type ChainSpec = ChainSpec; + type StateCommitment = reth_trie_db::MerklePatriciaTrie; } impl NodeTypesWithEngine for TestNode { diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 3b2f467d61c..62c710ea802 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -69,6 +69,8 @@ where type Primitives = ::Primitives; type ChainSpec = ::ChainSpec; + + type StateCommitment = ::StateCommitment; } impl NodeTypesWithEngine for AnyNode diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml index 5747abe9c34..21facae5460 100644 --- a/crates/node/types/Cargo.toml +++ b/crates/node/types/Cargo.toml @@ -17,3 +17,4 @@ reth-db-api.workspace = true reth-engine-primitives.workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true +reth-trie-db.workspace = true diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 5ba03e6795a..38e194bd4fb 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -18,6 +18,7 @@ use reth_db_api::{ Database, }; use reth_engine_primitives::EngineTypes; +use reth_trie_db::StateCommitment; /// Configures all the primitive types of the node. pub trait NodePrimitives { @@ -39,6 +40,8 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static { type Primitives: NodePrimitives; /// The type used for configuration of the EVM. type ChainSpec: EthChainSpec; + /// The type used to perform state commitment operations. + type StateCommitment: StateCommitment; } /// The type that configures an Ethereum-like node with an engine for consensus. 
@@ -89,6 +92,7 @@ where { type Primitives = Types::Primitives; type ChainSpec = Types::ChainSpec; + type StateCommitment = Types::StateCommitment; } impl NodeTypesWithEngine for NodeTypesWithDBAdapter @@ -109,70 +113,85 @@ where /// A [`NodeTypes`] type builder. #[derive(Default, Debug)] -pub struct AnyNodeTypes

(PhantomData

, PhantomData); +pub struct AnyNodeTypes

(PhantomData

, PhantomData, PhantomData); -impl AnyNodeTypes { +impl AnyNodeTypes { /// Sets the `Primitives` associated type. - pub const fn primitives(self) -> AnyNodeTypes { - AnyNodeTypes::(PhantomData::, PhantomData::) + pub const fn primitives(self) -> AnyNodeTypes { + AnyNodeTypes::(PhantomData::, PhantomData::, PhantomData::) } /// Sets the `ChainSpec` associated type. - pub const fn chain_spec(self) -> AnyNodeTypes { - AnyNodeTypes::(PhantomData::

, PhantomData::) + pub const fn chain_spec(self) -> AnyNodeTypes { + AnyNodeTypes::(PhantomData::

, PhantomData::, PhantomData::) + } + + /// Sets the `StateCommitment` associated type. + pub const fn state_commitment(self) -> AnyNodeTypes { + AnyNodeTypes::(PhantomData::

, PhantomData::, PhantomData::) } } -impl NodeTypes for AnyNodeTypes +impl NodeTypes for AnyNodeTypes where P: NodePrimitives + Send + Sync + Unpin + 'static, C: EthChainSpec + 'static, + S: StateCommitment, { type Primitives = P; type ChainSpec = C; + type StateCommitment = S; } /// A [`NodeTypesWithEngine`] type builder. #[derive(Default, Debug)] -pub struct AnyNodeTypesWithEngine

{ +pub struct AnyNodeTypesWithEngine

{ /// Embedding the basic node types. - base: AnyNodeTypes, + base: AnyNodeTypes, /// Phantom data for the engine. _engine: PhantomData, } -impl AnyNodeTypesWithEngine { +impl AnyNodeTypesWithEngine { /// Sets the `Primitives` associated type. - pub const fn primitives(self) -> AnyNodeTypesWithEngine { + pub const fn primitives(self) -> AnyNodeTypesWithEngine { AnyNodeTypesWithEngine { base: self.base.primitives::(), _engine: PhantomData } } /// Sets the `Engine` associated type. - pub const fn engine(self) -> AnyNodeTypesWithEngine { + pub const fn engine(self) -> AnyNodeTypesWithEngine { AnyNodeTypesWithEngine { base: self.base, _engine: PhantomData:: } } /// Sets the `ChainSpec` associated type. - pub const fn chain_spec(self) -> AnyNodeTypesWithEngine { + pub const fn chain_spec(self) -> AnyNodeTypesWithEngine { AnyNodeTypesWithEngine { base: self.base.chain_spec::(), _engine: PhantomData } } + + /// Sets the `StateCommitment` associated type. + pub const fn state_commitment(self) -> AnyNodeTypesWithEngine { + AnyNodeTypesWithEngine { base: self.base.state_commitment::(), _engine: PhantomData } + } } -impl NodeTypes for AnyNodeTypesWithEngine +impl NodeTypes for AnyNodeTypesWithEngine where P: NodePrimitives + Send + Sync + Unpin + 'static, E: EngineTypes + Send + Sync + Unpin, C: EthChainSpec + 'static, + S: StateCommitment, { type Primitives = P; type ChainSpec = C; + type StateCommitment = S; } -impl NodeTypesWithEngine for AnyNodeTypesWithEngine +impl NodeTypesWithEngine for AnyNodeTypesWithEngine where P: NodePrimitives + Send + Sync + Unpin + 'static, E: EngineTypes + Send + Sync + Unpin, C: EthChainSpec + 'static, + S: StateCommitment, { type Engine = E; } diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 37cf4a328ea..deabbac5249 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -28,6 +28,7 @@ reth-network.workspace = true reth-evm.workspace = true reth-revm = { workspace = true, 
features = ["std"] } reth-beacon-consensus.workspace = true +reth-trie-db.workspace = true # op-reth reth-optimism-payload-builder.workspace = true @@ -99,5 +100,6 @@ test-utils = [ "reth-db/test-utils", "reth-provider/test-utils", "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", "revm/test-utils" ] diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 9492bb8c429..4328a55fb15 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -30,6 +30,7 @@ use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; use crate::{ args::RollupArgs, @@ -122,6 +123,7 @@ where impl NodeTypes for OptimismNode { type Primitives = OpPrimitives; type ChainSpec = OpChainSpec; + type StateCommitment = MerklePatriciaTrie; } impl NodeTypesWithEngine for OptimismNode { diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 1053b4778fd..e2e08e61a86 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -34,6 +34,7 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, TrieInput, }; +use reth_trie_db::MerklePatriciaTrie; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ collections::BTreeMap, @@ -157,6 +158,7 @@ pub struct MockNode; impl NodeTypes for MockNode { type Primitives = (); type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; } impl DatabaseProviderFactory for MockEthProvider { diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index 2200781096d..c0e80930b31 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -25,6 +25,7 @@ pub type MockNodeTypes = 
reth_node_types::AnyNodeTypesWithEngine< (), reth_ethereum_engine_primitives::EthEngineTypes, reth_chainspec::ChainSpec, + reth_trie_db::MerklePatriciaTrie, >; /// Mock [`reth_node_types::NodeTypesWithDB`] for testing. diff --git a/crates/trie/common/src/key.rs b/crates/trie/common/src/key.rs new file mode 100644 index 00000000000..9e440d199fa --- /dev/null +++ b/crates/trie/common/src/key.rs @@ -0,0 +1,18 @@ +use alloy_primitives::B256; +use revm_primitives::keccak256; + +/// Trait for hashing keys in state. +pub trait KeyHasher: Default + Clone + Send + Sync + 'static { + /// Hashes the given bytes into a 256-bit hash. + fn hash_key>(bytes: T) -> B256; +} + +/// A key hasher that uses the Keccak-256 hash function. +#[derive(Clone, Debug, Default)] +pub struct KeccakKeyHasher; + +impl KeyHasher for KeccakKeyHasher { + fn hash_key>(bytes: T) -> B256 { + keccak256(bytes) + } +} diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index bdec36028b9..7645ebd3a1c 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -14,6 +14,9 @@ pub mod hash_builder; mod account; pub use account::TrieAccount; +mod key; +pub use key::{KeccakKeyHasher, KeyHasher}; + mod nibbles; pub use nibbles::{Nibbles, StoredNibbles, StoredNibblesSubKey}; diff --git a/crates/trie/db/src/commitment.rs b/crates/trie/db/src/commitment.rs new file mode 100644 index 00000000000..c608aefff8a --- /dev/null +++ b/crates/trie/db/src/commitment.rs @@ -0,0 +1,39 @@ +use crate::{ + DatabaseHashedCursorFactory, DatabaseProof, DatabaseStateRoot, DatabaseStorageRoot, + DatabaseTrieCursorFactory, DatabaseTrieWitness, +}; +use reth_db::transaction::DbTx; +use reth_trie::{ + proof::Proof, witness::TrieWitness, KeccakKeyHasher, KeyHasher, StateRoot, StorageRoot, +}; + +/// The `StateCommitment` trait provides associated types for state commitment operations. +pub trait StateCommitment: std::fmt::Debug + Send + Sync + Unpin + 'static { + /// The state root type. 
+ type StateRoot<'a, TX: DbTx + 'a>: DatabaseStateRoot<'a, TX>; + /// The storage root type. + type StorageRoot<'a, TX: DbTx + 'a>: DatabaseStorageRoot<'a, TX>; + /// The state proof type. + type StateProof<'a, TX: DbTx + 'a>: DatabaseProof<'a, TX>; + /// The state witness type. + type StateWitness<'a, TX: DbTx + 'a>: DatabaseTrieWitness<'a, TX>; + /// The key hasher type. + type KeyHasher: KeyHasher; +} + +/// The state commitment type for Ethereum's Merkle Patricia Trie. +#[derive(Debug)] +#[non_exhaustive] +pub struct MerklePatriciaTrie; + +impl StateCommitment for MerklePatriciaTrie { + type StateRoot<'a, TX: DbTx + 'a> = + StateRoot, DatabaseHashedCursorFactory<'a, TX>>; + type StorageRoot<'a, TX: DbTx + 'a> = + StorageRoot, DatabaseHashedCursorFactory<'a, TX>>; + type StateProof<'a, TX: DbTx + 'a> = + Proof, DatabaseHashedCursorFactory<'a, TX>>; + type StateWitness<'a, TX: DbTx + 'a> = + TrieWitness, DatabaseHashedCursorFactory<'a, TX>>; + type KeyHasher = KeccakKeyHasher; +} diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs index 3a9b1e32823..27c18af6cbf 100644 --- a/crates/trie/db/src/lib.rs +++ b/crates/trie/db/src/lib.rs @@ -1,5 +1,6 @@ //! An integration of [`reth-trie`] with [`reth-db`]. 
+mod commitment; mod hashed_cursor; mod prefix_set; mod proof; @@ -8,6 +9,7 @@ mod storage; mod trie_cursor; mod witness; +pub use commitment::{MerklePatriciaTrie, StateCommitment}; pub use hashed_cursor::{ DatabaseHashedAccountCursor, DatabaseHashedCursorFactory, DatabaseHashedStorageCursor, }; diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index f826451d203..1fbb3c4947a 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -16,6 +16,7 @@ reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true +reth-trie-db.workspace = true alloy-genesis.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } alloy-primitives.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 30f89a0b9d7..a48c44b200b 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -70,6 +70,7 @@ use reth_payload_builder::{ }; use reth_primitives::Withdrawals; use reth_tracing::{RethTracer, Tracer}; +use reth_trie_db::MerklePatriciaTrie; /// A custom payload attributes type. 
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -228,6 +229,7 @@ struct MyCustomNode; impl NodeTypes for MyCustomNode { type Primitives = (); type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; } /// Configure the node types with the custom engine types From 367b4ed18af583305c18adb118be0b303e55aa8f Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:10:14 +0100 Subject: [PATCH 245/970] chore(meta): update SECURITY.md (#12190) --- SECURITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index 5260d529f5a..bea27ad1140 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,4 +2,4 @@ ## Reporting a Vulnerability -Contact georgios at paradigm.xyz. +Contact [security@ithaca.xyz](mailto:security@ithaca.xyz). From 6e794ee6738c4f6c610010330d76e5a845b091b2 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 30 Oct 2024 07:29:48 -0400 Subject: [PATCH 246/970] fix(ecies): bound initial header body size (#12172) --- crates/net/ecies/src/algorithm.rs | 3 ++- crates/net/ecies/src/codec.rs | 36 +++++++++++++++++++++++++++---- crates/net/ecies/src/error.rs | 8 +++++++ 3 files changed, 42 insertions(+), 5 deletions(-) diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 6bf9717fe52..e4266d9a06f 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -650,7 +650,8 @@ impl ECIES { out.extend_from_slice(tag.as_slice()); } - /// Extracts the header from slice and returns the body size. + /// Reads the `RLPx` header from the slice, setting up the MAC and AES, returning the body + /// size contained in the header. 
pub fn read_header(&mut self, data: &mut [u8]) -> Result { // If the data is not large enough to fit the header and mac bytes, return an error // diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index c3e9b8d58cc..b5a10284cf2 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -1,12 +1,15 @@ //! This contains the main codec for `RLPx` ECIES messages -use crate::{algorithm::ECIES, ECIESError, EgressECIESValue, IngressECIESValue}; +use crate::{algorithm::ECIES, ECIESError, ECIESErrorImpl, EgressECIESValue, IngressECIESValue}; use alloy_primitives::{bytes::BytesMut, B512 as PeerId}; use secp256k1::SecretKey; use std::{fmt::Debug, io}; use tokio_util::codec::{Decoder, Encoder}; use tracing::{instrument, trace}; +/// The max size that the initial handshake packet can be. Currently 2KiB. +const MAX_INITIAL_HANDSHAKE_SIZE: usize = 2048; + /// Tokio codec for ECIES #[derive(Debug)] pub struct ECIESCodec { @@ -26,6 +29,11 @@ pub enum ECIESState { /// message containing the nonce and other metadata. Ack, + /// This is the same as the [`ECIESState::Header`] stage, but occurs only after the first + /// [`ECIESState::Ack`] message. This is so that the initial handshake message can be properly + /// validated. + InitialHeader, + /// The third stage of the ECIES handshake, where header is parsed, message integrity checks /// performed, and message is decrypted. 
Header, @@ -70,7 +78,7 @@ impl Decoder for ECIESCodec { self.ecies.read_auth(&mut buf.split_to(total_size))?; - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; return Ok(Some(IngressECIESValue::AuthReceive(self.ecies.remote_id()))) } ECIESState::Ack => { @@ -89,9 +97,29 @@ impl Decoder for ECIESCodec { self.ecies.read_ack(&mut buf.split_to(total_size))?; - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; return Ok(Some(IngressECIESValue::Ack)) } + ECIESState::InitialHeader => { + if buf.len() < ECIES::header_len() { + trace!("current len {}, need {}", buf.len(), ECIES::header_len()); + return Ok(None) + } + + let body_size = + self.ecies.read_header(&mut buf.split_to(ECIES::header_len()))?; + + if body_size > MAX_INITIAL_HANDSHAKE_SIZE { + trace!(?body_size, max=?MAX_INITIAL_HANDSHAKE_SIZE, "Header exceeds max initial handshake size"); + return Err(ECIESErrorImpl::InitialHeaderBodyTooLarge { + body_size, + max_body_size: MAX_INITIAL_HANDSHAKE_SIZE, + } + .into()) + } + + self.state = ECIESState::Body; + } ECIESState::Header => { if buf.len() < ECIES::header_len() { trace!("current len {}, need {}", buf.len(), ECIES::header_len()); @@ -131,7 +159,7 @@ impl Encoder for ECIESCodec { Ok(()) } EgressECIESValue::Ack => { - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; self.ecies.write_ack(buf); Ok(()) } diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index 79965f73303..9dabfc16183 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -62,6 +62,14 @@ pub enum ECIESErrorImpl { /// The encrypted data is not large enough for all fields #[error("encrypted data is not large enough for all fields")] EncryptedDataTooSmall, + /// The initial header body is too large. 
+ #[error("initial header body is {body_size} but the max is {max_body_size}")] + InitialHeaderBodyTooLarge { + /// The body size from the header + body_size: usize, + /// The max body size + max_body_size: usize, + }, /// Error when trying to split an array beyond its length #[error("requested {idx} but array len is {len}")] OutOfBounds { From 2778ba3d52605e3837ef1abd29f9ef956e0e8140 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 30 Oct 2024 12:30:49 +0100 Subject: [PATCH 247/970] tx-pool: fix `ExceedsGasLimit` error message order (#12191) --- crates/transaction-pool/src/pool/txpool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index b11815fc4b5..bd0aacccfee 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -628,8 +628,8 @@ impl TxPool { *transaction.hash(), PoolErrorKind::InvalidTransaction( InvalidPoolTransactionError::ExceedsGasLimit( - block_gas_limit, tx_gas_limit, + block_gas_limit, ), ), )), From 93a9b8a218309a296d96d5841df4f4b7b60701cd Mon Sep 17 00:00:00 2001 From: cody-wang-cb Date: Wed, 30 Oct 2024 08:59:19 -0400 Subject: [PATCH 248/970] feat: Eip1559 params in extradata (#11887) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> Co-authored-by: Matthias Seitz --- .../ethereum/engine-primitives/src/payload.rs | 2 +- crates/ethereum/evm/src/lib.rs | 7 +- crates/ethereum/payload/src/lib.rs | 10 +- crates/evm/Cargo.toml | 16 +- crates/evm/src/lib.rs | 5 +- crates/optimism/chainspec/src/lib.rs | 174 +++++++++++++++++- crates/optimism/evm/src/config.rs | 8 +- crates/optimism/evm/src/lib.rs | 15 +- crates/optimism/hardforks/src/hardfork.rs | 4 + crates/optimism/node/src/engine.rs | 137 +++++++++++++- crates/optimism/node/tests/e2e/utils.rs | 1 + crates/optimism/payload/src/builder.rs | 37 +++- 
crates/optimism/payload/src/error.rs | 14 ++ crates/optimism/payload/src/payload.rs | 64 ++++++- examples/custom-evm/src/main.rs | 5 +- examples/stateful-precompile/src/main.rs | 5 +- 16 files changed, 461 insertions(+), 43 deletions(-) diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 2d162ef1505..ed377d003dd 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -184,7 +184,7 @@ impl From for ExecutionPayloadEnvelopeV4 { } /// Container type for all components required to build a payload. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct EthPayloadBuilderAttributes { /// Id of the payload pub id: PayloadId, diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 91da90d99f0..1c340c0927b 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -17,6 +17,8 @@ extern crate alloc; +use core::convert::Infallible; + use alloc::{sync::Arc, vec::Vec}; use alloy_primitives::{Address, Bytes, TxKind, U256}; use reth_chainspec::{ChainSpec, Head}; @@ -59,6 +61,7 @@ impl EthEvmConfig { impl ConfigureEvmEnv for EthEvmConfig { type Header = Header; + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { transaction.fill_tx_env(tx_env, sender); @@ -131,7 +134,7 @@ impl ConfigureEvmEnv for EthEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { // configure evm env based on parent block let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id()); @@ -179,7 +182,7 @@ impl ConfigureEvmEnv for EthEvmConfig { blob_excess_gas_and_price, }; - (CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env) + 
Ok((CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env)) } } diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 8e188f890fd..27d9d98bcca 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -73,7 +73,7 @@ where &self, config: &PayloadConfig, parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { timestamp: config.attributes.timestamp(), suggested_fee_recipient: config.attributes.suggested_fee_recipient(), @@ -97,7 +97,9 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_header); + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; let pool = args.pool.clone(); default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env, |attributes| { @@ -120,7 +122,9 @@ where None, ); - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_header); + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; let pool = args.pool.clone(); diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 90fd532828e..a4ce3c3893b 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -57,12 +57,12 @@ std = [ "revm/std", ] test-utils = [ - "dep:parking_lot", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-revm/test-utils", - "revm/test-utils", - "reth-prune-types/test-utils" + "dep:parking_lot", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + 
"revm/test-utils", + "reth-prune-types/test-utils" ] diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index b75feea83a1..e30ff9b1a7a 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -116,6 +116,9 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { /// The header type used by the EVM. type Header: BlockHeader; + /// The error type that is returned by [`Self::next_cfg_and_block_env`]. + type Error: core::error::Error + Send + Sync; + /// Returns a [`TxEnv`] from a [`TransactionSigned`] and [`Address`]. fn tx_env(&self, transaction: &TransactionSigned, signer: Address) -> TxEnv { let mut tx_env = TxEnv::default(); @@ -192,7 +195,7 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv); + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error>; } /// Represents additional attributes required to configure the next block. diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 03ce75aec04..70adf2272cf 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -20,10 +20,10 @@ mod op_sepolia; use alloc::{vec, vec::Vec}; use alloy_chains::Chain; use alloy_genesis::Genesis; -use alloy_primitives::{B256, U256}; +use alloy_primitives::{Bytes, Parity, Signature, B256, U256}; pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; -use derive_more::{Constructor, Deref, From, Into}; +use derive_more::{Constructor, Deref, Display, From, Into}; pub use dev::OP_DEV; #[cfg(not(feature = "std"))] pub(crate) use once_cell::sync::Lazy as LazyLock; @@ -159,6 +159,16 @@ impl OpChainSpecBuilder { self } + /// Enable Holocene at genesis + pub fn holocene_activated(mut self) -> Self { + self = self.granite_activated(); + self.inner = self.inner.with_fork( + reth_optimism_forks::OptimismHardfork::Holocene, + ForkCondition::Timestamp(0), + ); + 
self + } + /// Build the resulting [`OpChainSpec`]. /// /// # Panics @@ -177,6 +187,81 @@ pub struct OpChainSpec { pub inner: ChainSpec, } +impl OpChainSpec { + /// Read from parent to determine the base fee for the next block + pub fn next_block_base_fee( + &self, + parent: &Header, + timestamp: u64, + ) -> Result { + let is_holocene_activated = self.inner.is_fork_active_at_timestamp( + reth_optimism_forks::OptimismHardfork::Holocene, + timestamp, + ); + // If we are in the Holocene, we need to use the base fee params + // from the parent block's extra data. + // Else, use the base fee params (default values) from chainspec + if is_holocene_activated { + let (denominator, elasticity) = decode_holocene_1559_params(parent.extra_data.clone())?; + if elasticity == 0 && denominator == 0 { + return Ok(U256::from( + parent + .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp)) + .unwrap_or_default(), + )); + } + let base_fee_params = BaseFeeParams::new(denominator as u128, elasticity as u128); + Ok(U256::from(parent.next_block_base_fee(base_fee_params).unwrap_or_default())) + } else { + Ok(U256::from( + parent + .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp)) + .unwrap_or_default(), + )) + } + } +} + +#[derive(Clone, Debug, Display, Eq, PartialEq)] +/// Error type for decoding Holocene 1559 parameters +pub enum DecodeError { + #[display("Insufficient data to decode")] + /// Insufficient data to decode + InsufficientData, + #[display("Invalid denominator parameter")] + /// Invalid denominator parameter + InvalidDenominator, + #[display("Invalid elasticity parameter")] + /// Invalid elasticity parameter + InvalidElasticity, +} + +impl core::error::Error for DecodeError { + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { + // None of the errors have sub-errors + None + } +} + +/// Extracts the Holcene 1599 parameters from the encoded form: +/// +pub fn decode_holocene_1559_params(extra_data: Bytes) -> Result<(u32, u32), 
DecodeError> { + if extra_data.len() < 9 { + return Err(DecodeError::InsufficientData); + } + let denominator: [u8; 4] = + extra_data[1..5].try_into().map_err(|_| DecodeError::InvalidDenominator)?; + let elasticity: [u8; 4] = + extra_data[5..9].try_into().map_err(|_| DecodeError::InvalidElasticity)?; + Ok((u32::from_be_bytes(denominator), u32::from_be_bytes(elasticity))) +} + +/// Returns the signature for the optimism deposit transactions, which don't include a +/// signature. +pub fn optimism_deposit_tx_signature() -> Signature { + Signature::new(U256::ZERO, U256::ZERO, Parity::Parity(false)) +} + impl EthChainSpec for OpChainSpec { fn chain(&self) -> alloy_chains::Chain { self.inner.chain() @@ -405,6 +490,8 @@ impl OptimismGenesisInfo { #[cfg(test)] mod tests { + use std::sync::Arc; + use alloy_genesis::{ChainConfig, Genesis}; use alloy_primitives::b256; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; @@ -919,4 +1006,87 @@ mod tests { .all(|(expected, actual)| &**expected == *actual)); assert_eq!(expected_hardforks.len(), hardforks.len()); } + + #[test] + fn test_get_base_fee_pre_holocene() { + let op_chain_spec = &BASE_SEPOLIA; + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + ..Default::default() + }; + let base_fee = op_chain_spec.next_block_base_fee(&parent, 0); + assert_eq!( + base_fee.unwrap(), + U256::from( + parent + .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) + .unwrap_or_default() + ) + ); + } + + fn holocene_chainspec() -> Arc { + let mut hardforks = OptimismHardfork::base_sepolia(); + hardforks.insert(OptimismHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + Arc::new(OpChainSpec { + inner: ChainSpec { + chain: BASE_SEPOLIA.inner.chain, + genesis: BASE_SEPOLIA.inner.genesis.clone(), + genesis_hash: BASE_SEPOLIA.inner.genesis_hash.clone(), + paris_block_and_final_difficulty: Some((0, U256::from(0))), + hardforks, + base_fee_params: 
BASE_SEPOLIA.inner.base_fee_params.clone(), + max_gas_limit: crate::constants::BASE_SEPOLIA_MAX_GAS_LIMIT, + prune_delete_limit: 10000, + ..Default::default() + }, + }) + } + + #[test] + fn test_get_base_fee_holocene_nonce_not_set() { + let op_chain_spec = holocene_chainspec(); + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + timestamp: 1800000003, + extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]), + ..Default::default() + }; + let base_fee = op_chain_spec.next_block_base_fee(&parent, 1800000005); + assert_eq!( + base_fee.unwrap(), + U256::from( + parent + .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) + .unwrap_or_default() + ) + ); + } + + #[test] + fn test_get_base_fee_holocene_nonce_set() { + let op_chain_spec = holocene_chainspec(); + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]), + timestamp: 1800000003, + ..Default::default() + }; + + let base_fee = op_chain_spec.next_block_base_fee(&parent, 1800000005); + assert_eq!( + base_fee.unwrap(), + U256::from( + parent + .next_block_base_fee(BaseFeeParams::new(0x00000008, 0x00000008)) + .unwrap_or_default() + ) + ); + } } diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index 668fcba4ddc..b00341ff677 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -12,7 +12,9 @@ pub fn revm_spec_by_timestamp_after_bedrock( chain_spec: &OpChainSpec, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) { + if chain_spec.fork(OptimismHardfork::Holocene).active_at_timestamp(timestamp) { + revm_primitives::HOLOCENE + } else if chain_spec.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) { revm_primitives::GRANITE } else if 
chain_spec.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) { revm_primitives::FJORD @@ -29,7 +31,9 @@ pub fn revm_spec_by_timestamp_after_bedrock( /// Map the latest active hardfork at the given block to a revm [`SpecId`](revm_primitives::SpecId). pub fn revm_spec(chain_spec: &OpChainSpec, block: &Head) -> revm_primitives::SpecId { - if chain_spec.fork(OptimismHardfork::Granite).active_at_head(block) { + if chain_spec.fork(OptimismHardfork::Holocene).active_at_head(block) { + revm_primitives::HOLOCENE + } else if chain_spec.fork(OptimismHardfork::Granite).active_at_head(block) { revm_primitives::GRANITE } else if chain_spec.fork(OptimismHardfork::Fjord).active_at_head(block) { revm_primitives::FJORD diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index bc46f3ea9c2..03aecf2c83e 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -15,7 +15,7 @@ extern crate alloc; use alloc::{sync::Arc, vec::Vec}; use alloy_primitives::{Address, U256}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; -use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_chainspec::{DecodeError, OpChainSpec}; use reth_primitives::{ revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, transaction::FillTxEnv, @@ -56,6 +56,7 @@ impl OptimismEvmConfig { impl ConfigureEvmEnv for OptimismEvmConfig { type Header = Header; + type Error = DecodeError; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { transaction.fill_tx_env(tx_env, sender); @@ -134,7 +135,7 @@ impl ConfigureEvmEnv for OptimismEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { // configure evm env based on parent block let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id()); @@ -156,13 +157,7 @@ impl ConfigureEvmEnv for OptimismEvmConfig { 
prevrandao: Some(attributes.prev_randao), gas_limit: U256::from(parent.gas_limit), // calculate basefee based on parent block's gas usage - basefee: U256::from( - parent - .next_block_base_fee( - self.chain_spec.base_fee_params_at_timestamp(attributes.timestamp), - ) - .unwrap_or_default(), - ), + basefee: self.chain_spec.next_block_base_fee(parent, attributes.timestamp)?, // calculate excess gas based on parent block's blob gas usage blob_excess_gas_and_price, }; @@ -175,7 +170,7 @@ impl ConfigureEvmEnv for OptimismEvmConfig { }; } - (cfg_with_handler_cfg, block_env) + Ok((cfg_with_handler_cfg, block_env)) } } diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 011c4ae72fd..440314e3711 100644 --- a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -31,6 +31,8 @@ hardfork!( Fjord, /// Granite: Granite, + /// Holocene: + Holocene, } ); @@ -156,6 +158,7 @@ impl OptimismHardfork { Self::Ecotone => Some(1708534800), Self::Fjord => Some(1716998400), Self::Granite => Some(1723478400), + Self::Holocene => None, }, ) } @@ -190,6 +193,7 @@ impl OptimismHardfork { Self::Ecotone => Some(1710374401), Self::Fjord => Some(1720627201), Self::Granite => Some(1726070401), + Self::Holocene => None, }, ) } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index cec609671a3..966d87279c5 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -15,7 +15,9 @@ use reth_node_api::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OptimismHardfork; -use reth_optimism_payload_builder::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}; +use reth_optimism_payload_builder::{ + builder::decode_eip_1559_params, OptimismBuiltPayload, OptimismPayloadBuilderAttributes, +}; /// The types used in the optimism beacon consensus engine. 
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] @@ -147,6 +149,139 @@ where )) } + if self.chain_spec.is_fork_active_at_timestamp( + OptimismHardfork::Holocene, + attributes.payload_attributes.timestamp, + ) { + let Some(eip_1559_params) = attributes.eip_1559_params else { + return Err(EngineObjectValidationError::InvalidParams( + "MissingEip1559ParamsInPayloadAttributes".to_string().into(), + )) + }; + let (elasticity, denominator) = decode_eip_1559_params(eip_1559_params); + if elasticity != 0 && denominator == 0 { + return Err(EngineObjectValidationError::InvalidParams( + "Eip1559ParamsDenominatorZero".to_string().into(), + )) + } + } + Ok(()) } } + +#[cfg(test)] +mod test { + + use crate::engine; + use alloy_primitives::{b64, Address, B256, B64}; + use alloy_rpc_types_engine::PayloadAttributes; + use reth_chainspec::ForkCondition; + use reth_optimism_chainspec::BASE_SEPOLIA; + + use super::*; + + fn get_chainspec(is_holocene: bool) -> Arc { + let mut hardforks = OptimismHardfork::base_sepolia(); + if is_holocene { + hardforks + .insert(OptimismHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + } + Arc::new(OpChainSpec { + inner: ChainSpec { + chain: BASE_SEPOLIA.inner.chain, + genesis: BASE_SEPOLIA.inner.genesis.clone(), + genesis_hash: BASE_SEPOLIA.inner.genesis_hash.clone(), + paris_block_and_final_difficulty: BASE_SEPOLIA + .inner + .paris_block_and_final_difficulty, + hardforks, + base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(), + max_gas_limit: BASE_SEPOLIA.inner.max_gas_limit, + prune_delete_limit: 10000, + ..Default::default() + }, + }) + } + + const fn get_attributes(eip_1559_params: Option, timestamp: u64) -> OpPayloadAttributes { + OpPayloadAttributes { + gas_limit: Some(1000), + eip_1559_params, + transactions: None, + no_tx_pool: None, + payload_attributes: PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + 
parent_beacon_block_root: Some(B256::ZERO), + }, + } + } + + #[test] + fn test_well_formed_attributes_pre_holocene() { + let validator = OptimismEngineValidator::new(get_chainspec(false)); + let attributes = get_attributes(None, 1799999999); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(result.is_ok()); + } + + #[test] + fn test_well_formed_attributes_holocene_no_eip1559_params() { + let validator = OptimismEngineValidator::new(get_chainspec(true)); + let attributes = get_attributes(None, 1800000000); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_)))); + } + + #[test] + fn test_well_formed_attributes_holocene_eip1559_params_zero_denominator() { + let validator = OptimismEngineValidator::new(get_chainspec(true)); + let attributes = get_attributes(Some(b64!("0000000000000008")), 1800000000); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_)))); + } + + #[test] + fn test_well_formed_attributes_holocene_valid() { + let validator = OptimismEngineValidator::new(get_chainspec(true)); + let attributes = get_attributes(Some(b64!("0000000800000008")), 1800000000); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(result.is_ok()); + } + + #[test] + fn test_well_formed_attributes_holocene_valid_all_zero() { + let validator = OptimismEngineValidator::new(get_chainspec(true)); + let attributes = get_attributes(Some(b64!("0000000000000000")), 1800000000); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(result.is_ok()); + } +} diff --git a/crates/optimism/node/tests/e2e/utils.rs 
b/crates/optimism/node/tests/e2e/utils.rs index 48175e5b21a..d4219b0fea1 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -63,5 +63,6 @@ pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuil transactions: vec![], no_tx_pool: false, gas_limit: Some(30_000_000), + eip_1559_params: None, } } diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 0550adeaa8d..e9b7e2c76f8 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,10 +1,9 @@ //! Optimism payload builder implementation. - use std::sync::Arc; use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_eips::merge::BEACON_NONCE; -use alloy_primitives::U256; +use alloy_primitives::{B64, U256}; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; use reth_chainspec::ChainSpecProvider; @@ -80,7 +79,7 @@ where &self, config: &PayloadConfig, parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { timestamp: config.attributes.timestamp(), suggested_fee_recipient: config.attributes.suggested_fee_recipient(), @@ -104,7 +103,9 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_header); + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; optimism_payload(&self.evm_config, args, cfg_env, block_env, self.compute_pending_block) } @@ -133,7 +134,9 @@ where cancel: Default::default(), best_payload: None, }; - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_header); + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; 
optimism_payload(&self.evm_config, args, cfg_env, block_env, false)? .into_payload() .ok_or_else(|| PayloadBuilderError::MissingPayload) @@ -168,7 +171,7 @@ where let state = StateProviderDatabase::new(state_provider); let mut db = State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let PayloadConfig { parent_header, attributes, extra_data } = config; + let PayloadConfig { parent_header, attributes, mut extra_data } = config; debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); @@ -470,6 +473,19 @@ where (None, None) }; + let is_holocene = chain_spec.is_fork_active_at_timestamp( + OptimismHardfork::Holocene, + attributes.payload_attributes.timestamp, + ); + + if is_holocene { + extra_data = attributes + .get_holocene_extra_data( + chain_spec.base_fee_params_at_timestamp(attributes.payload_attributes.timestamp), + ) + .map_err(PayloadBuilderError::other)?; + } + let header = Header { parent_hash: parent_header.hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, @@ -532,3 +548,12 @@ where Ok(BuildOutcome::Better { payload, cached_reads }) } } + +/// Extracts the Holocene 1599 parameters from the encoded form: +/// +pub fn decode_eip_1559_params(eip_1559_params: B64) -> (u32, u32) { + let denominator: [u8; 4] = eip_1559_params.0[..4].try_into().expect("sufficient length"); + let elasticity: [u8; 4] = eip_1559_params.0[4..8].try_into().expect("sufficient length"); + + (u32::from_be_bytes(elasticity), u32::from_be_bytes(denominator)) +} diff --git a/crates/optimism/payload/src/error.rs b/crates/optimism/payload/src/error.rs index 2016fdc6dd9..ce5f584a1ce 100644 --- a/crates/optimism/payload/src/error.rs +++ b/crates/optimism/payload/src/error.rs @@ -21,3 +21,17 @@ pub enum OptimismPayloadBuilderError { #[error("blob transaction included in sequencer block")] BlobTransactionRejected, } + +/// Error type for 
EIP-1559 parameters +#[derive(Debug, thiserror::Error)] +pub enum EIP1559ParamError { + /// No EIP-1559 parameters provided + #[error("No EIP-1559 parameters provided")] + NoEIP1559Params, + /// Denominator overflow + #[error("Denominator overflow")] + DenominatorOverflow, + /// Elasticity overflow + #[error("Elasticity overflow")] + ElasticityOverflow, +} diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 7f95d04ad9f..056edfe7b63 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -2,8 +2,9 @@ //! Optimism builder support -use alloy_eips::{eip2718::Decodable2718, eip7685::Requests}; -use alloy_primitives::{keccak256, Address, B256, U256}; +use crate::{builder::decode_eip_1559_params, error::EIP1559ParamError}; +use alloy_eips::{eip1559::BaseFeeParams, eip2718::Decodable2718, eip7685::Requests}; +use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; /// Re-export for use in downstream arguments. @@ -23,7 +24,7 @@ use reth_rpc_types_compat::engine::payload::{ use std::sync::Arc; /// Optimism Payload Builder Attributes -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct OptimismPayloadBuilderAttributes { /// Inner ethereum payload builder attributes pub payload_attributes: EthPayloadBuilderAttributes, @@ -34,6 +35,42 @@ pub struct OptimismPayloadBuilderAttributes { pub transactions: Vec>, /// The gas limit for the generated payload pub gas_limit: Option, + /// EIP-1559 parameters for the generated payload + pub eip_1559_params: Option, +} + +impl OptimismPayloadBuilderAttributes { + /// Extracts the `eip1559` parameters for the payload. 
+ pub fn get_holocene_extra_data( + &self, + default_base_fee_params: BaseFeeParams, + ) -> Result { + let eip_1559_params = self.eip_1559_params.ok_or(EIP1559ParamError::NoEIP1559Params)?; + + let mut extra_data = [0u8; 9]; + // If eip 1559 params aren't set, use the canyon base fee param constants + // otherwise use them + if eip_1559_params.is_zero() { + // Try casting max_change_denominator to u32 + let max_change_denominator: u32 = (default_base_fee_params.max_change_denominator) + .try_into() + .map_err(|_| EIP1559ParamError::DenominatorOverflow)?; + + // Try casting elasticity_multiplier to u32 + let elasticity_multiplier: u32 = (default_base_fee_params.elasticity_multiplier) + .try_into() + .map_err(|_| EIP1559ParamError::ElasticityOverflow)?; + + // Copy the values safely + extra_data[1..5].copy_from_slice(&max_change_denominator.to_be_bytes()); + extra_data[5..9].copy_from_slice(&elasticity_multiplier.to_be_bytes()); + } else { + let (elasticity, denominator) = decode_eip_1559_params(eip_1559_params); + extra_data[1..5].copy_from_slice(&denominator.to_be_bytes()); + extra_data[5..9].copy_from_slice(&elasticity.to_be_bytes()); + } + Ok(Bytes::copy_from_slice(&extra_data)) + } } impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { @@ -82,6 +119,7 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { no_tx_pool: attributes.no_tx_pool.unwrap_or_default(), transactions, gas_limit: attributes.gas_limit, + eip_1559_params: attributes.eip_1559_params, }) } @@ -370,4 +408,24 @@ mod tests { ) ); } + + #[test] + fn test_get_extra_data_post_holocene() { + let attributes = OptimismPayloadBuilderAttributes { + eip_1559_params: Some(B64::from_str("0x0000000800000008").unwrap()), + ..Default::default() + }; + let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); + assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 8, 0, 0, 0, 8])); + } + + #[test] + fn 
test_get_extra_data_post_holocene_default() { + let attributes = OptimismPayloadBuilderAttributes { + eip_1559_params: Some(B64::ZERO), + ..Default::default() + }; + let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); + assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 80, 0, 0, 0, 60])); + } } diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 55063fc9bbc..16aad63c093 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -38,7 +38,7 @@ use reth_primitives::{ Header, TransactionSigned, }; use reth_tracing::{RethTracer, Tracer}; -use std::sync::Arc; +use std::{convert::Infallible, sync::Arc}; /// Custom EVM configuration #[derive(Debug, Clone)] @@ -87,6 +87,7 @@ impl MyEvmConfig { impl ConfigureEvmEnv for MyEvmConfig { type Header = Header; + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { self.inner.fill_tx_env(tx_env, transaction, sender); @@ -115,7 +116,7 @@ impl ConfigureEvmEnv for MyEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { self.inner.next_cfg_and_block_env(parent, attributes) } } diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index b0165e4de26..371fbf4f78b 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -30,7 +30,7 @@ use reth_primitives::{ }; use reth_tracing::{RethTracer, Tracer}; use schnellru::{ByLength, LruMap}; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, convert::Infallible, sync::Arc}; /// Type alias for the LRU cache used within the [`PrecompileCache`]. 
type PrecompileLRUCache = LruMap<(Bytes, u64), PrecompileResult>; @@ -147,6 +147,7 @@ impl StatefulPrecompileMut for WrappedPrecompile { impl ConfigureEvmEnv for MyEvmConfig { type Header = Header; + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { self.inner.fill_tx_env(tx_env, transaction, sender) @@ -175,7 +176,7 @@ impl ConfigureEvmEnv for MyEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { self.inner.next_cfg_and_block_env(parent, attributes) } } From ff9a42ae8fbf441f4fb9ca8dcea84c765b284721 Mon Sep 17 00:00:00 2001 From: Abhishek kochar Date: Wed, 30 Oct 2024 22:13:03 +0800 Subject: [PATCH 249/970] feat(eth69): support for ETH69 (#12158) Signed-off-by: Abhishekkochar Co-authored-by: Matthias Seitz --- crates/net/eth-wire-types/src/message.rs | 10 +++++++++- crates/net/eth-wire-types/src/version.rs | 20 ++++++++++++++++---- crates/net/network/src/transactions/mod.rs | 2 +- 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 4afcb34e13b..8546bfe14c8 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -50,9 +50,17 @@ impl ProtocolMessage { let message = match message_type { EthMessageID::Status => EthMessage::Status(Status::decode(buf)?), EthMessageID::NewBlockHashes => { + if version.is_eth69() { + return Err(MessageError::Invalid(version, EthMessageID::NewBlockHashes)); + } EthMessage::NewBlockHashes(NewBlockHashes::decode(buf)?) 
} - EthMessageID::NewBlock => EthMessage::NewBlock(Box::new(NewBlock::decode(buf)?)), + EthMessageID::NewBlock => { + if version.is_eth69() { + return Err(MessageError::Invalid(version, EthMessageID::NewBlock)); + } + EthMessage::NewBlock(Box::new(NewBlock::decode(buf)?)) + } EthMessageID::Transactions => EthMessage::Transactions(Transactions::decode(buf)?), EthMessageID::NewPooledTransactionHashes => { if version >= EthVersion::Eth68 { diff --git a/crates/net/eth-wire-types/src/version.rs b/crates/net/eth-wire-types/src/version.rs index 4fd3e792dcc..5a2e0ff9651 100644 --- a/crates/net/eth-wire-types/src/version.rs +++ b/crates/net/eth-wire-types/src/version.rs @@ -18,12 +18,12 @@ pub struct ParseVersionError(String); pub enum EthVersion { /// The `eth` protocol version 66. Eth66 = 66, - /// The `eth` protocol version 67. Eth67 = 67, - /// The `eth` protocol version 68. Eth68 = 68, + /// The `eth` protocol version 69. + Eth69 = 69, } impl EthVersion { @@ -38,6 +38,8 @@ impl EthVersion { // eth/67,68 are eth/66 minus GetNodeData and NodeData messages 13 } + // eth69 is both eth67 and eth68 minus NewBlockHashes and NewBlock + Self::Eth69 => 11, } } @@ -55,6 +57,11 @@ impl EthVersion { pub const fn is_eth68(&self) -> bool { matches!(self, Self::Eth68) } + + /// Returns true if the version is eth/69 + pub const fn is_eth69(&self) -> bool { + matches!(self, Self::Eth69) + } } /// Allow for converting from a `&str` to an `EthVersion`. 
@@ -75,6 +82,7 @@ impl TryFrom<&str> for EthVersion { "66" => Ok(Self::Eth66), "67" => Ok(Self::Eth67), "68" => Ok(Self::Eth68), + "69" => Ok(Self::Eth69), _ => Err(ParseVersionError(s.to_string())), } } @@ -98,6 +106,7 @@ impl TryFrom for EthVersion { 66 => Ok(Self::Eth66), 67 => Ok(Self::Eth67), 68 => Ok(Self::Eth68), + 69 => Ok(Self::Eth69), _ => Err(ParseVersionError(u.to_string())), } } @@ -126,6 +135,7 @@ impl From for &'static str { EthVersion::Eth66 => "66", EthVersion::Eth67 => "67", EthVersion::Eth68 => "68", + EthVersion::Eth69 => "69", } } } @@ -179,7 +189,8 @@ mod tests { assert_eq!(EthVersion::Eth66, EthVersion::try_from("66").unwrap()); assert_eq!(EthVersion::Eth67, EthVersion::try_from("67").unwrap()); assert_eq!(EthVersion::Eth68, EthVersion::try_from("68").unwrap()); - assert_eq!(Err(ParseVersionError("69".to_string())), EthVersion::try_from("69")); + assert_eq!(EthVersion::Eth69, EthVersion::try_from("69").unwrap()); + assert_eq!(Err(ParseVersionError("70".to_string())), EthVersion::try_from("70")); } #[test] @@ -187,6 +198,7 @@ mod tests { assert_eq!(EthVersion::Eth66, "66".parse().unwrap()); assert_eq!(EthVersion::Eth67, "67".parse().unwrap()); assert_eq!(EthVersion::Eth68, "68".parse().unwrap()); - assert_eq!(Err(ParseVersionError("69".to_string())), "69".parse::()); + assert_eq!(EthVersion::Eth69, "69".parse().unwrap()); + assert_eq!(Err(ParseVersionError("70".to_string())), "70".parse::()); } } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index b6e589c6ff7..4e23c8527b4 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1668,7 +1668,7 @@ impl PooledTransactionsHashesBuilder { fn new(version: EthVersion) -> Self { match version { EthVersion::Eth66 | EthVersion::Eth67 => Self::Eth66(Default::default()), - EthVersion::Eth68 => Self::Eth68(Default::default()), + EthVersion::Eth68 | EthVersion::Eth69 => Self::Eth68(Default::default()), 
} } From bb8da983b0260c8aa2912b20516a8518f2f8cd20 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 30 Oct 2024 16:39:12 +0100 Subject: [PATCH 250/970] feat: add is_ethereum trait fn (#12197) --- crates/chainspec/src/api.rs | 5 +++++ crates/net/discv5/src/network_stack_id.rs | 5 ++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 3751789cac8..36640f34b70 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -57,6 +57,11 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { fn is_optimism(&self) -> bool { self.chain().is_optimism() } + + /// Returns `true` if this chain contains Ethereum configuration. + fn is_ethereum(&self) -> bool { + self.chain().is_ethereum() + } } impl EthChainSpec for ChainSpec { diff --git a/crates/net/discv5/src/network_stack_id.rs b/crates/net/discv5/src/network_stack_id.rs index f707c7de7b7..a7b6944f355 100644 --- a/crates/net/discv5/src/network_stack_id.rs +++ b/crates/net/discv5/src/network_stack_id.rs @@ -20,12 +20,11 @@ impl NetworkStackId { /// ENR fork ID kv-pair key, for an Optimism CL node. pub const OPSTACK: &'static [u8] = b"opstack"; - #[allow(clippy::missing_const_for_fn)] /// Returns the [`NetworkStackId`] that matches the given chain spec. 
pub fn id(chain: impl EthChainSpec) -> Option<&'static [u8]> { - if chain.chain().is_optimism() { + if chain.is_optimism() { return Some(Self::OPEL) - } else if chain.chain().is_ethereum() { + } else if chain.is_ethereum() { return Some(Self::ETH) } From 755fac08ddeec0f4260fa5565bac72d6cb392348 Mon Sep 17 00:00:00 2001 From: Kaushik Donthi Date: Wed, 30 Oct 2024 09:06:37 -0700 Subject: [PATCH 251/970] Wrap sidecar in arcs (#11554) Co-authored-by: Matthias Seitz --- crates/ethereum/payload/src/lib.rs | 2 +- crates/transaction-pool/src/blobstore/disk.rs | 44 ++++++++++++------- crates/transaction-pool/src/blobstore/mem.rs | 21 ++++----- crates/transaction-pool/src/blobstore/mod.rs | 12 +++-- crates/transaction-pool/src/blobstore/noop.rs | 10 +++-- crates/transaction-pool/src/lib.rs | 9 ++-- crates/transaction-pool/src/maintain.rs | 2 + crates/transaction-pool/src/noop.rs | 9 ++-- crates/transaction-pool/src/pool/mod.rs | 4 +- crates/transaction-pool/src/traits.rs | 9 ++-- .../src/mined_sidecar.rs | 8 ++-- 11 files changed, 84 insertions(+), 46 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 27d9d98bcca..8e92d0aa870 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -456,7 +456,7 @@ where EthBuiltPayload::new(attributes.id, sealed_block, total_fees, Some(executed), requests); // extend the payload with the blob sidecars from the executed txs - payload.extend_sidecars(blob_sidecars); + payload.extend_sidecars(blob_sidecars.into_iter().map(Arc::unwrap_or_clone)); Ok(BuildOutcome::Better { payload, cached_reads }) } diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 787d4985ff1..987264853db 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -103,7 +103,7 @@ impl BlobStore for DiskFileBlobStore { stat } - fn get(&self, tx: B256) -> Result, 
BlobStoreError> { + fn get(&self, tx: B256) -> Result>, BlobStoreError> { self.inner.get_one(tx) } @@ -114,14 +114,17 @@ impl BlobStore for DiskFileBlobStore { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { if txs.is_empty() { return Ok(Vec::new()) } self.inner.get_all(txs) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { if txs.is_empty() { return Ok(Vec::new()) } @@ -164,7 +167,7 @@ impl BlobStore for DiskFileBlobStore { struct DiskFileBlobStoreInner { blob_dir: PathBuf, - blob_cache: Mutex>, + blob_cache: Mutex, ByLength>>, size_tracker: BlobStoreSize, file_lock: RwLock<()>, txs_to_delete: RwLock>, @@ -205,7 +208,7 @@ impl DiskFileBlobStoreInner { fn insert_one(&self, tx: B256, data: BlobTransactionSidecar) -> Result<(), BlobStoreError> { let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length()); data.rlp_encode_fields(&mut buf); - self.blob_cache.lock().insert(tx, data); + self.blob_cache.lock().insert(tx, Arc::new(data)); let size = self.write_one_encoded(tx, &buf)?; self.size_tracker.add_size(size); @@ -227,7 +230,7 @@ impl DiskFileBlobStoreInner { { let mut cache = self.blob_cache.lock(); for (tx, data) in txs { - cache.insert(tx, data); + cache.insert(tx, Arc::new(data)); } } let mut add = 0; @@ -278,15 +281,19 @@ impl DiskFileBlobStoreInner { } /// Retrieves the blob for the given transaction hash from the blob cache or disk. 
- fn get_one(&self, tx: B256) -> Result, BlobStoreError> { + fn get_one(&self, tx: B256) -> Result>, BlobStoreError> { if let Some(blob) = self.blob_cache.lock().get(&tx) { return Ok(Some(blob.clone())) } let blob = self.read_one(tx)?; + if let Some(blob) = &blob { - self.blob_cache.lock().insert(tx, blob.clone()); + let blob_arc = Arc::new(blob.clone()); + self.blob_cache.lock().insert(tx, blob_arc.clone()); + return Ok(Some(blob_arc)) } - Ok(blob) + + Ok(None) } /// Returns the path to the blob file for the given transaction hash. @@ -374,7 +381,7 @@ impl DiskFileBlobStoreInner { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { let mut res = Vec::with_capacity(txs.len()); let mut cache_miss = Vec::new(); { @@ -396,8 +403,9 @@ impl DiskFileBlobStoreInner { } let mut cache = self.blob_cache.lock(); for (tx, data) in from_disk { - cache.insert(tx, data.clone()); - res.push((tx, data)); + let arc = Arc::new(data.clone()); + cache.insert(tx, arc.clone()); + res.push((tx, arc.clone())); } Ok(res) @@ -407,7 +415,10 @@ impl DiskFileBlobStoreInner { /// /// Returns an error if there are any missing blobs. 
#[inline] - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { txs.into_iter() .map(|tx| self.get_one(tx)?.ok_or(BlobStoreError::MissingSidecar(tx))) .collect() @@ -514,14 +525,17 @@ mod tests { let blobs = rng_blobs(10); let all_hashes = blobs.iter().map(|(tx, _)| *tx).collect::>(); store.insert_all(blobs.clone()).unwrap(); + // all cached for (tx, blob) in &blobs { assert!(store.is_cached(tx)); - assert_eq!(store.get(*tx).unwrap().unwrap(), *blob); + let b = store.get(*tx).unwrap().map(Arc::unwrap_or_clone).unwrap(); + assert_eq!(b, *blob); } + let all = store.get_all(all_hashes.clone()).unwrap(); for (tx, blob) in all { - assert!(blobs.contains(&(tx, blob)), "missing blob {tx:?}"); + assert!(blobs.contains(&(tx, Arc::unwrap_or_clone(blob))), "missing blob {tx:?}"); } assert!(store.contains(all_hashes[0]).unwrap()); diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index c98a01b88c1..cea1837bdcd 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -15,7 +15,7 @@ pub struct InMemoryBlobStore { #[derive(Debug, Default)] struct InMemoryBlobStoreInner { /// Storage for all blob data. - store: RwLock>, + store: RwLock>>, size_tracker: BlobStoreSize, } @@ -75,7 +75,7 @@ impl BlobStore for InMemoryBlobStore { } // Retrieves the decoded blob data for the given transaction hash. 
- fn get(&self, tx: B256) -> Result, BlobStoreError> { + fn get(&self, tx: B256) -> Result>, BlobStoreError> { Ok(self.inner.store.read().get(&tx).cloned()) } @@ -86,16 +86,17 @@ impl BlobStore for InMemoryBlobStore { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { let store = self.inner.store.read(); Ok(txs.into_iter().filter_map(|tx| store.get(&tx).map(|item| (tx, item.clone()))).collect()) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { let store = self.inner.store.read(); - txs.into_iter() - .map(|tx| store.get(&tx).cloned().ok_or_else(|| BlobStoreError::MissingSidecar(tx))) - .collect() + Ok(txs.into_iter().filter_map(|tx| store.get(&tx).cloned()).collect()) } fn get_by_versioned_hashes( @@ -134,7 +135,7 @@ impl BlobStore for InMemoryBlobStore { /// Removes the given blob from the store and returns the size of the blob that was removed. #[inline] -fn remove_size(store: &mut HashMap, tx: &B256) -> usize { +fn remove_size(store: &mut HashMap>, tx: &B256) -> usize { store.remove(tx).map(|rem| rem.size()).unwrap_or_default() } @@ -143,11 +144,11 @@ fn remove_size(store: &mut HashMap, tx: &B256) -> /// We don't need to handle the size updates for replacements because transactions are unique. 
#[inline] fn insert_size( - store: &mut HashMap, + store: &mut HashMap>, tx: B256, blob: BlobTransactionSidecar, ) -> usize { let add = blob.size(); - store.insert(tx, blob); + store.insert(tx, Arc::new(blob)); add } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index ee98e3eed85..f8d37bfcc0f 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -8,7 +8,10 @@ pub use noop::NoopBlobStore; use reth_primitives::BlobTransactionSidecar; use std::{ fmt, - sync::atomic::{AtomicUsize, Ordering}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, }; pub use tracker::{BlobStoreCanonTracker, BlobStoreUpdates}; @@ -44,7 +47,7 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { fn cleanup(&self) -> BlobStoreCleanupStat; /// Retrieves the decoded blob data for the given transaction hash. - fn get(&self, tx: B256) -> Result, BlobStoreError>; + fn get(&self, tx: B256) -> Result>, BlobStoreError>; /// Checks if the given transaction hash is in the blob store. fn contains(&self, tx: B256) -> Result; @@ -58,13 +61,14 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError>; + ) -> Result)>, BlobStoreError>; /// Returns the exact [`BlobTransactionSidecar`] for the given transaction hashes in the exact /// order they were requested. /// /// Returns an error if any of the blobs are not found in the blob store. - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError>; + fn get_exact(&self, txs: Vec) + -> Result>, BlobStoreError>; /// Return the [`BlobTransactionSidecar`]s for a list of blob versioned hashes. 
fn get_by_versioned_hashes( diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index 0e99858bd62..0f293573556 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -1,6 +1,7 @@ use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobTransactionSidecar}; use alloy_eips::eip4844::BlobAndProofV1; use alloy_primitives::B256; +use std::sync::Arc; /// A blobstore implementation that does nothing #[derive(Clone, Copy, Debug, PartialOrd, PartialEq, Eq, Default)] @@ -28,7 +29,7 @@ impl BlobStore for NoopBlobStore { BlobStoreCleanupStat::default() } - fn get(&self, _tx: B256) -> Result, BlobStoreError> { + fn get(&self, _tx: B256) -> Result>, BlobStoreError> { Ok(None) } @@ -39,11 +40,14 @@ impl BlobStore for NoopBlobStore { fn get_all( &self, _txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { Ok(vec![]) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { if txs.is_empty() { return Ok(vec![]) } diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 609ab987f50..3a5e547ba4e 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -561,21 +561,24 @@ where self.pool.unique_senders() } - fn get_blob(&self, tx_hash: TxHash) -> Result, BlobStoreError> { + fn get_blob( + &self, + tx_hash: TxHash, + ) -> Result>, BlobStoreError> { self.pool.blob_store().get(tx_hash) } fn get_all_blobs( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { self.pool.blob_store().get_all(tx_hashes) } fn get_all_blobs_exact( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result>, BlobStoreError> { self.pool.blob_store().get_exact(tx_hashes) } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs 
index 09c042ae66d..b62a6c18c0b 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -27,6 +27,7 @@ use std::{ collections::HashSet, hash::{Hash, Hasher}, path::{Path, PathBuf}, + sync::Arc, }; use tokio::sync::oneshot; use tracing::{debug, error, info, trace, warn}; @@ -328,6 +329,7 @@ pub async fn maintain_transaction_pool( pool.get_blob(tx.hash) .ok() .flatten() + .map(Arc::unwrap_or_clone) .and_then(|sidecar| { PooledTransactionsElementEcRecovered::try_from_blob_transaction( tx, sidecar, diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 817ea7bad7a..4f4e5a3813a 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -275,21 +275,24 @@ impl TransactionPool for NoopTransactionPool { Default::default() } - fn get_blob(&self, _tx_hash: TxHash) -> Result, BlobStoreError> { + fn get_blob( + &self, + _tx_hash: TxHash, + ) -> Result>, BlobStoreError> { Ok(None) } fn get_all_blobs( &self, _tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { Ok(vec![]) } fn get_all_blobs_exact( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result>, BlobStoreError> { if tx_hashes.is_empty() { return Ok(vec![]) } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index fef0fd0eea0..2e73409546d 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -307,7 +307,9 @@ where /// Caution: this assumes the given transaction is eip-4844 fn get_blob_transaction(&self, transaction: TransactionSigned) -> Option { if let Ok(Some(sidecar)) = self.blob_store.get(transaction.hash()) { - if let Ok(blob) = BlobTransaction::try_from_signed(transaction, sidecar) { + if let Ok(blob) = + BlobTransaction::try_from_signed(transaction, Arc::unwrap_or_clone(sidecar)) + { return Some(blob) } } diff --git a/crates/transaction-pool/src/traits.rs 
b/crates/transaction-pool/src/traits.rs index fbbddb98f43..1b300415492 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -443,7 +443,10 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns the [BlobTransactionSidecar] for the given transaction hash if it exists in the blob /// store. - fn get_blob(&self, tx_hash: TxHash) -> Result, BlobStoreError>; + fn get_blob( + &self, + tx_hash: TxHash, + ) -> Result>, BlobStoreError>; /// Returns all [BlobTransactionSidecar] for the given transaction hashes if they exists in the /// blob store. @@ -453,7 +456,7 @@ pub trait TransactionPool: Send + Sync + Clone { fn get_all_blobs( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError>; + ) -> Result)>, BlobStoreError>; /// Returns the exact [BlobTransactionSidecar] for the given transaction hashes in the order /// they were requested. @@ -462,7 +465,7 @@ pub trait TransactionPool: Send + Sync + Clone { fn get_all_blobs_exact( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError>; + ) -> Result>, BlobStoreError>; /// Return the [`BlobTransactionSidecar`]s for a list of blob versioned hashes. 
fn get_blobs_for_versioned_hashes( diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 1c53e4f4105..2436ee0210e 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -13,6 +13,7 @@ use serde::{Deserialize, Serialize}; use std::{ collections::VecDeque, pin::Pin, + sync::Arc, task::{Context, Poll}, }; use thiserror::Error; @@ -110,9 +111,10 @@ where match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| tx.hash()).collect()) { Ok(blobs) => { actions_to_queue.reserve_exact(txs.len()); - for ((tx, _), sidecar) in txs.iter().zip(blobs.iter()) { - let transaction = BlobTransaction::try_from_signed(tx.clone(), sidecar.clone()) - .expect("should not fail to convert blob tx if it is already eip4844"); + for ((tx, _), sidecar) in txs.iter().zip(blobs.into_iter()) { + let transaction = + BlobTransaction::try_from_signed(tx.clone(), Arc::unwrap_or_clone(sidecar)) + .expect("should not fail to convert blob tx if it is already eip4844"); let block_metadata = BlockMetadata { block_hash: block.hash(), From b3e8327065ccb2570913ec9f99aa142f7bb8a962 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 30 Oct 2024 18:56:36 +0100 Subject: [PATCH 252/970] chore: rename v1 type (#12205) --- crates/engine/primitives/src/lib.rs | 18 ++++++++++++------ crates/ethereum/engine-primitives/src/lib.rs | 2 +- crates/optimism/node/src/engine.rs | 2 +- crates/rpc/rpc-api/src/engine.rs | 5 ++++- crates/rpc/rpc-engine-api/src/engine_api.rs | 4 ++-- examples/custom-engine-types/src/main.rs | 2 +- 6 files changed, 21 insertions(+), 12 deletions(-) diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index fab96b0d17e..949ebf0155c 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -23,7 +23,7 @@ use serde::{de::DeserializeOwned, ser::Serialize}; /// 
payload job. Hence this trait is also [`PayloadTypes`]. pub trait EngineTypes: PayloadTypes< - BuiltPayload: TryInto + BuiltPayload: TryInto + TryInto + TryInto + TryInto, @@ -31,9 +31,15 @@ pub trait EngineTypes: + Serialize + 'static { - /// Execution Payload V1 type. - type ExecutionPayloadV1: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; - /// Execution Payload V2 type. + /// Execution Payload V1 envelope type. + type ExecutionPayloadEnvelopeV1: DeserializeOwned + + Serialize + + Clone + + Unpin + + Send + + Sync + + 'static; + /// Execution Payload V2 envelope type. type ExecutionPayloadEnvelopeV2: DeserializeOwned + Serialize + Clone @@ -41,7 +47,7 @@ pub trait EngineTypes: + Send + Sync + 'static; - /// Execution Payload V3 type. + /// Execution Payload V3 envelope type. type ExecutionPayloadEnvelopeV3: DeserializeOwned + Serialize + Clone @@ -49,7 +55,7 @@ pub trait EngineTypes: + Send + Sync + 'static; - /// Execution Payload V4 type. + /// Execution Payload V4 envelope type. 
type ExecutionPayloadEnvelopeV4: DeserializeOwned + Serialize + Clone diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 20a55883680..5addf2a18c5 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -43,7 +43,7 @@ where + TryInto + TryInto, { - type ExecutionPayloadV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 966d87279c5..7e19b2f93f3 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -39,7 +39,7 @@ where + TryInto + TryInto, { - type ExecutionPayloadV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadEnvelopeV3 = OpExecutionPayloadEnvelopeV3; type ExecutionPayloadEnvelopeV4 = OpExecutionPayloadEnvelopeV4; diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index ddf6d846119..d92173112eb 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -110,7 +110,10 @@ pub trait EngineApi { /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. 
#[method(name = "getPayloadV1")] - async fn get_payload_v1(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v1( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// See also /// diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index cca9f5d6b64..383da2d21ff 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -281,7 +281,7 @@ where pub async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { self.inner .payload_store .resolve(payload_id) @@ -775,7 +775,7 @@ where async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV1"); let start = Instant::now(); let res = Self::get_payload_v1(self, payload_id).await; diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index a48c44b200b..896a4b55f6b 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -161,7 +161,7 @@ impl PayloadTypes for CustomEngineTypes { } impl EngineTypes for CustomEngineTypes { - type ExecutionPayloadV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; From 0c39704950e370c3c97acd4253f52eca5a57a94c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 30 Oct 2024 19:04:44 +0100 Subject: [PATCH 253/970] feat: add missing is active at timestamp fns (#12206) --- crates/optimism/hardforks/src/lib.rs | 21 +++++++++++++++++++-- crates/optimism/node/src/engine.rs | 8 +++----- crates/optimism/payload/src/builder.rs | 19 ++++++------------- 3 files changed, 28 insertions(+), 20 deletions(-) diff --git 
a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 91c11d3fd23..bac0d0e04ed 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -26,18 +26,35 @@ pub trait OptimismHardforks: EthereumHardforks { self.fork(OptimismHardfork::Bedrock).active_at_block(block_number) } + /// Returns `true` if [`Canyon`](OptimismHardfork::Canyon) is active at given block timestamp. + fn is_canyon_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp) + } + /// Returns `true` if [`Ecotone`](OptimismHardfork::Ecotone) is active at given block timestamp. fn is_ecotone_active_at_timestamp(&self, timestamp: u64) -> bool { self.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) } - /// Returns `true` if [`Ecotone`](OptimismHardfork::Ecotone) is active at given block timestamp. + /// Returns `true` if [`Fjord`](OptimismHardfork::Fjord) is active at given block timestamp. fn is_fjord_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) + self.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) } /// Returns `true` if [`Granite`](OptimismHardfork::Granite) is active at given block timestamp. fn is_granite_active_at_timestamp(&self, timestamp: u64) -> bool { self.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) } + + /// Returns `true` if [`Holocene`](OptimismHardfork::Holocene) is active at given block + /// timestamp. + fn is_holocene_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OptimismHardfork::Holocene).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Regolith`](OptimismHardfork::Regolith) is active at given block + /// timestamp. 
+ fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OptimismHardfork::Regolith).active_at_timestamp(timestamp) + } } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 7e19b2f93f3..da8fde2b4d3 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -14,7 +14,7 @@ use reth_node_api::{ validate_version_specific_fields, EngineTypes, EngineValidator, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; use reth_optimism_payload_builder::{ builder::decode_eip_1559_params, OptimismBuiltPayload, OptimismPayloadBuilderAttributes, }; @@ -149,10 +149,8 @@ where )) } - if self.chain_spec.is_fork_active_at_timestamp( - OptimismHardfork::Holocene, - attributes.payload_attributes.timestamp, - ) { + if self.chain_spec.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp) + { let Some(eip_1559_params) = attributes.eip_1559_params else { return Err(EngineObjectValidationError::InvalidParams( "MissingEip1559ParamsInPayloadAttributes".to_string().into(), diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index e9b7e2c76f8..96ac28c5d1a 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -11,7 +11,7 @@ use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBl use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; +use reth_optimism_forks::OptimismHardforks; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ proofs, @@ -193,10 +193,8 @@ where let block_number = initialized_block_env.number.to::(); - let is_regolith = 
chain_spec.is_fork_active_at_timestamp( - OptimismHardfork::Regolith, - attributes.payload_attributes.timestamp, - ); + let is_regolith = + chain_spec.is_regolith_active_at_timestamp(attributes.payload_attributes.timestamp); // apply eip-4788 pre block contract call let mut system_caller = SystemCaller::new(evm_config.clone(), &chain_spec); @@ -315,10 +313,7 @@ where // receipt hashes should be computed when set. The state transition process // ensures this is only set for post-Canyon deposit transactions. deposit_receipt_version: chain_spec - .is_fork_active_at_timestamp( - OptimismHardfork::Canyon, - attributes.payload_attributes.timestamp, - ) + .is_canyon_active_at_timestamp(attributes.payload_attributes.timestamp) .then_some(1), })); @@ -473,10 +468,8 @@ where (None, None) }; - let is_holocene = chain_spec.is_fork_active_at_timestamp( - OptimismHardfork::Holocene, - attributes.payload_attributes.timestamp, - ); + let is_holocene = + chain_spec.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp); if is_holocene { extra_data = attributes From 09c666d676dc857692e883a6b93b87a06524ed36 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 30 Oct 2024 19:36:38 +0100 Subject: [PATCH 254/970] test: add test case for BestTransactions (#12209) --- crates/transaction-pool/src/pool/best.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 6ade15be7d8..36a14edaa23 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -401,6 +401,29 @@ mod tests { assert!(best.next().is_none()); } + #[test] + fn test_best_transactions_iter_invalid() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 10; + // insert 10 gapless tx + let tx = MockTransaction::eip1559(); + for nonce in 0..num_tx { + let tx = 
tx.clone().rng_hash().with_nonce(nonce); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best: Box< + dyn crate::traits::BestTransactions>>, + > = Box::new(pool.best()); + + let tx = best.next().unwrap(); + best.mark_invalid(&tx); + assert!(best.next().is_none()); + } + #[test] fn test_best_with_fees_iter_base_fee_satisfied() { let mut pool = PendingPool::new(MockOrdering::default()); From b42b189210a6a4802dbf97521fee6f4fea8d8e11 Mon Sep 17 00:00:00 2001 From: "0xriazaka.eth" <168359025+0xriazaka@users.noreply.github.com> Date: Wed, 30 Oct 2024 22:45:16 +0100 Subject: [PATCH 255/970] Reth primitives traits tx type (#11720) Co-authored-by: Emilia Hane --- crates/primitives-traits/src/lib.rs | 4 ++++ crates/primitives-traits/src/tx_type.rs | 28 +++++++++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 crates/primitives-traits/src/tx_type.rs diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 0489a250bbd..9f41bbd47fb 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -45,6 +45,10 @@ pub use alloy_primitives::{logs_bloom, Log, LogData}; mod storage; pub use storage::StorageEntry; +/// Transaction types +pub mod tx_type; +pub use tx_type::TxType; + /// Common header types pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs new file mode 100644 index 00000000000..aebf7584fe9 --- /dev/null +++ b/crates/primitives-traits/src/tx_type.rs @@ -0,0 +1,28 @@ +use alloy_eips::eip2718::Eip2718Error; +use alloy_primitives::{U64, U8}; +use alloy_rlp::{Decodable, Encodable}; +use core::fmt::{Debug, Display}; + +/// Trait representing the behavior of a transaction type. 
+pub trait TxType: + Into + + Into + + PartialEq + + Eq + + PartialEq + + TryFrom + + TryFrom + + TryFrom + + From + + Debug + + Display + + Clone + + Copy + + Default + + Encodable + + Decodable + + Send + + Sync + + 'static +{ +} From e5fc048139ad61f96ed581cb89c5aa4f0da12ac2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 30 Oct 2024 23:43:26 +0100 Subject: [PATCH 256/970] docs: add context truncated input (#12207) --- crates/rpc/rpc/src/otterscan.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 024bdd172fb..a772dd501d4 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -227,7 +227,8 @@ where *transactions = transactions.drain(page_start..page_end).collect::>(); // The input field returns only the 4 bytes method selector instead of the entire - // calldata byte blob. + // calldata byte blob + // See also: for tx in transactions.iter_mut() { if tx.input().len() > 4 { Eth::TransactionCompat::otterscan_api_truncate_input(tx); From c19af293a63fdfee4e92d7dd9a915e40c0a98afd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Thu, 31 Oct 2024 15:33:33 +0700 Subject: [PATCH 257/970] feat: add rlp support for `EthVersion` (#12221) --- crates/net/eth-wire-types/src/status.rs | 22 ++++---- crates/net/eth-wire-types/src/version.rs | 65 ++++++++++++++++++++++++ crates/net/eth-wire/src/errors/eth.rs | 3 +- crates/net/eth-wire/src/ethstream.rs | 12 ++--- crates/net/eth-wire/src/test_utils.rs | 2 +- crates/net/network/tests/it/session.rs | 4 +- examples/manual-p2p/src/main.rs | 3 +- 7 files changed, 89 insertions(+), 22 deletions(-) diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index 90e1731c90a..d9e8d4319b5 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -19,7 +19,7 @@ use 
std::fmt::{Debug, Display}; pub struct Status { /// The current protocol version. For example, peers running `eth/66` would have a version of /// 66. - pub version: u8, + pub version: EthVersion, /// The chain id, as introduced in /// [EIP155](https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids). @@ -50,7 +50,7 @@ impl Status { /// Sets the [`EthVersion`] for the status. pub fn set_eth_version(&mut self, version: EthVersion) { - self.version = version as u8; + self.version = version; } /// Create a [`StatusBuilder`] from the given [`EthChainSpec`] and head block. @@ -122,7 +122,7 @@ impl Default for Status { fn default() -> Self { let mainnet_genesis = MAINNET.genesis_hash(); Self { - version: EthVersion::Eth68 as u8, + version: EthVersion::Eth68, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(17_179_869_184u64), blockhash: mainnet_genesis, @@ -145,7 +145,7 @@ impl Default for Status { /// /// // this is just an example status message! /// let status = Status::builder() -/// .version(EthVersion::Eth66.into()) +/// .version(EthVersion::Eth66) /// .chain(Chain::mainnet()) /// .total_difficulty(U256::from(100)) /// .blockhash(B256::from(MAINNET_GENESIS_HASH)) @@ -156,7 +156,7 @@ impl Default for Status { /// assert_eq!( /// status, /// Status { -/// version: EthVersion::Eth66.into(), +/// version: EthVersion::Eth66, /// chain: Chain::mainnet(), /// total_difficulty: U256::from(100), /// blockhash: B256::from(MAINNET_GENESIS_HASH), @@ -177,7 +177,7 @@ impl StatusBuilder { } /// Sets the protocol version. 
- pub const fn version(mut self, version: u8) -> Self { + pub const fn version(mut self, version: EthVersion) -> Self { self.status.version = version; self } @@ -229,7 +229,7 @@ mod tests { fn encode_eth_status_message() { let expected = hex!("f85643018a07aac59dabcdd74bc567a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13da0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d80"); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(36206751599115524359527u128), blockhash: B256::from_str( @@ -249,7 +249,7 @@ mod tests { fn decode_eth_status_message() { let data = hex!("f85643018a07aac59dabcdd74bc567a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13da0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d80"); let expected = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(36206751599115524359527u128), blockhash: B256::from_str( @@ -267,7 +267,7 @@ mod tests { fn encode_network_status_message() { let expected = hex!("f850423884024190faa0f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27ba00d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5bc6845d43d2fd80"); let status = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: Chain::from_named(NamedChain::BinanceSmartChain), total_difficulty: U256::from(37851386u64), blockhash: B256::from_str( @@ -290,7 +290,7 @@ mod tests { fn decode_network_status_message() { let data = hex!("f850423884024190faa0f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27ba00d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5bc6845d43d2fd80"); let expected = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: 
Chain::from_named(NamedChain::BinanceSmartChain), total_difficulty: U256::from(37851386u64), blockhash: B256::from_str( @@ -311,7 +311,7 @@ mod tests { fn decode_another_network_status_message() { let data = hex!("f86142820834936d68fcffffffffffffffffffffffffdeab81b8a0523e8163a6d620a4cc152c547a05f28a03fec91a2a615194cb86df9731372c0ca06499dccdc7c7def3ebb1ce4c6ee27ec6bd02aee570625ca391919faf77ef27bdc6841a67ccd880"); let expected = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: Chain::from_id(2100), total_difficulty: U256::from_str( "0x000000000000000000000000006d68fcffffffffffffffffffffffffdeab81b8", diff --git a/crates/net/eth-wire-types/src/version.rs b/crates/net/eth-wire-types/src/version.rs index 5a2e0ff9651..40d51cb5518 100644 --- a/crates/net/eth-wire-types/src/version.rs +++ b/crates/net/eth-wire-types/src/version.rs @@ -15,6 +15,8 @@ pub struct ParseVersionError(String); /// The `eth` protocol version. #[repr(u8)] #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Display)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] pub enum EthVersion { /// The `eth` protocol version 66. Eth66 = 66, @@ -64,6 +66,26 @@ impl EthVersion { } } +/// RLP encodes `EthVersion` as a single byte (66-69). +impl Encodable for EthVersion { + fn encode(&self, out: &mut dyn BufMut) { + (*self as u8).encode(out) + } + + fn length(&self) -> usize { + (*self as u8).length() + } +} + +/// RLP decodes a single byte into `EthVersion`. +/// Returns error if byte is not a valid version (66-69). +impl Decodable for EthVersion { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let version = u8::decode(buf)?; + Self::try_from(version).map_err(|_| RlpError::Custom("invalid eth version")) + } +} + /// Allow for converting from a `&str` to an `EthVersion`. 
/// /// # Example @@ -183,6 +205,8 @@ impl Decodable for ProtocolVersion { #[cfg(test)] mod tests { use super::{EthVersion, ParseVersionError}; + use alloy_rlp::{Decodable, Encodable, Error as RlpError}; + use bytes::BytesMut; #[test] fn test_eth_version_try_from_str() { @@ -201,4 +225,45 @@ mod tests { assert_eq!(EthVersion::Eth69, "69".parse().unwrap()); assert_eq!(Err(ParseVersionError("70".to_string())), "70".parse::()); } + + #[test] + fn test_eth_version_rlp_encode() { + let versions = [EthVersion::Eth66, EthVersion::Eth67, EthVersion::Eth68, EthVersion::Eth69]; + + for version in versions { + let mut encoded = BytesMut::new(); + version.encode(&mut encoded); + + assert_eq!(encoded.len(), 1); + assert_eq!(encoded[0], version as u8); + } + } + #[test] + fn test_eth_version_rlp_decode() { + let test_cases = [ + (66_u8, Ok(EthVersion::Eth66)), + (67_u8, Ok(EthVersion::Eth67)), + (68_u8, Ok(EthVersion::Eth68)), + (69_u8, Ok(EthVersion::Eth69)), + (70_u8, Err(RlpError::Custom("invalid eth version"))), + (65_u8, Err(RlpError::Custom("invalid eth version"))), + ]; + + for (input, expected) in test_cases { + let mut encoded = BytesMut::new(); + input.encode(&mut encoded); + + let mut slice = encoded.as_ref(); + let result = EthVersion::decode(&mut slice); + assert_eq!(result, expected); + } + } + + #[test] + fn test_eth_version_total_messages() { + assert_eq!(EthVersion::Eth66.total_messages(), 15); + assert_eq!(EthVersion::Eth67.total_messages(), 13); + assert_eq!(EthVersion::Eth68.total_messages(), 13); + assert_eq!(EthVersion::Eth69.total_messages(), 11); + } } diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index 557fbd66a00..1f8b995afda 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -5,6 +5,7 @@ use crate::{ }; use alloy_primitives::B256; use reth_chainspec::Chain; +use reth_eth_wire_types::EthVersion; use reth_primitives::{GotExpected, GotExpectedBoxed, 
ValidationError}; use std::io; @@ -88,7 +89,7 @@ pub enum EthHandshakeError { MismatchedGenesis(GotExpectedBoxed), #[error("mismatched protocol version in status message: {0}")] /// Mismatched protocol versions in status messages. - MismatchedProtocolVersion(GotExpected), + MismatchedProtocolVersion(GotExpected), #[error("mismatched chain in status message: {0}")] /// Mismatch in chain details in status messages. MismatchedChain(GotExpected), diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 9deca99fb58..74f3fab2be6 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -102,7 +102,7 @@ where return Err(EthStreamError::MessageTooBig(their_msg.len())) } - let version = EthVersion::try_from(status.version)?; + let version = status.version; let msg = match ProtocolMessage::decode_message(version, &mut their_msg.as_ref()) { Ok(m) => m, Err(err) => { @@ -368,7 +368,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), @@ -415,7 +415,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::from(2).pow(U256::from(100)) - U256::from(1), blockhash: B256::random(), @@ -462,7 +462,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::from(2).pow(U256::from(100)), blockhash: B256::random(), @@ -603,7 +603,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status 
{ - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), @@ -674,7 +674,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index e516c0aee7d..d7a3aa582b7 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -37,7 +37,7 @@ pub fn eth_handshake() -> (Status, ForkFilter) { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::mainnet(), total_difficulty: U256::ZERO, blockhash: B256::random(), diff --git a/crates/net/network/tests/it/session.rs b/crates/net/network/tests/it/session.rs index 6bc029d8a7b..3f74db3d37f 100644 --- a/crates/net/network/tests/it/session.rs +++ b/crates/net/network/tests/it/session.rs @@ -33,7 +33,7 @@ async fn test_session_established_with_highest_version() { } NetworkEvent::SessionEstablished { peer_id, status, .. } => { assert_eq!(handle1.peer_id(), &peer_id); - assert_eq!(status.version, EthVersion::Eth68 as u8); + assert_eq!(status.version, EthVersion::Eth68); } ev => { panic!("unexpected event {ev:?}") @@ -71,7 +71,7 @@ async fn test_session_established_with_different_capability() { } NetworkEvent::SessionEstablished { peer_id, status, .. 
} => { assert_eq!(handle1.peer_id(), &peer_id); - assert_eq!(status.version, EthVersion::Eth66 as u8); + assert_eq!(status.version, EthVersion::Eth66); } ev => { panic!("unexpected event: {ev:?}") diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs index 857a8a1c126..79a2ff26a27 100644 --- a/examples/manual-p2p/src/main.rs +++ b/examples/manual-p2p/src/main.rs @@ -106,7 +106,8 @@ async fn handshake_eth(p2p_stream: AuthedP2PStream) -> eyre::Result<(AuthedEthSt .forkid(MAINNET.hardfork_fork_id(EthereumHardfork::Shanghai).unwrap()) .build(); - let status = Status { version: p2p_stream.shared_capabilities().eth()?.version(), ..status }; + let status = + Status { version: p2p_stream.shared_capabilities().eth()?.version().try_into()?, ..status }; let eth_unauthed = UnauthedEthStream::new(p2p_stream); Ok(eth_unauthed.handshake(status, fork_filter).await?) } From 9659717e83b58d2655f5f35a714c8ff8d9366f6c Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 31 Oct 2024 02:34:11 -0600 Subject: [PATCH 258/970] renamed OptimismPayloadBuilderAttributes to OpPayloadBuilderAttributes (#12213) --- crates/optimism/node/src/engine.rs | 4 ++-- crates/optimism/node/src/lib.rs | 2 +- crates/optimism/node/tests/e2e/utils.rs | 8 ++++---- crates/optimism/payload/src/builder.rs | 12 ++++++------ crates/optimism/payload/src/lib.rs | 2 +- crates/optimism/payload/src/payload.rs | 18 ++++++++---------- 6 files changed, 22 insertions(+), 24 deletions(-) diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index da8fde2b4d3..27a609d953a 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -16,7 +16,7 @@ use reth_node_api::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; use reth_optimism_payload_builder::{ - builder::decode_eip_1559_params, OptimismBuiltPayload, OptimismPayloadBuilderAttributes, 
+ builder::decode_eip_1559_params, OpPayloadBuilderAttributes, OptimismBuiltPayload, }; /// The types used in the optimism beacon consensus engine. @@ -53,7 +53,7 @@ pub struct OptimismPayloadTypes; impl PayloadTypes for OptimismPayloadTypes { type BuiltPayload = OptimismBuiltPayload; type PayloadAttributes = OpPayloadAttributes; - type PayloadBuilderAttributes = OptimismPayloadBuilderAttributes; + type PayloadBuilderAttributes = OpPayloadBuilderAttributes; } /// Validator for Optimism engine API. diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 768f4d94efd..a8ef472da76 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -23,7 +23,7 @@ pub use node::OptimismNode; pub mod txpool; pub use reth_optimism_payload_builder::{ - OptimismBuiltPayload, OptimismPayloadBuilder, OptimismPayloadBuilderAttributes, + OpPayloadBuilderAttributes, OptimismBuiltPayload, OptimismPayloadBuilder, }; pub use reth_optimism_evm::*; diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index d4219b0fea1..b445af33b4e 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -6,7 +6,7 @@ use reth_e2e_test_utils::{ }; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_optimism_node::{ - node::OptimismAddOns, OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes, + node::OptimismAddOns, OpPayloadBuilderAttributes, OptimismBuiltPayload, OptimismNode, }; use reth_payload_builder::EthPayloadBuilderAttributes; use std::sync::Arc; @@ -31,7 +31,7 @@ pub(crate) async fn advance_chain( length: usize, node: &mut OpNode, wallet: Arc>, -) -> eyre::Result> { +) -> eyre::Result> { node.advance(length as u64, |_| { let wallet = wallet.clone(); Box::pin(async move { @@ -49,7 +49,7 @@ pub(crate) async fn advance_chain( } /// Helper function to create a new eth payload attributes -pub(crate) fn optimism_payload_attributes(timestamp: u64) 
-> OptimismPayloadBuilderAttributes { +pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OpPayloadBuilderAttributes { let attributes = PayloadAttributes { timestamp, prev_randao: B256::ZERO, @@ -58,7 +58,7 @@ pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuil parent_beacon_block_root: Some(B256::ZERO), }; - OptimismPayloadBuilderAttributes { + OpPayloadBuilderAttributes { payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), transactions: vec![], no_tx_pool: false, diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 96ac28c5d1a..3095ce3512e 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -33,7 +33,7 @@ use tracing::{debug, trace, warn}; use crate::{ error::OptimismPayloadBuilderError, - payload::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}, + payload::{OpPayloadBuilderAttributes, OptimismBuiltPayload}, }; use op_alloy_consensus::DepositTransaction; @@ -77,7 +77,7 @@ where /// (that has the `parent` as its parent). pub fn cfg_and_block_env( &self, - config: &PayloadConfig, + config: &PayloadConfig, parent: &Header, ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { @@ -96,12 +96,12 @@ where Pool: TransactionPool, EvmConfig: ConfigureEvm

, { - type Attributes = OptimismPayloadBuilderAttributes; + type Attributes = OpPayloadBuilderAttributes; type BuiltPayload = OptimismBuiltPayload; fn try_build( &self, - args: BuildArguments, + args: BuildArguments, ) -> Result, PayloadBuilderError> { let (cfg_env, block_env) = self .cfg_and_block_env(&args.config, &args.config.parent_header) @@ -111,7 +111,7 @@ where fn on_missing_payload( &self, - _args: BuildArguments, + _args: BuildArguments, ) -> MissingPayloadBehaviour { // we want to await the job that's already in progress because that should be returned as // is, there's no benefit in racing another job @@ -154,7 +154,7 @@ where #[inline] pub(crate) fn optimism_payload( evm_config: &EvmConfig, - args: BuildArguments, + args: BuildArguments, initialized_cfg: CfgEnvWithHandlerCfg, initialized_block_env: BlockEnv, _compute_pending_block: bool, diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index c06b49c5376..1c7bcaf7076 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -15,4 +15,4 @@ pub mod builder; pub use builder::OptimismPayloadBuilder; pub mod error; pub mod payload; -pub use payload::{OpPayloadAttributes, OptimismBuiltPayload, OptimismPayloadBuilderAttributes}; +pub use payload::{OpPayloadAttributes, OpPayloadBuilderAttributes, OptimismBuiltPayload}; diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 056edfe7b63..ecfebdf00b9 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -25,7 +25,7 @@ use std::sync::Arc; /// Optimism Payload Builder Attributes #[derive(Debug, Clone, PartialEq, Eq, Default)] -pub struct OptimismPayloadBuilderAttributes { +pub struct OpPayloadBuilderAttributes { /// Inner ethereum payload builder attributes pub payload_attributes: EthPayloadBuilderAttributes, /// `NoTxPool` option for the generated payload @@ -39,7 +39,7 @@ pub struct 
OptimismPayloadBuilderAttributes { pub eip_1559_params: Option, } -impl OptimismPayloadBuilderAttributes { +impl OpPayloadBuilderAttributes { /// Extracts the `eip1559` parameters for the payload. pub fn get_holocene_extra_data( &self, @@ -73,7 +73,7 @@ impl OptimismPayloadBuilderAttributes { } } -impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { +impl PayloadBuilderAttributes for OpPayloadBuilderAttributes { type RpcPayloadAttributes = OpPayloadAttributes; type Error = alloy_rlp::Error; @@ -169,7 +169,7 @@ pub struct OptimismBuiltPayload { /// The rollup's chainspec. pub(crate) chain_spec: Arc, /// The payload attributes. - pub(crate) attributes: OptimismPayloadBuilderAttributes, + pub(crate) attributes: OpPayloadBuilderAttributes, } // === impl BuiltPayload === @@ -181,7 +181,7 @@ impl OptimismBuiltPayload { block: SealedBlock, fees: U256, chain_spec: Arc, - attributes: OptimismPayloadBuilderAttributes, + attributes: OpPayloadBuilderAttributes, executed_block: Option, ) -> Self { Self { id, block, executed_block, fees, sidecars: Vec::new(), chain_spec, attributes } @@ -411,7 +411,7 @@ mod tests { #[test] fn test_get_extra_data_post_holocene() { - let attributes = OptimismPayloadBuilderAttributes { + let attributes = OpPayloadBuilderAttributes { eip_1559_params: Some(B64::from_str("0x0000000800000008").unwrap()), ..Default::default() }; @@ -421,10 +421,8 @@ mod tests { #[test] fn test_get_extra_data_post_holocene_default() { - let attributes = OptimismPayloadBuilderAttributes { - eip_1559_params: Some(B64::ZERO), - ..Default::default() - }; + let attributes = + OpPayloadBuilderAttributes { eip_1559_params: Some(B64::ZERO), ..Default::default() }; let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 80, 0, 0, 0, 60])); } From 66cc619128450a0368d6814a177f3b812758618a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 31 Oct 2024 09:34:31 +0100 
Subject: [PATCH 259/970] chore: rm deprecated txpool fn (#12198) --- crates/transaction-pool/src/lib.rs | 7 ------- crates/transaction-pool/src/noop.rs | 7 ------- crates/transaction-pool/src/traits.rs | 10 ---------- 3 files changed, 24 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 3a5e547ba4e..f8f06b805f9 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -430,13 +430,6 @@ where Box::new(self.pool.best_transactions()) } - fn best_transactions_with_base_fee( - &self, - base_fee: u64, - ) -> Box>>> { - self.pool.best_transactions_with_attributes(BestTransactionsAttributes::base_fee(base_fee)) - } - fn best_transactions_with_attributes( &self, best_transactions_attributes: BestTransactionsAttributes, diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 4f4e5a3813a..0f87f06f7dd 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -150,13 +150,6 @@ impl TransactionPool for NoopTransactionPool { Box::new(std::iter::empty()) } - fn best_transactions_with_base_fee( - &self, - _: u64, - ) -> Box>>> { - Box::new(std::iter::empty()) - } - fn best_transactions_with_attributes( &self, _: BestTransactionsAttributes, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 1b300415492..0d8f6dbb54a 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -248,16 +248,6 @@ pub trait TransactionPool: Send + Sync + Clone { &self, ) -> Box>>>; - /// Returns an iterator that yields transactions that are ready for block production with the - /// given base fee. 
- /// - /// Consumer: Block production - #[deprecated(note = "Use best_transactions_with_attributes instead.")] - fn best_transactions_with_base_fee( - &self, - base_fee: u64, - ) -> Box>>>; - /// Returns an iterator that yields transactions that are ready for block production with the /// given base fee and optional blob fee attributes. /// From 41044a2601aab03a76838921ff92009debeb27e3 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 31 Oct 2024 03:07:20 -0600 Subject: [PATCH 260/970] Apply beacon system call to trace_block (#12030) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/debug.rs | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 6da03b04675..8b9e1602321 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -98,6 +98,7 @@ where cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, opts: GethDebugTracingOptions, + parent_beacon_block_root: Option, ) -> Result, Eth::Error> { if transactions.is_empty() { // nothing to trace @@ -111,6 +112,26 @@ where let block_hash = at.as_block_hash(); let mut results = Vec::with_capacity(transactions.len()); let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + let mut system_caller = SystemCaller::new( + RpcNodeCore::evm_config(this.eth_api()).clone(), + RpcNodeCore::provider(this.eth_api()).chain_spec(), + ); + + // apply relevant system calls + system_caller + .pre_block_beacon_root_contract_call( + &mut db, + &cfg, + &block_env, + parent_beacon_block_root, + ) + .map_err(|_| { + EthApiError::EvmCustom( + "failed to apply 4788 beacon root system call".to_string(), + ) + })?; + let mut transactions = transactions.into_iter().enumerate().peekable(); let mut inspector = None; while let Some((index, tx)) = transactions.next() { @@ -170,6 +191,9 @@ where // we trace on top the block's parent block let parent = 
block.parent_hash; + // we need the beacon block root for a system call + let parent_beacon_block_root = block.parent_beacon_block_root; + // Depending on EIP-2 we need to recover the transactions differently let transactions = if self.inner.provider.chain_spec().is_homestead_active_at_block(block.number) { @@ -196,7 +220,15 @@ where .collect::, Eth::Error>>()? }; - self.trace_block(parent.into(), transactions, cfg, block_env, opts).await + self.trace_block( + parent.into(), + transactions, + cfg, + block_env, + opts, + parent_beacon_block_root, + ) + .await } /// Replays a block and returns the trace of each transaction. @@ -228,6 +260,7 @@ where cfg, block_env, opts, + block.parent_beacon_block_root, ) .await } From 76c5aef911d909161be47113cd88b7ea36fafffd Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 31 Oct 2024 11:53:59 +0100 Subject: [PATCH 261/970] fix(trie): move to sibling on invalid tree mask (#12193) Co-authored-by: Federico Gimenez --- crates/trie/trie/src/metrics.rs | 22 +++++++++++++++++++++- crates/trie/trie/src/walker.rs | 32 ++++++++++++++++++++++++++++++-- 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/crates/trie/trie/src/metrics.rs b/crates/trie/trie/src/metrics.rs index 7582f37418d..006dc7e3655 100644 --- a/crates/trie/trie/src/metrics.rs +++ b/crates/trie/trie/src/metrics.rs @@ -1,5 +1,5 @@ use crate::stats::TrieStats; -use metrics::Histogram; +use metrics::{Counter, Histogram}; use reth_metrics::Metrics; /// Wrapper for state root metrics. @@ -63,3 +63,23 @@ impl TrieType { } } } + +/// Metrics for trie walker +#[derive(Clone, Metrics)] +#[metrics(scope = "trie.walker")] +pub struct WalkerMetrics { + /// The number of subnodes out of order due to wrong tree mask. + out_of_order_subnode: Counter, +} + +impl WalkerMetrics { + /// Create new metrics for the given trie type. + pub fn new(ty: TrieType) -> Self { + Self::new_with_labels(&[("type", ty.as_str())]) + } + + /// Increment `out_of_order_subnode`. 
+ pub fn inc_out_of_order_subnode(&self, amount: u64) { + self.out_of_order_subnode.increment(amount); + } +} diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index e75a96d0f1f..aaff293b379 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -7,6 +7,9 @@ use alloy_primitives::B256; use reth_storage_errors::db::DatabaseError; use std::collections::HashSet; +#[cfg(feature = "metrics")] +use crate::metrics::WalkerMetrics; + /// `TrieWalker` is a structure that enables traversal of a Merkle trie. /// It allows moving through the trie in a depth-first manner, skipping certain branches /// if they have not changed. @@ -24,13 +27,23 @@ pub struct TrieWalker { pub changes: PrefixSet, /// The retained trie node keys that need to be removed. removed_keys: Option>, + #[cfg(feature = "metrics")] + /// Walker metrics. + metrics: WalkerMetrics, } impl TrieWalker { /// Constructs a new `TrieWalker` from existing stack and a cursor. pub fn from_stack(cursor: C, stack: Vec, changes: PrefixSet) -> Self { - let mut this = - Self { cursor, changes, stack, can_skip_current_node: false, removed_keys: None }; + let mut this = Self { + cursor, + changes, + stack, + can_skip_current_node: false, + removed_keys: None, + #[cfg(feature = "metrics")] + metrics: WalkerMetrics::default(), + }; this.update_skip_node(); this } @@ -113,6 +126,8 @@ impl TrieWalker { stack: vec![CursorSubNode::default()], can_skip_current_node: false, removed_keys: None, + #[cfg(feature = "metrics")] + metrics: WalkerMetrics::default(), }; // Set up the root node of the trie in the stack, if it exists. @@ -179,6 +194,19 @@ impl TrieWalker { self.stack[0].set_nibble(key[0] as i8); } + // The current tree mask might have been set incorrectly. + // Sanity check that the newly retrieved trie node key is the child of the last item + // on the stack. If not, advance to the next sibling instead of adding the node to the + // stack. 
+ if let Some(subnode) = self.stack.last() { + if !key.starts_with(subnode.full_key()) { + #[cfg(feature = "metrics")] + self.metrics.inc_out_of_order_subnode(1); + self.move_to_next_sibling(false)?; + return Ok(()) + } + } + // Create a new CursorSubNode and push it to the stack. let subnode = CursorSubNode::new(key, Some(node)); let nibble = subnode.nibble(); From 1f1dcc950dc2f1b3bee5a562a1c2c7cd9db04fff Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 31 Oct 2024 14:45:55 +0100 Subject: [PATCH 262/970] chore: simplify SystemCaller setup (#12223) --- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 12 +---- crates/rpc/rpc/src/debug.rs | 60 ++++++++++----------- 2 files changed, 30 insertions(+), 42 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index da1d1cdb919..fa70b2df2ef 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -195,11 +195,7 @@ pub trait Trace: LoadState> { let block_txs = block.transactions_with_sender(); // apply relevant system calls - let mut system_caller = SystemCaller::new( - this.evm_config().clone(), - RpcNodeCore::provider(&this).chain_spec(), - ); - system_caller + SystemCaller::new(this.evm_config().clone(), this.provider().chain_spec()) .pre_block_beacon_root_contract_call( &mut db, &cfg, @@ -338,11 +334,7 @@ pub trait Trace: LoadState> { CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); // apply relevant system calls - let mut system_caller = SystemCaller::new( - this.evm_config().clone(), - RpcNodeCore::provider(&this).chain_spec(), - ); - system_caller + SystemCaller::new(this.evm_config().clone(), this.provider().chain_spec()) .pre_block_beacon_root_contract_call( &mut db, &cfg, diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 8b9e1602321..e2746a53cd0 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -113,24 +113,22 
@@ where let mut results = Vec::with_capacity(transactions.len()); let mut db = CacheDB::new(StateProviderDatabase::new(state)); - let mut system_caller = SystemCaller::new( - RpcNodeCore::evm_config(this.eth_api()).clone(), - RpcNodeCore::provider(this.eth_api()).chain_spec(), - ); - // apply relevant system calls - system_caller - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - parent_beacon_block_root, + SystemCaller::new( + this.eth_api().evm_config().clone(), + this.eth_api().provider().chain_spec(), + ) + .pre_block_beacon_root_contract_call( + &mut db, + &cfg, + &block_env, + parent_beacon_block_root, + ) + .map_err(|_| { + EthApiError::EvmCustom( + "failed to apply 4788 beacon root system call".to_string(), ) - .map_err(|_| { - EthApiError::EvmCustom( - "failed to apply 4788 beacon root system call".to_string(), - ) - })?; + })?; let mut transactions = transactions.into_iter().enumerate().peekable(); let mut inspector = None; @@ -296,23 +294,21 @@ where let mut db = CacheDB::new(StateProviderDatabase::new(state)); // apply relevant system calls - let mut system_caller = SystemCaller::new( - RpcNodeCore::evm_config(this.eth_api()).clone(), - RpcNodeCore::provider(this.eth_api()).chain_spec(), - ); - - system_caller - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - parent_beacon_block_root, + SystemCaller::new( + this.eth_api().evm_config().clone(), + this.eth_api().provider().chain_spec(), + ) + .pre_block_beacon_root_contract_call( + &mut db, + &cfg, + &block_env, + parent_beacon_block_root, + ) + .map_err(|_| { + EthApiError::EvmCustom( + "failed to apply 4788 beacon root system call".to_string(), ) - .map_err(|_| { - EthApiError::EvmCustom( - "failed to apply 4788 beacon root system call".to_string(), - ) - })?; + })?; // replay all transactions prior to the targeted transaction let index = this.eth_api().replay_transactions_until( From 460e26cc51c1902cac88b7287c7491976478ca18 Mon Sep 17 00:00:00 2001 
From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 31 Oct 2024 09:51:33 -0600 Subject: [PATCH 263/970] renamed OptimismBuiltPayload to OpBuiltPayload (#12227) --- crates/optimism/node/src/engine.rs | 4 ++-- crates/optimism/node/src/lib.rs | 2 +- crates/optimism/node/tests/e2e/utils.rs | 4 ++-- crates/optimism/payload/src/builder.rs | 18 +++++++-------- crates/optimism/payload/src/lib.rs | 2 +- crates/optimism/payload/src/payload.rs | 30 ++++++++++++------------- 6 files changed, 30 insertions(+), 30 deletions(-) diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 27a609d953a..0f48dc34706 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -16,7 +16,7 @@ use reth_node_api::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; use reth_optimism_payload_builder::{ - builder::decode_eip_1559_params, OpPayloadBuilderAttributes, OptimismBuiltPayload, + builder::decode_eip_1559_params, OpBuiltPayload, OpPayloadBuilderAttributes, }; /// The types used in the optimism beacon consensus engine. 
@@ -51,7 +51,7 @@ where pub struct OptimismPayloadTypes; impl PayloadTypes for OptimismPayloadTypes { - type BuiltPayload = OptimismBuiltPayload; + type BuiltPayload = OpBuiltPayload; type PayloadAttributes = OpPayloadAttributes; type PayloadBuilderAttributes = OpPayloadBuilderAttributes; } diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index a8ef472da76..9bc15ef2668 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -23,7 +23,7 @@ pub use node::OptimismNode; pub mod txpool; pub use reth_optimism_payload_builder::{ - OpPayloadBuilderAttributes, OptimismBuiltPayload, OptimismPayloadBuilder, + OpBuiltPayload, OpPayloadBuilderAttributes, OptimismPayloadBuilder, }; pub use reth_optimism_evm::*; diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index b445af33b4e..16eb974914d 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -6,7 +6,7 @@ use reth_e2e_test_utils::{ }; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_optimism_node::{ - node::OptimismAddOns, OpPayloadBuilderAttributes, OptimismBuiltPayload, OptimismNode, + node::OptimismAddOns, OpBuiltPayload, OpPayloadBuilderAttributes, OptimismNode, }; use reth_payload_builder::EthPayloadBuilderAttributes; use std::sync::Arc; @@ -31,7 +31,7 @@ pub(crate) async fn advance_chain( length: usize, node: &mut OpNode, wallet: Arc>, -) -> eyre::Result> { +) -> eyre::Result> { node.advance(length as u64, |_| { let wallet = wallet.clone(); Box::pin(async move { diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 3095ce3512e..a1fa3d47beb 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -33,7 +33,7 @@ use tracing::{debug, trace, warn}; use crate::{ error::OptimismPayloadBuilderError, - payload::{OpPayloadBuilderAttributes, OptimismBuiltPayload}, + 
payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, }; use op_alloy_consensus::DepositTransaction; @@ -97,12 +97,12 @@ where EvmConfig: ConfigureEvm
, { type Attributes = OpPayloadBuilderAttributes; - type BuiltPayload = OptimismBuiltPayload; + type BuiltPayload = OpBuiltPayload; fn try_build( &self, - args: BuildArguments, - ) -> Result, PayloadBuilderError> { + args: BuildArguments, + ) -> Result, PayloadBuilderError> { let (cfg_env, block_env) = self .cfg_and_block_env(&args.config, &args.config.parent_header) .map_err(PayloadBuilderError::other)?; @@ -111,7 +111,7 @@ where fn on_missing_payload( &self, - _args: BuildArguments, + _args: BuildArguments, ) -> MissingPayloadBehaviour { // we want to await the job that's already in progress because that should be returned as // is, there's no benefit in racing another job @@ -124,7 +124,7 @@ where &self, client: &Client, config: PayloadConfig, - ) -> Result { + ) -> Result { let args = BuildArguments { client, config, @@ -154,11 +154,11 @@ where #[inline] pub(crate) fn optimism_payload( evm_config: &EvmConfig, - args: BuildArguments, + args: BuildArguments, initialized_cfg: CfgEnvWithHandlerCfg, initialized_block_env: BlockEnv, _compute_pending_block: bool, -) -> Result, PayloadBuilderError> +) -> Result, PayloadBuilderError> where EvmConfig: ConfigureEvm
, Client: StateProviderFactory + ChainSpecProvider, @@ -523,7 +523,7 @@ where let no_tx_pool = attributes.no_tx_pool; - let payload = OptimismBuiltPayload::new( + let payload = OpBuiltPayload::new( attributes.payload_attributes.id, sealed_block, total_fees, diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index 1c7bcaf7076..e1f55e51bad 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -15,4 +15,4 @@ pub mod builder; pub use builder::OptimismPayloadBuilder; pub mod error; pub mod payload; -pub use payload::{OpPayloadAttributes, OpPayloadBuilderAttributes, OptimismBuiltPayload}; +pub use payload::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes}; diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index ecfebdf00b9..3a7d87acc4c 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -154,7 +154,7 @@ impl PayloadBuilderAttributes for OpPayloadBuilderAttributes { /// Contains the built payload. #[derive(Debug, Clone)] -pub struct OptimismBuiltPayload { +pub struct OpBuiltPayload { /// Identifier of the payload pub(crate) id: PayloadId, /// The built block @@ -174,7 +174,7 @@ pub struct OptimismBuiltPayload { // === impl BuiltPayload === -impl OptimismBuiltPayload { +impl OpBuiltPayload { /// Initializes the payload with the given initial block. 
pub const fn new( id: PayloadId, @@ -208,7 +208,7 @@ impl OptimismBuiltPayload { } } -impl BuiltPayload for OptimismBuiltPayload { +impl BuiltPayload for OpBuiltPayload { fn block(&self) -> &SealedBlock { &self.block } @@ -226,7 +226,7 @@ impl BuiltPayload for OptimismBuiltPayload { } } -impl BuiltPayload for &OptimismBuiltPayload { +impl BuiltPayload for &OpBuiltPayload { fn block(&self) -> &SealedBlock { (**self).block() } @@ -245,24 +245,24 @@ impl BuiltPayload for &OptimismBuiltPayload { } // V1 engine_getPayloadV1 response -impl From for ExecutionPayloadV1 { - fn from(value: OptimismBuiltPayload) -> Self { +impl From for ExecutionPayloadV1 { + fn from(value: OpBuiltPayload) -> Self { block_to_payload_v1(value.block) } } // V2 engine_getPayloadV2 response -impl From for ExecutionPayloadEnvelopeV2 { - fn from(value: OptimismBuiltPayload) -> Self { - let OptimismBuiltPayload { block, fees, .. } = value; +impl From for ExecutionPayloadEnvelopeV2 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, .. } = value; Self { block_value: fees, execution_payload: convert_block_to_payload_field_v2(block) } } } -impl From for OpExecutionPayloadEnvelopeV3 { - fn from(value: OptimismBuiltPayload) -> Self { - let OptimismBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; +impl From for OpExecutionPayloadEnvelopeV3 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; let parent_beacon_block_root = if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp()) { @@ -287,9 +287,9 @@ impl From for OpExecutionPayloadEnvelopeV3 { } } } -impl From for OpExecutionPayloadEnvelopeV4 { - fn from(value: OptimismBuiltPayload) -> Self { - let OptimismBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. 
} = value; +impl From for OpExecutionPayloadEnvelopeV4 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; let parent_beacon_block_root = if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp()) { From 219def95821b6ba631e8e1457aca86acdc3589eb Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 31 Oct 2024 17:21:06 +0100 Subject: [PATCH 264/970] chore(ci): pin kurtosis to working version (#12225) --- .github/workflows/kurtosis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index 74d26dbd3ee..43f5c3605ab 100644 --- a/.github/workflows/kurtosis.yml +++ b/.github/workflows/kurtosis.yml @@ -79,6 +79,7 @@ jobs: - name: Run kurtosis uses: ethpandaops/kurtosis-assertoor-github-action@v1 with: + kurtosis_version: 1.3.1 ethereum_package_args: '.github/assets/kurtosis_network_params.yaml' notify-on-error: From d555f9ef3a057c9143c7cf7b4ea69eb30859295d Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 31 Oct 2024 15:21:55 -0400 Subject: [PATCH 265/970] chore(book): fix engine api typo (#12231) --- book/run/mainnet.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/run/mainnet.md b/book/run/mainnet.md index 4412f51c7bf..6f1ec144df8 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -83,4 +83,4 @@ In the meantime, consider setting up [observability](./observability.md) to moni ## Running without a Consensus Layer -We provide a method for running Reth without a Consensus Layer via the `--debug.tip ` parameter. If you provide that to your node, it will simulate sending a `engine_forkChoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. 
At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients other methods of syncing like importing Lighthouse as a library. +We provide a method for running Reth without a Consensus Layer via the `--debug.tip ` parameter. If you provide that to your node, it will simulate sending an `engine_forkchoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients other methods of syncing like importing Lighthouse as a library. From 998b3b3d3a57b3a567166dda57c4f2d856539ec3 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 31 Oct 2024 13:21:38 -0600 Subject: [PATCH 266/970] renamed OptimismPayloadBuilder to OpPayloadBuilder (#12234) --- crates/optimism/node/src/lib.rs | 2 +- crates/optimism/node/src/node.rs | 17 ++++++++--------- crates/optimism/payload/src/builder.rs | 12 ++++++------ crates/optimism/payload/src/lib.rs | 2 +- 4 files changed, 16 insertions(+), 17 deletions(-) diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 9bc15ef2668..ff25e7173a6 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -23,7 +23,7 @@ pub use node::OptimismNode; pub mod txpool; pub use reth_optimism_payload_builder::{ - OpBuiltPayload, OpPayloadBuilderAttributes, OptimismPayloadBuilder, + OpBuiltPayload, OpPayloadBuilder, OpPayloadBuilderAttributes, }; pub use reth_optimism_evm::*; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 4328a55fb15..8e06dbcc500 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -67,7 +67,7 @@ impl OptimismNode { ) 
-> ComponentsBuilder< Node, OptimismPoolBuilder, - OptimismPayloadBuilder, + OpPayloadBuilder, OptimismNetworkBuilder, OptimismExecutorBuilder, OptimismConsensusBuilder, @@ -81,7 +81,7 @@ impl OptimismNode { ComponentsBuilder::default() .node_types::() .pool(OptimismPoolBuilder::default()) - .payload(OptimismPayloadBuilder::new(compute_pending_block)) + .payload(OpPayloadBuilder::new(compute_pending_block)) .network(OptimismNetworkBuilder { disable_txpool_gossip, disable_discovery_v4: !discovery_v4, @@ -100,7 +100,7 @@ where type ComponentsBuilder = ComponentsBuilder< N, OptimismPoolBuilder, - OptimismPayloadBuilder, + OpPayloadBuilder, OptimismNetworkBuilder, OptimismExecutorBuilder, OptimismConsensusBuilder, @@ -288,7 +288,7 @@ where /// A basic optimism payload service builder #[derive(Debug, Default, Clone)] -pub struct OptimismPayloadBuilder { +pub struct OpPayloadBuilder { /// By default the pending block equals the latest block /// to save resources and not leak txs from the tx-pool, /// this flag enables computing of the pending block @@ -300,7 +300,7 @@ pub struct OptimismPayloadBuilder { pub compute_pending_block: bool, } -impl OptimismPayloadBuilder { +impl OpPayloadBuilder { /// Create a new instance with the given `compute_pending_block` flag. pub const fn new(compute_pending_block: bool) -> Self { Self { compute_pending_block } @@ -320,9 +320,8 @@ impl OptimismPayloadBuilder { Pool: TransactionPool + Unpin + 'static, Evm: ConfigureEvm
, { - let payload_builder = - reth_optimism_payload_builder::OptimismPayloadBuilder::new(evm_config) - .set_compute_pending_block(self.compute_pending_block); + let payload_builder = reth_optimism_payload_builder::OpPayloadBuilder::new(evm_config) + .set_compute_pending_block(self.compute_pending_block); let conf = ctx.payload_builder_config(); let payload_job_config = BasicPayloadJobGeneratorConfig::default() @@ -348,7 +347,7 @@ impl OptimismPayloadBuilder { } } -impl PayloadServiceBuilder for OptimismPayloadBuilder +impl PayloadServiceBuilder for OpPayloadBuilder where Node: FullNodeTypes< Types: NodeTypesWithEngine, diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index a1fa3d47beb..09a443d0f1d 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -39,7 +39,7 @@ use op_alloy_consensus::DepositTransaction; /// Optimism's payload builder #[derive(Debug, Clone, PartialEq, Eq)] -pub struct OptimismPayloadBuilder { +pub struct OpPayloadBuilder { /// The rollup's compute pending block configuration option. // TODO(clabby): Implement this feature. pub compute_pending_block: bool, @@ -47,8 +47,8 @@ pub struct OptimismPayloadBuilder { pub evm_config: EvmConfig, } -impl OptimismPayloadBuilder { - /// `OptimismPayloadBuilder` constructor. +impl OpPayloadBuilder { + /// `OpPayloadBuilder` constructor. pub const fn new(evm_config: EvmConfig) -> Self { Self { compute_pending_block: true, evm_config } } @@ -69,7 +69,7 @@ impl OptimismPayloadBuilder { self.compute_pending_block } } -impl OptimismPayloadBuilder +impl OpPayloadBuilder where EvmConfig: ConfigureEvmEnv
, { @@ -89,8 +89,8 @@ where } } -/// Implementation of the [`PayloadBuilder`] trait for [`OptimismPayloadBuilder`]. -impl PayloadBuilder for OptimismPayloadBuilder +/// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. +impl PayloadBuilder for OpPayloadBuilder where Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index e1f55e51bad..8447026d783 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -12,7 +12,7 @@ #![cfg(feature = "optimism")] pub mod builder; -pub use builder::OptimismPayloadBuilder; +pub use builder::OpPayloadBuilder; pub mod error; pub mod payload; pub use payload::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes}; From d020b41f6aa053dd6a13d3647f7ad708ee35dc66 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 31 Oct 2024 23:22:42 +0400 Subject: [PATCH 267/970] feat: `flashbots_validateBuilderSubmissionV3` (#12168) --- Cargo.lock | 8 + book/cli/reth/node.md | 3 + crates/cli/util/Cargo.toml | 1 + crates/cli/util/src/parsers.rs | 7 + crates/e2e-test-utils/src/lib.rs | 8 +- crates/ethereum/consensus/src/lib.rs | 4 +- crates/ethereum/node/Cargo.toml | 1 + crates/ethereum/node/tests/e2e/rpc.rs | 86 +++- crates/node/builder/src/rpc.rs | 3 + crates/node/core/src/args/rpc_server.rs | 14 + crates/payload/validator/src/lib.rs | 4 +- crates/rpc/rpc-api/Cargo.toml | 2 + crates/rpc/rpc-api/src/lib.rs | 2 +- crates/rpc/rpc-api/src/validation.rs | 21 +- crates/rpc/rpc-builder/Cargo.toml | 1 + crates/rpc/rpc-builder/src/config.rs | 10 +- crates/rpc/rpc-builder/src/lib.rs | 322 +++++++++++--- crates/rpc/rpc-builder/tests/it/utils.rs | 3 + .../rpc/rpc-eth-types/src/builder/config.rs | 2 +- crates/rpc/rpc-server-types/src/result.rs | 2 + crates/rpc/rpc/Cargo.toml | 5 +- crates/rpc/rpc/src/lib.rs | 2 +- crates/rpc/rpc/src/validation.rs | 400 ++++++++++++++++-- 
examples/rpc-db/src/main.rs | 6 +- 24 files changed, 801 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 27581ba3a80..5fc3574ebc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6697,6 +6697,7 @@ dependencies = [ "rand 0.8.5", "reth-fs-util", "secp256k1", + "serde", "thiserror", "tikv-jemallocator", "tracy-client", @@ -7997,6 +7998,7 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "alloy-provider", + "alloy-rpc-types-beacon", "alloy-signer", "alloy-sol-types", "eyre", @@ -8626,13 +8628,16 @@ dependencies = [ "pin-project", "rand 0.8.5", "reth-chainspec", + "reth-consensus", "reth-consensus-common", "reth-errors", + "reth-ethereum-consensus", "reth-evm", "reth-evm-ethereum", "reth-network-api", "reth-network-peers", "reth-network-types", + "reth-payload-validator", "reth-primitives", "reth-provider", "reth-revm", @@ -8682,6 +8687,8 @@ dependencies = [ "reth-network-peers", "reth-primitives", "reth-rpc-eth-api", + "serde", + "serde_with", ] [[package]] @@ -8721,6 +8728,7 @@ dependencies = [ "pin-project", "reth-beacon-consensus", "reth-chainspec", + "reth-consensus", "reth-engine-primitives", "reth-ethereum-engine-primitives", "reth-evm", diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 52f597279f0..5f0090ef896 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -367,6 +367,9 @@ RPC: [default: 25] + --builder.disallow + Path to file containing disallowed addresses, json-encoded list of strings. 
Block validation API will reject blocks containing transactions from these addresses + RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml index d96a882a672..70515f83b4b 100644 --- a/crates/cli/util/Cargo.toml +++ b/crates/cli/util/Cargo.toml @@ -24,6 +24,7 @@ eyre.workspace = true rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } thiserror.workspace = true +serde.workspace = true tracy-client = { workspace = true, optional = true, features = ["demangle"] } diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs index 202744a4bb7..9bb803bcca8 100644 --- a/crates/cli/util/src/parsers.rs +++ b/crates/cli/util/src/parsers.rs @@ -1,7 +1,9 @@ use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; +use reth_fs_util::FsPathError; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs}, + path::Path, str::FromStr, time::Duration, }; @@ -82,6 +84,11 @@ pub fn parse_socket_address(value: &str) -> eyre::Result(path: &str) -> Result { + reth_fs_util::read_json_file(Path::new(path)) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 1e9717d082c..1e9b39058e6 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -7,6 +7,7 @@ use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, builder::{NodeBuilder, NodeConfig, NodeHandle}, network::PeersHandleProvider, + rpc::server_types::RpcModuleSelection, tasks::TaskManager, }; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -147,7 +148,12 @@ where let node_config = NodeConfig::new(chain_spec.clone()) .with_network(network_config.clone()) .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()) + .with_rpc( + RpcServerArgs::default() + .with_unused_ports() + .with_http() + .with_http_api(RpcModuleSelection::All), + ) 
.set_dev(is_dev); let span = span!(Level::INFO, "node", idx); diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index dd286584a59..07c2a71e8cf 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -24,7 +24,7 @@ use reth_primitives::{ use std::{fmt::Debug, sync::Arc, time::SystemTime}; /// The bound divisor of the gas limit, used in update calculations. -const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; +pub const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; mod validation; pub use validation::validate_block_post_execution; @@ -32,7 +32,7 @@ pub use validation::validate_block_post_execution; /// Ethereum beacon consensus /// /// This consensus engine does basic checks as outlined in the execution specs. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EthBeaconConsensus { /// Configuration chain_spec: Arc, diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 83b62034740..49663ffc2cc 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -59,6 +59,7 @@ alloy-signer.workspace = true alloy-eips.workspace = true alloy-sol-types.workspace = true alloy-contract.workspace = true +alloy-rpc-types-beacon.workspace = true [features] default = [] diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index ddf3d5cba2a..c8b127b9b7f 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -1,8 +1,14 @@ use crate::utils::eth_payload_attributes; -use alloy_eips::calc_next_block_base_fee; -use alloy_primitives::U256; -use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder}; +use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718}; +use alloy_primitives::{Address, B256, U256}; +use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx}; +use alloy_rpc_types_beacon::relay::{BidTrace, SignedBidSubmissionV3}; use 
rand::{rngs::StdRng, Rng, SeedableRng}; +use reth::rpc::{ + api::BuilderBlockValidationRequestV3, + compat::engine::payload::block_to_payload_v3, + types::{engine::BlobsBundleV1, TransactionRequest}, +}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::setup_engine; use reth_node_ethereum::EthereumNode; @@ -107,3 +113,77 @@ async fn test_fee_history() -> eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn test_flashbots_validate() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + node.advance(100, |_| { + let provider = provider.clone(); + Box::pin(async move { + let SendableTx::Envelope(tx) = + provider.fill(TransactionRequest::default().to(Address::ZERO)).await.unwrap() + else { + unreachable!() + }; + + tx.encoded_2718().into() + }) + }) + .await?; + + let _ = provider.send_transaction(TransactionRequest::default().to(Address::ZERO)).await?; + let (payload, attrs) = node.new_payload().await?; + + let mut request = BuilderBlockValidationRequestV3 { + request: SignedBidSubmissionV3 { + message: BidTrace { + parent_hash: payload.block().parent_hash, + block_hash: payload.block().hash(), + gas_used: payload.block().gas_used, + gas_limit: payload.block().gas_limit, + ..Default::default() + }, + execution_payload: block_to_payload_v3(payload.block().clone()), + blobs_bundle: BlobsBundleV1::new([]), + signature: Default::default(), + }, + parent_beacon_block_root: attrs.parent_beacon_block_root.unwrap(), + 
registered_gas_limit: payload.block().gas_limit, + }; + + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_ok()); + + request.registered_gas_limit -= 1; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_err()); + request.registered_gas_limit += 1; + + request.request.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_err()); + Ok(()) +} diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 4c1ea32d045..8819aa4ac4f 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -198,6 +198,7 @@ pub struct RpcRegistry { Node::Provider, EthApi, Node::Executor, + Node::Consensus, >, } @@ -214,6 +215,7 @@ where Node::Provider, EthApi, Node::Executor, + Node::Consensus, >; fn deref(&self) -> &Self::Target { @@ -442,6 +444,7 @@ where .with_executor(node.task_executor().clone()) .with_evm_config(node.evm_config().clone()) .with_block_executor(node.block_executor().clone()) + .with_consensus(node.consensus().clone()) .build_with_auth_server(module_config, engine_api, eth_api_builder); // in dev mode we generate 20 random dev-signer accounts diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 382f22d3776..fe9b80cec47 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -1,11 +1,13 @@ //! clap [Args](clap::Args) for RPC related arguments. 
use std::{ + collections::HashSet, ffi::OsStr, net::{IpAddr, Ipv4Addr}, path::PathBuf, }; +use alloy_primitives::Address; use alloy_rpc_types_engine::JwtSecret; use clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, @@ -183,6 +185,11 @@ pub struct RpcServerArgs { #[arg(long = "rpc.proof-permits", alias = "rpc-proof-permits", value_name = "COUNT", default_value_t = constants::DEFAULT_PROOF_PERMITS)] pub rpc_proof_permits: usize, + /// Path to file containing disallowed addresses, json-encoded list of strings. Block + /// validation API will reject blocks containing transactions from these addresses. + #[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::>)] + pub builder_disallow: Option>, + /// State cache configuration. #[command(flatten)] pub rpc_state_cache: RpcStateCacheArgs, @@ -199,6 +206,12 @@ impl RpcServerArgs { self } + /// Configures modules for the HTTP-RPC server. + pub fn with_http_api(mut self, http_api: RpcModuleSelection) -> Self { + self.http_api = Some(http_api); + self + } + /// Enables the WS-RPC server. pub const fn with_ws(mut self) -> Self { self.ws = true; @@ -318,6 +331,7 @@ impl Default for RpcServerArgs { gas_price_oracle: GasPriceOracleArgs::default(), rpc_state_cache: RpcStateCacheArgs::default(), rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS, + builder_disallow: Default::default(), } } } diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 9952815fd98..38e53bac42a 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -23,7 +23,7 @@ pub struct ExecutionPayloadValidator { chain_spec: Arc, } -impl ExecutionPayloadValidator { +impl ExecutionPayloadValidator { /// Create a new validator. 
pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } @@ -34,7 +34,9 @@ impl ExecutionPayloadValidator { pub fn chain_spec(&self) -> &ChainSpec { &self.chain_spec } +} +impl ExecutionPayloadValidator { /// Returns true if the Cancun hardfork is active at the given timestamp. #[inline] fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 363e2295530..60146e8b2c2 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -36,6 +36,8 @@ alloy-rpc-types-engine.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } +serde.workspace = true +serde_with.workspace = true [features] client = [ diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 73775112dcf..63e6e54466d 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -46,7 +46,7 @@ pub mod servers { rpc::RpcApiServer, trace::TraceApiServer, txpool::TxPoolApiServer, - validation::BlockSubmissionValidationApiServer, + validation::{BlockSubmissionValidationApiServer, BuilderBlockValidationRequestV3}, web3::Web3ApiServer, }; pub use reth_rpc_eth_api::{ diff --git a/crates/rpc/rpc-api/src/validation.rs b/crates/rpc/rpc-api/src/validation.rs index e1819dde440..d8f55b668c9 100644 --- a/crates/rpc/rpc-api/src/validation.rs +++ b/crates/rpc/rpc-api/src/validation.rs @@ -1,9 +1,28 @@ //! API for block submission validation. 
+use alloy_primitives::B256; use alloy_rpc_types_beacon::relay::{ - BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, + BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, SignedBidSubmissionV3, }; use jsonrpsee::proc_macros::rpc; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DisplayFromStr}; + +/// A Request to validate a [`SignedBidSubmissionV3`] +/// +/// +#[serde_as] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct BuilderBlockValidationRequestV3 { + /// The request to be validated. + #[serde(flatten)] + pub request: SignedBidSubmissionV3, + /// The registered gas limit for the validation request. + #[serde_as(as = "DisplayFromStr")] + pub registered_gas_limit: u64, + /// The parent beacon block root for the validation request. + pub parent_beacon_block_root: B256, +} /// Block validation rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "flashbots"))] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index b9b511a078b..711e4438133 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-ipc.workspace = true reth-chainspec.workspace = true +reth-consensus.workspace = true reth-network-api.workspace = true reth-node-core.workspace = true reth-provider.workspace = true diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 4ff98ae8d50..daff81fa2ae 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -2,6 +2,7 @@ use std::{net::SocketAddr, path::PathBuf}; use jsonrpsee::server::ServerBuilder; use reth_node_core::{args::RpcServerArgs, utils::get_or_create_jwt_secret_from_path}; +use reth_rpc::ValidationApiConfig; use reth_rpc_eth_types::{EthConfig, EthStateCacheConfig, GasPriceOracleConfig}; use reth_rpc_layer::{JwtError, JwtSecret}; use 
reth_rpc_server_types::RpcModuleSelection; @@ -27,6 +28,9 @@ pub trait RethRpcServerConfig { /// The configured ethereum RPC settings. fn eth_config(&self) -> EthConfig; + /// The configured ethereum RPC settings. + fn flashbots_config(&self) -> ValidationApiConfig; + /// Returns state cache configuration. fn state_cache_config(&self) -> EthStateCacheConfig; @@ -101,6 +105,10 @@ impl RethRpcServerConfig for RpcServerArgs { .proof_permits(self.rpc_proof_permits) } + fn flashbots_config(&self) -> ValidationApiConfig { + ValidationApiConfig { disallow: self.builder_disallow.clone().unwrap_or_default() } + } + fn state_cache_config(&self) -> EthStateCacheConfig { EthStateCacheConfig { max_blocks: self.rpc_state_cache.max_blocks, @@ -124,7 +132,7 @@ impl RethRpcServerConfig for RpcServerArgs { fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig { let mut config = TransportRpcModuleConfig::default() - .with_config(RpcModuleConfig::new(self.eth_config())); + .with_config(RpcModuleConfig::new(self.eth_config(), self.flashbots_config())); if self.http { config = config.with_http( diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 787dce08b8d..5d5fd31aab8 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -27,13 +27,14 @@ //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::TransactionPool; //! -//! pub async fn launch( +//! pub async fn launch( //! provider: Provider, //! pool: Pool, //! network: Network, //! events: Events, //! evm_config: EvmConfig, //! block_executor: BlockExecutor, +//! consensus: Consensus, //! ) where //! Provider: FullRpcProvider + AccountReader + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, @@ -41,6 +42,7 @@ //! Events: CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, +//! Consensus: reth_consensus::Consensus + Clone + 'static, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -57,6 +59,7 @@ //! events, //! evm_config, //! block_executor, +//! consensus, //! ) //! .build(transports, Box::new(EthApi::with_spawner)); //! let handle = RpcServerConfig::default() @@ -95,6 +98,7 @@ //! EngineT, //! EvmConfig, //! BlockExecutor, +//! Consensus, //! >( //! provider: Provider, //! pool: Pool, @@ -103,6 +107,7 @@ //! engine_api: EngineApi, //! evm_config: EvmConfig, //! block_executor: BlockExecutor, +//! consensus: Consensus, //! ) where //! Provider: FullRpcProvider + AccountReader + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, @@ -112,6 +117,7 @@ //! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, +//! Consensus: reth_consensus::Consensus + Clone + 'static, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -128,6 +134,7 @@ //! events, //! evm_config, //! block_executor, +//! consensus, //! ); //! //! // configure the server modules @@ -155,6 +162,7 @@ use std::{ collections::HashMap, fmt::Debug, net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}, }; @@ -170,6 +178,7 @@ use jsonrpsee::{ Methods, RpcModule, }; use reth_chainspec::EthereumHardforks; +use reth_consensus::Consensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; @@ -180,7 +189,7 @@ use reth_provider::{ }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, - TxPoolApi, ValidationApi, Web3Api, + TxPoolApi, ValidationApi, ValidationApiConfig, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ @@ -243,6 +252,7 @@ pub async fn launch, block_executor: BlockExecutor, + consensus: Arc, ) -> Result where Provider: FullRpcProvider + AccountReader + ChangeSetReader, @@ -266,6 +276,7 @@ where events, evm_config, block_executor, + consensus, ) .build(module_config, eth), ) @@ -276,7 +287,16 @@ where /// /// This is the main entrypoint and the easiest way to configure an RPC server. 
#[derive(Debug, Clone)] -pub struct RpcModuleBuilder { +pub struct RpcModuleBuilder< + Provider, + Pool, + Network, + Tasks, + Events, + EvmConfig, + BlockExecutor, + Consensus, +> { /// The Provider type to when creating all rpc handlers provider: Provider, /// The Pool type to when creating all rpc handlers @@ -291,14 +311,17 @@ pub struct RpcModuleBuilder - RpcModuleBuilder +impl + RpcModuleBuilder { /// Create a new instance of the builder + #[allow(clippy::too_many_arguments)] pub const fn new( provider: Provider, pool: Pool, @@ -307,32 +330,54 @@ impl events: Events, evm_config: EvmConfig, block_executor: BlockExecutor, + consensus: Consensus, ) -> Self { - Self { provider, pool, network, executor, events, evm_config, block_executor } + Self { provider, pool, network, executor, events, evm_config, block_executor, consensus } } /// Configure the provider instance. pub fn with_provider

( self, provider: P, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where P: BlockReader + StateProviderFactory + EvmEnvProvider + 'static, { - let Self { pool, network, executor, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { pool, network, executor, events, evm_config, block_executor, consensus, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure the transaction pool instance. pub fn with_pool

( self, pool: P, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where P: TransactionPool + 'static, { - let Self { provider, network, executor, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { + provider, network, executor, events, evm_config, block_executor, consensus, .. + } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure a [`NoopTransactionPool`] instance. @@ -350,8 +395,11 @@ impl Events, EvmConfig, BlockExecutor, + Consensus, > { - let Self { provider, executor, events, network, evm_config, block_executor, .. } = self; + let Self { + provider, executor, events, network, evm_config, block_executor, consensus, .. + } = self; RpcModuleBuilder { provider, executor, @@ -360,6 +408,7 @@ impl evm_config, block_executor, pool: NoopTransactionPool::default(), + consensus, } } @@ -367,12 +416,23 @@ impl pub fn with_network( self, network: N, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where N: NetworkInfo + Peers + 'static, { - let Self { provider, pool, executor, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { + provider, pool, executor, events, evm_config, block_executor, consensus, .. + } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure a [`NoopNetwork`] instance. @@ -382,9 +442,19 @@ impl /// [`EthApi`](reth_rpc::eth::EthApi) which requires a [`NetworkInfo`] implementation. pub fn with_noop_network( self, - ) -> RpcModuleBuilder - { - let Self { provider, pool, executor, events, evm_config, block_executor, .. 
} = self; + ) -> RpcModuleBuilder< + Provider, + Pool, + NoopNetwork, + Tasks, + Events, + EvmConfig, + BlockExecutor, + Consensus, + > { + let Self { + provider, pool, executor, events, evm_config, block_executor, consensus, .. + } = self; RpcModuleBuilder { provider, pool, @@ -393,6 +463,7 @@ impl network: NoopNetwork::default(), evm_config, block_executor, + consensus, } } @@ -400,12 +471,22 @@ impl pub fn with_executor( self, executor: T, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where T: TaskSpawner + 'static, { - let Self { pool, network, provider, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { pool, network, provider, events, evm_config, block_executor, consensus, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure [`TokioTaskExecutor`] as the task executor to use for additional tasks. @@ -422,8 +503,10 @@ impl Events, EvmConfig, BlockExecutor, + Consensus, > { - let Self { pool, network, provider, events, evm_config, block_executor, .. } = self; + let Self { pool, network, provider, events, evm_config, block_executor, consensus, .. } = + self; RpcModuleBuilder { provider, network, @@ -432,6 +515,7 @@ impl executor: TokioTaskExecutor::default(), evm_config, block_executor, + consensus, } } @@ -439,41 +523,90 @@ impl pub fn with_events( self, events: E, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where E: CanonStateSubscriptions + 'static, { - let Self { provider, pool, executor, network, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { + provider, pool, executor, network, evm_config, block_executor, consensus, .. 
+ } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure the evm configuration type pub fn with_evm_config( self, evm_config: E, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where E: ConfigureEvm + 'static, { - let Self { provider, pool, executor, network, events, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { provider, pool, executor, network, events, block_executor, consensus, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure the block executor provider pub fn with_block_executor( self, block_executor: BE, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where BE: BlockExecutorProvider, { - let Self { provider, network, pool, executor, events, evm_config, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { provider, network, pool, executor, events, evm_config, consensus, .. } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } + } + + /// Configure the consensus implementation. + pub fn with_consensus( + self, + consensus: C, + ) -> RpcModuleBuilder { + let Self { provider, network, pool, executor, events, evm_config, block_executor, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } } -impl - RpcModuleBuilder +impl + RpcModuleBuilder where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, @@ -482,6 +615,7 @@ where Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm

, BlockExecutor: BlockExecutorProvider, + Consensus: reth_consensus::Consensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -498,14 +632,23 @@ where ) -> ( TransportRpcModules, AuthRpcModule, - RpcRegistryInner, + RpcRegistryInner, ) where EngineT: EngineTypes, EngineApi: EngineApiServer, EthApi: FullEthApiServer, { - let Self { provider, pool, network, executor, events, evm_config, block_executor } = self; + let Self { + provider, + pool, + network, + executor, + events, + evm_config, + block_executor, + consensus, + } = self; let config = module_config.config.clone().unwrap_or_default(); @@ -515,6 +658,7 @@ where network, executor, events, + consensus, config, evm_config, eth, @@ -536,6 +680,7 @@ where /// # Example /// /// ```no_run + /// use reth_consensus::noop::NoopConsensus; /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; @@ -555,6 +700,7 @@ where /// .with_events(TestCanonStateSubscriptions::default()) /// .with_evm_config(evm) /// .with_block_executor(EthExecutorProvider::mainnet()) + /// .with_consensus(NoopConsensus::default()) /// .into_registry(Default::default(), Box::new(EthApi::with_spawner)); /// /// let eth_api = registry.eth_api(); @@ -564,17 +710,27 @@ where self, config: RpcModuleConfig, eth: DynEthApiBuilder, - ) -> RpcRegistryInner + ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, { - let Self { provider, pool, network, executor, events, evm_config, block_executor } = self; + let Self { + provider, + pool, + network, + executor, + events, + evm_config, + block_executor, + consensus, + } = self; RpcRegistryInner::new( provider, pool, network, executor, events, + consensus, config, evm_config, eth, @@ -594,7 +750,16 @@ where { let mut modules = TransportRpcModules::default(); - let Self { provider, pool, network, executor, events, 
evm_config, block_executor } = self; + let Self { + provider, + pool, + network, + executor, + events, + evm_config, + block_executor, + consensus, + } = self; if !module_config.is_empty() { let TransportRpcModuleConfig { http, ws, ipc, config } = module_config.clone(); @@ -605,6 +770,7 @@ where network, executor, events, + consensus, config.unwrap_or_default(), evm_config, eth, @@ -621,9 +787,9 @@ where } } -impl Default for RpcModuleBuilder<(), (), (), (), (), (), ()> { +impl Default for RpcModuleBuilder<(), (), (), (), (), (), (), ()> { fn default() -> Self { - Self::new((), (), (), (), (), (), ()) + Self::new((), (), (), (), (), (), (), ()) } } @@ -632,6 +798,8 @@ impl Default for RpcModuleBuilder<(), (), (), (), (), (), ()> { pub struct RpcModuleConfig { /// `eth` namespace settings eth: EthConfig, + /// `flashbots` namespace settings + flashbots: ValidationApiConfig, } // === impl RpcModuleConfig === @@ -643,8 +811,8 @@ impl RpcModuleConfig { } /// Returns a new RPC module config given the eth namespace config - pub const fn new(eth: EthConfig) -> Self { - Self { eth } + pub const fn new(eth: EthConfig, flashbots: ValidationApiConfig) -> Self { + Self { eth, flashbots } } /// Get a reference to the eth namespace config @@ -662,6 +830,7 @@ impl RpcModuleConfig { #[derive(Clone, Debug, Default)] pub struct RpcModuleConfigBuilder { eth: Option, + flashbots: Option, } // === impl RpcModuleConfigBuilder === @@ -673,10 +842,16 @@ impl RpcModuleConfigBuilder { self } + /// Configures a custom flashbots namespace config + pub fn flashbots(mut self, flashbots: ValidationApiConfig) -> Self { + self.flashbots = Some(flashbots); + self + } + /// Consumes the type and creates the [`RpcModuleConfig`] pub fn build(self) -> RpcModuleConfig { - let Self { eth } = self; - RpcModuleConfig { eth: eth.unwrap_or_default() } + let Self { eth, flashbots } = self; + RpcModuleConfig { eth: eth.unwrap_or_default(), flashbots: flashbots.unwrap_or_default() } } /// Get a reference to the 
eth namespace config, if any @@ -705,6 +880,7 @@ pub struct RpcRegistryInner< Events, EthApi: EthApiTypes, BlockExecutor, + Consensus, > { provider: Provider, pool: Pool, @@ -712,6 +888,9 @@ pub struct RpcRegistryInner< executor: Tasks, events: Events, block_executor: BlockExecutor, + consensus: Consensus, + /// Holds the configuration for the RPC modules + config: RpcModuleConfig, /// Holds a all `eth_` namespace handlers eth: EthHandlers, /// to put trace calls behind semaphore @@ -722,8 +901,8 @@ pub struct RpcRegistryInner< // === impl RpcRegistryInner === -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Pool: Send + Sync + Clone + 'static, @@ -741,6 +920,7 @@ where network: Network, executor: Tasks, events: Events, + consensus: Consensus, config: RpcModuleConfig, evm_config: EvmConfig, eth_api_builder: DynEthApiBuilder< @@ -776,6 +956,8 @@ where network, eth, executor, + consensus, + config, modules: Default::default(), blocking_pool_guard, events, @@ -784,8 +966,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where EthApi: EthApiTypes, { @@ -842,8 +1024,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Network: NetworkInfo + Clone + 'static, EthApi: EthApiTypes, @@ -881,8 +1063,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Network: NetworkInfo + Peers + Clone + 'static, @@ -994,8 +1176,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Network: NetworkInfo + Peers + Clone + 'static, @@ -1069,13 +1251,21 @@ where } /// Instantiates `ValidationApi` - pub fn validation_api(&self) -> ValidationApi { - ValidationApi::new(self.provider.clone()) + pub fn validation_api(&self) -> ValidationApi + where + Consensus: reth_consensus::Consensus + Clone + 'static, + { + 
ValidationApi::new( + self.provider.clone(), + Arc::new(self.consensus.clone()), + self.block_executor.clone(), + self.config.flashbots.clone(), + ) } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, @@ -1084,6 +1274,7 @@ where Events: CanonStateSubscriptions + Clone + 'static, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, + Consensus: reth_consensus::Consensus + Clone + 'static, { /// Configures the auth module that includes the /// * `engine_` namespace @@ -1228,9 +1419,14 @@ where .into_rpc() .into() } - RethRpcModule::Flashbots => { - ValidationApi::new(self.provider.clone()).into_rpc().into() - } + RethRpcModule::Flashbots => ValidationApi::new( + self.provider.clone(), + Arc::new(self.consensus.clone()), + self.block_executor.clone(), + self.config.flashbots.clone(), + ) + .into_rpc() + .into(), }) .clone() }) @@ -1688,7 +1884,7 @@ impl TransportRpcModuleConfig { } /// Sets a custom [`RpcModuleConfig`] for the configured modules. 
- pub const fn with_config(mut self, config: RpcModuleConfig) -> Self { + pub fn with_config(mut self, config: RpcModuleConfig) -> Self { self.config = Some(config); self } diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 44614ea49a8..175992c0f14 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -3,6 +3,7 @@ use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::MAINNET; +use reth_consensus::noop::NoopConsensus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::{execute::EthExecutionStrategyFactory, EthEvmConfig}; @@ -126,6 +127,7 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< TestCanonStateSubscriptions, EthEvmConfig, BasicBlockExecutorProvider, + NoopConsensus, > { RpcModuleBuilder::default() .with_provider(NoopProvider::default()) @@ -137,4 +139,5 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< .with_block_executor( BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::mainnet()), ) + .with_consensus(NoopConsensus::default()) } diff --git a/crates/rpc/rpc-eth-types/src/builder/config.rs b/crates/rpc/rpc-eth-types/src/builder/config.rs index a016d021586..532c1077203 100644 --- a/crates/rpc/rpc-eth-types/src/builder/config.rs +++ b/crates/rpc/rpc-eth-types/src/builder/config.rs @@ -15,7 +15,7 @@ use serde::{Deserialize, Serialize}; pub const DEFAULT_STALE_FILTER_TTL: Duration = Duration::from_secs(5 * 60); /// Additional config values for the eth namespace. 
-#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] pub struct EthConfig { /// Settings for the caching layer pub cache: EthStateCacheConfig, diff --git a/crates/rpc/rpc-server-types/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs index 78e6436643a..10ce1650ad1 100644 --- a/crates/rpc/rpc-server-types/src/result.rs +++ b/crates/rpc/rpc-server-types/src/result.rs @@ -4,6 +4,7 @@ use std::fmt; use alloy_rpc_types_engine::PayloadError; use jsonrpsee_core::RpcResult; +use reth_errors::ConsensusError; use reth_primitives::BlockId; /// Helper trait to easily convert various `Result` types into [`RpcResult`] @@ -102,6 +103,7 @@ macro_rules! impl_to_rpc_result { } impl_to_rpc_result!(PayloadError); +impl_to_rpc_result!(ConsensusError); impl_to_rpc_result!(reth_errors::RethError); impl_to_rpc_result!(reth_errors::ProviderError); impl_to_rpc_result!(reth_network_api::NetworkError); diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 00799d761d3..876467d1f4b 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -18,6 +18,7 @@ reth-primitives = { workspace = true, features = ["secp256k1"] } reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true reth-errors.workspace = true +reth-ethereum-consensus.workspace = true reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network-api.workspace = true @@ -33,12 +34,14 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-types.workspace = true reth-trie.workspace = true +reth-consensus.workspace = true +reth-payload-validator.workspace = true # ethereum alloy-consensus.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true -alloy-eips.workspace = true +alloy-eips = { workspace = true, features = ["kzg"] } alloy-dyn-abi.workspace = true alloy-genesis.workspace = true alloy-network.workspace = true diff --git 
a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index 027edea3cc1..76fb96f9162 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -55,5 +55,5 @@ pub use reth::RethApi; pub use rpc::RPCApi; pub use trace::TraceApi; pub use txpool::TxPoolApi; -pub use validation::ValidationApi; +pub use validation::{ValidationApi, ValidationApiConfig}; pub use web3::Web3Api; diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index c6419dc12c0..fe9d0eb4475 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -1,56 +1,376 @@ +use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction}; +use alloy_eips::eip4844::kzg_to_versioned_hash; +use alloy_rpc_types::engine::{ + BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, +}; use alloy_rpc_types_beacon::relay::{ - BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, + BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use reth_chainspec::ChainSpecProvider; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_consensus::{Consensus, PostExecutionInput}; +use reth_errors::{BlockExecutionError, ConsensusError, ProviderError, RethError}; +use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; +use reth_evm::execute::{BlockExecutorProvider, Executor}; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, GotExpected, Receipt, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ - AccountReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, WithdrawalsProvider, + AccountReader, BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, HeaderProvider, + StateProviderFactory, WithdrawalsProvider, }; -use reth_rpc_api::BlockSubmissionValidationApiServer; -use reth_rpc_server_types::result::internal_rpc_err; 
-use std::sync::Arc; -use tracing::warn; +use reth_revm::database::StateProviderDatabase; +use reth_rpc_api::{BlockSubmissionValidationApiServer, BuilderBlockValidationRequestV3}; +use reth_rpc_eth_types::EthApiError; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_trie::HashedPostState; +use revm_primitives::{Address, B256, U256}; +use serde::{Deserialize, Serialize}; +use std::{collections::HashSet, sync::Arc}; + +/// Configuration for validation API. +#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] +pub struct ValidationApiConfig { + /// Disallowed addresses. + pub disallow: HashSet
, +} + +#[derive(Debug, thiserror::Error)] +pub enum ValidationApiError { + #[error("block gas limit mismatch: {_0}")] + GasLimitMismatch(GotExpected), + #[error("block gas used mismatch: {_0}")] + GasUsedMismatch(GotExpected), + #[error("block parent hash mismatch: {_0}")] + ParentHashMismatch(GotExpected), + #[error("block hash mismatch: {_0}")] + BlockHashMismatch(GotExpected), + #[error("missing latest block in database")] + MissingLatestBlock, + #[error("could not verify proposer payment")] + ProposerPayment, + #[error("invalid blobs bundle")] + InvalidBlobsBundle, + #[error("block accesses blacklisted address: {_0}")] + Blacklist(Address), + #[error(transparent)] + Blob(#[from] BlobTransactionValidationError), + #[error(transparent)] + Consensus(#[from] ConsensusError), + #[error(transparent)] + Provider(#[from] ProviderError), + #[error(transparent)] + Execution(#[from] BlockExecutionError), +} + +#[derive(Debug, Clone)] +pub struct ValidationApiInner { + /// The provider that can interact with the chain. + provider: Provider, + /// Consensus implementation. + consensus: Arc, + /// Execution payload validator. + payload_validator: ExecutionPayloadValidator, + /// Block executor factory. + executor_provider: E, + /// Set of disallowed addresses + disallow: HashSet
, +} /// The type that implements the `validation` rpc namespace trait -pub struct ValidationApi { - inner: Arc>, +#[derive(Clone, Debug, derive_more::Deref)] +pub struct ValidationApi { + #[deref] + inner: Arc>, } -impl ValidationApi +impl ValidationApi +where + Provider: ChainSpecProvider, +{ + /// Create a new instance of the [`ValidationApi`] + pub fn new( + provider: Provider, + consensus: Arc, + executor_provider: E, + config: ValidationApiConfig, + ) -> Self { + let ValidationApiConfig { disallow } = config; + + let payload_validator = ExecutionPayloadValidator::new(provider.chain_spec()); + let inner = Arc::new(ValidationApiInner { + provider, + consensus, + payload_validator, + executor_provider, + disallow, + }); + + Self { inner } + } +} + +impl ValidationApi where Provider: BlockReaderIdExt - + ChainSpecProvider + + ChainSpecProvider + StateProviderFactory + HeaderProvider + AccountReader + WithdrawalsProvider + Clone + 'static, + E: BlockExecutorProvider, { - /// The provider that can interact with the chain. - pub fn provider(&self) -> Provider { - self.inner.provider.clone() + /// Validates the given block and a [`BidTrace`] against it. 
+ pub fn validate_message_against_block( + &self, + block: SealedBlockWithSenders, + message: BidTrace, + registered_gas_limit: u64, + ) -> Result<(), ValidationApiError> { + self.validate_message_against_header(&block.header, &message)?; + + self.consensus.validate_header_with_total_difficulty(&block.header, U256::MAX)?; + self.consensus.validate_header(&block.header)?; + self.consensus.validate_block_pre_execution(&block)?; + + if !self.disallow.is_empty() { + if self.disallow.contains(&block.beneficiary) { + return Err(ValidationApiError::Blacklist(block.beneficiary)) + } + if self.disallow.contains(&message.proposer_fee_recipient) { + return Err(ValidationApiError::Blacklist(message.proposer_fee_recipient)) + } + for (sender, tx) in block.senders.iter().zip(block.transactions()) { + if self.disallow.contains(sender) { + return Err(ValidationApiError::Blacklist(*sender)) + } + if let Some(to) = tx.to() { + if self.disallow.contains(&to) { + return Err(ValidationApiError::Blacklist(to)) + } + } + } + } + + let latest_header = + self.provider.latest_header()?.ok_or_else(|| ValidationApiError::MissingLatestBlock)?; + + if latest_header.hash() != block.header.parent_hash { + return Err(ConsensusError::ParentHashMismatch( + GotExpected { got: block.header.parent_hash, expected: latest_header.hash() } + .into(), + ) + .into()) + } + self.consensus.validate_header_against_parent(&block.header, &latest_header)?; + self.validate_gas_limit(registered_gas_limit, &latest_header, &block.header)?; + + let state_provider = self.provider.state_by_block_hash(latest_header.hash())?; + let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); + + let block = block.unseal(); + let mut accessed_blacklisted = None; + let output = executor.execute_with_state_closure( + BlockExecutionInput::new(&block, U256::MAX), + |state| { + if !self.disallow.is_empty() { + for account in state.cache.accounts.keys() { + if self.disallow.contains(account) { + 
accessed_blacklisted = Some(*account); + } + } + } + }, + )?; + + if let Some(account) = accessed_blacklisted { + return Err(ValidationApiError::Blacklist(account)) + } + + self.consensus.validate_block_post_execution( + &block, + PostExecutionInput::new(&output.receipts, &output.requests), + )?; + + self.ensure_payment(&block, &output, &message)?; + + let state_root = + state_provider.state_root(HashedPostState::from_bundle_state(&output.state.state))?; + + if state_root != block.state_root { + return Err(ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.state_root }.into(), + ) + .into()) + } + + Ok(()) } - /// Create a new instance of the [`ValidationApi`] - pub fn new(provider: Provider) -> Self { - let inner = Arc::new(ValidationApiInner { provider }); - Self { inner } + /// Ensures that fields of [`BidTrace`] match the fields of the [`SealedHeader`]. + fn validate_message_against_header( + &self, + header: &SealedHeader, + message: &BidTrace, + ) -> Result<(), ValidationApiError> { + if header.hash() != message.block_hash { + Err(ValidationApiError::BlockHashMismatch(GotExpected { + got: message.block_hash, + expected: header.hash(), + })) + } else if header.parent_hash != message.parent_hash { + Err(ValidationApiError::ParentHashMismatch(GotExpected { + got: message.parent_hash, + expected: header.parent_hash, + })) + } else if header.gas_limit != message.gas_limit { + Err(ValidationApiError::GasLimitMismatch(GotExpected { + got: message.gas_limit, + expected: header.gas_limit, + })) + } else if header.gas_used != message.gas_used { + return Err(ValidationApiError::GasUsedMismatch(GotExpected { + got: message.gas_used, + expected: header.gas_used, + })) + } else { + Ok(()) + } + } + + /// Ensures that the chosen gas limit is the closest possible value for the validator's + /// registered gas limit. 
+ /// + /// Ref: + fn validate_gas_limit( + &self, + registered_gas_limit: u64, + parent_header: &SealedHeader, + header: &SealedHeader, + ) -> Result<(), ValidationApiError> { + let max_gas_limit = + parent_header.gas_limit + parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR - 1; + let min_gas_limit = + parent_header.gas_limit - parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR + 1; + + let best_gas_limit = + std::cmp::max(min_gas_limit, std::cmp::min(max_gas_limit, registered_gas_limit)); + + if best_gas_limit != header.gas_limit { + return Err(ValidationApiError::GasLimitMismatch(GotExpected { + got: header.gas_limit, + expected: best_gas_limit, + })) + } + + Ok(()) + } + + /// Ensures that the proposer has received [`BidTrace::value`] for this block. + /// + /// Firstly attempts to verify the payment by checking the state changes, otherwise falls back + /// to checking the latest block transaction. + fn ensure_payment( + &self, + block: &Block, + output: &BlockExecutionOutput, + message: &BidTrace, + ) -> Result<(), ValidationApiError> { + let (mut balance_before, balance_after) = if let Some(acc) = + output.state.state.get(&message.proposer_fee_recipient) + { + let balance_before = acc.original_info.as_ref().map(|i| i.balance).unwrap_or_default(); + let balance_after = acc.info.as_ref().map(|i| i.balance).unwrap_or_default(); + + (balance_before, balance_after) + } else { + // account might have balance but considering it zero is fine as long as we know + // that balance have not changed + (U256::ZERO, U256::ZERO) + }; + + if let Some(withdrawals) = &block.body.withdrawals { + for withdrawal in withdrawals { + if withdrawal.address == message.proposer_fee_recipient { + balance_before += withdrawal.amount_wei(); + } + } + } + + if balance_after >= balance_before + message.value { + return Ok(()) + } + + let (receipt, tx) = output + .receipts + .last() + .zip(block.body.transactions.last()) + .ok_or(ValidationApiError::ProposerPayment)?; + + if !receipt.success 
{ + return Err(ValidationApiError::ProposerPayment) + } + + if tx.to() != Some(message.proposer_fee_recipient) { + return Err(ValidationApiError::ProposerPayment) + } + + if tx.value() != message.value { + return Err(ValidationApiError::ProposerPayment) + } + + if !tx.input().is_empty() { + return Err(ValidationApiError::ProposerPayment) + } + + if let Some(block_base_fee) = block.base_fee_per_gas { + if tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0 { + return Err(ValidationApiError::ProposerPayment) + } + } + + Ok(()) + } + + /// Validates the given [`BlobsBundleV1`] and returns versioned hashes for blobs. + pub fn validate_blobs_bundle( + &self, + mut blobs_bundle: BlobsBundleV1, + ) -> Result, ValidationApiError> { + if blobs_bundle.commitments.len() != blobs_bundle.proofs.len() || + blobs_bundle.commitments.len() != blobs_bundle.blobs.len() + { + return Err(ValidationApiError::InvalidBlobsBundle) + } + + let versioned_hashes = blobs_bundle + .commitments + .iter() + .map(|c| kzg_to_versioned_hash(c.as_slice())) + .collect::>(); + + let sidecar = blobs_bundle.pop_sidecar(blobs_bundle.blobs.len()); + + sidecar.validate(&versioned_hashes, EnvKzgSettings::default().get())?; + + Ok(versioned_hashes) } } #[async_trait] -impl BlockSubmissionValidationApiServer for ValidationApi +impl BlockSubmissionValidationApiServer for ValidationApi where Provider: BlockReaderIdExt - + ChainSpecProvider + + ChainSpecProvider + StateProviderFactory + HeaderProvider + AccountReader + WithdrawalsProvider + Clone + 'static, + E: BlockExecutorProvider, { async fn validate_builder_submission_v1( &self, @@ -71,24 +391,28 @@ where &self, request: BuilderBlockValidationRequestV3, ) -> RpcResult<()> { - warn!("flashbots_validateBuilderSubmissionV3: blindly accepting request without validation {:?}", request); - Ok(()) - } -} - -impl std::fmt::Debug for ValidationApi { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - 
f.debug_struct("ValidationApi").finish_non_exhaustive() - } -} + let block = self + .payload_validator + .ensure_well_formed_payload( + ExecutionPayload::V3(request.request.execution_payload), + ExecutionPayloadSidecar::v3(CancunPayloadFields { + parent_beacon_block_root: request.parent_beacon_block_root, + versioned_hashes: self + .validate_blobs_bundle(request.request.blobs_bundle) + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result()?, + }), + ) + .to_rpc_result()? + .try_seal_with_senders() + .map_err(|_| EthApiError::InvalidTransactionSignature)?; -impl Clone for ValidationApi { - fn clone(&self) -> Self { - Self { inner: Arc::clone(&self.inner) } + self.validate_message_against_block( + block, + request.request.message, + request.registered_gas_limit, + ) + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result() } } - -struct ValidationApiInner { - /// The provider that can interact with the chain. - provider: Provider, -} diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 1b2899a6485..92ae86f00bb 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -16,6 +16,7 @@ use std::{path::Path, sync::Arc}; use reth::{ api::NodeTypesWithDBAdapter, + beacon_consensus::EthBeaconConsensus, providers::{ providers::{BlockchainProvider, StaticFileProvider}, ProviderFactory, @@ -66,9 +67,10 @@ async fn main() -> eyre::Result<()> { .with_noop_pool() .with_noop_network() .with_executor(TokioTaskExecutor::default()) - .with_evm_config(EthEvmConfig::new(spec)) + .with_evm_config(EthEvmConfig::new(spec.clone())) .with_events(TestCanonStateSubscriptions::default()) - .with_block_executor(EthExecutorProvider::ethereum(provider.chain_spec())); + .with_block_executor(EthExecutorProvider::ethereum(provider.chain_spec())) + .with_consensus(EthBeaconConsensus::new(spec)); // Pick which namespaces to expose. 
let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]); From bd8c4eceb20c39c6e501d06cf906469329340bb9 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 31 Oct 2024 14:20:41 -0600 Subject: [PATCH 268/970] replace DisplayHardforks with Box (#12219) Co-authored-by: Matthias Seitz --- crates/chainspec/src/api.rs | 11 +++++------ crates/optimism/chainspec/src/lib.rs | 10 +++++----- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 36640f34b70..ee25f72bae8 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,11 +1,10 @@ use crate::{ChainSpec, DepositContract}; -use alloc::vec::Vec; +use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; use alloy_eips::eip1559::BaseFeeParams; use alloy_genesis::Genesis; use alloy_primitives::B256; -use core::fmt::Debug; -use reth_ethereum_forks::DisplayHardforks; +use core::fmt::{Debug, Display}; use reth_network_peers::NodeRecord; use reth_primitives_traits::Header; @@ -39,7 +38,7 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { fn prune_delete_limit(&self) -> usize; /// Returns a string representation of the hardforks. - fn display_hardforks(&self) -> DisplayHardforks; + fn display_hardforks(&self) -> Box; /// The genesis header. 
fn genesis_header(&self) -> &Header; @@ -89,8 +88,8 @@ impl EthChainSpec for ChainSpec { self.prune_delete_limit } - fn display_hardforks(&self) -> DisplayHardforks { - self.display_hardforks() + fn display_hardforks(&self) -> Box { + Box::new(Self::display_hardforks(self)) } fn genesis_header(&self) -> &Header { diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 70adf2272cf..8ad36c66afb 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -17,7 +17,7 @@ mod dev; mod op; mod op_sepolia; -use alloc::{vec, vec::Vec}; +use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; use alloy_genesis::Genesis; use alloy_primitives::{Bytes, Parity, Signature, B256, U256}; @@ -30,8 +30,8 @@ pub(crate) use once_cell::sync::Lazy as LazyLock; pub use op::OP_MAINNET; pub use op_sepolia::OP_SEPOLIA; use reth_chainspec::{ - BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, - DisplayHardforks, EthChainSpec, EthereumHardforks, ForkFilter, ForkId, Hardforks, Head, + BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, EthChainSpec, + EthereumHardforks, ForkFilter, ForkId, Hardforks, Head, }; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; use reth_network_peers::NodeRecord; @@ -287,8 +287,8 @@ impl EthChainSpec for OpChainSpec { self.inner.prune_delete_limit() } - fn display_hardforks(&self) -> DisplayHardforks { - self.inner.display_hardforks() + fn display_hardforks(&self) -> Box { + Box::new(ChainSpec::display_hardforks(self)) } fn genesis_header(&self) -> &Header { From 921d1cc4b5d37195188f2afcede39fe789cf9582 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Fri, 1 Nov 2024 03:44:16 -0600 Subject: [PATCH 269/970] renamed OptimismPoolBuilder to OpPoolBuilder (#12246) --- crates/optimism/node/src/node.rs | 10 +++++----- 1 file changed, 5 insertions(+), 
5 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 8e06dbcc500..925e7204c9e 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -66,7 +66,7 @@ impl OptimismNode { args: RollupArgs, ) -> ComponentsBuilder< Node, - OptimismPoolBuilder, + OpPoolBuilder, OpPayloadBuilder, OptimismNetworkBuilder, OptimismExecutorBuilder, @@ -80,7 +80,7 @@ impl OptimismNode { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } = args; ComponentsBuilder::default() .node_types::() - .pool(OptimismPoolBuilder::default()) + .pool(OpPoolBuilder::default()) .payload(OpPayloadBuilder::new(compute_pending_block)) .network(OptimismNetworkBuilder { disable_txpool_gossip, @@ -99,7 +99,7 @@ where { type ComponentsBuilder = ComponentsBuilder< N, - OptimismPoolBuilder, + OpPoolBuilder, OpPayloadBuilder, OptimismNetworkBuilder, OptimismExecutorBuilder, @@ -206,12 +206,12 @@ where /// This contains various settings that can be configured and take precedence over the node's /// config. #[derive(Debug, Default, Clone)] -pub struct OptimismPoolBuilder { +pub struct OpPoolBuilder { /// Enforced overrides that are applied to the pool config. 
pub pool_config_overrides: PoolBuilderConfigOverrides, } -impl PoolBuilder for OptimismPoolBuilder +impl PoolBuilder for OpPoolBuilder where Node: FullNodeTypes>, { From 8d31b652425d0e67be1577d41be77331028b93d6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Nov 2024 11:17:43 +0100 Subject: [PATCH 270/970] chore: clippy happy (#12248) --- crates/rpc/rpc-testing-util/src/debug.rs | 4 ++-- crates/rpc/rpc-testing-util/src/trace.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index f50064e80ce..97fe008fa97 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -292,7 +292,7 @@ pub struct DebugTraceTransactionsStream<'a> { stream: Pin + 'a>>, } -impl<'a> DebugTraceTransactionsStream<'a> { +impl DebugTraceTransactionsStream<'_> { /// Returns the next error result of the stream. pub async fn next_err(&mut self) -> Option<(RpcError, TxHash)> { loop { @@ -324,7 +324,7 @@ pub struct DebugTraceBlockStream<'a> { stream: Pin + 'a>>, } -impl<'a> DebugTraceBlockStream<'a> { +impl DebugTraceBlockStream<'_> { /// Returns the next error result of the stream. pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { loop { diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index c6dc16cf106..0fefef7c997 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -381,7 +381,7 @@ pub struct TraceBlockStream<'a> { stream: Pin + 'a>>, } -impl<'a> TraceBlockStream<'a> { +impl TraceBlockStream<'_> { /// Returns the next error result of the stream. 
pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { loop { From 39bc8ce81a237fd5452ae31d0362a1777a7d3c5f Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 1 Nov 2024 11:35:06 +0100 Subject: [PATCH 271/970] refactor(revm): simplify `Database` impl for `StateProviderDatabase` (#12241) --- crates/revm/src/database.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 8f40d2be8d9..5f662fea7cf 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -101,21 +101,21 @@ impl Database for StateProviderDatabase { /// Returns `Ok` with `Some(AccountInfo)` if the account exists, /// `None` if it doesn't, or an error if encountered. fn basic(&mut self, address: Address) -> Result, Self::Error> { - DatabaseRef::basic_ref(self, address) + self.basic_ref(address) } /// Retrieves the bytecode associated with a given code hash. /// /// Returns `Ok` with the bytecode if found, or the default bytecode otherwise. fn code_by_hash(&mut self, code_hash: B256) -> Result { - DatabaseRef::code_by_hash_ref(self, code_hash) + self.code_by_hash_ref(code_hash) } /// Retrieves the storage value at a specific index for a given address. /// /// Returns `Ok` with the storage value, or the default value if not found. fn storage(&mut self, address: Address, index: U256) -> Result { - DatabaseRef::storage_ref(self, address, index) + self.storage_ref(address, index) } /// Retrieves the block hash for a given block number. @@ -123,7 +123,7 @@ impl Database for StateProviderDatabase { /// Returns `Ok` with the block hash if found, or the default hash otherwise. /// Note: It safely casts the `number` to `u64`. 
fn block_hash(&mut self, number: u64) -> Result { - DatabaseRef::block_hash_ref(self, number) + self.block_hash_ref(number) } } From f0cef9dc51fa738490d09f9d64bf03ae27a01fd6 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 1 Nov 2024 11:33:58 +0100 Subject: [PATCH 272/970] revm: add `Database` `Either` helper type (#12240) --- crates/revm/src/either.rs | 52 +++++++++++++++++++++++++++++++++++++++ crates/revm/src/lib.rs | 3 +++ 2 files changed, 55 insertions(+) create mode 100644 crates/revm/src/either.rs diff --git a/crates/revm/src/either.rs b/crates/revm/src/either.rs new file mode 100644 index 00000000000..e93ba3a8d01 --- /dev/null +++ b/crates/revm/src/either.rs @@ -0,0 +1,52 @@ +use alloy_primitives::{Address, B256, U256}; +use revm::{ + primitives::{AccountInfo, Bytecode}, + Database, +}; + +/// An enum type that can hold either of two different [`Database`] implementations. +/// +/// This allows flexible usage of different [`Database`] types in the same context. +#[derive(Debug, Clone)] +pub enum Either { + /// A value of type `L`. + Left(L), + /// A value of type `R`. 
+ Right(R), +} + +impl Database for Either +where + L: Database, + R: Database, +{ + type Error = L::Error; + + fn basic(&mut self, address: Address) -> Result, Self::Error> { + match self { + Self::Left(db) => db.basic(address), + Self::Right(db) => db.basic(address), + } + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + match self { + Self::Left(db) => db.code_by_hash(code_hash), + Self::Right(db) => db.code_by_hash(code_hash), + } + } + + fn storage(&mut self, address: Address, index: U256) -> Result { + match self { + Self::Left(db) => db.storage(address, index), + Self::Right(db) => db.storage(address, index), + } + } + + fn block_hash(&mut self, number: u64) -> Result { + match self { + Self::Left(db) => db.block_hash(number), + Self::Right(db) => db.block_hash(number), + } + } +} diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 02eb182ee11..8b544a53728 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -22,3 +22,6 @@ pub mod test_utils; // Convenience re-exports. pub use revm::{self, *}; + +/// Either type for flexible usage of different database types in the same context. +pub mod either; From 249c600dd9c8a7ae8556ae004f366b66644ac569 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 1 Nov 2024 11:34:48 +0100 Subject: [PATCH 273/970] rpc: add `rename` method in `TransportRpcModules` (#12239) --- crates/rpc/rpc-builder/src/lib.rs | 56 +++++++++++++++++++++++++++++-- 1 file changed, 54 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 5d5fd31aab8..696d3501430 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -2015,7 +2015,7 @@ impl TransportRpcModules { Ok(false) } - /// Merge the given [Methods] in all configured methods. + /// Merge the given [`Methods`] in all configured methods. /// /// Fails if any of the methods in other is present already. 
pub fn merge_configured( @@ -2106,7 +2106,22 @@ impl TransportRpcModules { http_removed || ws_removed || ipc_removed } - /// Replace the given [Methods] in the configured http methods. + /// Renames a method in all configured transports by: + /// 1. Removing the old method name. + /// 2. Adding the new method. + pub fn rename( + &mut self, + old_name: &'static str, + new_method: impl Into, + ) -> Result<(), RegisterMethodError> { + // Remove the old method from all configured transports + self.remove_method_from_configured(old_name); + + // Merge the new method into the configured transports + self.merge_configured(new_method) + } + + /// Replace the given [`Methods`] in the configured http methods. /// /// Fails if any of the methods in other is present already or if the method being removed is /// not present @@ -2478,6 +2493,43 @@ mod tests { assert!(modules.ipc.as_ref().unwrap().method("anything").is_none()); } + #[test] + fn test_transport_rpc_module_rename() { + let mut modules = TransportRpcModules { + http: Some(create_test_module()), + ws: Some(create_test_module()), + ipc: Some(create_test_module()), + ..Default::default() + }; + + // Verify that the old we want to rename exists at the start + assert!(modules.http.as_ref().unwrap().method("anything").is_some()); + assert!(modules.ws.as_ref().unwrap().method("anything").is_some()); + assert!(modules.ipc.as_ref().unwrap().method("anything").is_some()); + + // Verify that the new method does not exist at the start + assert!(modules.http.as_ref().unwrap().method("something").is_none()); + assert!(modules.ws.as_ref().unwrap().method("something").is_none()); + assert!(modules.ipc.as_ref().unwrap().method("something").is_none()); + + // Create another module + let mut other_module = RpcModule::new(()); + other_module.register_method("something", |_, _, _| "fails").unwrap(); + + // Rename the method + modules.rename("anything", other_module).expect("rename failed"); + + // Verify that the old method was removed 
from all transports + assert!(modules.http.as_ref().unwrap().method("anything").is_none()); + assert!(modules.ws.as_ref().unwrap().method("anything").is_none()); + assert!(modules.ipc.as_ref().unwrap().method("anything").is_none()); + + // Verify that the new method was added to all transports + assert!(modules.http.as_ref().unwrap().method("something").is_some()); + assert!(modules.ws.as_ref().unwrap().method("something").is_some()); + assert!(modules.ipc.as_ref().unwrap().method("something").is_some()); + } + #[test] fn test_replace_http_method() { let mut modules = From 2758a560c0ed1afb9b395123eef788e2426b951b Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Fri, 1 Nov 2024 07:10:55 -0400 Subject: [PATCH 274/970] txpool: added a helper to filter pending txns by predicate (#12204) --- crates/transaction-pool/src/lib.rs | 7 +++++++ crates/transaction-pool/src/noop.rs | 7 +++++++ crates/transaction-pool/src/pool/mod.rs | 8 ++++++++ crates/transaction-pool/src/pool/txpool.rs | 8 ++++++++ crates/transaction-pool/src/traits.rs | 6 ++++++ 5 files changed, 36 insertions(+) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index f8f06b805f9..02037599432 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -496,6 +496,13 @@ where self.pool.get_transactions_by_sender(sender) } + fn get_pending_transactions_with_predicate( + &self, + predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + self.pool.pending_transactions_with_predicate(predicate) + } + fn get_pending_transactions_by_sender( &self, sender: Address, diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 0f87f06f7dd..47a26ee29a3 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -213,6 +213,13 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn 
get_pending_transactions_with_predicate( + &self, + _predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + vec![] + } + fn get_pending_transactions_by_sender( &self, _sender: Address, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 2e73409546d..77446a52375 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -787,6 +787,14 @@ where self.get_pool_data().pending_txs_by_sender(sender_id) } + /// Returns all pending transactions filtered by predicate + pub(crate) fn pending_transactions_with_predicate( + &self, + predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + self.get_pool_data().pending_transactions_with_predicate(predicate) + } + /// Returns all pending transactions of the address by sender pub(crate) fn get_pending_transactions_by_sender( &self, diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index bd0aacccfee..8679a4318be 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -364,6 +364,14 @@ impl TxPool { self.pending_pool.all() } + /// Returns all pending transactions filtered by predicate + pub(crate) fn pending_transactions_with_predicate( + &self, + mut predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + self.pending_transactions_iter().filter(|tx| predicate(tx)).collect() + } + /// Returns all pending transactions for the specified sender pub(crate) fn pending_txs_by_sender( &self, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 0d8f6dbb54a..2667143b7c8 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -342,6 +342,12 @@ pub trait TransactionPool: Send + Sync + Clone { sender: Address, ) -> Vec>>; + /// Returns all pending transactions filtered by predicate + fn 
get_pending_transactions_with_predicate( + &self, + predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>>; + /// Returns all pending transactions sent by a given user fn get_pending_transactions_by_sender( &self, From c6b740801fda7963baeb33ac395227032825bb7e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Nov 2024 12:11:54 +0100 Subject: [PATCH 275/970] chore: apply same member order (#12253) --- crates/blockchain-tree/src/noop.rs | 12 ++-- crates/exex/exex/src/notifications.rs | 10 ++-- crates/node/builder/src/builder/states.rs | 14 ++--- crates/optimism/rpc/src/eth/mod.rs | 2 +- crates/primitives/src/block.rs | 14 ++--- crates/rpc/rpc-eth-api/src/node.rs | 2 +- crates/rpc/rpc/src/eth/core.rs | 2 +- .../src/providers/database/provider.rs | 10 ++-- crates/storage/provider/src/providers/mod.rs | 58 +++++++++---------- .../storage/provider/src/test_utils/mock.rs | 24 ++++---- .../storage/provider/src/test_utils/noop.rs | 32 +++++----- crates/trie/trie/src/hashed_cursor/noop.rs | 8 +-- crates/trie/trie/src/prefix_set.rs | 2 +- 13 files changed, 95 insertions(+), 95 deletions(-) diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 925b8f03add..862b02e7607 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -60,6 +60,12 @@ impl BlockchainTreeEngine for NoopBlockchainTree { Ok(()) } + fn update_block_hashes_and_clear_buffered( + &self, + ) -> Result, CanonicalError> { + Ok(BTreeMap::new()) + } + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { Ok(()) } @@ -67,12 +73,6 @@ impl BlockchainTreeEngine for NoopBlockchainTree { fn make_canonical(&self, block_hash: BlockHash) -> Result { Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into()) } - - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError> { - Ok(BTreeMap::new()) - } } impl BlockchainTreeViewer for NoopBlockchainTree { diff --git 
a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 90a0ee230a4..14cfe9be4d9 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -108,11 +108,6 @@ where }); } - fn without_head(mut self) -> Self { - self.set_without_head(); - self - } - fn set_with_head(&mut self, exex_head: ExExHead) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); self.inner = ExExNotificationsInner::WithHead(match current { @@ -131,6 +126,11 @@ where }); } + fn without_head(mut self) -> Self { + self.set_without_head(); + self + } + fn with_head(mut self, exex_head: ExExHead) -> Self { self.set_with_head(exex_head); self diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index e75a07802a6..ca5a57d0db6 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -95,8 +95,8 @@ impl> FullNodeComponents for NodeAdapter< type Pool = C::Pool; type Evm = C::Evm; type Executor = C::Executor; - type Network = C::Network; type Consensus = C::Consensus; + type Network = C::Network; fn pool(&self) -> &Self::Pool { self.components.pool() @@ -110,8 +110,8 @@ impl> FullNodeComponents for NodeAdapter< self.components.block_executor() } - fn provider(&self) -> &Self::Provider { - &self.provider + fn consensus(&self) -> &Self::Consensus { + self.components.consensus() } fn network(&self) -> &Self::Network { @@ -122,12 +122,12 @@ impl> FullNodeComponents for NodeAdapter< self.components.payload_builder() } - fn task_executor(&self) -> &TaskExecutor { - &self.task_executor + fn provider(&self) -> &Self::Provider { + &self.provider } - fn consensus(&self) -> &Self::Consensus { - self.components.consensus() + fn task_executor(&self) -> &TaskExecutor { + &self.task_executor } } diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 7b427467b2c..dc6e8e59fa6 100644 --- 
a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -119,8 +119,8 @@ where { type Provider = N::Provider; type Pool = N::Pool; - type Network = ::Network; type Evm = ::Evm; + type Network = ::Network; #[inline] fn pool(&self) -> &Self::Pool { diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index a06979300ac..7e5e76f1b06 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -150,27 +150,27 @@ mod block_rlp { } impl Encodable for Block { - fn length(&self) -> usize { - let helper: HelperRef<'_, _> = self.into(); - helper.length() - } - fn encode(&self, out: &mut dyn bytes::BufMut) { let helper: HelperRef<'_, _> = self.into(); helper.encode(out) } - } - impl Encodable for SealedBlock { fn length(&self) -> usize { let helper: HelperRef<'_, _> = self.into(); helper.length() } + } + impl Encodable for SealedBlock { fn encode(&self, out: &mut dyn bytes::BufMut) { let helper: HelperRef<'_, _> = self.into(); helper.encode(out) } + + fn length(&self) -> usize { + let helper: HelperRef<'_, _> = self.into(); + helper.length() + } } } diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs index 851b26b72b9..4ae79c08341 100644 --- a/crates/rpc/rpc-eth-api/src/node.rs +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -38,8 +38,8 @@ where { type Provider = T::Provider; type Pool = T::Pool; - type Network = ::Network; type Evm = ::Evm; + type Network = ::Network; #[inline] fn pool(&self) -> &Self::Pool { diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 339f2200c67..026c87153f2 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -150,8 +150,8 @@ where { type Provider = Provider; type Pool = Pool; - type Network = Network; type Evm = EvmConfig; + type Network = Network; fn pool(&self) -> &Self::Pool { self.inner.pool() diff --git a/crates/storage/provider/src/providers/database/provider.rs 
b/crates/storage/provider/src/providers/database/provider.rs index 81affa0d804..2af22cec0b5 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -2055,6 +2055,11 @@ impl StageCheckpointReader for DatabaseProvider { Ok(self.tx.get::(id.to_string())?) } + /// Get stage checkpoint progress. + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { + Ok(self.tx.get::(id.to_string())?) + } + fn get_all_checkpoints(&self) -> ProviderResult> { self.tx .cursor_read::()? @@ -2062,11 +2067,6 @@ impl StageCheckpointReader for DatabaseProvider { .collect::, _>>() .map_err(ProviderError::Database) } - - /// Get stage checkpoint progress. - fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - Ok(self.tx.get::(id.to_string())?) - } } impl StageCheckpointWriter for DatabaseProvider { diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index c81ef05d2ea..3b24617fd95 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -601,35 +601,6 @@ impl StateProviderFactory for BlockchainProvider { self.database.latest() } - fn history_by_block_number( - &self, - block_number: BlockNumber, - ) -> ProviderResult { - trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); - self.ensure_canonical_block(block_number)?; - self.database.history_by_block_number(block_number) - } - - fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { - trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - self.database.history_by_block_hash(block_hash) - } - - fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult { - trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); - let mut state = self.history_by_block_hash(block); - - // we failed to get the 
state by hash, from disk, hash block be the pending block - if state.is_err() { - if let Ok(Some(pending)) = self.pending_state_by_hash(block) { - // we found pending block by hash - state = Ok(pending) - } - } - - state - } - /// Returns a [`StateProviderBox`] indexed by the given block number or tag. /// /// Note: if a number is provided this will only look at historical(canonical) state. @@ -662,6 +633,35 @@ impl StateProviderFactory for BlockchainProvider { } } + fn history_by_block_number( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); + self.ensure_canonical_block(block_number)?; + self.database.history_by_block_number(block_number) + } + + fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { + trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); + self.database.history_by_block_hash(block_hash) + } + + fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult { + trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); + let mut state = self.history_by_block_hash(block); + + // we failed to get the state by hash, from disk, hash block be the pending block + if state.is_err() { + if let Ok(Some(pending)) = self.pending_state_by_hash(block) { + // we found pending block by hash + state = Ok(pending) + } + } + + state + } + /// Returns the state provider for pending state. 
/// /// If there's no pending block available then the latest state provider is returned: diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index e2e08e61a86..07fa505b2b4 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -760,18 +760,6 @@ impl StateProviderFactory for MockEthProvider { Ok(Box::new(self.clone())) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { - Ok(Box::new(self.clone())) - } - - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(self.clone())) - } - - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(self.clone())) - } - fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, @@ -798,6 +786,18 @@ impl StateProviderFactory for MockEthProvider { } } + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + fn pending(&self) -> ProviderResult { Ok(Box::new(self.clone())) } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index f6f7e185de6..e0943764772 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -465,22 +465,6 @@ impl StateProviderFactory for NoopProvider { Ok(Box::new(*self)) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn pending(&self) -> ProviderResult { - 
Ok(Box::new(*self)) - } - fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, @@ -507,6 +491,22 @@ impl StateProviderFactory for NoopProvider { } } + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn pending(&self) -> ProviderResult { + Ok(Box::new(*self)) + } + fn pending_state_by_hash(&self, _block_hash: B256) -> ProviderResult> { Ok(Some(Box::new(*self))) } diff --git a/crates/trie/trie/src/hashed_cursor/noop.rs b/crates/trie/trie/src/hashed_cursor/noop.rs index 4783d5afd9d..a21e1026b38 100644 --- a/crates/trie/trie/src/hashed_cursor/noop.rs +++ b/crates/trie/trie/src/hashed_cursor/noop.rs @@ -32,11 +32,11 @@ pub struct NoopHashedAccountCursor; impl HashedCursor for NoopHashedAccountCursor { type Value = Account; - fn next(&mut self) -> Result, DatabaseError> { + fn seek(&mut self, _key: B256) -> Result, DatabaseError> { Ok(None) } - fn seek(&mut self, _key: B256) -> Result, DatabaseError> { + fn next(&mut self) -> Result, DatabaseError> { Ok(None) } } @@ -49,11 +49,11 @@ pub struct NoopHashedStorageCursor; impl HashedCursor for NoopHashedStorageCursor { type Value = U256; - fn next(&mut self) -> Result, DatabaseError> { + fn seek(&mut self, _key: B256) -> Result, DatabaseError> { Ok(None) } - fn seek(&mut self, _key: B256) -> Result, DatabaseError> { + fn next(&mut self) -> Result, DatabaseError> { Ok(None) } } diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/trie/src/prefix_set.rs index 0cf16f939d7..d904ef38fdd 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/trie/src/prefix_set.rs @@ -211,8 +211,8 @@ impl PrefixSet { } impl<'a> IntoIterator for &'a PrefixSet { - type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; type Item = &'a 
reth_trie_common::Nibbles; + type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; fn into_iter(self) -> Self::IntoIter { self.iter() } From f52186cc4dbc1598be8e6a7fa17cf0cedd56a1a3 Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Fri, 1 Nov 2024 19:30:03 +0800 Subject: [PATCH 276/970] db-api: opt StorageShardedKey encode, decode (#12143) --- .../storage/db-api/src/models/storage_sharded_key.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/crates/storage/db-api/src/models/storage_sharded_key.rs b/crates/storage/db-api/src/models/storage_sharded_key.rs index 5fd79ba655c..a7a1ffb71be 100644 --- a/crates/storage/db-api/src/models/storage_sharded_key.rs +++ b/crates/storage/db-api/src/models/storage_sharded_key.rs @@ -12,6 +12,10 @@ use super::ShardedKey; /// Number of indices in one shard. pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000; +/// The size of [`StorageShardedKey`] encode bytes. +/// The fields are: 20-byte address, 32-byte key, and 8-byte block number +const STORAGE_SHARD_KEY_BYTES_SIZE: usize = 20 + 32 + 8; + /// Sometimes data can be too big to be saved for a single key. This helps out by dividing the data /// into different shards. 
Example: /// @@ -53,7 +57,8 @@ impl Encode for StorageShardedKey { type Encoded = Vec; fn encode(self) -> Self::Encoded { - let mut buf: Vec = Encode::encode(self.address).into(); + let mut buf: Vec = Vec::with_capacity(STORAGE_SHARD_KEY_BYTES_SIZE); + buf.extend_from_slice(&Encode::encode(self.address)); buf.extend_from_slice(&Encode::encode(self.sharded_key.key)); buf.extend_from_slice(&self.sharded_key.highest_block_number.to_be_bytes()); buf @@ -62,6 +67,9 @@ impl Encode for StorageShardedKey { impl Decode for StorageShardedKey { fn decode(value: &[u8]) -> Result { + if value.len() != STORAGE_SHARD_KEY_BYTES_SIZE { + return Err(DatabaseError::Decode) + } let tx_num_index = value.len() - 8; let highest_tx_number = u64::from_be_bytes( From 927be855ffbdf8186a69ae90168a784bdd0651cc Mon Sep 17 00:00:00 2001 From: caglarkaya Date: Fri, 1 Nov 2024 14:32:12 +0300 Subject: [PATCH 277/970] feat: track buffered outgoing messages (#12220) --- crates/net/network/src/metrics.rs | 2 ++ crates/net/network/src/session/active.rs | 31 ++++++++++++++++++++++-- crates/net/network/src/session/mod.rs | 5 +++- 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index 4333cf1408b..bda5f84c76b 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -85,6 +85,8 @@ pub struct SessionManagerMetrics { pub(crate) total_dial_successes: Counter, /// Number of dropped outgoing peer messages. pub(crate) total_outgoing_peer_messages_dropped: Counter, + /// Number of queued outgoing messages + pub(crate) queued_outgoing_messages: Gauge, } /// Metrics for the [`TransactionsManager`](crate::transactions::TransactionsManager). 
diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index e83f5d9f125..10048823c54 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -12,6 +12,7 @@ use std::{ }; use futures::{stream::Fuse, SinkExt, StreamExt}; +use metrics::Gauge; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PStreamError}, message::{EthBroadcastMessage, RequestPair}, @@ -87,7 +88,7 @@ pub(crate) struct ActiveSession { /// All requests that were sent by the remote peer and we're waiting on an internal response pub(crate) received_requests_from_remote: Vec, /// Buffered messages that should be handled and sent to the peer. - pub(crate) queued_outgoing: VecDeque, + pub(crate) queued_outgoing: QueuedOutgoingMessages, /// The maximum time we wait for a response from a peer. pub(crate) internal_request_timeout: Arc, /// Interval when to check for timed out requests. @@ -757,6 +758,32 @@ fn calculate_new_timeout(current_timeout: Duration, estimated_rtt: Duration) -> smoothened_timeout.clamp(MINIMUM_TIMEOUT, MAXIMUM_TIMEOUT) } + +/// A helper struct that wraps the queue of outgoing messages and a metric to track their count +pub(crate) struct QueuedOutgoingMessages { + messages: VecDeque, + count: Gauge, +} + +impl QueuedOutgoingMessages { + pub(crate) const fn new(metric: Gauge) -> Self { + Self { messages: VecDeque::new(), count: metric } + } + + pub(crate) fn push_back(&mut self, message: OutgoingMessage) { + self.messages.push_back(message); + self.count.increment(1); + } + + pub(crate) fn pop_front(&mut self) -> Option { + self.messages.pop_front().inspect(|_| self.count.decrement(1)) + } + + pub(crate) fn shrink_to_fit(&mut self) { + self.messages.shrink_to_fit(); + } +} + #[cfg(test)] mod tests { use super::*; @@ -882,7 +909,7 @@ mod tests { internal_request_tx: ReceiverStream::new(messages_rx).fuse(), inflight_requests: Default::default(), conn, - queued_outgoing: 
Default::default(), + queued_outgoing: QueuedOutgoingMessages::new(Gauge::noop()), received_requests_from_remote: Default::default(), internal_request_timeout_interval: tokio::time::interval( INITIAL_REQUEST_TIMEOUT, diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 3522aa6a75b..712f076b47d 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -5,6 +5,7 @@ mod conn; mod counter; mod handle; +use active::QueuedOutgoingMessages; pub use conn::EthRlpxConnection; pub use handle::{ ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, @@ -495,7 +496,9 @@ impl SessionManager { internal_request_tx: ReceiverStream::new(messages_rx).fuse(), inflight_requests: Default::default(), conn, - queued_outgoing: Default::default(), + queued_outgoing: QueuedOutgoingMessages::new( + self.metrics.queued_outgoing_messages.clone(), + ), received_requests_from_remote: Default::default(), internal_request_timeout_interval: tokio::time::interval( self.initial_internal_request_timeout, From f93dbf54c3b1181b166a0813cfeb95296ec0c6cd Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 1 Nov 2024 19:49:37 +0800 Subject: [PATCH 278/970] Remove redundant `SignedTransaction::Signature` (#12185) --- crates/primitives-traits/src/transaction/signed.rs | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 1bc8308b13f..748ef39666d 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -5,7 +5,7 @@ use core::hash::Hash; use alloy_consensus::Transaction; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; -use alloy_primitives::{keccak256, Address, TxHash, B256}; +use alloy_primitives::{keccak256, Address, Signature, TxHash, B256}; /// A signed transaction. 
pub trait SignedTransaction: @@ -26,9 +26,6 @@ pub trait SignedTransaction: /// Transaction type that is signed. type Transaction: Transaction; - /// Signature type that results from signing transaction. - type Signature; - /// Returns reference to transaction hash. fn tx_hash(&self) -> &TxHash; @@ -36,7 +33,7 @@ pub trait SignedTransaction: fn transaction(&self) -> &Self::Transaction; /// Returns reference to signature. - fn signature(&self) -> &Self::Signature; + fn signature(&self) -> &Signature; /// Recover signer from signature and hash. /// @@ -59,10 +56,8 @@ pub trait SignedTransaction: /// Create a new signed transaction from a transaction and its signature. /// /// This will also calculate the transaction hash using its encoding. - fn from_transaction_and_signature( - transaction: Self::Transaction, - signature: Self::Signature, - ) -> Self; + fn from_transaction_and_signature(transaction: Self::Transaction, signature: Signature) + -> Self; /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with /// tx type. 
From f6bd11c7116dbf46a4450cb69121d015b4b705b6 Mon Sep 17 00:00:00 2001 From: Aliaksei Misiukevich Date: Fri, 1 Nov 2024 14:54:55 +0100 Subject: [PATCH 279/970] feature: transaction's input truncate function (#12236) Signed-off-by: nadtech-hub --- crates/rpc/rpc-types-compat/src/transaction/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index f8b46454dc2..2d92747d401 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -102,7 +102,9 @@ impl TransactionCompat for () { WithOtherFields::default() } - fn otterscan_api_truncate_input(_tx: &mut Self::Transaction) {} + fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { + tx.input = tx.input.slice(..4); + } fn tx_type(_tx: &Self::Transaction) -> u8 { 0 From c1a68f23cf2d655cd8137660fc82c7d83c094b20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Fri, 1 Nov 2024 20:55:23 +0700 Subject: [PATCH 280/970] refactor: move `payload/builder/src/database.rs` to `revm/src/cached.rs` (#12252) Co-authored-by: Matthias Seitz --- Cargo.lock | 237 +++++++++--------- .../src/commands/debug_cmd/build_block.rs | 3 +- crates/payload/basic/Cargo.toml | 1 + crates/payload/basic/src/lib.rs | 5 +- crates/payload/builder/Cargo.toml | 10 +- crates/payload/builder/src/lib.rs | 1 - .../src/database.rs => revm/src/cached.rs} | 12 +- crates/revm/src/lib.rs | 8 +- 8 files changed, 147 insertions(+), 130 deletions(-) rename crates/{payload/builder/src/database.rs => revm/src/cached.rs} (96%) diff --git a/Cargo.lock b/Cargo.lock index 5fc3574ebc4..4c469cd21e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,9 +97,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.42" +version = "0.1.45" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dca4a1469a3e572e9ba362920ff145f5d0a00a3e71a64ddcb4a3659cf64c76a7" +checksum = "4feb7c662fd0be3d0c926a456be4ac44e9cf8e05cbd91df6db7f7140b861016a" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -150,9 +150,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5647fce5a168f9630f935bf7821c4207b1755184edaeba783cb4e11d35058484" +checksum = "f5228b189b18b85761340dc9eaac0141148a8503657b36f9bc3a869413d987ca" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -229,9 +229,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b5671117c38b1c2306891f97ad3828d85487087f54ebe2c7591a055ea5bcea7" +checksum = "31a0f0d51db8a1a30a4d98a9f90e090a94c8f44cb4d9eafc7e03aa6d00aae984" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -306,9 +306,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71738eb20c42c5fb149571e76536a0f309d142f3957c28791662b96baf77a3d" +checksum = "8edae627382349b56cd6a7a2106f4fd69b243a9233e560c55c2e03cabb7e1d3c" dependencies = [ "alloy-rlp", "arbitrary", @@ -415,7 +415,7 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -630,23 +630,23 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0900b83f4ee1f45c640ceee596afbc118051921b9438fdb5a3175c1a7e05f8b" +checksum = "841eabaa4710f719fddbc24c95d386eae313f07e6da4babc25830ee37945be0c" dependencies = [ "alloy-sol-macro-expander", 
"alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41b1e78dde06b5e12e6702fa8c1d30621bf07728ba75b801fb801c9c6a0ba10" +checksum = "6672337f19d837b9f7073c45853aeb528ed9f7dd6a4154ce683e9e5cb7794014" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -655,31 +655,31 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91dc311a561a306664393407b88d3e53ae58581624128afd8a15faa5de3627dc" +checksum = "0dff37dd20bfb118b777c96eda83b2067f4226d2644c5cfa00187b3bc01770ba" dependencies = [ "const-hex", "dunce", "heck", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45d1fbee9e698f3ba176b6e7a145f4aefe6d2b746b611e8bb246fe11a0e9f6c4" +checksum = "5b853d42292dbb159671a3edae3b2750277ff130f32b726fe07dc2b17aa6f2b5" dependencies = [ "serde", "winnow", @@ -687,9 +687,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "086f41bc6ebcd8cb15f38ba20e47be38dd03692149681ce8061c35d960dbf850" +checksum = "aa828bb1b9a6dc52208fbb18084fb9ce2c30facc2bfda6a5d922349b4990354f" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -862,9 +862,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" +checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13" [[package]] name = "aquamarine" @@ -877,7 +877,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -1100,7 +1100,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -1111,7 +1111,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -1149,7 +1149,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -1255,7 +1255,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -1437,7 +1437,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", "synstructure", ] @@ -1559,7 +1559,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -1677,6 +1677,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.38" @@ -1771,7 +1777,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -2228,7 +2234,7 @@ checksum = 
"f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -2252,7 +2258,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -2263,7 +2269,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -2385,7 +2391,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -2396,7 +2402,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -2417,7 +2423,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", "unicode-xid", ] @@ -2531,7 +2537,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -2679,7 +2685,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -2690,7 +2696,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -2747,7 +2753,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -3303,7 +3309,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -3788,9 +3794,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -3829,7 +3835,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -3979,7 +3985,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -4147,7 +4153,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" dependencies = [ "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -4395,7 +4401,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -4571,9 +4577,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00419de735aac21d53b0de5ce2c03bd3627277cf471300f27ebc89f7d828047" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libp2p-identity" @@ -4813,7 +4819,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -4959,7 +4965,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -5207,7 +5213,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -5260,9 +5266,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba7c98055fd048073738df0cc6d6537e992a0d8828f39d99a469e870db126dbd" +checksum = "f26c3b35b7b3e36d15e0563eebffe13c1d9ca16b7aaffcb6a64354633547e16b" 
dependencies = [ "alloy-consensus", "alloy-eips", @@ -5278,9 +5284,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d631e8113cf88d30e621022677209caa148a9ca3ccb590fd34bbd1c731e3aff3" +checksum = "ccacc2efed3d60d98ea581bddb885df1c6c62a592e55de049cfefd94116112cd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5292,9 +5298,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1eabe7683d7e19c7cc5171d664e49fc449176cf1334ffff82808e2a7eea5933a" +checksum = "5ff6fc0f94702ea0f4d8466bffdc990067ae6df9213465df9b7957f74f1e5461" dependencies = [ "alloy-consensus", "alloy-network", @@ -5306,26 +5312,29 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b39574acb1873315e6bd89df174f6223e897188fb87eeea2ad1eda04f7d28eb" +checksum = "f5f8e6ec6b91c6aaeb20860b455a52fd8e300acfe5d534e96e9073a24f853e74" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", + "async-trait", "derive_more 1.0.0", "op-alloy-consensus", "op-alloy-genesis", "serde", + "tracing", + "unsigned-varint", ] [[package]] name = "op-alloy-rpc-types" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919e9b69212d61f3c8932bfb717c7ad458ea3fc52072b3433d99994f8223d555" +checksum = "94bae9bf91b620e1e2c2291562e5998bc1247bd8ada011773e1997b31a95de99" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5341,9 +5350,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0e3a47ea24cee189b4351be247fd138c68571704ee57060cf5a722502f44412c" +checksum = "4b52ee59c86537cff83e8c7f2a6aa287a94f3608bb40c06d442aafd0c2e807a4" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -5561,7 +5570,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -5590,7 +5599,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -5762,7 +5771,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -5813,7 +5822,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -5911,7 +5920,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -5990,10 +5999,11 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "e346e016eacfff12233c243718197ca12f148c84e1e84268a896699b41c71780" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2", @@ -6229,9 +6239,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "base64 0.22.1", "bytes", @@ -6403,6 +6413,7 @@ dependencies = [ "reth-payload-primitives", "reth-primitives", "reth-provider", + "reth-revm", "reth-tasks", 
"reth-transaction-pool", "revm", @@ -6733,7 +6744,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -9606,9 +9617,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.15" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", "once_cell", @@ -9883,22 +9894,22 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -9933,7 +9944,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -9984,7 +9995,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -10007,7 +10018,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -10293,7 +10304,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -10351,9 +10362,9 @@ 
dependencies = [ [[package]] name = "syn" -version = "2.0.85" +version = "2.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +checksum = "e89275301d38033efb81a6e60e3497e734dfcc62571f2854bf4b16690398824c" dependencies = [ "proc-macro2", "quote", @@ -10362,14 +10373,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5e0c2ea8db64b2898b62ea2fbd60204ca95e0b2c6bdf53ff768bbe916fbe4d" +checksum = "16320d4a2021ba1a32470b3759676114a918885e9800e68ad60f2c67969fba62" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -10395,7 +10406,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -10472,7 +10483,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -10496,22 +10507,22 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "5d171f59dbaa811dbbb1aee1e73db92ec2b122911a48e1390dfe327a821ddede" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = "b08be0f17bd307950653ce45db00cd31200d82b624b36e181337d9c7d92765b5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -10688,7 +10699,7 @@ checksum = 
"693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -10889,7 +10900,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -11280,7 +11291,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -11351,7 +11362,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", "wasm-bindgen-shared", ] @@ -11385,7 +11396,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11541,7 +11552,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -11552,7 +11563,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -11563,7 +11574,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -11574,7 +11585,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -11849,7 +11860,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", "synstructure", ] @@ -11871,7 +11882,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", 
+ "syn 2.0.86", ] [[package]] @@ -11891,7 +11902,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", "synstructure", ] @@ -11912,7 +11923,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] @@ -11934,7 +11945,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.86", ] [[package]] diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index a2dfb5ab3ea..0559d473f62 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -26,7 +26,6 @@ use reth_node_api::{ EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes, }; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; -use reth_payload_builder::database::CachedReads; use reth_primitives::{ revm_primitives::KzgSettings, BlobTransaction, BlobTransactionSidecar, PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, @@ -36,7 +35,7 @@ use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::{database::StateProviderDatabase, primitives::EnvKzgSettings}; +use reth_revm::{cached::CachedReads, database::StateProviderDatabase, primitives::EnvKzgSettings}; use reth_stages::StageId; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 88ab99272db..74dea45d10d 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml 
@@ -21,6 +21,7 @@ reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-evm.workspace = true +reth-revm.workspace=true # ethereum alloy-rlp.workspace = true diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index bb8dc0ef66a..7b1de980ce9 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -16,9 +16,7 @@ use futures_core::ready; use futures_util::FutureExt; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_evm::state_change::post_block_withdrawals_balance_increments; -use reth_payload_builder::{ - database::CachedReads, KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator, -}; +use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator}; use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; @@ -28,6 +26,7 @@ use reth_primitives::{ use reth_provider::{ BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, }; +use reth_revm::cached::CachedReads; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; use revm::{Database, State}; diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 3b71011e02e..08399b6f9c6 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -13,15 +13,15 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-primitives = { workspace = true, optional = true } reth-provider.workspace = true reth-payload-primitives.workspace = true reth-ethereum-engine-primitives.workspace = true reth-chain-state = { workspace = true, optional = true } # alloy +alloy-primitives = { workspace = true, optional = true } alloy-rpc-types = { workspace = true, features = ["engine"] } -alloy-primitives.workspace = true # async async-trait.workspace = true @@ -37,12 +37,16 @@ metrics.workspace = true 
tracing.workspace = true [dev-dependencies] +reth-primitives.workspace = true +reth-chain-state.workspace = true +alloy-primitives.workspace = true revm.workspace = true [features] test-utils = [ + "alloy-primitives", "reth-chain-state", - "reth-chain-state?/test-utils", + "reth-chain-state/test-utils", "reth-primitives/test-utils", "reth-provider/test-utils", "revm/test-utils" diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 7af61ac4c68..2c46a4a9e16 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -101,7 +101,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub mod database; mod metrics; mod service; mod traits; diff --git a/crates/payload/builder/src/database.rs b/crates/revm/src/cached.rs similarity index 96% rename from crates/payload/builder/src/database.rs rename to crates/revm/src/cached.rs index d63f7322dee..b0eac39c44c 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/revm/src/cached.rs @@ -1,13 +1,13 @@ //! Database adapters for payload building. -use alloy_primitives::{Address, B256, U256}; +use alloy_primitives::{ + map::{Entry, HashMap}, + Address, B256, U256, +}; +use core::cell::RefCell; use reth_primitives::revm_primitives::{ db::{Database, DatabaseRef}, AccountInfo, Bytecode, }; -use std::{ - cell::RefCell, - collections::{hash_map::Entry, HashMap}, -}; /// A container type that caches reads from an underlying [`DatabaseRef`]. 
/// @@ -17,7 +17,7 @@ use std::{ /// # Example /// /// ``` -/// use reth_payload_builder::database::CachedReads; +/// use reth_revm::cached::CachedReads; /// use revm::db::{DatabaseRef, State}; /// /// fn build_payload(db: DB) { diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 8b544a53728..b06ee816f8d 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -11,11 +11,15 @@ extern crate alloc; +pub mod batch; + +/// Cache database that reads from an underlying [`DatabaseRef`]. +/// Database adapters for payload building. +pub mod cached; + /// Contains glue code for integrating reth database into revm's [Database]. pub mod database; -pub mod batch; - /// Common test helpers #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; From d5a3a3a849265496af53b7f4fd460b572fb4fca3 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 1 Nov 2024 14:57:34 +0100 Subject: [PATCH 281/970] test(chain-state): add unit tests for `CanonStateNotification` (#12110) --- crates/chain-state/src/notifications.rs | 226 ++++++++++++++++++++++++ crates/evm/execution-types/src/chain.rs | 2 +- 2 files changed, 227 insertions(+), 1 deletion(-) diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index fc717314b3f..6e24bcbb4c8 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -190,3 +190,229 @@ impl Stream for ForkChoiceStream { } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + use reth_execution_types::ExecutionOutcome; + use reth_primitives::{Receipt, Receipts, TransactionSigned, TxType}; + + #[test] + fn test_commit_notification() { + let block = SealedBlockWithSenders::default(); + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + + let mut block2 = 
block; + block2.set_block_number(2); + block2.set_hash(block2_hash); + + let chain = Arc::new(Chain::new( + vec![block1.clone(), block2.clone()], + ExecutionOutcome::default(), + None, + )); + + // Create a commit notification + let notification = CanonStateNotification::Commit { new: chain.clone() }; + + // Test that `committed` returns the correct chain + assert_eq!(notification.committed(), chain); + + // Test that `reverted` returns None for `Commit` + assert!(notification.reverted().is_none()); + + // Test that `tip` returns the correct block + assert_eq!(*notification.tip(), block2); + } + + #[test] + fn test_reorg_notification() { + let block = SealedBlockWithSenders::default(); + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + let block3_hash = B256::new([0x03; 32]); + + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + + let mut block2 = block.clone(); + block2.set_block_number(2); + block2.set_hash(block2_hash); + + let mut block3 = block; + block3.set_block_number(3); + block3.set_hash(block3_hash); + + let old_chain = + Arc::new(Chain::new(vec![block1.clone()], ExecutionOutcome::default(), None)); + let new_chain = Arc::new(Chain::new( + vec![block2.clone(), block3.clone()], + ExecutionOutcome::default(), + None, + )); + + // Create a reorg notification + let notification = + CanonStateNotification::Reorg { old: old_chain.clone(), new: new_chain.clone() }; + + // Test that `reverted` returns the old chain + assert_eq!(notification.reverted(), Some(old_chain)); + + // Test that `committed` returns the new chain + assert_eq!(notification.committed(), new_chain); + + // Test that `tip` returns the tip of the new chain (last block in the new chain) + assert_eq!(*notification.tip(), block3); + } + + #[test] + fn test_block_receipts_commit() { + // Create a default block instance for use in block definitions. 
+ let block = SealedBlockWithSenders::default(); + + // Define unique hashes for two blocks to differentiate them in the chain. + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + // Create a default transaction to include in block1's transactions. + let tx = TransactionSigned::default(); + + // Create a clone of the default block and customize it to act as block1. + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + // Add the transaction to block1's transactions. + block1.block.body.transactions.push(tx); + + // Clone the default block and customize it to act as block2. + let mut block2 = block; + block2.set_block_number(2); + block2.set_hash(block2_hash); + + // Create a receipt for the transaction in block1. + let receipt1 = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 12345, + logs: vec![], + success: true, + ..Default::default() + }; + + // Wrap the receipt in a `Receipts` structure, as expected in the `ExecutionOutcome`. + let receipts = Receipts { receipt_vec: vec![vec![Some(receipt1.clone())]] }; + + // Define an `ExecutionOutcome` with the created receipts. + let execution_outcome = ExecutionOutcome { receipts, ..Default::default() }; + + // Create a new chain segment with `block1` and `block2` and the execution outcome. + let new_chain = + Arc::new(Chain::new(vec![block1.clone(), block2.clone()], execution_outcome, None)); + + // Create a commit notification containing the new chain segment. + let notification = CanonStateNotification::Commit { new: new_chain }; + + // Call `block_receipts` on the commit notification to retrieve block receipts. + let block_receipts = notification.block_receipts(); + + // Assert that only one receipt entry exists in the `block_receipts` list. + assert_eq!(block_receipts.len(), 1); + + // Verify that the first entry matches block1's hash and transaction receipt. 
+ assert_eq!( + block_receipts[0].0, + BlockReceipts { + block: block1.num_hash(), + tx_receipts: vec![(B256::default(), receipt1)] + } + ); + + // Assert that the receipt is from the committed segment (not reverted). + assert!(!block_receipts[0].1); + } + + #[test] + fn test_block_receipts_reorg() { + // Define block1 for the old chain segment, which will be reverted. + let mut old_block1 = SealedBlockWithSenders::default(); + old_block1.set_block_number(1); + old_block1.set_hash(B256::new([0x01; 32])); + old_block1.block.body.transactions.push(TransactionSigned::default()); + + // Create a receipt for a transaction in the reverted block. + let old_receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 54321, + logs: vec![], + success: false, + ..Default::default() + }; + let old_receipts = Receipts { receipt_vec: vec![vec![Some(old_receipt.clone())]] }; + + let old_execution_outcome = + ExecutionOutcome { receipts: old_receipts, ..Default::default() }; + + // Create an old chain segment to be reverted, containing `old_block1`. + let old_chain = Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); + + // Define block2 for the new chain segment, which will be committed. + let mut new_block1 = SealedBlockWithSenders::default(); + new_block1.set_block_number(2); + new_block1.set_hash(B256::new([0x02; 32])); + new_block1.block.body.transactions.push(TransactionSigned::default()); + + // Create a receipt for a transaction in the new committed block. + let new_receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 12345, + logs: vec![], + success: true, + ..Default::default() + }; + let new_receipts = Receipts { receipt_vec: vec![vec![Some(new_receipt.clone())]] }; + + let new_execution_outcome = + ExecutionOutcome { receipts: new_receipts, ..Default::default() }; + + // Create a new chain segment to be committed, containing `new_block1`. 
+ let new_chain = Arc::new(Chain::new(vec![new_block1.clone()], new_execution_outcome, None)); + + // Create a reorg notification with both reverted (old) and committed (new) chain segments. + let notification = CanonStateNotification::Reorg { old: old_chain, new: new_chain }; + + // Retrieve receipts from both old (reverted) and new (committed) segments. + let block_receipts = notification.block_receipts(); + + // Assert there are two receipt entries, one from each chain segment. + assert_eq!(block_receipts.len(), 2); + + // Verify that the first entry matches old_block1 and its receipt from the reverted segment. + assert_eq!( + block_receipts[0].0, + BlockReceipts { + block: old_block1.num_hash(), + tx_receipts: vec![(B256::default(), old_receipt)] + } + ); + // Confirm this is from the reverted segment. + assert!(block_receipts[0].1); + + // Verify that the second entry matches new_block1 and its receipt from the committed + // segment. + assert_eq!( + block_receipts[1].0, + BlockReceipts { + block: new_block1.num_hash(), + tx_receipts: vec![(B256::default(), new_receipt)] + } + ); + // Confirm this is from the committed segment. + assert!(!block_receipts[1].1); + } +} diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 65f96ff5638..dc633e2d7ab 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -453,7 +453,7 @@ impl IntoIterator for ChainBlocks<'_> { } /// Used to hold receipts and their attachment. 
-#[derive(Default, Clone, Debug)] +#[derive(Default, Clone, Debug, PartialEq, Eq)] pub struct BlockReceipts { /// Block identifier pub block: BlockNumHash, From eaac2aa2cfb6ea5a61eea2d9a36a8b66b7854ce4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Nov 2024 15:17:31 +0100 Subject: [PATCH 282/970] chore: simplify cached db usage (#12242) --- crates/ethereum/payload/src/lib.rs | 5 ++--- crates/optimism/payload/src/builder.rs | 5 ++--- crates/revm/src/cached.rs | 24 +++++++++++++++++++----- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 8e92d0aa870..ed33292ef98 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -161,7 +161,7 @@ where let state_provider = client.state_by_block_hash(config.parent_header.hash())?; let state = StateProviderDatabase::new(state_provider); let mut db = - State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); + State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(); let PayloadConfig { parent_header, extra_data, attributes } = config; debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); @@ -372,8 +372,7 @@ where // calculate the state root let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); let (state_root, trie_output) = { - let state_provider = db.database.0.inner.borrow_mut(); - state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + db.database.inner().state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", parent_hash=%parent_header.hash(), %err, diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 09a443d0f1d..cae2d34bd49 100644 --- 
a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -170,7 +170,7 @@ where let state_provider = client.state_by_block_hash(config.parent_header.hash())?; let state = StateProviderDatabase::new(state_provider); let mut db = - State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); + State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(); let PayloadConfig { parent_header, attributes, mut extra_data } = config; debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); @@ -445,8 +445,7 @@ where // calculate the state root let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); let (state_root, trie_output) = { - let state_provider = db.database.0.inner.borrow_mut(); - state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + db.database.inner().state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", parent_header=%parent_header.hash(), %err, diff --git a/crates/revm/src/cached.rs b/crates/revm/src/cached.rs index b0eac39c44c..2152ca5bd73 100644 --- a/crates/revm/src/cached.rs +++ b/crates/revm/src/cached.rs @@ -22,10 +22,10 @@ use reth_primitives::revm_primitives::{ /// /// fn build_payload(db: DB) { /// let mut cached_reads = CachedReads::default(); -/// let db_ref = cached_reads.as_db(db); -/// // this is `Database` and can be used to build a payload, it never writes to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. +/// let db = cached_reads.as_db_mut(db); +/// // this is `Database` and can be used to build a payload, it never commits to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. 
/// // Subsequent payload build attempts can use cached reads and avoid hitting the underlying database. -/// let db = State::builder().with_database_ref(db_ref).build(); +/// let state = State::builder().with_database(db).build(); /// } /// ``` #[derive(Debug, Clone, Default)] @@ -40,10 +40,11 @@ pub struct CachedReads { impl CachedReads { /// Gets a [`DatabaseRef`] that will cache reads from the given database. pub fn as_db(&mut self, db: DB) -> CachedReadsDBRef<'_, DB> { - CachedReadsDBRef { inner: RefCell::new(self.as_db_mut(db)) } + self.as_db_mut(db).into_db() } - fn as_db_mut(&mut self, db: DB) -> CachedReadsDbMut<'_, DB> { + /// Gets a mutable [`Database`] that will cache reads from the underlying database. + pub fn as_db_mut(&mut self, db: DB) -> CachedReadsDbMut<'_, DB> { CachedReadsDbMut { cached: self, db } } @@ -67,6 +68,19 @@ pub struct CachedReadsDbMut<'a, DB> { pub db: DB, } +impl<'a, DB> CachedReadsDbMut<'a, DB> { + /// Converts this [`Database`] implementation into a [`DatabaseRef`] that will still cache + /// reads. + pub const fn into_db(self) -> CachedReadsDBRef<'a, DB> { + CachedReadsDBRef { inner: RefCell::new(self) } + } + + /// Returns access to wrapped [`DatabaseRef`]. 
+ pub const fn inner(&self) -> &DB { + &self.db + } +} + impl Database for CachedReadsDbMut<'_, DB> { type Error = ::Error; From d8100012798c540f71266e757bc5dbcddd508b63 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Fri, 1 Nov 2024 09:34:48 -0600 Subject: [PATCH 283/970] renamed OptimismNetworkBuilder to OpNetworkBuilder (#12255) --- crates/optimism/node/src/node.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 925e7204c9e..56487fb2435 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -68,7 +68,7 @@ impl OptimismNode { Node, OpPoolBuilder, OpPayloadBuilder, - OptimismNetworkBuilder, + OpNetworkBuilder, OptimismExecutorBuilder, OptimismConsensusBuilder, > @@ -82,7 +82,7 @@ impl OptimismNode { .node_types::() .pool(OpPoolBuilder::default()) .payload(OpPayloadBuilder::new(compute_pending_block)) - .network(OptimismNetworkBuilder { + .network(OpNetworkBuilder { disable_txpool_gossip, disable_discovery_v4: !discovery_v4, }) @@ -101,7 +101,7 @@ where N, OpPoolBuilder, OpPayloadBuilder, - OptimismNetworkBuilder, + OpNetworkBuilder, OptimismExecutorBuilder, OptimismConsensusBuilder, >; @@ -365,17 +365,17 @@ where /// A basic optimism network builder. #[derive(Debug, Default, Clone)] -pub struct OptimismNetworkBuilder { +pub struct OpNetworkBuilder { /// Disable transaction pool gossip pub disable_txpool_gossip: bool, /// Disable discovery v4 pub disable_discovery_v4: bool, } -impl OptimismNetworkBuilder { +impl OpNetworkBuilder { /// Returns the [`NetworkConfig`] that contains the settings to launch the p2p network. /// - /// This applies the configured [`OptimismNetworkBuilder`] settings. + /// This applies the configured [`OpNetworkBuilder`] settings. 
pub fn network_config( &self, ctx: &BuilderContext, @@ -420,7 +420,7 @@ impl OptimismNetworkBuilder { } } -impl NetworkBuilder for OptimismNetworkBuilder +impl NetworkBuilder for OpNetworkBuilder where Node: FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, From 41c4bab0f781f7b67fbc430797ff96839149c603 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Nov 2024 17:02:49 +0100 Subject: [PATCH 284/970] chore: use deref directly (#12256) --- crates/rpc/rpc-eth-api/src/helpers/pending_block.rs | 4 +--- crates/rpc/rpc-eth-types/src/simulate.rs | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index f2d1416139b..a0065d79342 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -399,9 +399,7 @@ pub trait LoadPendingBlock: execution_outcome.block_logs_bloom(block_number).expect("Block is present"); // calculate the state root - let state_provider = &db.database; - let state_root = - state_provider.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; + let state_root = db.database.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; // create the block header let transactions_root = calculate_transaction_root(&executed_txs); diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 4249c78fe6a..62f0e24b1c6 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -274,7 +274,7 @@ pub fn build_block( } } - let state_root = db.db.0.state_root(hashed_state)?; + let state_root = db.db.state_root(hashed_state)?; let header = reth_primitives::Header { beneficiary: block_env.coinbase, From bc69f6348f553802c94a774e0305ca57b018aaf9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Nov 2024 18:08:41 +0100 Subject: [PATCH 285/970] feat: add asref impls (#12257) 
--- crates/revm/src/cached.rs | 9 +++++++++ crates/revm/src/database.rs | 6 ++++++ 2 files changed, 15 insertions(+) diff --git a/crates/revm/src/cached.rs b/crates/revm/src/cached.rs index 2152ca5bd73..807b163c4cb 100644 --- a/crates/revm/src/cached.rs +++ b/crates/revm/src/cached.rs @@ -81,6 +81,15 @@ impl<'a, DB> CachedReadsDbMut<'a, DB> { } } +impl AsRef for CachedReadsDbMut<'_, DB> +where + DB: AsRef, +{ + fn as_ref(&self) -> &T { + self.inner().as_ref() + } +} + impl Database for CachedReadsDbMut<'_, DB> { type Error = ::Error; diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 5f662fea7cf..682aca6cf37 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -79,6 +79,12 @@ impl StateProviderDatabase { } } +impl AsRef for StateProviderDatabase { + fn as_ref(&self) -> &DB { + self + } +} + impl Deref for StateProviderDatabase { type Target = DB; From 969ca3e63b2203abf7537808bf3513971ebf210b Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Sat, 2 Nov 2024 02:14:22 +0900 Subject: [PATCH 286/970] fix: check hashed state for loading `TriePrefixSets::destroyed_accounts` (#12235) --- crates/trie/db/src/prefix_set.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index 079fe393764..cd50503bc70 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -36,13 +36,13 @@ impl PrefixSetLoader<'_, TX> { // Walk account changeset and insert account prefixes. let mut account_changeset_cursor = self.cursor_read::()?; - let mut account_plain_state_cursor = self.cursor_read::()?; + let mut account_hashed_state_cursor = self.cursor_read::()?; for account_entry in account_changeset_cursor.walk_range(range.clone())? { let (_, AccountBeforeTx { address, .. 
}) = account_entry?; let hashed_address = keccak256(address); account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account_plain_state_cursor.seek_exact(address)?.is_none() { + if account_hashed_state_cursor.seek_exact(hashed_address)?.is_none() { destroyed_accounts.insert(hashed_address); } } From 166a2346dcd2a9c8d5fea6995a9fc5e6426c62bb Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Fri, 1 Nov 2024 14:19:47 -0600 Subject: [PATCH 287/970] renamed OptimismExecutorBuilder to OpExecutorBuilder (#12258) --- crates/optimism/node/src/node.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 56487fb2435..b4a4d2730ab 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -69,7 +69,7 @@ impl OptimismNode { OpPoolBuilder, OpPayloadBuilder, OpNetworkBuilder, - OptimismExecutorBuilder, + OpExecutorBuilder, OptimismConsensusBuilder, > where @@ -86,7 +86,7 @@ impl OptimismNode { disable_txpool_gossip, disable_discovery_v4: !discovery_v4, }) - .executor(OptimismExecutorBuilder::default()) + .executor(OpExecutorBuilder::default()) .consensus(OptimismConsensusBuilder::default()) } } @@ -102,7 +102,7 @@ where OpPoolBuilder, OpPayloadBuilder, OpNetworkBuilder, - OptimismExecutorBuilder, + OpExecutorBuilder, OptimismConsensusBuilder, >; @@ -179,9 +179,9 @@ where /// A regular optimism evm and executor builder. 
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] -pub struct OptimismExecutorBuilder; +pub struct OpExecutorBuilder; -impl ExecutorBuilder for OptimismExecutorBuilder +impl ExecutorBuilder for OpExecutorBuilder where Node: FullNodeTypes>, { From fdf10a7dc26bdaf669341eb095f3a801b595b9a4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Nov 2024 21:58:59 +0100 Subject: [PATCH 288/970] test: make cargo t compile in codecs (#12261) --- crates/storage/codecs/src/alloy/mod.rs | 2 +- crates/storage/codecs/src/alloy/transaction/eip4844.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index f1bf6a00e69..697bac901e4 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -7,7 +7,7 @@ macro_rules! cond_mod { #[cfg(feature = "test-utils")] pub mod $mod_name; #[cfg(not(feature = "test-utils"))] - mod $mod_name; + pub(crate) mod $mod_name; )* }; } diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs index 5ec36e06bf5..c89e2b0785b 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip4844.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -106,7 +106,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TxEip4844 { } } -#[cfg(any(test, feature = "test-utils"))] +#[cfg(feature = "test-utils")] fn serialize_placeholder(value: &Option<()>, serializer: S) -> Result where S: serde::Serializer, @@ -119,7 +119,7 @@ where } } -#[cfg(any(test, feature = "test-utils"))] +#[cfg(feature = "test-utils")] fn deserialize_placeholder<'de, D>(deserializer: D) -> Result, D::Error> where D: serde::Deserializer<'de>, From a911104fe9ccb04c0946e0b11ac14337eec6b7e0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Nov 2024 22:03:17 +0100 Subject: [PATCH 289/970] test: make cargo t compile in db-models (#12263) --- Cargo.lock | 2 +- 
crates/storage/db-models/Cargo.toml | 10 ++++++---- crates/storage/db-models/src/accounts.rs | 2 +- crates/storage/db-models/src/blocks.rs | 2 +- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4c469cd21e2..6491a47eb4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6920,7 +6920,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "reth-codecs", - "reth-primitives", + "reth-primitives-traits", "serde", "test-fuzz", ] diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index d5f773347b0..44b291959ba 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives = { workspace = true, features = ["reth-codec"] } +reth-primitives-traits.workspace = true # ethereum alloy-primitives.workspace = true @@ -32,20 +32,22 @@ proptest = { workspace = true, optional = true } [dev-dependencies] # reth -reth-primitives = { workspace = true, features = ["arbitrary"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-codecs.workspace = true +arbitrary = { workspace = true, features = ["derive"] } +proptest.workspace = true proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true [features] test-utils = [ + "reth-primitives-traits/test-utils", "arbitrary", - "reth-primitives/test-utils", "reth-codecs/test-utils" ] arbitrary = [ - "reth-primitives/arbitrary", + "reth-primitives-traits/arbitrary", "dep:arbitrary", "dep:proptest", "alloy-primitives/arbitrary", diff --git a/crates/storage/db-models/src/accounts.rs b/crates/storage/db-models/src/accounts.rs index acfd45fe34e..29a5cf30592 100644 --- a/crates/storage/db-models/src/accounts.rs +++ b/crates/storage/db-models/src/accounts.rs @@ -2,7 +2,7 @@ use reth_codecs::{add_arbitrary_tests, Compact}; use serde::Serialize; use alloy_primitives::{bytes::Buf, Address}; -use 
reth_primitives::Account; +use reth_primitives_traits::Account; /// Account as it is saved in the database. /// diff --git a/crates/storage/db-models/src/blocks.rs b/crates/storage/db-models/src/blocks.rs index 3e740a2e1aa..b4399dc1e27 100644 --- a/crates/storage/db-models/src/blocks.rs +++ b/crates/storage/db-models/src/blocks.rs @@ -2,7 +2,7 @@ use std::ops::Range; use alloy_primitives::TxNumber; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::Withdrawals; +use reth_primitives_traits::Withdrawals; use serde::{Deserialize, Serialize}; /// Total number of transactions. From c72f11cc851073518d670960b2930b74aa36b9b5 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Fri, 1 Nov 2024 15:34:14 -0600 Subject: [PATCH 290/970] renamed OptimismConsensusBuilder to OpConsensusBuilder (#12265) --- crates/optimism/node/src/node.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index b4a4d2730ab..6a1f94cb788 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -70,7 +70,7 @@ impl OptimismNode { OpPayloadBuilder, OpNetworkBuilder, OpExecutorBuilder, - OptimismConsensusBuilder, + OpConsensusBuilder, > where Node: FullNodeTypes< @@ -87,7 +87,7 @@ impl OptimismNode { disable_discovery_v4: !discovery_v4, }) .executor(OpExecutorBuilder::default()) - .consensus(OptimismConsensusBuilder::default()) + .consensus(OpConsensusBuilder::default()) } } @@ -103,7 +103,7 @@ where OpPayloadBuilder, OpNetworkBuilder, OpExecutorBuilder, - OptimismConsensusBuilder, + OpConsensusBuilder, >; type AddOns = OptimismAddOns< @@ -442,9 +442,9 @@ where /// A basic optimism consensus builder. 
#[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct OptimismConsensusBuilder; +pub struct OpConsensusBuilder; -impl ConsensusBuilder for OptimismConsensusBuilder +impl ConsensusBuilder for OpConsensusBuilder where Node: FullNodeTypes>, { From d8bbd36b2f081b3e7d4eb5c7cb90c4e8ab9343e0 Mon Sep 17 00:00:00 2001 From: Ryan Schneider Date: Fri, 1 Nov 2024 14:35:47 -0700 Subject: [PATCH 291/970] feat: flashbots_validateBuilderSubmissionV4 (#12243) --- crates/ethereum/node/tests/e2e/rpc.rs | 90 +++++++++++++++++++++++++-- crates/rpc/rpc-api/src/lib.rs | 5 +- crates/rpc/rpc-api/src/validation.rs | 24 +++++++ crates/rpc/rpc/src/validation.rs | 38 ++++++++++- 4 files changed, 149 insertions(+), 8 deletions(-) diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index c8b127b9b7f..1f7ac32e048 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -2,12 +2,15 @@ use crate::utils::eth_payload_attributes; use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718}; use alloy_primitives::{Address, B256, U256}; use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx}; -use alloy_rpc_types_beacon::relay::{BidTrace, SignedBidSubmissionV3}; +use alloy_rpc_types_beacon::relay::{BidTrace, SignedBidSubmissionV3, SignedBidSubmissionV4}; use rand::{rngs::StdRng, Rng, SeedableRng}; -use reth::rpc::{ - api::BuilderBlockValidationRequestV3, - compat::engine::payload::block_to_payload_v3, - types::{engine::BlobsBundleV1, TransactionRequest}, +use reth::{ + payload::BuiltPayload, + rpc::{ + api::{BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4}, + compat::engine::payload::block_to_payload_v3, + types::{engine::BlobsBundleV1, TransactionRequest}, + }, }; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::setup_engine; @@ -115,7 +118,7 @@ async fn test_fee_history() -> eyre::Result<()> { } #[tokio::test] -async fn 
test_flashbots_validate() -> eyre::Result<()> { +async fn test_flashbots_validate_v3() -> eyre::Result<()> { reth_tracing::init_test_tracing(); let chain_spec = Arc::new( @@ -187,3 +190,78 @@ async fn test_flashbots_validate() -> eyre::Result<()> { .is_err()); Ok(()) } + +#[tokio::test] +async fn test_flashbots_validate_v4() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + node.advance(100, |_| { + let provider = provider.clone(); + Box::pin(async move { + let SendableTx::Envelope(tx) = + provider.fill(TransactionRequest::default().to(Address::ZERO)).await.unwrap() + else { + unreachable!() + }; + + tx.encoded_2718().into() + }) + }) + .await?; + + let _ = provider.send_transaction(TransactionRequest::default().to(Address::ZERO)).await?; + let (payload, attrs) = node.new_payload().await?; + + let mut request = BuilderBlockValidationRequestV4 { + request: SignedBidSubmissionV4 { + message: BidTrace { + parent_hash: payload.block().parent_hash, + block_hash: payload.block().hash(), + gas_used: payload.block().gas_used, + gas_limit: payload.block().gas_limit, + ..Default::default() + }, + execution_payload: block_to_payload_v3(payload.block().clone()), + blobs_bundle: BlobsBundleV1::new([]), + execution_requests: payload.requests().unwrap_or_default().to_vec(), + signature: Default::default(), + }, + parent_beacon_block_root: attrs.parent_beacon_block_root.unwrap(), + registered_gas_limit: payload.block().gas_limit, + }; + + provider + 
.raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .expect("request should validate"); + + request.registered_gas_limit -= 1; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .is_err()); + request.registered_gas_limit += 1; + + request.request.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .is_err()); + Ok(()) +} diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 63e6e54466d..0a4fa9f660e 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -46,7 +46,10 @@ pub mod servers { rpc::RpcApiServer, trace::TraceApiServer, txpool::TxPoolApiServer, - validation::{BlockSubmissionValidationApiServer, BuilderBlockValidationRequestV3}, + validation::{ + BlockSubmissionValidationApiServer, BuilderBlockValidationRequestV3, + BuilderBlockValidationRequestV4, + }, web3::Web3ApiServer, }; pub use reth_rpc_eth_api::{ diff --git a/crates/rpc/rpc-api/src/validation.rs b/crates/rpc/rpc-api/src/validation.rs index d8f55b668c9..797eee7ae52 100644 --- a/crates/rpc/rpc-api/src/validation.rs +++ b/crates/rpc/rpc-api/src/validation.rs @@ -3,6 +3,7 @@ use alloy_primitives::B256; use alloy_rpc_types_beacon::relay::{ BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, SignedBidSubmissionV3, + SignedBidSubmissionV4, }; use jsonrpsee::proc_macros::rpc; use serde::{Deserialize, Serialize}; @@ -24,6 +25,22 @@ pub struct BuilderBlockValidationRequestV3 { pub parent_beacon_block_root: B256, } +/// A Request to validate a [`SignedBidSubmissionV4`] +/// +/// +#[serde_as] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct BuilderBlockValidationRequestV4 { + /// The request to be validated. 
+ #[serde(flatten)] + pub request: SignedBidSubmissionV4, + /// The registered gas limit for the validation request. + #[serde_as(as = "DisplayFromStr")] + pub registered_gas_limit: u64, + /// The parent beacon block root for the validation request. + pub parent_beacon_block_root: B256, +} + /// Block validation rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "flashbots"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "flashbots"))] @@ -48,4 +65,11 @@ pub trait BlockSubmissionValidationApi { &self, request: BuilderBlockValidationRequestV3, ) -> jsonrpsee::core::RpcResult<()>; + + /// A Request to validate a block submission. + #[method(name = "validateBuilderSubmissionV4")] + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> jsonrpsee::core::RpcResult<()>; } diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index fe9d0eb4475..1476180d431 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -20,7 +20,10 @@ use reth_provider::{ StateProviderFactory, WithdrawalsProvider, }; use reth_revm::database::StateProviderDatabase; -use reth_rpc_api::{BlockSubmissionValidationApiServer, BuilderBlockValidationRequestV3}; +use reth_rpc_api::{ + BlockSubmissionValidationApiServer, BuilderBlockValidationRequestV3, + BuilderBlockValidationRequestV4, +}; use reth_rpc_eth_types::EthApiError; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_trie::HashedPostState; @@ -415,4 +418,37 @@ where .map_err(|e| RethError::Other(e.into())) .to_rpc_result() } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> RpcResult<()> { + let block = self + .payload_validator + .ensure_well_formed_payload( + ExecutionPayload::V3(request.request.execution_payload), + ExecutionPayloadSidecar::v4( + CancunPayloadFields { + 
parent_beacon_block_root: request.parent_beacon_block_root, + versioned_hashes: self + .validate_blobs_bundle(request.request.blobs_bundle) + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result()?, + }, + request.request.execution_requests.into(), + ), + ) + .to_rpc_result()? + .try_seal_with_senders() + .map_err(|_| EthApiError::InvalidTransactionSignature)?; + + self.validate_message_against_block( + block, + request.request.message, + request.registered_gas_limit, + ) + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result() + } } From d7ead13bda6551958d5595cd20eac5a873debd48 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 2 Nov 2024 11:17:31 +0400 Subject: [PATCH 292/970] fix: clippy lints (#12269) --- .github/workflows/lint.yml | 4 ++-- crates/chain-state/src/notifications.rs | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4723d8a4d57..fa7b4f9f45c 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -18,10 +18,10 @@ jobs: matrix: include: - type: ethereum - args: --bin reth --workspace --locked + args: --bin reth --workspace --lib --examples --tests --benches --locked features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" - type: optimism - args: --bin op-reth --workspace --locked + args: --bin op-reth --workspace --lib --examples --tests --benches --locked features: "optimism asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" - type: book args: --manifest-path book/sources/Cargo.toml --workspace --bins diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 6e24bcbb4c8..582e1d2a05d 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -297,6 +297,7 @@ mod tests { block2.set_hash(block2_hash); // Create a receipt for 
the transaction in block1. + #[allow(clippy::needless_update)] let receipt1 = Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 12345, @@ -346,6 +347,7 @@ mod tests { old_block1.block.body.transactions.push(TransactionSigned::default()); // Create a receipt for a transaction in the reverted block. + #[allow(clippy::needless_update)] let old_receipt = Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 54321, @@ -368,6 +370,7 @@ mod tests { new_block1.block.body.transactions.push(TransactionSigned::default()); // Create a receipt for a transaction in the new committed block. + #[allow(clippy::needless_update)] let new_receipt = Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 12345, From 962fa6685bd5763f8a162a1f9f9e67a92c9548a3 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Sat, 2 Nov 2024 01:41:11 -0600 Subject: [PATCH 293/970] Use Arc in SystemCaller (#12268) Co-authored-by: Matthias Seitz --- crates/engine/util/src/reorg.rs | 2 +- crates/ethereum/evm/src/execute.rs | 2 +- crates/evm/src/system_calls/mod.rs | 6 +++--- crates/optimism/evm/src/execute.rs | 2 +- crates/optimism/payload/src/builder.rs | 2 +- crates/payload/validator/src/lib.rs | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 0d51d2dfab6..69831389a65 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -303,7 +303,7 @@ where let mut evm = evm_config.evm_with_env(&mut state, env); // apply eip-4788 pre block contract call - let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec); + let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); system_caller.apply_beacon_root_contract_call( reorg_target.timestamp, diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index f082a3a707e..fa14e260d65 100644 --- a/crates/ethereum/evm/src/execute.rs +++ 
b/crates/ethereum/evm/src/execute.rs @@ -95,7 +95,7 @@ where { /// Creates a new [`EthExecutionStrategy`] pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { - let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); + let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); Self { state, chain_spec, evm_config, system_caller } } } diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index daaf1d1414f..7fdb31d967d 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -1,7 +1,7 @@ //! System contract call functions. use crate::ConfigureEvm; -use alloc::{boxed::Box, vec}; +use alloc::{boxed::Box, sync::Arc, vec}; use alloy_eips::eip7685::Requests; use alloy_primitives::Bytes; use core::fmt::Display; @@ -46,7 +46,7 @@ impl OnStateHook for NoopHook { #[allow(missing_debug_implementations)] pub struct SystemCaller { evm_config: EvmConfig, - chain_spec: Chainspec, + chain_spec: Arc, /// Optional hook to be called after each state change. hook: Option>, } @@ -54,7 +54,7 @@ pub struct SystemCaller { impl SystemCaller { /// Create a new system caller with the given EVM config, database, and chain spec, and creates /// the EVM with the given initialized config and block environment. 
- pub const fn new(evm_config: EvmConfig, chain_spec: Chainspec) -> Self { + pub const fn new(evm_config: EvmConfig, chain_spec: Arc) -> Self { Self { evm_config, chain_spec, hook: None } } diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 9c5db9d4b61..c2b00614436 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -90,7 +90,7 @@ where { /// Creates a new [`OpExecutionStrategy`] pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { - let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); + let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); Self { state, chain_spec, evm_config, system_caller } } } diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index cae2d34bd49..0cf45835a23 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -197,7 +197,7 @@ where chain_spec.is_regolith_active_at_timestamp(attributes.payload_attributes.timestamp); // apply eip-4788 pre block contract call - let mut system_caller = SystemCaller::new(evm_config.clone(), &chain_spec); + let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); system_caller .pre_block_beacon_root_contract_call( diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 38e53bac42a..e74b5f48d40 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -31,7 +31,7 @@ impl ExecutionPayloadValidator { /// Returns the chain spec used by the validator. 
#[inline] - pub fn chain_spec(&self) -> &ChainSpec { + pub const fn chain_spec(&self) -> &Arc { &self.chain_spec } } From 9593f4d08d25018eac0173fadff8768ce7d241b7 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Sat, 2 Nov 2024 02:42:42 -0600 Subject: [PATCH 294/970] renamed OptimismGenesisInfo to OpGenesisInfo (#12270) --- crates/optimism/chainspec/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 8ad36c66afb..3248625d604 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -351,7 +351,7 @@ impl OptimismHardforks for OpChainSpec {} impl From for OpChainSpec { fn from(genesis: Genesis) -> Self { use reth_optimism_forks::OptimismHardfork; - let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); + let optimism_genesis_info = OpGenesisInfo::extract_from(&genesis); let genesis_info = optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default(); @@ -441,12 +441,12 @@ impl From for OpChainSpec { } #[derive(Default, Debug)] -struct OptimismGenesisInfo { +struct OpGenesisInfo { optimism_chain_info: op_alloy_rpc_types::genesis::OpChainInfo, base_fee_params: BaseFeeParamsKind, } -impl OptimismGenesisInfo { +impl OpGenesisInfo { fn extract_from(genesis: &Genesis) -> Self { let mut info = Self { optimism_chain_info: op_alloy_rpc_types::genesis::OpChainInfo::extract_from( From dbdf60ba119c04df52dddba0e1e9a8f2843b063f Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Sat, 2 Nov 2024 03:22:58 -0600 Subject: [PATCH 295/970] renamed OptimismBeaconConsensus to OpBeaconConsensus (#12271) --- crates/optimism/consensus/src/lib.rs | 8 ++++---- crates/optimism/node/src/node.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/optimism/consensus/src/lib.rs 
b/crates/optimism/consensus/src/lib.rs index 16c1d5d37d7..bf1428815d0 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -34,19 +34,19 @@ pub use validation::validate_block_post_execution; /// /// Provides basic checks as outlined in the execution specs. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct OptimismBeaconConsensus { +pub struct OpBeaconConsensus { /// Configuration chain_spec: Arc, } -impl OptimismBeaconConsensus { - /// Create a new instance of [`OptimismBeaconConsensus`] +impl OpBeaconConsensus { + /// Create a new instance of [`OpBeaconConsensus`] pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } } -impl Consensus for OptimismBeaconConsensus { +impl Consensus for OpBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validate_header_gas(header)?; validate_header_base_fee(header, &self.chain_spec) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 6a1f94cb788..87266767da2 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -19,7 +19,7 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::OptimismBeaconConsensus; +use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_evm::{OpExecutionStrategyFactory, OptimismEvmConfig}; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; @@ -454,7 +454,7 @@ where if ctx.is_dev() { Ok(Arc::new(reth_auto_seal_consensus::AutoSealConsensus::new(ctx.chain_spec()))) } else { - Ok(Arc::new(OptimismBeaconConsensus::new(ctx.chain_spec()))) + Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) } } } From c74d2a06f2d84a96f779e62293ef90b3c984c56a Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sat, 2 Nov 2024 11:30:17 +0100 Subject: [PATCH 296/970] 
chore(ci): unpin kurtosis (#12272) --- .github/workflows/kurtosis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index 43f5c3605ab..74d26dbd3ee 100644 --- a/.github/workflows/kurtosis.yml +++ b/.github/workflows/kurtosis.yml @@ -79,7 +79,6 @@ jobs: - name: Run kurtosis uses: ethpandaops/kurtosis-assertoor-github-action@v1 with: - kurtosis_version: 1.3.1 ethereum_package_args: '.github/assets/kurtosis_network_params.yaml' notify-on-error: From f2de55d8fe4988e8580c18a8302d642ce87a031d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 2 Nov 2024 13:16:08 +0100 Subject: [PATCH 297/970] chore: make some fields owned (#12274) --- crates/node/api/src/node.rs | 8 ++++---- crates/node/builder/src/launch/engine.rs | 6 +++--- crates/node/builder/src/launch/mod.rs | 6 +++--- crates/node/builder/src/rpc.rs | 10 ++++++---- 4 files changed, 16 insertions(+), 14 deletions(-) diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 3173fd2b398..b016e01c295 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -91,17 +91,17 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { } /// Context passed to [`NodeAddOns::launch_add_ons`], -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct AddOnsContext<'a, N: FullNodeComponents> { /// Node with all configured components. - pub node: &'a N, + pub node: N, /// Node configuration. pub config: &'a NodeConfig<::ChainSpec>, /// Handle to the beacon consensus engine. pub beacon_engine_handle: - &'a BeaconConsensusEngineHandle<::Engine>, + BeaconConsensusEngineHandle<::Engine>, /// JWT secret for the node. - pub jwt_secret: &'a JwtSecret, + pub jwt_secret: JwtSecret, } /// Customizable node add-on types. 
diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 3de651cdcd0..85401b8b958 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -286,10 +286,10 @@ where let jwt_secret = ctx.auth_jwt_secret()?; let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter(), + node: ctx.node_adapter().clone(), config: ctx.node_config(), - beacon_engine_handle: &beacon_engine_handle, - jwt_secret: &jwt_secret, + beacon_engine_handle, + jwt_secret, }; let RpcHandle { rpc_server_handles, rpc_registry } = diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 50438e79d2b..a623a7a9f23 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -329,10 +329,10 @@ where let jwt_secret = ctx.auth_jwt_secret()?; let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter(), + node: ctx.node_adapter().clone(), config: ctx.node_config(), - beacon_engine_handle: &beacon_engine_handle, - jwt_secret: &jwt_secret, + beacon_engine_handle, + jwt_secret, }; let RpcHandle { rpc_server_handles, rpc_registry } = diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 8819aa4ac4f..8af1527cbcd 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -409,9 +409,11 @@ where type Handle = RpcHandle; async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { - let AddOnsContext { node, config, beacon_engine_handle, jwt_secret } = ctx; let Self { eth_api_builder, engine_validator_builder, hooks, _pd: _ } = self; + let engine_validator = engine_validator_builder.build(&ctx).await?; + let AddOnsContext { node, config, beacon_engine_handle, jwt_secret } = ctx; + let client = ClientVersionV1 { code: CLIENT_CODE, name: NAME_CLIENT.to_string(), @@ -422,17 +424,17 @@ where let engine_api = EngineApi::new( node.provider().clone(), config.chain.clone(), - 
beacon_engine_handle.clone(), + beacon_engine_handle, node.payload_builder().clone().into(), node.pool().clone(), Box::new(node.task_executor().clone()), client, EngineCapabilities::default(), - engine_validator_builder.build(&ctx).await?, + engine_validator, ); info!(target: "reth::cli", "Engine API handler initialized"); - let auth_config = config.rpc.auth_server_config(*jwt_secret)?; + let auth_config = config.rpc.auth_server_config(jwt_secret)?; let module_config = config.rpc.transport_rpc_module_config(); debug!(target: "reth::cli", http=?module_config.http(), ws=?module_config.ws(), "Using RPC module config"); From 2aba40b17c57c6a6f6bd68f1fa32279d8e7b0d0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Sat, 2 Nov 2024 21:22:17 +0700 Subject: [PATCH 298/970] feat: add `CachedReads::extend` (#12277) Co-authored-by: Matthias Seitz --- crates/revm/src/cached.rs | 63 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/crates/revm/src/cached.rs b/crates/revm/src/cached.rs index 807b163c4cb..88a41e1d895 100644 --- a/crates/revm/src/cached.rs +++ b/crates/revm/src/cached.rs @@ -57,6 +57,15 @@ impl CachedReads { ) { self.accounts.insert(address, CachedAccount { info: Some(info), storage }); } + + /// Extends current cache with entries from another [`CachedReads`] instance. + /// + /// Note: It is expected that both instances are based on the exact same state. + pub fn extend(&mut self, other: Self) { + self.accounts.extend(other.accounts); + self.contracts.extend(other.contracts); + self.block_hashes.extend(other.block_hashes); + } } /// A [Database] that caches reads inside [`CachedReads`]. 
@@ -184,3 +193,57 @@ impl CachedAccount { Self { info, storage: HashMap::default() } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_extend_with_two_cached_reads() { + // Setup test data + let hash1 = B256::from_slice(&[1u8; 32]); + let hash2 = B256::from_slice(&[2u8; 32]); + let address1 = Address::from_slice(&[1u8; 20]); + let address2 = Address::from_slice(&[2u8; 20]); + + // Create primary cache + let mut primary = { + let mut cache = CachedReads::default(); + cache.accounts.insert(address1, CachedAccount::new(Some(AccountInfo::default()))); + cache.contracts.insert(hash1, Bytecode::default()); + cache.block_hashes.insert(1, hash1); + cache + }; + + // Create additional cache + let additional = { + let mut cache = CachedReads::default(); + cache.accounts.insert(address2, CachedAccount::new(Some(AccountInfo::default()))); + cache.contracts.insert(hash2, Bytecode::default()); + cache.block_hashes.insert(2, hash2); + cache + }; + + // Extending primary with additional cache + primary.extend(additional); + + // Verify the combined state + assert!( + primary.accounts.len() == 2 && + primary.contracts.len() == 2 && + primary.block_hashes.len() == 2, + "All maps should contain 2 entries" + ); + + // Verify specific entries + assert!( + primary.accounts.contains_key(&address1) && + primary.accounts.contains_key(&address2) && + primary.contracts.contains_key(&hash1) && + primary.contracts.contains_key(&hash2) && + primary.block_hashes.get(&1) == Some(&hash1) && + primary.block_hashes.get(&2) == Some(&hash2), + "All expected entries should be present" + ); + } +} From af38a1a2d0631d0f37c4da29d6aa8f86f7dec0d9 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Sat, 2 Nov 2024 12:04:25 -0600 Subject: [PATCH 299/970] renamed OptimismPayloadTypes to OpPayloadTypes (#12281) --- crates/optimism/node/src/engine.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 0f48dc34706..af517b00ea0 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -22,7 +22,7 @@ use reth_optimism_payload_builder::{ /// The types used in the optimism beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] -pub struct OptimismEngineTypes { +pub struct OptimismEngineTypes { _marker: std::marker::PhantomData, } @@ -48,9 +48,9 @@ where /// A default payload type for [`OptimismEngineTypes`] #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] -pub struct OptimismPayloadTypes; +pub struct OpPayloadTypes; -impl PayloadTypes for OptimismPayloadTypes { +impl PayloadTypes for OpPayloadTypes { type BuiltPayload = OpBuiltPayload; type PayloadAttributes = OpPayloadAttributes; type PayloadBuilderAttributes = OpPayloadBuilderAttributes; From adff5a9429e2f5f65e0b183526a554664161b4f4 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 2 Nov 2024 22:14:10 +0400 Subject: [PATCH 300/970] feat: remove AnyNetwork usage (#12280) --- Cargo.lock | 5 - crates/optimism/rpc/src/eth/receipt.rs | 96 +++------ crates/optimism/rpc/src/eth/transaction.rs | 2 +- crates/rpc/rpc-builder/Cargo.toml | 5 - crates/rpc/rpc-builder/src/lib.rs | 9 +- crates/rpc/rpc-builder/tests/it/http.rs | 7 +- crates/rpc/rpc-builder/tests/it/middleware.rs | 2 +- crates/rpc/rpc-eth-api/src/types.rs | 15 +- crates/rpc/rpc-eth-types/Cargo.toml | 1 - crates/rpc/rpc-eth-types/src/lib.rs | 2 +- crates/rpc/rpc-eth-types/src/receipt.rs | 186 ++++++++++-------- crates/rpc/rpc-types-compat/Cargo.toml | 3 +- .../rpc-types-compat/src/transaction/mod.rs | 25 +-- crates/rpc/rpc/src/eth/core.rs | 5 +- crates/rpc/rpc/src/eth/helpers/block.rs | 10 +- crates/rpc/rpc/src/eth/helpers/receipt.rs | 5 +- crates/rpc/rpc/src/eth/helpers/types.rs | 60 +++--- crates/rpc/rpc/src/eth/pubsub.rs | 19 +- 18 files changed, 
183 insertions(+), 274 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6491a47eb4f..8aab2c0a8eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8725,13 +8725,10 @@ dependencies = [ name = "reth-rpc-builder" version = "1.1.0" dependencies = [ - "alloy-network", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-rpc-types-trace", - "alloy-serde", "clap", "http", "jsonrpsee", @@ -8860,7 +8857,6 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types", "alloy-rpc-types-eth", - "alloy-serde", "alloy-sol-types", "derive_more 1.0.0", "futures", @@ -8936,7 +8932,6 @@ dependencies = [ "alloy-rpc-types", "alloy-rpc-types-engine", "alloy-rpc-types-eth", - "alloy-serde", "reth-primitives", "reth-trie-common", "serde", diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 2734fb5458c..f8e6b7fc21e 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,7 +1,7 @@ //! Loads and formats OP receipt RPC response. use alloy_eips::eip2718::Encodable2718; -use alloy_rpc_types::{AnyReceiptEnvelope, Log, TransactionReceipt}; +use alloy_rpc_types::{Log, TransactionReceipt}; use op_alloy_consensus::{ DepositTransaction, OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope, }; @@ -13,7 +13,7 @@ use reth_optimism_forks::OptimismHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use reth_provider::ChainSpecProvider; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, ReceiptBuilder}; +use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; use crate::{OpEthApi, OpEthApiError}; @@ -172,9 +172,7 @@ impl OpReceiptFieldsBuilder { #[derive(Debug)] pub struct OpReceiptBuilder { /// Core receipt, has all the fields of an L1 receipt and is the basis for the OP receipt. - pub core_receipt: TransactionReceipt>, - /// Transaction type. 
- pub tx_type: TxType, + pub core_receipt: TransactionReceipt>, /// Additional OP receipt fields. pub op_receipt_fields: OpTransactionReceiptFields, } @@ -189,11 +187,29 @@ impl OpReceiptBuilder { all_receipts: &[Receipt], l1_block_info: revm::L1BlockInfo, ) -> Result { - let ReceiptBuilder { base: core_receipt, .. } = - ReceiptBuilder::new(transaction, meta, receipt, all_receipts) - .map_err(OpEthApiError::Eth)?; - - let tx_type = transaction.tx_type(); + let core_receipt = + build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| { + match receipt.tx_type { + TxType::Legacy => OpReceiptEnvelope::::Legacy(receipt_with_bloom), + TxType::Eip2930 => OpReceiptEnvelope::::Eip2930(receipt_with_bloom), + TxType::Eip1559 => OpReceiptEnvelope::::Eip1559(receipt_with_bloom), + TxType::Eip4844 => { + // TODO: unreachable + OpReceiptEnvelope::::Eip1559(receipt_with_bloom) + } + TxType::Eip7702 => OpReceiptEnvelope::::Eip7702(receipt_with_bloom), + TxType::Deposit => { + OpReceiptEnvelope::::Deposit(OpDepositReceiptWithBloom:: { + receipt: OpDepositReceipt:: { + inner: receipt_with_bloom.receipt, + deposit_nonce: receipt.deposit_nonce, + deposit_receipt_version: receipt.deposit_receipt_version, + }, + logs_bloom: receipt_with_bloom.logs_bloom, + }) + } + } + })?; let op_receipt_fields = OpReceiptFieldsBuilder::default() .l1_block_info(chain_spec, transaction, l1_block_info)? @@ -201,69 +217,15 @@ impl OpReceiptBuilder { .deposit_version(receipt.deposit_receipt_version) .build(); - Ok(Self { core_receipt, tx_type, op_receipt_fields }) + Ok(Self { core_receipt, op_receipt_fields }) } /// Builds [`OpTransactionReceipt`] by combing core (l1) receipt fields and additional OP /// receipt fields. 
pub fn build(self) -> OpTransactionReceipt { - let Self { core_receipt, tx_type, op_receipt_fields } = self; - - let OpTransactionReceiptFields { l1_block_info, deposit_nonce, deposit_receipt_version } = - op_receipt_fields; - - let TransactionReceipt { - inner: AnyReceiptEnvelope { inner: receipt_with_bloom, .. }, - transaction_hash, - transaction_index, - block_hash, - block_number, - gas_used, - effective_gas_price, - blob_gas_used, - blob_gas_price, - from, - to, - contract_address, - authorization_list, - } = core_receipt; - - let inner = match tx_type { - TxType::Legacy => OpReceiptEnvelope::::Legacy(receipt_with_bloom), - TxType::Eip2930 => OpReceiptEnvelope::::Eip2930(receipt_with_bloom), - TxType::Eip1559 => OpReceiptEnvelope::::Eip1559(receipt_with_bloom), - TxType::Eip4844 => { - // TODO: unreachable - OpReceiptEnvelope::::Eip1559(receipt_with_bloom) - } - TxType::Eip7702 => OpReceiptEnvelope::::Eip7702(receipt_with_bloom), - TxType::Deposit => { - OpReceiptEnvelope::::Deposit(OpDepositReceiptWithBloom:: { - receipt: OpDepositReceipt:: { - inner: receipt_with_bloom.receipt, - deposit_nonce, - deposit_receipt_version, - }, - logs_bloom: receipt_with_bloom.logs_bloom, - }) - } - }; + let Self { core_receipt: inner, op_receipt_fields } = self; - let inner = TransactionReceipt::> { - inner, - transaction_hash, - transaction_index, - block_hash, - block_number, - gas_used, - effective_gas_price, - blob_gas_used, - blob_gas_price, - from, - to, - contract_address, - authorization_list, - }; + let OpTransactionReceiptFields { l1_block_info, .. 
} = op_receipt_fields; OpTransactionReceipt { inner, l1_block_info } } diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 3994afe1984..6b5954391d9 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -86,7 +86,7 @@ where let signed_tx = tx.clone().into_signed(); let hash = tx.hash; - let mut inner = EthTxBuilder.fill(tx, tx_info).inner; + let mut inner = EthTxBuilder.fill(tx, tx_info); if signed_tx.is_deposit() { inner.gas_price = Some(signed_tx.max_fee_per_gas()) diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 711e4438133..2d10dabf8af 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -31,11 +31,6 @@ reth-evm.workspace = true reth-engine-primitives.workspace = true reth-primitives.workspace = true -# ethereum -alloy-network.workspace = true -alloy-rpc-types.workspace = true -alloy-serde.workspace = true - # rpc/net jsonrpsee = { workspace = true, features = ["server"] } tower-http = { workspace = true, features = ["full"] } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 696d3501430..385b92af3d0 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1094,14 +1094,7 @@ where /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] pub fn register_ots(&mut self) -> &mut Self where - EthApi: TraceExt - + EthTransactions< - NetworkTypes: alloy_network::Network< - TransactionResponse = alloy_serde::WithOtherFields< - alloy_rpc_types::Transaction, - >, - >, - >, + EthApi: TraceExt + EthTransactions, { let otterscan_api = self.otterscan_api(); self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into()); diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 7a8093c5062..ed9ef56d62b 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -3,11 +3,10 @@ use crate::utils::{launch_http, launch_http_ws, launch_ws}; use alloy_primitives::{hex_literal::hex, Address, Bytes, TxHash, B256, B64, U256, U64}; -use alloy_rpc_types::{ - Block, FeeHistory, Filter, Index, Log, PendingTransactionFilterKind, SyncStatus, Transaction, - TransactionReceipt, +use alloy_rpc_types_eth::{ + transaction::TransactionRequest, Block, FeeHistory, Filter, Index, Log, + PendingTransactionFilterKind, SyncStatus, Transaction, TransactionReceipt, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_rpc_types_trace::filter::TraceFilter; use jsonrpsee::{ core::{ diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index bcc26dcad89..0e2186e56ee 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -1,5 +1,5 @@ use crate::utils::{test_address, test_rpc_builder}; -use alloy_rpc_types::{Block, Receipt, Transaction}; +use alloy_rpc_types_eth::{Block, Receipt, Transaction}; use jsonrpsee::{ server::{middleware::rpc::RpcServiceT, RpcServiceBuilder}, types::Request, diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 653730ed3c9..1d176dd1e86 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -2,16 
+2,15 @@ use std::{error::Error, fmt}; -use alloy_network::{AnyNetwork, Network}; +use alloy_network::Network; use alloy_rpc_types::Block; -use reth_rpc_eth_types::EthApiError; use reth_rpc_types_compat::TransactionCompat; use crate::{AsEthApiError, FromEthApiError, FromEvmError}; /// Network specific `eth` API types. pub trait EthApiTypes: Send + Sync + Clone { - /// Extension of [`EthApiError`], with network specific errors. + /// Extension of [`FromEthApiError`], with network specific errors. type Error: Into> + FromEthApiError + AsEthApiError @@ -28,16 +27,6 @@ pub trait EthApiTypes: Send + Sync + Clone { fn tx_resp_builder(&self) -> &Self::TransactionCompat; } -impl EthApiTypes for () { - type Error = EthApiError; - type NetworkTypes = AnyNetwork; - type TransactionCompat = (); - - fn tx_resp_builder(&self) -> &Self::TransactionCompat { - self - } -} - /// Adapter for network specific transaction type. pub type RpcTransaction = ::TransactionResponse; diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 46a0d7b5c32..80901bcf812 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -36,7 +36,6 @@ revm.workspace = true revm-inspectors.workspace = true revm-primitives = { workspace = true, features = ["dev"] } alloy-rpc-types.workspace = true -alloy-serde.workspace = true alloy-eips.workspace = true # rpc diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index fa36dae4c88..03c23dc3456 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -37,5 +37,5 @@ pub use gas_oracle::{ }; pub use id_provider::EthSubscriptionIdProvider; pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -pub use receipt::ReceiptBuilder; +pub use receipt::EthReceiptBuilder; pub use transaction::TransactionSource; diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 
198ca79aa2a..0734b547ec8 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,24 +1,101 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. -use alloy_consensus::Transaction; +use alloy_consensus::{ReceiptEnvelope, Transaction}; use alloy_primitives::{Address, TxKind}; -use alloy_rpc_types::{AnyReceiptEnvelope, Log, ReceiptWithBloom, TransactionReceipt}; -use alloy_serde::OtherFields; -use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use alloy_rpc_types::{Log, ReceiptWithBloom, TransactionReceipt}; +use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use revm_primitives::calc_blob_gasprice; use super::{EthApiError, EthResult}; +/// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. +pub fn build_receipt( + transaction: &TransactionSigned, + meta: TransactionMeta, + receipt: &Receipt, + all_receipts: &[Receipt], + build_envelope: impl FnOnce(ReceiptWithBloom) -> T, +) -> EthResult> { + // Note: we assume this transaction is valid, because it's mined (or part of pending block) + // and we don't need to check for pre EIP-2 + let from = + transaction.recover_signer_unchecked().ok_or(EthApiError::InvalidTransactionSignature)?; + + // get the previous transaction cumulative gas used + let gas_used = if meta.index == 0 { + receipt.cumulative_gas_used + } else { + let prev_tx_idx = (meta.index - 1) as usize; + all_receipts + .get(prev_tx_idx) + .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) + .unwrap_or_default() + }; + + let blob_gas_used = transaction.transaction.blob_gas_used(); + // Blob gas price should only be present if the transaction is a blob transaction + let blob_gas_price = blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); + let logs_bloom = receipt.bloom_slow(); + + // get number of logs in the block + let mut num_logs = 0; + for 
prev_receipt in all_receipts.iter().take(meta.index as usize) { + num_logs += prev_receipt.logs.len(); + } + + let logs: Vec = receipt + .logs + .iter() + .enumerate() + .map(|(tx_log_idx, log)| Log { + inner: log.clone(), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + block_timestamp: Some(meta.timestamp), + transaction_hash: Some(meta.tx_hash), + transaction_index: Some(meta.index), + log_index: Some((num_logs + tx_log_idx) as u64), + removed: false, + }) + .collect(); + + let rpc_receipt = alloy_rpc_types::Receipt { + status: receipt.success.into(), + cumulative_gas_used: receipt.cumulative_gas_used as u128, + logs, + }; + + let (contract_address, to) = match transaction.transaction.kind() { + TxKind::Create => (Some(from.create(transaction.transaction.nonce())), None), + TxKind::Call(addr) => (None, Some(Address(*addr))), + }; + + Ok(TransactionReceipt { + inner: build_envelope(ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }), + transaction_hash: meta.tx_hash, + transaction_index: Some(meta.index), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + from, + to, + gas_used: gas_used as u128, + contract_address, + effective_gas_price: transaction.effective_gas_price(meta.base_fee), + // EIP-4844 fields + blob_gas_price, + blob_gas_used: blob_gas_used.map(u128::from), + authorization_list: transaction.authorization_list().map(|l| l.to_vec()), + }) +} + /// Receipt response builder. #[derive(Debug)] -pub struct ReceiptBuilder { +pub struct EthReceiptBuilder { /// The base response body, contains L1 fields. - pub base: TransactionReceipt>, - /// Additional L2 fields. - pub other: OtherFields, + pub base: TransactionReceipt, } -impl ReceiptBuilder { +impl EthReceiptBuilder { /// Returns a new builder with the base response body (L1 fields) set. 
/// /// Note: This requires _all_ block receipts because we need to calculate the gas used by the @@ -29,88 +106,23 @@ impl ReceiptBuilder { receipt: &Receipt, all_receipts: &[Receipt], ) -> EthResult { - // Note: we assume this transaction is valid, because it's mined (or part of pending block) - // and we don't need to check for pre EIP-2 - let from = transaction - .recover_signer_unchecked() - .ok_or(EthApiError::InvalidTransactionSignature)?; - - // get the previous transaction cumulative gas used - let gas_used = if meta.index == 0 { - receipt.cumulative_gas_used - } else { - let prev_tx_idx = (meta.index - 1) as usize; - all_receipts - .get(prev_tx_idx) - .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) - .unwrap_or_default() - }; - - let blob_gas_used = transaction.transaction.blob_gas_used(); - // Blob gas price should only be present if the transaction is a blob transaction - let blob_gas_price = - blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); - let logs_bloom = receipt.bloom_slow(); - - // get number of logs in the block - let mut num_logs = 0; - for prev_receipt in all_receipts.iter().take(meta.index as usize) { - num_logs += prev_receipt.logs.len(); - } - - let logs: Vec = receipt - .logs - .iter() - .enumerate() - .map(|(tx_log_idx, log)| Log { - inner: log.clone(), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some((num_logs + tx_log_idx) as u64), - removed: false, - }) - .collect(); - - let rpc_receipt = alloy_rpc_types::Receipt { - status: receipt.success.into(), - cumulative_gas_used: receipt.cumulative_gas_used as u128, - logs, - }; - - let (contract_address, to) = match transaction.transaction.kind() { - TxKind::Create => (Some(from.create(transaction.transaction.nonce())), None), - TxKind::Call(addr) => (None, 
Some(Address(*addr))), - }; - - #[allow(clippy::needless_update)] - let base = TransactionReceipt { - inner: AnyReceiptEnvelope { - inner: ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, - r#type: transaction.transaction.tx_type().into(), - }, - transaction_hash: meta.tx_hash, - transaction_index: Some(meta.index), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - from, - to, - gas_used: gas_used as u128, - contract_address, - effective_gas_price: transaction.effective_gas_price(meta.base_fee), - // EIP-4844 fields - blob_gas_price, - blob_gas_used: blob_gas_used.map(u128::from), - authorization_list: transaction.authorization_list().map(|l| l.to_vec()), - }; + let base = build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| { + match receipt.tx_type { + TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom), + TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom), + TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom), + TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom), + TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom), + #[allow(unreachable_patterns)] + _ => unreachable!(), + } + })?; - Ok(Self { base, other: Default::default() }) + Ok(Self { base }) } /// Builds a receipt response from the base response body, and any set additional fields. 
- pub fn build(self) -> TransactionReceipt> { + pub fn build(self) -> TransactionReceipt { self.base } } diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index 7d5eac9dbb9..b9a9e5f0361 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -22,7 +22,6 @@ alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-rpc-types.workspace = true alloy-rpc-types-eth = { workspace = true, default-features = false, features = ["serde"] } -alloy-serde.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true @@ -30,4 +29,4 @@ alloy-consensus.workspace = true serde.workspace = true [dev-dependencies] -serde_json.workspace = true \ No newline at end of file +serde_json.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 2d92747d401..16742144f25 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -8,9 +8,8 @@ use std::fmt; use alloy_consensus::Transaction as _; use alloy_rpc_types::{ request::{TransactionInput, TransactionRequest}, - Transaction, TransactionInfo, + TransactionInfo, }; -use alloy_serde::WithOtherFields; use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered, TxType}; use serde::{Deserialize, Serialize}; @@ -89,28 +88,6 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { fn tx_type(tx: &Self::Transaction) -> u8; } -impl TransactionCompat for () { - // this noop impl depends on integration in `reth_rpc_eth_api::EthApiTypes` noop impl, and - // `alloy_network::AnyNetwork` - type Transaction = WithOtherFields; - - fn fill( - &self, - _tx: TransactionSignedEcRecovered, - _tx_info: TransactionInfo, - ) -> Self::Transaction { - WithOtherFields::default() - } - - fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - tx.input = 
tx.input.slice(..4); - } - - fn tx_type(_tx: &Self::Transaction) -> u8 { - 0 - } -} - /// Gas price and max fee per gas for a transaction. Helper type to format transaction RPC response. #[derive(Debug, Default)] pub struct GasPrice { diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 026c87153f2..98ac9e9f409 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -3,7 +3,7 @@ use std::sync::Arc; -use alloy_network::AnyNetwork; +use alloy_network::Ethereum; use alloy_primitives::U256; use derive_more::Deref; use reth_primitives::BlockNumberOrTag; @@ -132,8 +132,7 @@ where Self: Send + Sync, { type Error = EthApiError; - // todo: replace with alloy_network::Ethereum - type NetworkTypes = AnyNetwork; + type NetworkTypes = Ethereum; type TransactionCompat = EthTxBuilder; fn tx_resp_builder(&self) -> &Self::TransactionCompat { diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index d5341d0b22b..1e2d1802e0d 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,14 +1,13 @@ //! Contains RPC handler implementations specific to blocks. 
-use alloy_rpc_types::{AnyTransactionReceipt, BlockId}; -use alloy_serde::WithOtherFields; +use alloy_rpc_types::{BlockId, TransactionReceipt}; use reth_primitives::TransactionMeta; use reth_provider::{BlockReaderIdExt, HeaderProvider}; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, RpcReceipt, }; -use reth_rpc_eth_types::{EthApiError, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use crate::EthApi; @@ -16,7 +15,7 @@ impl EthBlocks for EthApi, + NetworkTypes: alloy_network::Network, Provider: HeaderProvider, >, { @@ -51,9 +50,8 @@ where excess_blob_gas, timestamp, }; - ReceiptBuilder::new(&tx, meta, receipt, &receipts) + EthReceiptBuilder::new(&tx, meta, receipt, &receipts) .map(|builder| builder.build()) - .map(WithOtherFields::new) }) .collect::, Self::Error>>() .map(Some) diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index d0cb5867eac..594cffd09f2 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,9 +1,8 @@ //! Builds an RPC receipt response w.r.t. data layout of network. -use alloy_serde::WithOtherFields; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use crate::EthApi; @@ -26,6 +25,6 @@ where .map_err(Self::Error::from_eth_err)? 
.ok_or(EthApiError::HeaderNotFound(hash.into()))?; - Ok(WithOtherFields::new(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build())) + Ok(EthReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) } } diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 848bcdc365a..0998c057e29 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -1,10 +1,9 @@ //! L1 `eth` API types. use alloy_consensus::Transaction as _; -use alloy_network::{AnyNetwork, Network}; +use alloy_network::{Ethereum, Network}; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types::{Transaction, TransactionInfo}; -use alloy_serde::WithOtherFields; use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_types_compat::{ transaction::{from_primitive_signature, GasPrice}, @@ -19,7 +18,7 @@ impl TransactionCompat for EthTxBuilder where Self: Send + Sync, { - type Transaction = ::TransactionResponse; + type Transaction = ::TransactionResponse; fn fill( &self, @@ -53,41 +52,38 @@ where signed_tx.chain_id(), ); - WithOtherFields { - inner: Transaction { - hash: signed_tx.hash(), - nonce: signed_tx.nonce(), - from: signer, - to, - value: signed_tx.value(), - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas: signed_tx.max_priority_fee_per_gas(), - signature: Some(signature), - gas: signed_tx.gas_limit(), - input, - chain_id, - access_list, - transaction_type: Some(signed_tx.tx_type() as u8), - // These fields are set to None because they are not stored as part of the - // transaction - block_hash, - block_number, - transaction_index, - // EIP-4844 fields - max_fee_per_blob_gas: signed_tx.max_fee_per_blob_gas(), - blob_versioned_hashes, - authorization_list, - }, - ..Default::default() + Transaction { + hash: signed_tx.hash(), + nonce: signed_tx.nonce(), + from: signer, + to, + value: signed_tx.value(), + gas_price, + max_fee_per_gas, + max_priority_fee_per_gas: 
signed_tx.max_priority_fee_per_gas(), + signature: Some(signature), + gas: signed_tx.gas_limit(), + input, + chain_id, + access_list, + transaction_type: Some(signed_tx.tx_type() as u8), + // These fields are set to None because they are not stored as part of the + // transaction + block_hash, + block_number, + transaction_index, + // EIP-4844 fields + max_fee_per_blob_gas: signed_tx.max_fee_per_blob_gas(), + blob_versioned_hashes, + authorization_list, } } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - tx.inner.input = tx.inner.input.slice(..4); + tx.input = tx.input.slice(..4); } fn tx_type(tx: &Self::Transaction) -> u8 { - tx.inner.transaction_type.unwrap_or(0) + tx.transaction_type.unwrap_or(0) } } diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index ac962610ef8..663ec0b99d6 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -8,9 +8,8 @@ use alloy_rpc_types::{ Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult, SyncStatusMetadata, }, - FilteredParams, Header, Log, Transaction, + FilteredParams, Header, Log, }; -use alloy_serde::WithOtherFields; use futures::StreamExt; use jsonrpsee::{ server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink, @@ -123,11 +122,9 @@ where { match kind { SubscriptionKind::NewHeads => { - let stream = pubsub.new_headers_stream().map(|header| { - EthSubscriptionResult::>::Header(Box::new( - header.into(), - )) - }); + let stream = pubsub + .new_headers_stream() + .map(|header| EthSubscriptionResult::<()>::Header(Box::new(header.into()))); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::Logs => { @@ -139,9 +136,9 @@ where } _ => FilteredParams::default(), }; - let stream = pubsub.log_stream(filter).map(|log| { - EthSubscriptionResult::>::Log(Box::new(log)) - }); + let stream = pubsub + .log_stream(filter) + .map(|log| 
EthSubscriptionResult::<()>::Log(Box::new(log))); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::NewPendingTransactions => { @@ -170,7 +167,7 @@ where let stream = pubsub .pending_transaction_hashes_stream() - .map(EthSubscriptionResult::>::TransactionHash); + .map(EthSubscriptionResult::<()>::TransactionHash); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::Syncing => { From e374e4bfe9c4779828c73f39ffc30d99850286ec Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Sat, 2 Nov 2024 13:18:31 -0600 Subject: [PATCH 301/970] renamed OptimismEvmConfig to OpEvmConfig (#12284) --- crates/optimism/evm/src/execute.rs | 8 ++++---- crates/optimism/evm/src/lib.rs | 20 ++++++++++---------- crates/optimism/node/src/node.rs | 8 ++++---- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index c2b00614436..91cdb1bd2c5 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,6 +1,6 @@ //! Optimism block execution strategy. -use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; +use crate::{l1::ensure_create2_deployer, OpEvmConfig, OptimismBlockExecutionError}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; use alloy_eips::eip7685::Requests; @@ -29,7 +29,7 @@ use tracing::trace; /// Factory for [`OpExecutionStrategy`]. #[derive(Debug, Clone)] -pub struct OpExecutionStrategyFactory { +pub struct OpExecutionStrategyFactory { /// The chainspec chain_spec: Arc, /// How to create an EVM. @@ -39,7 +39,7 @@ pub struct OpExecutionStrategyFactory { impl OpExecutionStrategyFactory { /// Creates a new default optimism executor strategy factory. 
pub fn optimism(chain_spec: Arc) -> Self { - Self::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)) + Self::new(chain_spec.clone(), OpEvmConfig::new(chain_spec)) } } @@ -339,7 +339,7 @@ mod tests { chain_spec: Arc, ) -> BasicBlockExecutorProvider { let strategy_factory = - OpExecutionStrategyFactory::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)); + OpExecutionStrategyFactory::new(chain_spec.clone(), OpEvmConfig::new(chain_spec)); BasicBlockExecutorProvider::new(strategy_factory) } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 03aecf2c83e..1adb5db7496 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -38,12 +38,12 @@ use revm_primitives::{ /// Optimism-related EVM configuration. #[derive(Debug, Clone)] -pub struct OptimismEvmConfig { +pub struct OpEvmConfig { chain_spec: Arc, } -impl OptimismEvmConfig { - /// Creates a new [`OptimismEvmConfig`] with the given chain spec. +impl OpEvmConfig { + /// Creates a new [`OpEvmConfig`] with the given chain spec. 
pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } @@ -54,7 +54,7 @@ impl OptimismEvmConfig { } } -impl ConfigureEvmEnv for OptimismEvmConfig { +impl ConfigureEvmEnv for OpEvmConfig { type Header = Header; type Error = DecodeError; @@ -174,7 +174,7 @@ impl ConfigureEvmEnv for OptimismEvmConfig { } } -impl ConfigureEvm for OptimismEvmConfig { +impl ConfigureEvm for OpEvmConfig { type DefaultExternalContext<'a> = (); fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { @@ -226,8 +226,8 @@ mod tests { sync::Arc, }; - fn test_evm_config() -> OptimismEvmConfig { - OptimismEvmConfig::new(BASE_MAINNET.clone()) + fn test_evm_config() -> OpEvmConfig { + OpEvmConfig::new(BASE_MAINNET.clone()) } #[test] @@ -254,9 +254,9 @@ mod tests { // Define the total difficulty as zero (default) let total_difficulty = U256::ZERO; - // Use the `OptimismEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, + // Use the `OpEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, // Header, and total difficulty - OptimismEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) + OpEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) .fill_cfg_and_block_env(&mut cfg_env, &mut block_env, &header, total_difficulty); // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the @@ -266,7 +266,7 @@ mod tests { #[test] fn test_evm_configure() { - // Create a default `OptimismEvmConfig` + // Create a default `OpEvmConfig` let evm_config = test_evm_config(); // Initialize an empty database wrapped in CacheDB diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 87266767da2..7437ec676ab 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -20,7 +20,7 @@ use reth_node_builder::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; -use 
reth_optimism_evm::{OpExecutionStrategyFactory, OptimismEvmConfig}; +use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{Block, Header}; @@ -185,14 +185,14 @@ impl ExecutorBuilder for OpExecutorBuilder where Node: FullNodeTypes>, { - type EVM = OptimismEvmConfig; + type EVM = OpEvmConfig; type Executor = BasicBlockExecutorProvider; async fn build_evm( self, ctx: &BuilderContext, ) -> eyre::Result<(Self::EVM, Self::Executor)> { - let evm_config = OptimismEvmConfig::new(ctx.chain_spec()); + let evm_config = OpEvmConfig::new(ctx.chain_spec()); let strategy_factory = OpExecutionStrategyFactory::new(ctx.chain_spec(), evm_config.clone()); let executor = BasicBlockExecutorProvider::new(strategy_factory); @@ -359,7 +359,7 @@ where ctx: &BuilderContext, pool: Pool, ) -> eyre::Result> { - self.spawn(OptimismEvmConfig::new(ctx.chain_spec()), ctx, pool) + self.spawn(OpEvmConfig::new(ctx.chain_spec()), ctx, pool) } } From e167cc9b0976e86963c294b3b102f4e6f4859ff7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 3 Nov 2024 13:51:20 +0000 Subject: [PATCH 302/970] chore(deps): weekly `cargo update` (#12289) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 134 ++++++++++++++++++++++++++--------------------------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8aab2c0a8eb..0b3318c4ae0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,9 +97,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4feb7c662fd0be3d0c926a456be4ac44e9cf8e05cbd91df6db7f7140b861016a" +checksum = 
"836cf02383d9ebb35502d379bcd1ae803155094077eaab9c29131d888cd5fa3e" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -415,7 +415,7 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -639,7 +639,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -655,7 +655,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "syn-solidity", "tiny-keccak", ] @@ -671,7 +671,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "syn-solidity", ] @@ -828,9 +828,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" @@ -877,7 +877,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1100,7 +1100,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1111,7 +1111,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1149,7 +1149,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1255,7 +1255,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1437,7 +1437,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "synstructure", ] @@ -1559,7 +1559,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1647,9 +1647,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.31" +version = "1.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +checksum = "67b9470d453346108f93a59222a9a1a5724db32d0a4727b7ab7ace4b4d822dc9" dependencies = [ "jobserver", "libc", @@ -1777,7 +1777,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2234,7 +2234,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2258,7 +2258,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2269,7 +2269,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2391,7 +2391,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2402,7 +2402,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2423,7 +2423,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "unicode-xid", ] @@ -2537,7 +2537,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2685,7 +2685,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - 
"syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2696,7 +2696,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2753,7 +2753,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -3309,7 +3309,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -3835,7 +3835,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -3985,7 +3985,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -4153,7 +4153,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" dependencies = [ "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -4401,7 +4401,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -4819,7 +4819,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -4965,7 +4965,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -5213,7 +5213,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -5570,7 +5570,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -5599,7 +5599,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -5771,7 +5771,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -5822,7 +5822,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -5920,7 +5920,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -6744,7 +6744,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -9904,7 +9904,7 @@ checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -9939,7 +9939,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -9990,7 +9990,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -10013,7 +10013,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -10299,7 +10299,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -10357,9 +10357,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.86" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89275301d38033efb81a6e60e3497e734dfcc62571f2854bf4b16690398824c" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", @@ -10375,7 +10375,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -10401,7 +10401,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -10478,7 +10478,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -10517,7 +10517,7 @@ checksum = "b08be0f17bd307950653ce45db00cd31200d82b624b36e181337d9c7d92765b5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -10694,7 +10694,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -10895,7 +10895,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -11286,7 +11286,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -11357,7 +11357,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "wasm-bindgen-shared", ] @@ -11391,7 +11391,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11547,7 +11547,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -11558,7 +11558,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -11569,7 +11569,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -11580,7 +11580,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -11855,7 +11855,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "synstructure", ] @@ -11877,7 +11877,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -11897,7 +11897,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "synstructure", ] @@ -11918,7 +11918,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -11940,7 +11940,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] From 20d3b21904dc5412c3526f72caa999be488a6886 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Sun, 3 Nov 2024 09:26:31 -0600 Subject: [PATCH 303/970] renamed OptimismEngineTypes to OpEngineTypes (#12285) --- crates/optimism/node/src/engine.rs | 18 +++++++++--------- crates/optimism/node/src/lib.rs | 2 +- crates/optimism/node/src/node.rs | 21 +++++++++------------ 3 files changed, 19 insertions(+), 22 deletions(-) diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index af517b00ea0..d956f0cd5cd 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -22,17 +22,17 @@ use reth_optimism_payload_builder::{ /// The types used in the optimism beacon consensus engine. 
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] -pub struct OptimismEngineTypes { +pub struct OpEngineTypes { _marker: std::marker::PhantomData, } -impl PayloadTypes for OptimismEngineTypes { +impl PayloadTypes for OpEngineTypes { type BuiltPayload = T::BuiltPayload; type PayloadAttributes = T::PayloadAttributes; type PayloadBuilderAttributes = T::PayloadBuilderAttributes; } -impl EngineTypes for OptimismEngineTypes +impl EngineTypes for OpEngineTypes where T::BuiltPayload: TryInto + TryInto @@ -45,7 +45,7 @@ where type ExecutionPayloadEnvelopeV4 = OpExecutionPayloadEnvelopeV4; } -/// A default payload type for [`OptimismEngineTypes`] +/// A default payload type for [`OpEngineTypes`] #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] pub struct OpPayloadTypes; @@ -224,7 +224,7 @@ mod test { let attributes = get_attributes(None, 1799999999); let result = >::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes ); @@ -237,7 +237,7 @@ mod test { let attributes = get_attributes(None, 1800000000); let result = >::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes ); @@ -250,7 +250,7 @@ mod test { let attributes = get_attributes(Some(b64!("0000000000000008")), 1800000000); let result = >::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes ); @@ -263,7 +263,7 @@ mod test { let attributes = get_attributes(Some(b64!("0000000800000008")), 1800000000); let result = >::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes ); @@ -276,7 +276,7 @@ mod test { let attributes = get_attributes(Some(b64!("0000000000000000")), 1800000000); let result = >::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes ); diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index ff25e7173a6..f2870d0b839 100644 --- 
a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -15,7 +15,7 @@ pub mod args; /// Exports optimism-specific implementations of the [`EngineTypes`](reth_node_api::EngineTypes) /// trait. pub mod engine; -pub use engine::OptimismEngineTypes; +pub use engine::OpEngineTypes; pub mod node; pub use node::OptimismNode; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 7437ec676ab..7e6f1d0981c 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -36,7 +36,7 @@ use crate::{ args::RollupArgs, engine::OptimismEngineValidator, txpool::{OpTransactionPool, OpTransactionValidator}, - OptimismEngineTypes, + OpEngineTypes, }; /// Optimism primitive types. @@ -74,7 +74,7 @@ impl OptimismNode { > where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } = args; @@ -93,9 +93,7 @@ impl OptimismNode { impl Node for OptimismNode where - N: FullNodeTypes< - Types: NodeTypesWithEngine, - >, + N: FullNodeTypes>, { type ComponentsBuilder = ComponentsBuilder< N, @@ -127,7 +125,7 @@ impl NodeTypes for OptimismNode { } impl NodeTypesWithEngine for OptimismNode { - type Engine = OptimismEngineTypes; + type Engine = OpEngineTypes; } /// Add-ons w.r.t. optimism. @@ -312,10 +310,10 @@ impl OpPayloadBuilder { evm_config: Evm, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> + ) -> eyre::Result> where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, >, Pool: TransactionPool + Unpin + 'static, Evm: ConfigureEvm
, @@ -349,16 +347,15 @@ impl OpPayloadBuilder { impl PayloadServiceBuilder for OpPayloadBuilder where - Node: FullNodeTypes< - Types: NodeTypesWithEngine, - >, + Node: + FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, { async fn spawn_payload_service( self, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> { + ) -> eyre::Result> { self.spawn(OpEvmConfig::new(ctx.chain_spec()), ctx, pool) } } From 4e3b32c5afe27ee9643d5e6afbc81bf670041f0f Mon Sep 17 00:00:00 2001 From: tedison <76473430+edisontim@users.noreply.github.com> Date: Sun, 3 Nov 2024 10:40:49 -0500 Subject: [PATCH 304/970] feat: add more decimals to database growth (#12288) --- etc/grafana/dashboards/overview.json | 1 + 1 file changed, 1 insertion(+) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 8c77f5979fe..25cc280fe03 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -1953,6 +1953,7 @@ "mode": "off" } }, + "decimals": 4, "mappings": [], "thresholds": { "mode": "absolute", From 61f19ab2dcfe06a23dd7a88acf1ac8ce49ef75dd Mon Sep 17 00:00:00 2001 From: Deon <110722148+DanielEmmanuel1@users.noreply.github.com> Date: Sun, 3 Nov 2024 18:03:45 +0100 Subject: [PATCH 305/970] Refactor: use fully-qualified paths in Compact derives(Deon Branch) (#12279) Co-authored-by: Matthias Seitz --- crates/primitives/src/receipt.rs | 5 ++- crates/primitives/src/transaction/mod.rs | 3 -- crates/primitives/src/transaction/tx_type.rs | 3 -- crates/storage/codecs/derive/src/arbitrary.rs | 20 +++++++++-- .../codecs/derive/src/compact/generator.rs | 33 +++++++++++++++++-- .../storage/codecs/derive/src/compact/mod.rs | 11 ++++--- crates/storage/codecs/derive/src/lib.rs | 4 +-- .../codecs/src/alloy/authorization_list.rs | 3 +- .../codecs/src/alloy/genesis_account.rs | 10 ++++-- crates/storage/codecs/src/alloy/header.rs | 2 ++ .../codecs/src/alloy/transaction/eip1559.rs | 3 +- .../codecs/src/alloy/transaction/eip2930.rs | 3 +- 
.../codecs/src/alloy/transaction/eip4844.rs | 3 +- .../codecs/src/alloy/transaction/eip7702.rs | 3 +- .../codecs/src/alloy/transaction/legacy.rs | 3 +- .../codecs/src/alloy/transaction/optimism.rs | 3 +- crates/storage/codecs/src/alloy/withdrawal.rs | 3 +- crates/storage/codecs/src/lib.rs | 6 ++-- 18 files changed, 87 insertions(+), 34 deletions(-) diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 21443f482c9..e60bddb9d79 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -11,8 +11,6 @@ use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodab use bytes::{Buf, BufMut}; use core::{cmp::Ordering, ops::Deref}; use derive_more::{DerefMut, From, IntoIterator}; -#[cfg(feature = "reth-codec")] -use reth_codecs::Compact; use serde::{Deserialize, Serialize}; /// Receipt containing result of transaction execution. @@ -91,7 +89,7 @@ impl Receipts { self.receipt_vec.len() } - /// Returns `true` if the `Receipts` vector is empty. + /// Returns true if the `Receipts` vector is empty. pub fn is_empty(&self) -> bool { self.receipt_vec.is_empty() } @@ -518,6 +516,7 @@ mod tests { use super::*; use crate::revm_primitives::Bytes; use alloy_primitives::{address, b256, bytes, hex_literal::hex}; + use reth_codecs::Compact; #[test] fn test_decode_receipt() { diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 2e0a786fc1a..59d3b9e1297 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -63,9 +63,6 @@ use tx_type::{ COMPACT_IDENTIFIER_LEGACY, }; -#[cfg(test)] -use reth_codecs::Compact; - use alloc::vec::Vec; /// Either a transaction hash or number. 
diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index b6e1ecbf226..eff1c17a71a 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -6,9 +6,6 @@ use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; use serde::{Deserialize, Serialize}; -#[cfg(test)] -use reth_codecs::Compact; - /// Identifier parameter for legacy transaction #[cfg(any(test, feature = "reth-codec"))] pub(crate) const COMPACT_IDENTIFIER_LEGACY: usize = 0; diff --git a/crates/storage/codecs/derive/src/arbitrary.rs b/crates/storage/codecs/derive/src/arbitrary.rs index 8aa44062e21..753bb1e33a5 100644 --- a/crates/storage/codecs/derive/src/arbitrary.rs +++ b/crates/storage/codecs/derive/src/arbitrary.rs @@ -18,10 +18,26 @@ pub fn maybe_generate_tests( let mut traits = vec![]; let mut roundtrips = vec![]; let mut additional_tests = vec![]; + let mut is_crate = false; - for arg in args { + let mut iter = args.into_iter().peekable(); + + // we check if there's a crate argument which is used from inside the codecs crate directly + if let Some(arg) = iter.peek() { + if arg.to_string() == "crate" { + is_crate = true; + iter.next(); + } + } + + for arg in iter { if arg.to_string() == "compact" { - traits.push(quote! { use super::Compact; }); + let path = if is_crate { + quote! { use crate::Compact; } + } else { + quote! { use reth_codecs::Compact; } + }; + traits.push(path); roundtrips.push(quote! { { let mut buf = vec![]; diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index 1fb6d40fa2b..cf9bcc0c629 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -2,10 +2,12 @@ use super::*; use convert_case::{Case, Casing}; +use syn::{Attribute, LitStr}; /// Generates code to implement the `Compact` trait for a data type. 
pub fn generate_from_to( ident: &Ident, + attrs: &[Attribute], has_lifetime: bool, fields: &FieldList, is_zstd: bool, @@ -20,6 +22,8 @@ pub fn generate_from_to( let fuzz = format_ident!("fuzz_test_{snake_case_ident}"); let test = format_ident!("fuzz_{snake_case_ident}"); + let reth_codecs = parse_reth_codecs_path(attrs).unwrap(); + let lifetime = if has_lifetime { quote! { 'a } } else { @@ -28,11 +32,11 @@ pub fn generate_from_to( let impl_compact = if has_lifetime { quote! { - impl<#lifetime> Compact for #ident<#lifetime> + impl<#lifetime> #reth_codecs::Compact for #ident<#lifetime> } } else { quote! { - impl Compact for #ident + impl #reth_codecs::Compact for #ident } }; @@ -53,6 +57,7 @@ pub fn generate_from_to( #[allow(dead_code)] #[test_fuzz::test_fuzz] fn #fuzz(obj: #ident) { + use #reth_codecs::Compact; let mut buf = vec![]; let len = obj.clone().to_compact(&mut buf); let (same_obj, buf) = #ident::from_compact(buf.as_ref(), len); @@ -191,7 +196,7 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec< } // Just because a type supports compression, doesn't mean all its values are to be compressed. - // We skip the smaller ones, and thus require a flag `__zstd` to specify if this value is + // We skip the smaller ones, and thus require a flag` __zstd` to specify if this value is // compressed or not. if is_zstd { lines.push(quote! { @@ -232,3 +237,25 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec< lines } + +/// Function to extract the crate path from `reth_codecs(crate = "...")` attribute. 
+fn parse_reth_codecs_path(attrs: &[Attribute]) -> syn::Result { + // let default_crate_path: syn::Path = syn::parse_str("reth-codecs").unwrap(); + let mut reth_codecs_path: syn::Path = syn::parse_quote!(reth_codecs); + for attr in attrs { + if attr.path().is_ident("reth_codecs") { + attr.parse_nested_meta(|meta| { + if meta.path.is_ident("crate") { + let value = meta.value()?; + let lit: LitStr = value.parse()?; + reth_codecs_path = syn::parse_str(&lit.value())?; + Ok(()) + } else { + Err(meta.error("unsupported attribute")) + } + })?; + } + } + + Ok(reth_codecs_path) +} diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index e5a79b3fe53..b9d5cf18d6b 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -43,13 +43,13 @@ pub enum FieldTypes { pub fn derive(input: TokenStream, is_zstd: bool) -> TokenStream { let mut output = quote! {}; - let DeriveInput { ident, data, generics, .. } = parse_macro_input!(input); + let DeriveInput { ident, data, generics, attrs, .. } = parse_macro_input!(input); let has_lifetime = has_lifetime(&generics); let fields = get_fields(&data); output.extend(generate_flag_struct(&ident, has_lifetime, &fields, is_zstd)); - output.extend(generate_from_to(&ident, has_lifetime, &fields, is_zstd)); + output.extend(generate_from_to(&ident, &attrs, has_lifetime, &fields, is_zstd)); output.into() } @@ -233,10 +233,10 @@ mod tests { // Generate code that will impl the `Compact` trait. let mut output = quote! {}; - let DeriveInput { ident, data, .. } = parse2(f_struct).unwrap(); + let DeriveInput { ident, data, attrs, .. } = parse2(f_struct).unwrap(); let fields = get_fields(&data); output.extend(generate_flag_struct(&ident, false, &fields, false)); - output.extend(generate_from_to(&ident, false, &fields, false)); + output.extend(generate_from_to(&ident, &attrs, false, &fields, false)); // Expected output in a TokenStream format. 
Commas matter! let should_output = quote! { @@ -285,6 +285,7 @@ mod tests { #[allow(dead_code)] #[test_fuzz::test_fuzz] fn fuzz_test_test_struct(obj: TestStruct) { + use reth_codecs::Compact; let mut buf = vec![]; let len = obj.clone().to_compact(&mut buf); let (same_obj, buf) = TestStruct::from_compact(buf.as_ref(), len); @@ -295,7 +296,7 @@ mod tests { pub fn fuzz_test_struct() { fuzz_test_test_struct(TestStruct::default()) } - impl Compact for TestStruct { + impl reth_codecs::Compact for TestStruct { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]> { let mut flags = TestStructFlags::default(); let mut total_length = 0; diff --git a/crates/storage/codecs/derive/src/lib.rs b/crates/storage/codecs/derive/src/lib.rs index 4ffdbfd6ef6..0b4015830f5 100644 --- a/crates/storage/codecs/derive/src/lib.rs +++ b/crates/storage/codecs/derive/src/lib.rs @@ -49,14 +49,14 @@ mod compact; /// own encoding and do not rely on the bitflag struct. /// - `Bytes` fields and any types containing a `Bytes` field should be placed last to ensure /// efficient decoding. -#[proc_macro_derive(Compact, attributes(maybe_zero))] +#[proc_macro_derive(Compact, attributes(maybe_zero, reth_codecs))] pub fn derive(input: TokenStream) -> TokenStream { let is_zstd = false; compact::derive(input, is_zstd) } /// Adds `zstd` compression to derived [`Compact`]. 
-#[proc_macro_derive(CompactZstd, attributes(maybe_zero))] +#[proc_macro_derive(CompactZstd, attributes(maybe_zero, reth_codecs))] pub fn derive_zstd(input: TokenStream) -> TokenStream { let is_zstd = true; compact::derive(input, is_zstd) diff --git a/crates/storage/codecs/src/alloy/authorization_list.rs b/crates/storage/codecs/src/alloy/authorization_list.rs index 3fc9518a637..e17c0fb32a1 100644 --- a/crates/storage/codecs/src/alloy/authorization_list.rs +++ b/crates/storage/codecs/src/alloy/authorization_list.rs @@ -11,12 +11,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with `alloy_eips::eip7702::Authorization` #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct Authorization { chain_id: u64, address: Address, diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs index b29fe526df4..a35d4947db7 100644 --- a/crates/storage/codecs/src/alloy/genesis_account.rs +++ b/crates/storage/codecs/src/alloy/genesis_account.rs @@ -11,6 +11,7 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with `alloy_genesis::GenesisAccount` #[derive(Debug, Clone, PartialEq, Eq, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct GenesisAccountRef<'a> { /// The nonce of the account at genesis. nonce: Option, @@ -27,12 +28,13 @@ pub(crate) struct GenesisAccountRef<'a> { /// Acts as bridge which simplifies Compact implementation for /// `AlloyGenesisAccount`. 
#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct GenesisAccount { /// The nonce of the account at genesis. nonce: Option, @@ -47,21 +49,23 @@ pub(crate) struct GenesisAccount { } #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct StorageEntries { entries: Vec, } #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct StorageEntry { key: B256, value: B256, diff --git a/crates/storage/codecs/src/alloy/header.rs b/crates/storage/codecs/src/alloy/header.rs index 78f2029c32e..04b7d6ab718 100644 --- a/crates/storage/codecs/src/alloy/header.rs +++ b/crates/storage/codecs/src/alloy/header.rs @@ -18,6 +18,7 @@ use alloy_primitives::{Address, BlockNumber, Bloom, Bytes, B256, U256}; )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct Header { parent_hash: B256, ommers_hash: B256, @@ -54,6 +55,7 @@ pub(crate) struct Header { )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct 
HeaderExt { requests_hash: Option, } diff --git a/crates/storage/codecs/src/alloy/transaction/eip1559.rs b/crates/storage/codecs/src/alloy/transaction/eip1559.rs index 0e7f44cdec1..6d910a6900c 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip1559.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip1559.rs @@ -13,11 +13,12 @@ use alloy_primitives::{Bytes, ChainId, TxKind, U256}; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip1559`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Compact, Default)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] -#[cfg_attr(any(test, feature = "test-utils"), crate::add_arbitrary_tests(compact))] +#[cfg_attr(any(test, feature = "test-utils"), crate::add_arbitrary_tests(crate, compact))] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] pub(crate) struct TxEip1559 { chain_id: ChainId, diff --git a/crates/storage/codecs/src/alloy/transaction/eip2930.rs b/crates/storage/codecs/src/alloy/transaction/eip2930.rs index 75cab9e8a09..aeb08f361be 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip2930.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip2930.rs @@ -15,12 +15,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip2930`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip2930 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs index 
c89e2b0785b..fac9ab9a1b2 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip4844.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -16,9 +16,10 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip4844`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr(any(test, feature = "test-utils"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip4844 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/eip7702.rs b/crates/storage/codecs/src/alloy/transaction/eip7702.rs index 8acf59425f2..eab10af0b66 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip7702.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip7702.rs @@ -16,12 +16,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip7702`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip7702 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/legacy.rs b/crates/storage/codecs/src/alloy/transaction/legacy.rs index c83626aa4cf..60250ba64af 100644 --- a/crates/storage/codecs/src/alloy/transaction/legacy.rs +++ b/crates/storage/codecs/src/alloy/transaction/legacy.rs @@ -6,10 +6,11 @@ use alloy_primitives::{Bytes, ChainId, TxKind, U256}; /// Legacy transaction. 
#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize), - crate::add_arbitrary_tests(compact) + crate::add_arbitrary_tests(crate, compact) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] pub(crate) struct TxLegacy { diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index 22f508fd4ce..bb970b58177 100644 --- a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -19,7 +19,8 @@ use reth_codecs_derive::add_arbitrary_tests; derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxDeposit { source_hash: B256, from: Address, diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs index 0f3347cec1a..8aa5671798d 100644 --- a/crates/storage/codecs/src/alloy/withdrawal.rs +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -13,8 +13,9 @@ use reth_codecs_derive::add_arbitrary_tests; any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] +#[reth_codecs(crate = "crate")] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct Withdrawal { /// Monotonically increasing identifier issued by consensus layer. 
index: u64, diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 598f2131bde..284c6454f83 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -662,7 +662,8 @@ mod tests { } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Compact, arbitrary::Arbitrary)] - #[add_arbitrary_tests(compact)] + #[add_arbitrary_tests(crate, compact)] + #[reth_codecs(crate = "crate")] struct TestStruct { f_u64: u64, f_u256: U256, @@ -714,7 +715,8 @@ mod tests { #[derive( Debug, PartialEq, Clone, Default, Serialize, Deserialize, Compact, arbitrary::Arbitrary, )] - #[add_arbitrary_tests(compact)] + #[add_arbitrary_tests(crate, compact)] + #[reth_codecs(crate = "crate")] enum TestEnum { #[default] Var0, From 21d911abb2cf594834c1818d2f723718b5109f8b Mon Sep 17 00:00:00 2001 From: Kien Trinh <51135161+kien6034@users.noreply.github.com> Date: Mon, 4 Nov 2024 00:49:07 +0700 Subject: [PATCH 306/970] docs: add debug.etherscan in the book (#12293) Co-authored-by: Matthias Seitz --- book/run/mainnet.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/book/run/mainnet.md b/book/run/mainnet.md index 6f1ec144df8..c4908971f69 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -84,3 +84,13 @@ In the meantime, consider setting up [observability](./observability.md) to moni ## Running without a Consensus Layer We provide a method for running Reth without a Consensus Layer via the `--debug.tip ` parameter. If you provide that to your node, it will simulate sending an `engine_forkchoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients other methods of syncing like importing Lighthouse as a library. 
+ +## Running with Etherscan as Block Source + +You can use `--debug.etherscan` to run Reth with a fake consensus client that advances the chain using recent blocks on Etherscan. This requires an Etherscan API key (set via `ETHERSCAN_API_KEY` environment variable). Optionally, specify a custom API URL with `--debug.etherscan `. + +Example: +```bash +export ETHERSCAN_API_KEY=your_api_key_here +reth node --debug.etherscan +``` \ No newline at end of file From bb03578eed90d2eff4d75a420cb4bee224102167 Mon Sep 17 00:00:00 2001 From: garwah <14845405+garwahl@users.noreply.github.com> Date: Mon, 4 Nov 2024 18:01:40 +1100 Subject: [PATCH 307/970] chore: Move FillTxEnv::fill_tx_env into SignedTransaction trait and implement in TransactionSigned (#12186) Co-authored-by: garwah Co-authored-by: Emilia Hane --- .../src/transaction/signed.rs | 4 + crates/primitives/src/transaction/mod.rs | 117 ++++++++++++++++++ 2 files changed, 121 insertions(+) diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 748ef39666d..4c12437212a 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -6,6 +6,7 @@ use core::hash::Hash; use alloy_consensus::Transaction; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{keccak256, Address, Signature, TxHash, B256}; +use revm_primitives::TxEnv; /// A signed transaction. pub trait SignedTransaction: @@ -64,4 +65,7 @@ pub trait SignedTransaction: fn recalculate_hash(&self) -> B256 { keccak256(self.encoded_2718()) } + + /// Fills [`TxEnv`] with an [`Address`] and transaction. 
+ fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 59d3b9e1297..b9a24316c40 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -64,6 +64,8 @@ use tx_type::{ }; use alloc::vec::Vec; +use reth_primitives_traits::SignedTransaction; +use revm_primitives::{AuthorizationList, TxEnv}; /// Either a transaction hash or number. pub type TxHashOrNumber = BlockHashOrNumber; @@ -1123,6 +1125,11 @@ impl TransactionSigned { &self.signature } + /// Transaction + pub const fn transaction(&self) -> &Transaction { + &self.transaction + } + /// Transaction hash. Used to identify transaction. pub const fn hash(&self) -> TxHash { self.hash @@ -1398,6 +1405,116 @@ impl alloy_consensus::Transaction for TransactionSigned { } } +impl SignedTransaction for TransactionSigned { + type Transaction = Transaction; + + fn tx_hash(&self) -> &TxHash { + Self::hash_ref(self) + } + + fn transaction(&self) -> &Self::Transaction { + Self::transaction(self) + } + + fn signature(&self) -> &Signature { + Self::signature(self) + } + + fn recover_signer(&self) -> Option
{ + Self::recover_signer(self) + } + + fn recover_signer_unchecked(&self) -> Option
{ + Self::recover_signer_unchecked(self) + } + + fn from_transaction_and_signature( + transaction: Self::Transaction, + signature: Signature, + ) -> Self { + Self::from_transaction_and_signature(transaction, signature) + } + + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { + tx_env.caller = sender; + match self.as_ref() { + Transaction::Legacy(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = tx.chain_id; + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clear(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip2930(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip1559(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip4844(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = 
TxKind::Call(tx.to); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clone_from(&tx.blob_versioned_hashes); + tx_env.max_fee_per_blob_gas = Some(U256::from(tx.max_fee_per_blob_gas)); + tx_env.authorization_list = None; + } + Transaction::Eip7702(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to.into(); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = + Some(AuthorizationList::Signed(tx.authorization_list.clone())); + } + #[cfg(feature = "optimism")] + Transaction::Deposit(_) => {} + } + } +} + impl From for TransactionSigned { fn from(recovered: TransactionSignedEcRecovered) -> Self { recovered.signed_transaction From 98140416032dd0e964c28fe87675e453baa815d2 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 4 Nov 2024 09:18:13 +0100 Subject: [PATCH 308/970] primitives: rm useless `OP_` constants (#12298) --- crates/primitives-traits/src/constants/mod.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index f3dd28e1929..94eaf95c269 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,6 +1,6 @@ //! Ethereum protocol-related constants -use alloy_primitives::{address, b256, Address, B256}; +use alloy_primitives::{b256, B256}; /// Gas units, for example [`GIGAGAS`]. 
pub mod gas_units; @@ -16,12 +16,6 @@ pub const MINIMUM_GAS_LIMIT: u64 = 5000; pub const HOLESKY_GENESIS_HASH: B256 = b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); -/// From address from Optimism system txs: `0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001` -pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeaddeaddeaddead0001"); - -/// To address from Optimism system txs: `0x4200000000000000000000000000000000000015` -pub const OP_SYSTEM_TX_TO_ADDR: Address = address!("4200000000000000000000000000000000000015"); - /// The number of blocks to unwind during a reorg that already became a part of canonical chain. /// /// In reality, the node can end up in this particular situation very rarely. It would happen only From 56b76871edd9680290242dd0f7d66d7a8b8b6cba Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 4 Nov 2024 09:21:17 +0100 Subject: [PATCH 309/970] primitives: rm alloy `Withdrawal` reexport (#12296) --- crates/consensus/common/src/validation.rs | 3 ++- crates/evm/src/state_change.rs | 3 ++- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives-traits/src/withdrawal.rs | 5 +---- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/proofs.rs | 6 ++---- .../storage/provider/src/providers/blockchain_provider.rs | 4 ++-- crates/storage/provider/src/providers/consistent.rs | 6 ++++-- crates/storage/provider/src/providers/database/mod.rs | 5 ++--- crates/storage/provider/src/providers/database/provider.rs | 4 ++-- crates/storage/provider/src/providers/mod.rs | 5 ++--- .../storage/provider/src/providers/static_file/manager.rs | 5 ++--- crates/storage/provider/src/test_utils/blocks.rs | 3 ++- crates/storage/provider/src/test_utils/mock.rs | 4 ++-- crates/storage/provider/src/test_utils/noop.rs | 4 ++-- crates/storage/storage-api/src/withdrawals.rs | 4 ++-- examples/custom-beacon-withdrawals/src/main.rs | 4 ++-- testing/testing-utils/Cargo.toml | 1 + 
testing/testing-utils/src/generators.rs | 3 ++- 19 files changed, 36 insertions(+), 37 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index c6539cdcf71..a6e5d21587f 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -275,6 +275,7 @@ pub fn validate_against_parent_4844( mod tests { use super::*; use alloy_consensus::{TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; + use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{ hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, Parity, Sealable, U256, }; @@ -283,7 +284,7 @@ mod tests { use reth_chainspec::ChainSpecBuilder; use reth_primitives::{ proofs, Account, BlockBody, BlockHashOrNumber, Signature, Transaction, TransactionSigned, - Withdrawal, Withdrawals, + Withdrawals, }; use reth_storage_api::{ errors::provider::ProviderResult, AccountReader, HeaderProvider, WithdrawalsProvider, diff --git a/crates/evm/src/state_change.rs b/crates/evm/src/state_change.rs index 2a3d93f94d9..2d91ac30eeb 100644 --- a/crates/evm/src/state_change.rs +++ b/crates/evm/src/state_change.rs @@ -1,9 +1,10 @@ //! State changes that are not related to transactions. +use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{map::HashMap, Address, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc; -use reth_primitives::{Block, Withdrawal, Withdrawals}; +use reth_primitives::{Block, Withdrawals}; /// Collect all balance changes at the end of the block. 
/// diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 9f41bbd47fb..90a6935ae10 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -34,7 +34,7 @@ pub mod block; pub use block::{body::BlockBody, Block}; mod withdrawal; -pub use withdrawal::{Withdrawal, Withdrawals}; +pub use withdrawal::Withdrawals; mod error; pub use error::{GotExpected, GotExpectedBoxed}; diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 995e60292c6..8f072afa578 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -1,13 +1,10 @@ //! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. use alloc::vec::Vec; +use alloy_eips::eip4895::Withdrawal; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use derive_more::{AsRef, Deref, DerefMut, From, IntoIterator}; use reth_codecs::{add_arbitrary_tests, Compact}; - -/// Re-export from `alloy_eips`. -#[doc(inline)] -pub use alloy_eips::eip4895::Withdrawal; use serde::{Deserialize, Serialize}; /// Represents a collection of Withdrawals. diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 4e3f1d3bd24..9bb27e658ca 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -45,7 +45,7 @@ pub use receipt::{ }; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, - LogData, SealedHeader, StorageEntry, Withdrawal, Withdrawals, + LogData, SealedHeader, StorageEntry, Withdrawals, }; pub use static_file::StaticFileSegment; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 1697246702a..000244d2c54 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,11 +1,9 @@ //! Helper function for calculating Merkle proofs and hashes. 
-use crate::{ - Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned, Withdrawal, -}; +use crate::{Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned}; use alloc::vec::Vec; use alloy_consensus::EMPTY_OMMER_ROOT_HASH; -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawal}; use alloy_primitives::{keccak256, B256}; use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 64a8a204a32..85f759310ba 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -9,7 +9,7 @@ use crate::{ StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ @@ -25,7 +25,7 @@ use reth_node_types::NodeTypesWithDB; use reth_primitives::{ Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, - Withdrawal, Withdrawals, + Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index e6ca1a91932..0784bdb6346 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ 
b/crates/storage/provider/src/providers/consistent.rs @@ -6,7 +6,9 @@ use crate::{ StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber}; +use alloy_eips::{ + eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, +}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; use reth_chainspec::{ChainInfo, EthereumHardforks}; @@ -17,7 +19,7 @@ use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; use reth_primitives::{ Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, - Withdrawal, Withdrawals, + Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 04a30ce90aa..bd0466ff973 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -7,7 +7,7 @@ use crate::{ PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use core::fmt; use reth_chainspec::{ChainInfo, EthereumHardforks}; @@ -18,8 +18,7 @@ use reth_evm::ConfigureEvmEnv; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ Block, BlockWithSenders, Header, Receipt, SealedBlock, 
SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, - Withdrawals, + StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 2af22cec0b5..d47532e712d 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -15,7 +15,7 @@ use crate::{ StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use itertools::{izip, Itertools}; use rayon::slice::ParallelSliceMut; @@ -43,7 +43,7 @@ use reth_primitives::{ Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, - Withdrawal, Withdrawals, + Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 3b24617fd95..5e95c6ce0db 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -7,7 +7,7 @@ use crate::{ StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, TransactionVariant, 
TransactionsProvider, TreeViewer, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, @@ -21,8 +21,7 @@ use reth_evm::ConfigureEvmEnv; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, - Withdrawals, + SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e81dc01f722..70c1e38f6ac 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -7,7 +7,7 @@ use crate::{ ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use dashmap::DashMap; use notify::{RecommendedWatcher, RecursiveMode, Watcher}; @@ -31,8 +31,7 @@ use reth_primitives::{ DEFAULT_BLOCKS_PER_STATIC_FILE, }, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, - Withdrawals, + StaticFileSegment, TransactionMeta, TransactionSigned, 
TransactionSignedNoHash, Withdrawals, }; use reth_stages_types::{PipelineTarget, StageId}; use reth_storage_api::DBProvider; diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index cacb71b351d..d524f47cc75 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -6,12 +6,13 @@ use alloy_primitives::{ TxKind, B256, U256, }; +use alloy_eips::eip4895::Withdrawal; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_node_types::NodeTypes; use reth_primitives::{ Account, BlockBody, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - Signature, Transaction, TransactionSigned, TxType, Withdrawal, Withdrawals, + Signature, Transaction, TransactionSigned, TxType, Withdrawals, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; use revm::{db::BundleState, primitives::AccountInfo}; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 07fa505b2b4..2e7cd6a06de 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -6,7 +6,7 @@ use crate::{ StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_consensus::constants::EMPTY_ROOT_HASH; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, @@ -23,7 +23,7 @@ use reth_node_types::NodeTypes; use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + TransactionSignedNoHash, Withdrawals, }; use reth_stages_types::{StageCheckpoint, StageId}; 
use reth_storage_api::{ diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index e0943764772..8e6dc7425cf 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -4,7 +4,7 @@ use std::{ sync::Arc, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, @@ -20,7 +20,7 @@ use reth_evm::ConfigureEvmEnv; use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + TransactionSignedNoHash, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/storage-api/src/withdrawals.rs b/crates/storage/storage-api/src/withdrawals.rs index 2de69b34eb6..ba422a3b33b 100644 --- a/crates/storage/storage-api/src/withdrawals.rs +++ b/crates/storage/storage-api/src/withdrawals.rs @@ -1,5 +1,5 @@ -use alloy_eips::BlockHashOrNumber; -use reth_primitives::{Withdrawal, Withdrawals}; +use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; +use reth_primitives::Withdrawals; use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching [Withdrawal] related data. 
diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 09dad2f7007..43e5f7428f6 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -3,7 +3,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] -use alloy_eips::eip7685::Requests; +use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; use alloy_sol_macro::sol; use alloy_sol_types::SolCall; #[cfg(feature = "optimism")] @@ -30,7 +30,7 @@ use reth_primitives::{ revm_primitives::{ address, Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, U256, }, - BlockWithSenders, Receipt, Withdrawal, + BlockWithSenders, Receipt, }; use std::{fmt::Display, sync::Arc}; diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index 98bfeabdfb1..3e0f58a7bd0 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -17,6 +17,7 @@ reth-primitives = { workspace = true, features = ["secp256k1"] } alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 571727cb2fd..84225ea72cd 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,6 +1,7 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. 
use alloy_consensus::{Transaction as _, TxLegacy}; +use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{Address, BlockNumber, Bytes, Parity, Sealable, TxKind, B256, U256}; pub use rand::Rng; use rand::{ @@ -8,7 +9,7 @@ use rand::{ }; use reth_primitives::{ proofs, sign_message, Account, BlockBody, Header, Log, Receipt, SealedBlock, SealedHeader, - StorageEntry, Transaction, TransactionSigned, Withdrawal, Withdrawals, + StorageEntry, Transaction, TransactionSigned, Withdrawals, }; use secp256k1::{Keypair, Secp256k1}; use std::{ From 566f2b4950d9e86c83dd527cde8d99b3a35f6a49 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 4 Nov 2024 11:15:56 +0100 Subject: [PATCH 310/970] primitives: rm alloy `BlockHashOrNumber` reexport (#12302) --- bin/reth/src/commands/debug_cmd/execution.rs | 2 +- bin/reth/src/commands/debug_cmd/in_memory_merkle.rs | 2 +- bin/reth/src/commands/debug_cmd/merkle.rs | 2 +- crates/consensus/auto-seal/src/client.rs | 3 ++- crates/consensus/auto-seal/src/lib.rs | 6 +++--- crates/consensus/common/src/validation.rs | 5 ++--- crates/engine/local/Cargo.toml | 6 +++--- crates/evm/src/provider.rs | 2 +- crates/net/eth-wire-types/src/blocks.rs | 3 ++- crates/node/core/src/node_config.rs | 3 ++- crates/node/core/src/utils.rs | 3 ++- crates/primitives/src/block.rs | 4 +--- crates/primitives/src/lib.rs | 4 ++-- crates/primitives/src/transaction/mod.rs | 2 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 4 ++-- crates/rpc/rpc-eth-types/src/cache/mod.rs | 5 ++--- .../storage/provider/src/providers/blockchain_provider.rs | 4 ++-- 17 files changed, 30 insertions(+), 30 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 215afacb583..cc584c89287 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -1,6 +1,7 @@ //! Command for debugging execution. 
use crate::{args::NetworkArgs, utils::get_single_header}; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; @@ -23,7 +24,6 @@ use reth_network_api::NetworkInfo; use reth_network_p2p::{headers::client::HeadersClient, BlockClient}; use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 51851c0b0ad..2c56da9b4cf 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -4,6 +4,7 @@ use crate::{ args::NetworkArgs, utils::{get_single_body, get_single_header}, }; +use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_chainspec::ChainSpec; @@ -19,7 +20,6 @@ use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 8e02a52eaf0..3c6e38512c9 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -1,5 +1,6 @@ //! Command for debugging merkle trie calculation. 
use crate::{args::NetworkArgs, utils::get_single_header}; +use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; @@ -18,7 +19,6 @@ use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs index f9b80f10bb5..0083192d7df 100644 --- a/crates/consensus/auto-seal/src/client.rs +++ b/crates/consensus/auto-seal/src/client.rs @@ -1,6 +1,7 @@ //! This includes download client implementations for auto sealing miners. use crate::Storage; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, @@ -9,7 +10,7 @@ use reth_network_p2p::{ priority::Priority, }; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header}; +use reth_primitives::{BlockBody, Header}; use std::fmt::Debug; use tracing::{trace, warn}; diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 16299e19ba4..ad7e66acc0e 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -15,7 +15,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_eips::eip7685::Requests; +use alloy_eips::{eip1898::BlockHashOrNumber, eip7685::Requests}; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_beacon_consensus::BeaconEngineMessage; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -26,8 
+26,8 @@ use reth_execution_errors::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - proofs, Block, BlockBody, BlockHashOrNumber, BlockWithSenders, Header, SealedBlock, - SealedHeader, TransactionSigned, Withdrawals, + proofs, Block, BlockBody, BlockWithSenders, Header, SealedBlock, SealedHeader, + TransactionSigned, Withdrawals, }; use reth_provider::{BlockReaderIdExt, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index a6e5d21587f..092330595ff 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -275,7 +275,7 @@ pub fn validate_against_parent_4844( mod tests { use super::*; use alloy_consensus::{TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; - use alloy_eips::eip4895::Withdrawal; + use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; use alloy_primitives::{ hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, Parity, Sealable, U256, }; @@ -283,8 +283,7 @@ mod tests { use rand::Rng; use reth_chainspec::ChainSpecBuilder; use reth_primitives::{ - proofs, Account, BlockBody, BlockHashOrNumber, Signature, Transaction, TransactionSigned, - Withdrawals, + proofs, Account, BlockBody, Signature, Transaction, TransactionSigned, Withdrawals, }; use reth_storage_api::{ errors::provider::ProviderResult, AccountReader, HeaderProvider, WithdrawalsProvider, diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index d9dc6325339..2ab448e3bbf 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -47,7 +47,7 @@ workspace = true [features] optimism = [ - "op-alloy-rpc-types-engine", - "reth-beacon-consensus/optimism", - "reth-provider/optimism" + "op-alloy-rpc-types-engine", + "reth-beacon-consensus/optimism", + "reth-provider/optimism", ] diff --git a/crates/evm/src/provider.rs 
b/crates/evm/src/provider.rs index 8db828ec4a0..84c38db0dc5 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -1,7 +1,7 @@ //! Provider trait for populating the EVM environment. use crate::ConfigureEvmEnv; -use alloy_eips::eip1898::BlockHashOrNumber; +use alloy_eips::BlockHashOrNumber; use reth_primitives::Header; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 878b4573f2b..5ae84319005 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -113,9 +113,10 @@ mod tests { HeadersDirection, }; use alloy_consensus::TxLegacy; + use alloy_eips::BlockHashOrNumber; use alloy_primitives::{hex, Parity, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::{BlockHashOrNumber, Header, Signature, Transaction, TransactionSigned}; + use reth_primitives::{Header, Signature, Transaction, TransactionSigned}; use std::str::FromStr; use super::BlockBody; diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 80fb5152e7b..3848772c415 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -15,8 +15,9 @@ use reth_network_p2p::headers::client::HeadersClient; use serde::{de::DeserializeOwned, Serialize}; use std::{fs, path::Path}; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; -use reth_primitives::{BlockHashOrNumber, Head, SealedHeader}; +use reth_primitives::{Head, SealedHeader}; use reth_stages_types::StageId; use reth_storage_api::{ BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader, diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index a64d1211455..a04d4e324e1 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -1,6 +1,7 @@ //! 
Utility functions for node startup and shutdown, for example path parsing and retrieving single //! blocks from the network. +use alloy_eips::BlockHashOrNumber; use alloy_primitives::Sealable; use alloy_rpc_types_engine::{JwtError, JwtSecret}; use eyre::Result; @@ -11,7 +12,7 @@ use reth_network_p2p::{ headers::client::{HeadersClient, HeadersDirection, HeadersRequest}, priority::Priority, }; -use reth_primitives::{BlockHashOrNumber, SealedBlock, SealedHeader}; +use reth_primitives::{SealedBlock, SealedHeader}; use std::{ env::VarError, path::{Path, PathBuf}, diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 7e5e76f1b06..153f67b03d9 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -2,9 +2,7 @@ use crate::{ GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, }; use alloc::vec::Vec; -pub use alloy_eips::eip1898::{ - BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, -}; +pub use alloy_eips::eip1898::{BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash}; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Address, Bytes, Sealable, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 9bb27e658ca..3b8589f8f1d 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -34,8 +34,8 @@ pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; pub use block::{ - Block, BlockBody, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, BlockWithSenders, - ForkBlock, RpcBlockHash, SealedBlock, SealedBlockWithSenders, + Block, BlockBody, BlockId, BlockNumHash, BlockNumberOrTag, BlockWithSenders, ForkBlock, + RpcBlockHash, SealedBlock, SealedBlockWithSenders, }; #[cfg(feature = "reth-codec")] pub use compression::*; diff 
--git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index b9a24316c40..f7d36be0ab8 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,12 +1,12 @@ //! Transaction types. -use crate::BlockHashOrNumber; #[cfg(any(test, feature = "reth-codec"))] use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; use alloy_consensus::{ SignableTransaction, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, }; use alloy_eips::{ + eip1898::BlockHashOrNumber, eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, eip2930::AccessList, eip7702::SignedAuthorization, diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 383da2d21ff..20eeb390ac1 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1,7 +1,7 @@ use crate::{ capabilities::EngineCapabilities, metrics::EngineApiMetrics, EngineApiError, EngineApiResult, }; -use alloy_eips::{eip4844::BlobAndProofV1, eip7685::Requests}; +use alloy_eips::{eip1898::BlockHashOrNumber, eip4844::BlobAndProofV1, eip7685::Requests}; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, @@ -19,7 +19,7 @@ use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::{Block, BlockHashOrNumber, EthereumHardfork}; +use reth_primitives::{Block, EthereumHardfork}; use reth_rpc_api::EngineApiServer; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index cbf05f2764e..b6b0364c477 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ 
b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,14 +1,13 @@ //! Async caching support for eth RPC +use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::{future::Either, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; use reth_evm::{provider::EvmEnvProvider, ConfigureEvm}; use reth_execution_types::Chain; -use reth_primitives::{ - BlockHashOrNumber, Header, Receipt, SealedBlockWithSenders, TransactionSigned, -}; +use reth_primitives::{Header, Receipt, SealedBlockWithSenders, TransactionSigned}; use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 85f759310ba..669a3555931 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1444,7 +1444,7 @@ mod tests { assert_eq!( provider .withdrawals_by_block( - reth_primitives::BlockHashOrNumber::Number(15), + alloy_eips::BlockHashOrNumber::Number(15), shainghai_timestamp ) .expect("could not call withdrawals by block"), @@ -1456,7 +1456,7 @@ mod tests { assert_eq!( provider .withdrawals_by_block( - reth_primitives::BlockHashOrNumber::Number(block.number), + alloy_eips::BlockHashOrNumber::Number(block.number), shainghai_timestamp )? 
.unwrap(), From 0475af8bdb605ef8deadeb3452a59f9d71373d49 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 4 Nov 2024 12:47:18 +0100 Subject: [PATCH 311/970] primitives: rm alloy `BlockId` reexport (#12303) --- Cargo.lock | 4 +++- crates/primitives/src/block.rs | 4 ++-- crates/primitives/src/lib.rs | 4 ++-- crates/rpc/rpc-api/src/debug.rs | 3 ++- crates/rpc/rpc-api/src/otterscan.rs | 2 +- crates/rpc/rpc-api/src/reth.rs | 2 +- crates/rpc/rpc-api/src/trace.rs | 2 +- crates/rpc/rpc-builder/Cargo.toml | 1 + crates/rpc/rpc-builder/tests/it/http.rs | 3 ++- crates/rpc/rpc-eth-api/src/core.rs | 4 ++-- crates/rpc/rpc-eth-api/src/helpers/block.rs | 3 ++- crates/rpc/rpc-eth-api/src/helpers/state.rs | 3 ++- crates/rpc/rpc-eth-api/src/helpers/transaction.rs | 6 ++---- crates/rpc/rpc-eth-types/src/error.rs | 3 ++- crates/rpc/rpc-eth-types/src/pending_block.rs | 3 ++- crates/rpc/rpc-server-types/Cargo.toml | 3 +-- crates/rpc/rpc-server-types/src/result.rs | 2 +- crates/rpc/rpc-testing-util/Cargo.toml | 3 ++- crates/rpc/rpc-testing-util/src/debug.rs | 3 ++- crates/rpc/rpc-testing-util/src/trace.rs | 2 +- crates/rpc/rpc/src/debug.rs | 4 ++-- crates/rpc/rpc/src/engine.rs | 3 ++- crates/rpc/rpc/src/otterscan.rs | 3 ++- crates/rpc/rpc/src/reth.rs | 2 +- crates/rpc/rpc/src/trace.rs | 3 ++- 25 files changed, 43 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b3318c4ae0..9b5a5350c16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8706,6 +8706,7 @@ dependencies = [ name = "reth-rpc-api-testing-util" version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types", "alloy-rpc-types-eth", @@ -8725,6 +8726,7 @@ dependencies = [ name = "reth-rpc-builder" version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8910,13 +8912,13 @@ dependencies = [ name = "reth-rpc-server-types" version = "1.1.0" dependencies = [ + "alloy-eips", 
"alloy-primitives", "alloy-rpc-types-engine", "jsonrpsee-core", "jsonrpsee-types", "reth-errors", "reth-network-api", - "reth-primitives", "serde", "strum", ] diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 153f67b03d9..2b651652df5 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -2,7 +2,7 @@ use crate::{ GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, }; use alloc::vec::Vec; -pub use alloy_eips::eip1898::{BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash}; +pub use alloy_eips::eip1898::{BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash}; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Address, Bytes, Sealable, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; @@ -905,7 +905,7 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { use super::{BlockNumberOrTag::*, *}; - use alloy_eips::eip1898::HexStringMissingPrefixError; + use alloy_eips::{eip1898::HexStringMissingPrefixError, BlockId}; use alloy_primitives::hex_literal::hex; use alloy_rlp::{Decodable, Encodable}; use std::str::FromStr; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 3b8589f8f1d..679b24abd84 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -34,8 +34,8 @@ pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; pub use block::{ - Block, BlockBody, BlockId, BlockNumHash, BlockNumberOrTag, BlockWithSenders, ForkBlock, - RpcBlockHash, SealedBlock, SealedBlockWithSenders, + Block, BlockBody, BlockNumHash, BlockNumberOrTag, BlockWithSenders, ForkBlock, RpcBlockHash, + SealedBlock, SealedBlockWithSenders, }; #[cfg(feature = "reth-codec")] pub use compression::*; diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 3e03210f1ff..162699c6ebc 100644 --- 
a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,3 +1,4 @@ +use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256}; use alloy_rpc_types::{Block, Bundle, StateContext}; use alloy_rpc_types_debug::ExecutionWitness; @@ -6,7 +7,7 @@ use alloy_rpc_types_trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{BlockId, BlockNumberOrTag}; +use reth_primitives::BlockNumberOrTag; /// Debug rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs index ee805b482c3..d3e61c03104 100644 --- a/crates/rpc/rpc-api/src/otterscan.rs +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -1,3 +1,4 @@ +use alloy_eips::BlockId; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types::Header; @@ -6,7 +7,6 @@ use alloy_rpc_types_trace::otterscan::{ TransactionsWithReceipts, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; /// Otterscan rpc interface. 
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "ots"))] diff --git a/crates/rpc/rpc-api/src/reth.rs b/crates/rpc/rpc-api/src/reth.rs index 98c31b78f9a..0589ffc00ce 100644 --- a/crates/rpc/rpc-api/src/reth.rs +++ b/crates/rpc/rpc-api/src/reth.rs @@ -1,6 +1,6 @@ +use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; use std::collections::HashMap; /// Reth API namespace for reth-specific methods diff --git a/crates/rpc/rpc-api/src/trace.rs b/crates/rpc/rpc-api/src/trace.rs index 58dda422ab8..45059284a2d 100644 --- a/crates/rpc/rpc-api/src/trace.rs +++ b/crates/rpc/rpc-api/src/trace.rs @@ -1,3 +1,4 @@ +use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256}; use alloy_rpc_types::{state::StateOverride, BlockOverrides, Index}; use alloy_rpc_types_eth::transaction::TransactionRequest; @@ -7,7 +8,6 @@ use alloy_rpc_types_trace::{ parity::*, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; /// Ethereum trace API #[cfg_attr(not(feature = "client"), rpc(server, namespace = "trace"))] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 2d10dabf8af..b6ae86c7408 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -68,6 +68,7 @@ alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types-trace.workspace = true alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } serde_json.workspace = true diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index ed9ef56d62b..5c33a5d34df 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -2,6 +2,7 @@ //! 
Standalone http tests use crate::utils::{launch_http, launch_http_ws, launch_ws}; +use alloy_eips::BlockId; use alloy_primitives::{hex_literal::hex, Address, Bytes, TxHash, B256, B64, U256, U64}; use alloy_rpc_types_eth::{ transaction::TransactionRequest, Block, FeeHistory, Filter, Index, Log, @@ -18,7 +19,7 @@ use jsonrpsee::{ types::error::ErrorCode, }; use reth_network_peers::NodeRecord; -use reth_primitives::{BlockId, BlockNumberOrTag, Receipt}; +use reth_primitives::{BlockNumberOrTag, Receipt}; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, DebugApiClient, EthFilterApiClient, NetApiClient, OtterscanClient, TraceApiClient, diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 66bc5a44d2d..185297c2255 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -1,7 +1,7 @@ //! Implementation of the [`jsonrpsee`] generated [`EthApiServer`] trait. Handles RPC requests for //! the `eth_` namespace. use alloy_dyn_abi::TypedData; -use alloy_eips::eip2930::AccessListResult; +use alloy_eips::{eip2930::AccessListResult, BlockId}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, B256, B64, U256, U64}; use alloy_rpc_types::{ @@ -13,7 +13,7 @@ use alloy_rpc_types::{ }; use alloy_rpc_types_eth::transaction::TransactionRequest; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{BlockId, BlockNumberOrTag}; +use reth_primitives::BlockNumberOrTag; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index fa397db35e0..bb8fd08ed87 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -2,9 +2,10 @@ use std::sync::Arc; +use alloy_eips::BlockId; use alloy_rpc_types::{Header, Index}; use futures::Future; -use reth_primitives::{BlockId, Receipt, SealedBlock, 
SealedBlockWithSenders}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 97c94b94932..d980b9114b1 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -2,13 +2,14 @@ //! RPC methods. use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types::{serde_helpers::JsonStorageKey, Account, EIP1186AccountProofResponse}; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::ConfigureEvmEnv; -use reth_primitives::{BlockId, Header}; +use reth_primitives::Header; use reth_provider::{ BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 3c526cbb025..ab94e3dd107 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -3,15 +3,13 @@ use alloy_consensus::Transaction; use alloy_dyn_abi::TypedData; -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types::{BlockNumberOrTag, TransactionInfo}; use alloy_rpc_types_eth::transaction::TransactionRequest; use futures::Future; -use reth_primitives::{ - BlockId, Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned, -}; +use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned}; use reth_provider::{BlockNumReader, BlockReaderIdExt, 
ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_types::{ utils::{binary_search, recover_raw_transaction}, diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error.rs index b38b3122708..9241e9e0b6b 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error.rs @@ -2,11 +2,12 @@ use std::time::Duration; +use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, U256}; use alloy_rpc_types::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; use alloy_sol_types::decode_revert_reason; use reth_errors::RethError; -use reth_primitives::{revm_primitives::InvalidHeader, BlockId}; +use reth_primitives::revm_primitives::InvalidHeader; use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, }; diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 949e205dcf8..d3e7c4158ac 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,9 +4,10 @@ use std::time::Instant; +use alloy_eips::BlockId; use alloy_primitives::B256; use derive_more::Constructor; -use reth_primitives::{BlockId, BlockNumberOrTag, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{BlockNumberOrTag, Receipt, SealedBlockWithSenders, SealedHeader}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. 
diff --git a/crates/rpc/rpc-server-types/Cargo.toml b/crates/rpc/rpc-server-types/Cargo.toml index 08ecd394774..275d8ea561b 100644 --- a/crates/rpc/rpc-server-types/Cargo.toml +++ b/crates/rpc/rpc-server-types/Cargo.toml @@ -14,11 +14,11 @@ workspace = true [dependencies] reth-errors.workspace = true reth-network-api.workspace = true -reth-primitives.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true # rpc jsonrpsee-core.workspace = true @@ -27,4 +27,3 @@ jsonrpsee-types.workspace = true # misc strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } - diff --git a/crates/rpc/rpc-server-types/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs index 10ce1650ad1..5d1b702e9fc 100644 --- a/crates/rpc/rpc-server-types/src/result.rs +++ b/crates/rpc/rpc-server-types/src/result.rs @@ -2,10 +2,10 @@ use std::fmt; +use alloy_eips::BlockId; use alloy_rpc_types_engine::PayloadError; use jsonrpsee_core::RpcResult; use reth_errors::ConsensusError; -use reth_primitives::BlockId; /// Helper trait to easily convert various `Result` types into [`RpcResult`] pub trait ToRpcResult: Sized { diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index 4977c3a2c40..e5c57502e2b 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -21,6 +21,7 @@ alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types.workspace = true alloy-rpc-types-trace.workspace = true +alloy-eips.workspace = true # async futures.workspace = true @@ -36,4 +37,4 @@ similar-asserts.workspace = true tokio = { workspace = true, features = ["rt-multi-thread", "macros", "rt"] } reth-rpc-eth-api.workspace = true jsonrpsee-http-client.workspace = true -alloy-rpc-types-trace.workspace = true \ No newline at end of file +alloy-rpc-types-trace.workspace = true diff --git 
a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index 97fe008fa97..d4c7dce860b 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -6,6 +6,7 @@ use std::{ task::{Context, Poll}, }; +use alloy_eips::BlockId; use alloy_primitives::{TxHash, B256}; use alloy_rpc_types::{Block, Transaction}; use alloy_rpc_types_eth::transaction::TransactionRequest; @@ -15,7 +16,7 @@ use alloy_rpc_types_trace::{ }; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_primitives::{BlockId, Receipt}; +use reth_primitives::Receipt; use reth_rpc_api::{clients::DebugApiClient, EthApiClient}; const NOOP_TRACER: &str = include_str!("../assets/noop-tracer.js"); diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index 0fefef7c997..efb1f3674e0 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -1,5 +1,6 @@ //! Helpers for testing trace calls. 
+use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, TxHash, B256}; use alloy_rpc_types::Index; use alloy_rpc_types_eth::transaction::TransactionRequest; @@ -10,7 +11,6 @@ use alloy_rpc_types_trace::{ }; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_primitives::BlockId; use reth_rpc_api::clients::TraceApiClient; use std::{ pin::Pin, diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index e2746a53cd0..6a73af69d92 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,4 +1,4 @@ -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use alloy_rpc_types::{ @@ -19,7 +19,7 @@ use reth_evm::{ system_calls::SystemCaller, ConfigureEvmEnv, }; -use reth_primitives::{Block, BlockId, BlockNumberOrTag, TransactionSignedEcRecovered}; +use reth_primitives::{Block, BlockNumberOrTag, TransactionSignedEcRecovered}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, TransactionVariant, diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index 928e2050a5c..0ff90d39998 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -1,3 +1,4 @@ +use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256, U256, U64}; use alloy_rpc_types::{ state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, @@ -5,7 +6,7 @@ use alloy_rpc_types::{ use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_serde::JsonStorageKey; use jsonrpsee::core::RpcResult as Result; -use reth_primitives::{BlockId, BlockNumberOrTag}; +use reth_primitives::BlockNumberOrTag; use reth_rpc_api::{EngineEthApiServer, EthApiServer, EthFilterApiServer}; /// Re-export for convenience pub use reth_rpc_engine_api::EngineApi; diff --git 
a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index a772dd501d4..da33bf5d3d0 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,4 +1,5 @@ use alloy_consensus::Transaction; +use alloy_eips::BlockId; use alloy_network::{ReceiptResponse, TransactionResponse}; use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use alloy_rpc_types::{BlockTransactions, Header, TransactionReceipt}; @@ -11,7 +12,7 @@ use alloy_rpc_types_trace::{ }; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned}; -use reth_primitives::{BlockId, BlockNumberOrTag}; +use reth_primitives::BlockNumberOrTag; use reth_rpc_api::{EthApiServer, OtterscanServer}; use reth_rpc_eth_api::{ helpers::{EthTransactions, TraceExt}, diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index 6d5897df131..c33f97f5301 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -1,10 +1,10 @@ use std::{collections::HashMap, future::Future, sync::Arc}; +use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_errors::RethResult; -use reth_primitives::BlockId; use reth_provider::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory}; use reth_rpc_api::RethApiServer; use reth_rpc_eth_types::{EthApiError, EthResult}; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 2883818afd9..38c73b0f516 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,3 +1,4 @@ +use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256, U256}; use alloy_rpc_types::{ state::{EvmOverrides, StateOverride}, @@ -17,7 +18,7 @@ use reth_consensus_common::calc::{ base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, }; use reth_evm::ConfigureEvmEnv; -use reth_primitives::{BlockId, Header}; +use reth_primitives::Header; use reth_provider::{BlockReader, 
ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; From 3fe22343f1b96ef97c4199741e0b44b6567799b4 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 4 Nov 2024 13:48:32 +0100 Subject: [PATCH 312/970] feat(engine): add StateRootTask skeleton (#12305) --- Cargo.lock | 2 + crates/engine/tree/Cargo.toml | 3 ++ crates/engine/tree/src/tree/mod.rs | 2 + crates/engine/tree/src/tree/root.rs | 60 +++++++++++++++++++++++++++++ 4 files changed, 67 insertions(+) create mode 100644 crates/engine/tree/src/tree/root.rs diff --git a/Cargo.lock b/Cargo.lock index 9b5a5350c16..a885081cd36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7218,8 +7218,10 @@ dependencies = [ "reth-tracing", "reth-trie", "reth-trie-parallel", + "revm-primitives", "thiserror", "tokio", + "tokio-stream", "tracing", ] diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index dee0bcaf7ce..293883c036e 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -39,9 +39,12 @@ alloy-primitives.workspace = true alloy-eips.workspace = true alloy-rpc-types-engine.workspace = true +revm-primitives.workspace = true + # common futures.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } +tokio-stream.workspace = true thiserror.workspace = true # metrics diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index bc070d87345..c3e922d11c9 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -75,6 +75,8 @@ pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::InvalidBlockHook; +mod root; + /// Keeps track of the state of the tree. 
/// /// ## Invariants diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs new file mode 100644 index 00000000000..48b2eccdf14 --- /dev/null +++ b/crates/engine/tree/src/tree/root.rs @@ -0,0 +1,60 @@ +//! State root task related functionality. + +use reth_provider::providers::ConsistentDbView; +use reth_trie::{updates::TrieUpdates, TrieInput}; +use reth_trie_parallel::parallel_root::ParallelStateRootError; +use revm_primitives::{EvmState, B256}; +use std::{ + future::Future, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; +use tokio_stream::wrappers::UnboundedReceiverStream; + +/// Standalone task that receives a transaction state stream and updates relevant +/// data structures to calculate state root. +/// +/// It is responsile of initializing a blinded sparse trie and subscribe to +/// transaction state stream. As it receives transaction execution results, it +/// fetches the proofs for relevant accounts from the database and reveal them +/// to the tree. +/// Then it updates relevant leaves according to the result of the transaction. +#[allow(dead_code)] +pub(crate) struct StateRootTask { + /// View over the state in the database. + consistent_view: ConsistentDbView, + /// Incoming state updates. + state_stream: UnboundedReceiverStream, + /// Latest trie input. + input: Arc, +} + +#[allow(dead_code)] +impl StateRootTask { + /// Creates a new `StateRootTask`. + pub(crate) const fn new( + consistent_view: ConsistentDbView, + input: Arc, + state_stream: UnboundedReceiverStream, + ) -> Self { + Self { consistent_view, state_stream, input } + } + + /// Handles state updates. + pub(crate) fn on_state_update(&self, _update: EvmState) { + // TODO: calculate hashed state update and dispatch proof gathering for it. 
+ } +} + +impl Future for StateRootTask { + type Output = Result<(B256, TrieUpdates), ParallelStateRootError>; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + // TODO: + // * poll incoming state updates stream + // * keep track of proof calculation + // * keep track of intermediate root computation + Poll::Pending + } +} From d5f01036016263fbad6a3af9dc3707b2701adc1d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 4 Nov 2024 15:54:58 +0100 Subject: [PATCH 313/970] primitives: rm alloy `BlockNumHash` reexport (#12304) --- Cargo.lock | 2 ++ crates/blockchain-tree-api/Cargo.toml | 1 + crates/blockchain-tree-api/src/lib.rs | 3 ++- crates/consensus/beacon/src/engine/mod.rs | 4 ++-- crates/exex/exex/src/event.rs | 2 +- crates/exex/exex/src/manager.rs | 3 ++- crates/exex/test-utils/Cargo.toml | 3 +++ crates/exex/test-utils/src/lib.rs | 3 ++- crates/primitives/src/block.rs | 2 +- crates/primitives/src/lib.rs | 4 ++-- crates/rpc/rpc-eth-types/src/logs_utils.rs | 3 ++- crates/storage/provider/src/test_utils/mock.rs | 6 +++--- crates/storage/provider/src/test_utils/noop.rs | 6 +++--- crates/storage/storage-api/src/block_id.rs | 6 +++--- 14 files changed, 29 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a885081cd36..9d40ebfb2c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6550,6 +6550,7 @@ dependencies = [ name = "reth-blockchain-tree-api" version = "1.1.0" dependencies = [ + "alloy-eips", "alloy-primitives", "reth-consensus", "reth-execution-errors", @@ -7557,6 +7558,7 @@ dependencies = [ name = "reth-exex-test-utils" version = "1.1.0" dependencies = [ + "alloy-eips", "eyre", "futures-util", "rand 0.8.5", diff --git a/crates/blockchain-tree-api/Cargo.toml b/crates/blockchain-tree-api/Cargo.toml index 552b7276717..b1c01f85938 100644 --- a/crates/blockchain-tree-api/Cargo.toml +++ b/crates/blockchain-tree-api/Cargo.toml @@ -18,6 +18,7 @@ reth-storage-errors.workspace = true # 
alloy alloy-primitives.workspace = true +alloy-eips.workspace = true # misc thiserror.workspace = true diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs index 0a1bf6164e0..7e1d0d714c1 100644 --- a/crates/blockchain-tree-api/src/lib.rs +++ b/crates/blockchain-tree-api/src/lib.rs @@ -9,8 +9,9 @@ use self::error::CanonicalError; use crate::error::InsertBlockError; +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::collections::BTreeMap; diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index a00f507dbd9..65904196e1c 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,4 +1,4 @@ -use alloy_eips::merge::EPOCH_SLOTS; +use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, @@ -20,7 +20,7 @@ use reth_node_types::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{BlockNumHash, Head, Header, SealedBlock, SealedHeader}; +use reth_primitives::{Head, Header, SealedBlock, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, diff --git a/crates/exex/exex/src/event.rs b/crates/exex/exex/src/event.rs index 1215ea2a502..bbd79addc9e 100644 --- a/crates/exex/exex/src/event.rs +++ 
b/crates/exex/exex/src/event.rs @@ -1,4 +1,4 @@ -use reth_primitives::BlockNumHash; +use alloy_eips::BlockNumHash; /// Events emitted by an `ExEx`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 8c1518f3090..a17de660862 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1,13 +1,14 @@ use crate::{ wal::Wal, ExExEvent, ExExNotification, ExExNotifications, FinishedExExHeight, WalHandle, }; +use alloy_eips::BlockNumHash; use futures::StreamExt; use itertools::Itertools; use metrics::Gauge; use reth_chain_state::ForkChoiceStream; use reth_chainspec::Head; use reth_metrics::{metrics::Counter, Metrics}; -use reth_primitives::{BlockNumHash, SealedHeader}; +use reth_primitives::SealedHeader; use reth_provider::HeaderProvider; use reth_tracing::tracing::debug; use std::{ diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index cd0e0831b49..6e5af981b31 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -33,6 +33,9 @@ reth-tasks.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-trie-db.workspace = true +## alloy +alloy-eips.workspace = true + ## async futures-util.workspace = true tokio.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 06aa8c81c7c..5c3468a3c1c 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -15,6 +15,7 @@ use std::{ task::Poll, }; +use alloy_eips::BlockNumHash; use futures_util::FutureExt; use reth_blockchain_tree::noop::NoopBlockchainTree; use reth_chainspec::{ChainSpec, MAINNET}; @@ -44,7 +45,7 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{BlockNumHash, Head, SealedBlockWithSenders}; +use reth_primitives::{Head, SealedBlockWithSenders}; 
use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, BlockReader, ProviderFactory, diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 2b651652df5..b9f43df5d7c 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -2,7 +2,7 @@ use crate::{ GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, }; use alloc::vec::Vec; -pub use alloy_eips::eip1898::{BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash}; +pub use alloy_eips::eip1898::{BlockNumberOrTag, ForkBlock, RpcBlockHash}; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Address, Bytes, Sealable, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 679b24abd84..4f56b1ac4e6 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -34,8 +34,8 @@ pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; pub use block::{ - Block, BlockBody, BlockNumHash, BlockNumberOrTag, BlockWithSenders, ForkBlock, RpcBlockHash, - SealedBlock, SealedBlockWithSenders, + Block, BlockBody, BlockNumberOrTag, BlockWithSenders, ForkBlock, RpcBlockHash, SealedBlock, + SealedBlockWithSenders, }; #[cfg(feature = "reth-codec")] pub use compression::*; diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 205e2bba37b..aa132675c93 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -2,11 +2,12 @@ //! //! Log parsing for building filter. 
+use alloy_eips::BlockNumHash; use alloy_primitives::TxHash; use alloy_rpc_types::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; -use reth_primitives::{BlockNumHash, Receipt, SealedBlockWithSenders}; +use reth_primitives::{Receipt, SealedBlockWithSenders}; use reth_storage_api::BlockReader; use std::sync::Arc; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 2e7cd6a06de..b5593b9040d 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -463,15 +463,15 @@ impl BlockNumReader for MockEthProvider { } impl BlockIdReader for MockEthProvider { - fn pending_block_num_hash(&self) -> ProviderResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn safe_block_num_hash(&self) -> ProviderResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn finalized_block_num_hash(&self) -> ProviderResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(None) } } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 8e6dc7425cf..65c08306239 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -175,15 +175,15 @@ impl BlockReaderIdExt for NoopProvider { } impl BlockIdReader for NoopProvider { - fn pending_block_num_hash(&self) -> ProviderResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn safe_block_num_hash(&self) -> ProviderResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn finalized_block_num_hash(&self) -> ProviderResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(None) } } diff --git a/crates/storage/storage-api/src/block_id.rs b/crates/storage/storage-api/src/block_id.rs index 55cd6ab1c76..00856d348a5 100644 --- a/crates/storage/storage-api/src/block_id.rs +++ 
b/crates/storage/storage-api/src/block_id.rs @@ -99,13 +99,13 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync { } /// Get the current pending block number and hash. - fn pending_block_num_hash(&self) -> ProviderResult>; + fn pending_block_num_hash(&self) -> ProviderResult>; /// Get the current safe block number and hash. - fn safe_block_num_hash(&self) -> ProviderResult>; + fn safe_block_num_hash(&self) -> ProviderResult>; /// Get the current finalized block number and hash. - fn finalized_block_num_hash(&self) -> ProviderResult>; + fn finalized_block_num_hash(&self) -> ProviderResult>; /// Get the safe block number. fn safe_block_number(&self) -> ProviderResult> { From 967cbc4e974ee2084f3342a0fbc37dd549861d05 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 5 Nov 2024 05:15:15 +0100 Subject: [PATCH 314/970] primitives: rm alloy `Signature` reexport (#12313) --- crates/blockchain-tree/src/blockchain_tree.rs | 6 +++--- crates/consensus/common/src/validation.rs | 4 ++-- crates/net/eth-wire-types/src/blocks.rs | 4 ++-- crates/net/eth-wire-types/src/transactions.rs | 4 ++-- crates/net/network/tests/it/big_pooled_txs_req.rs | 4 ++-- crates/net/network/tests/it/requests.rs | 4 ++-- crates/net/network/tests/it/txgossip.rs | 4 ++-- crates/optimism/evm/src/execute.rs | 4 ++-- crates/optimism/node/src/txpool.rs | 3 +-- crates/primitives/src/alloy_compat.rs | 4 ++-- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/transaction/mod.rs | 12 ++++++------ crates/primitives/src/transaction/pooled.rs | 4 ++-- crates/primitives/src/transaction/sidecar.rs | 4 ++-- crates/primitives/src/transaction/signature.rs | 13 ++++--------- crates/primitives/src/transaction/util.rs | 3 +-- crates/rpc/rpc-eth-api/src/helpers/signer.rs | 4 ++-- crates/rpc/rpc-eth-types/src/simulate.rs | 5 ++--- .../rpc-types-compat/src/transaction/signature.rs | 10 +++++----- crates/rpc/rpc/src/eth/helpers/signer.rs | 4 ++-- 
crates/storage/provider/src/test_utils/blocks.rs | 3 ++- crates/transaction-pool/src/test_utils/mock.rs | 4 ++-- crates/transaction-pool/src/traits.rs | 3 ++- testing/testing-utils/src/generators.rs | 4 ++-- 24 files changed, 55 insertions(+), 61 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 4468d82052c..1674081fe70 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1377,7 +1377,7 @@ mod tests { use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; use alloy_eips::eip1559::INITIAL_BASE_FEE; use alloy_genesis::{Genesis, GenesisAccount}; - use alloy_primitives::{keccak256, Address, Sealable, B256}; + use alloy_primitives::{keccak256, Address, Sealable, Signature, B256}; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; @@ -1389,8 +1389,8 @@ mod tests { use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, revm_primitives::AccountInfo, - Account, BlockBody, Header, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, Withdrawals, + Account, BlockBody, Header, Transaction, TransactionSigned, TransactionSignedEcRecovered, + Withdrawals, }; use reth_provider::{ test_utils::{ diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 092330595ff..d4dea07dcda 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -277,13 +277,13 @@ mod tests { use alloy_consensus::{TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; use alloy_primitives::{ - hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, Parity, Sealable, U256, + hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, Parity, Sealable, Signature, U256, }; use mockall::mock; use 
rand::Rng; use reth_chainspec::ChainSpecBuilder; use reth_primitives::{ - proofs, Account, BlockBody, Signature, Transaction, TransactionSigned, Withdrawals, + proofs, Account, BlockBody, Transaction, TransactionSigned, Withdrawals, }; use reth_storage_api::{ errors::provider::ProviderResult, AccountReader, HeaderProvider, WithdrawalsProvider, diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 5ae84319005..ce23dfe707f 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -114,9 +114,9 @@ mod tests { }; use alloy_consensus::TxLegacy; use alloy_eips::BlockHashOrNumber; - use alloy_primitives::{hex, Parity, TxKind, U256}; + use alloy_primitives::{hex, Parity, Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::{Header, Signature, Transaction, TransactionSigned}; + use reth_primitives::{Header, Transaction, TransactionSigned}; use std::str::FromStr; use super::BlockBody; diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index ab65aa178ee..7c66f657a1d 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -78,10 +78,10 @@ impl FromIterator for PooledTransactions { mod tests { use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; use alloy_consensus::{TxEip1559, TxLegacy}; - use alloy_primitives::{hex, Parity, TxKind, U256}; + use alloy_primitives::{hex, Parity, Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_primitives::{PooledTransactionsElement, Signature, Transaction, TransactionSigned}; + use reth_primitives::{PooledTransactionsElement, Transaction, TransactionSigned}; use std::str::FromStr; #[test] diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 
3a645da6c9f..29b62708eee 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -1,4 +1,4 @@ -use alloy_primitives::B256; +use alloy_primitives::{Signature, B256}; use reth_eth_wire::{GetPooledTransactions, PooledTransactions}; use reth_network::{ test_utils::{NetworkEventStream, Testnet}, @@ -6,7 +6,7 @@ use reth_network::{ }; use reth_network_api::{NetworkInfo, Peers}; use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState}; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ test_utils::{testing_pool, MockTransaction}, diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 61241f02d2d..8c00302f7b4 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use alloy_consensus::TxEip2930; -use alloy_primitives::{Bytes, Parity, TxKind, U256}; +use alloy_primitives::{Bytes, Parity, Signature, TxKind, U256}; use rand::Rng; use reth_eth_wire::HeadersDirection; use reth_network::{ @@ -16,7 +16,7 @@ use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::{HeadersClient, HeadersRequest}, }; -use reth_primitives::{Block, Header, Signature, Transaction, TransactionSigned}; +use reth_primitives::{Block, Header, Transaction, TransactionSigned}; use reth_provider::test_utils::MockEthProvider; /// Returns a new [`TransactionSigned`] with some random parameters diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index 70ac67bb5bf..f08a2b2eb96 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use alloy_consensus::TxLegacy; -use alloy_primitives::U256; +use alloy_primitives::{Signature, U256}; use futures::StreamExt; use 
rand::thread_rng; use reth_network::{test_utils::Testnet, NetworkEvent, NetworkEventListenerProvider}; use reth_network_api::PeersInfo; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 91cdb1bd2c5..d64f4bd5ea5 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -297,12 +297,12 @@ mod tests { use super::*; use crate::OpChainSpec; use alloy_consensus::TxEip1559; - use alloy_primitives::{b256, Address, StorageKey, StorageValue}; + use alloy_primitives::{b256, Address, Signature, StorageKey, StorageValue}; use op_alloy_consensus::TxDeposit; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; use reth_optimism_chainspec::OpChainSpecBuilder; - use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; + use reth_primitives::{Account, Block, BlockBody, Transaction, TransactionSigned}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, }; diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 09aa76fefb8..011654909eb 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -231,9 +231,8 @@ pub struct OpL1BlockInfo { mod tests { use crate::txpool::OpTransactionValidator; use alloy_eips::eip2718::Encodable2718; - use alloy_primitives::{TxKind, U256}; + use alloy_primitives::{Signature, TxKind, U256}; use op_alloy_consensus::TxDeposit; - use reth::primitives::Signature; use reth_chainspec::MAINNET; use reth_primitives::{Transaction, TransactionSigned, TransactionSignedEcRecovered}; use 
reth_provider::test_utils::MockEthProvider; diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 917baef6661..d86bd04c7b9 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -1,14 +1,14 @@ //! Common conversions from alloy types. use crate::{ - transaction::extract_chain_id, Block, BlockBody, Signature, Transaction, TransactionSigned, + transaction::extract_chain_id, Block, BlockBody, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, }; use alloc::{string::ToString, vec::Vec}; use alloy_consensus::{ constants::EMPTY_TRANSACTIONS, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxLegacy, }; -use alloy_primitives::{Parity, TxKind}; +use alloy_primitives::{Parity, Signature, TxKind}; use alloy_rlp::Error as RlpError; use alloy_serde::WithOtherFields; use op_alloy_rpc_types as _; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 4f56b1ac4e6..be592e1c167 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -59,7 +59,7 @@ pub use transaction::BlobTransactionValidationError; pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, - InvalidTransactionError, Signature, Transaction, TransactionMeta, TransactionSigned, + InvalidTransactionError, Transaction, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHashOrNumber, TxType, }; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f7d36be0ab8..adbd8f0d09c 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -11,7 +11,7 @@ use alloy_eips::{ eip2930::AccessList, eip7702::SignedAuthorization, }; -use alloy_primitives::{keccak256, Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; +use alloy_primitives::{keccak256, Address, Bytes, ChainId, 
Signature, TxHash, TxKind, B256, U256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use core::mem; use derive_more::{AsRef, Deref}; @@ -36,9 +36,7 @@ pub use sidecar::BlobTransactionValidationError; pub use sidecar::{BlobTransaction, BlobTransactionSidecar}; pub use compat::FillTxEnv; -pub use signature::{ - extract_chain_id, legacy_parity, recover_signer, recover_signer_unchecked, Signature, -}; +pub use signature::{extract_chain_id, legacy_parity, recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; pub use variant::TransactionSignedVariant; @@ -2016,12 +2014,14 @@ pub mod serde_bincode_compat { #[cfg(test)] mod tests { use crate::{ - transaction::{signature::Signature, TxEip1559, TxKind, TxLegacy}, + transaction::{TxEip1559, TxKind, TxLegacy}, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, }; use alloy_consensus::Transaction as _; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; - use alloy_primitives::{address, b256, bytes, hex, Address, Bytes, Parity, B256, U256}; + use alloy_primitives::{ + address, b256, bytes, hex, Address, Bytes, Parity, Signature, B256, U256, + }; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_codecs::Compact; diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 000ff41fe52..11da5d8385f 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -7,7 +7,7 @@ use super::{ TxEip7702, }; use crate::{ - BlobTransaction, BlobTransactionSidecar, Signature, Transaction, TransactionSigned, + BlobTransaction, BlobTransactionSidecar, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use alloy_consensus::{ @@ -16,7 +16,7 @@ use alloy_consensus::{ SignableTransaction, TxEip4844WithSidecar, }; use alloy_eips::eip2718::{Decodable2718, Eip2718Result, Encodable2718}; -use 
alloy_primitives::{Address, TxHash, B256}; +use alloy_primitives::{Address, Signature, TxHash, B256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; use derive_more::{AsRef, Deref}; diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 1e6560e152b..aa473ef8a3f 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,8 +1,8 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] -use crate::{Signature, Transaction, TransactionSigned}; +use crate::{Transaction, TransactionSigned}; use alloy_consensus::{constants::EIP4844_TX_TYPE_ID, TxEip4844WithSidecar}; -use alloy_primitives::TxHash; +use alloy_primitives::{Signature, TxHash}; use alloy_rlp::Header; use serde::{Deserialize, Serialize}; diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 5bfdab8e68e..b73206e6e77 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -1,9 +1,7 @@ use crate::transaction::util::secp256k1; -use alloy_primitives::{Address, Parity, B256, U256}; +use alloy_primitives::{Address, Parity, Signature, B256, U256}; use alloy_rlp::{Decodable, Error as RlpError}; -pub use alloy_primitives::Signature; - /// The order of the secp256k1 curve, divided by two. Signatures that should be checked according /// to EIP-2 should have an S value less than or equal to this. 
/// @@ -111,14 +109,11 @@ pub const fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option)> #[cfg(test)] mod tests { - use crate::{ - transaction::signature::{ - legacy_parity, recover_signer, recover_signer_unchecked, SECP256K1N_HALF, - }, - Signature, + use crate::transaction::signature::{ + legacy_parity, recover_signer, recover_signer_unchecked, SECP256K1N_HALF, }; use alloy_eips::eip2718::Decodable2718; - use alloy_primitives::{hex, Address, Parity, B256, U256}; + use alloy_primitives::{hex, Address, Parity, Signature, B256, U256}; use std::str::FromStr; #[test] diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index 7569400e94b..ff2c2e0dab5 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -1,5 +1,4 @@ -use crate::Signature; -use alloy_primitives::Address; +use alloy_primitives::{Address, Signature}; #[cfg(feature = "secp256k1")] pub(crate) mod secp256k1 { diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index ab11e62d543..36e9277400f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -1,10 +1,10 @@ //! An abstraction over ethereum signers. use alloy_dyn_abi::TypedData; -use alloy_primitives::Address; +use alloy_primitives::{Address, Signature}; use alloy_rpc_types_eth::TransactionRequest; use dyn_clone::DynClone; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_rpc_eth_types::SignError; use std::result; diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 62f0e24b1c6..20952413c13 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,7 +1,7 @@ //! 
Utilities for serving `eth_simulateV1` use alloy_consensus::{Transaction as _, TxEip4844Variant, TxType, TypedTransaction}; -use alloy_primitives::Parity; +use alloy_primitives::{Parity, Signature}; use alloy_rpc_types::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, Block, BlockTransactionsKind, @@ -10,8 +10,7 @@ use alloy_rpc_types_eth::transaction::TransactionRequest; use jsonrpsee_types::ErrorObject; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, - BlockBody, BlockWithSenders, Receipt, Signature, Transaction, TransactionSigned, - TransactionSignedNoHash, + BlockBody, BlockWithSenders, Receipt, Transaction, TransactionSigned, TransactionSignedNoHash, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_server_types::result::rpc_err; diff --git a/crates/rpc/rpc-types-compat/src/transaction/signature.rs b/crates/rpc/rpc-types-compat/src/transaction/signature.rs index 536f6ac5e5c..77ae365b2da 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/signature.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/signature.rs @@ -1,9 +1,9 @@ -use alloy_primitives::U256; +use alloy_primitives::{Signature as PrimitiveSignature, U256}; use alloy_rpc_types::{Parity, Signature}; -use reth_primitives::{transaction::legacy_parity, Signature as PrimitiveSignature, TxType}; +use reth_primitives::{transaction::legacy_parity, TxType}; /// Creates a new rpc signature from a legacy [primitive -/// signature](reth_primitives::Signature), using the give chain id to compute the signature's +/// signature](alloy_primitives::Signature), using the give chain id to compute the signature's /// recovery id. /// /// If the chain id is `Some`, the recovery id is computed according to [EIP-155](https://eips.ethereum.org/EIPS/eip-155). @@ -20,7 +20,7 @@ pub fn from_legacy_primitive_signature( } /// Creates a new rpc signature from a non-legacy [primitive -/// signature](reth_primitives::Signature). 
This sets the `v` value to `0` or `1` depending on +/// signature](alloy_primitives::Signature). This sets the `v` value to `0` or `1` depending on /// the signature's `odd_y_parity`. pub fn from_typed_primitive_signature(signature: PrimitiveSignature) -> Signature { Signature { @@ -32,7 +32,7 @@ pub fn from_typed_primitive_signature(signature: PrimitiveSignature) -> Signatur } /// Creates a new rpc signature from a legacy [primitive -/// signature](reth_primitives::Signature). +/// signature](alloy_primitives::Signature). /// /// The tx type is used to determine whether or not to use the `chain_id` to compute the /// signature's recovery id. diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index e59be0ac283..c6c60312730 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -6,11 +6,11 @@ use crate::EthApi; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; -use alloy_primitives::{eip191_hash_message, Address, B256}; +use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; use reth_rpc_eth_types::SignError; diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index d524f47cc75..19f885e27a8 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -7,12 +7,13 @@ use alloy_primitives::{ }; use alloy_eips::eip4895::Withdrawal; +use alloy_primitives::Signature; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use 
reth_node_types::NodeTypes; use reth_primitives::{ Account, BlockBody, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - Signature, Transaction, TransactionSigned, TxType, Withdrawals, + Transaction, TransactionSigned, TxType, Withdrawals, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; use revm::{db::BundleState, primitives::AccountInfo}; diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index a3cddaf0a71..c6143ff16c8 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -12,7 +12,7 @@ use alloy_consensus::{ TxEip1559, TxEip2930, TxEip4844, TxLegacy, }; use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2930::AccessList, eip4844::DATA_GAS_PER_BLOB}; -use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; +use alloy_primitives::{Address, Bytes, ChainId, Signature, TxHash, TxKind, B256, U256}; use paste::paste; use rand::{ distributions::{Uniform, WeightedIndex}, @@ -20,7 +20,7 @@ use rand::{ }; use reth_primitives::{ transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, - BlobTransactionValidationError, PooledTransactionsElementEcRecovered, Signature, Transaction, + BlobTransactionValidationError, PooledTransactionsElementEcRecovered, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, }; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 2667143b7c8..709b43e7132 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1498,7 +1498,8 @@ mod tests { use super::*; use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; use alloy_eips::eip4844::DATA_GAS_PER_BLOB; - use reth_primitives::{Signature, TransactionSigned}; + use alloy_primitives::Signature; + use reth_primitives::TransactionSigned; #[test] fn test_pool_size_invariants() { diff --git 
a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 84225ea72cd..83fcf4484a0 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -464,8 +464,8 @@ mod tests { use super::*; use alloy_consensus::TxEip1559; use alloy_eips::eip2930::AccessList; - use alloy_primitives::{hex, Parity}; - use reth_primitives::{public_key_to_address, Signature}; + use alloy_primitives::{hex, Parity, Signature}; + use reth_primitives::public_key_to_address; use std::str::FromStr; #[test] From e3173407e103fd036ab6240d599668292cf1190a Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Tue, 5 Nov 2024 04:07:58 -0600 Subject: [PATCH 315/970] renamed OptimismEngineValidator to OpEngineValidator (#12312) --- crates/optimism/node/src/engine.rs | 26 +++++++++++++------------- crates/optimism/node/src/node.rs | 14 +++++++------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index d956f0cd5cd..eb356e86e1d 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -58,11 +58,11 @@ impl PayloadTypes for OpPayloadTypes { /// Validator for Optimism engine API. #[derive(Debug, Clone)] -pub struct OptimismEngineValidator { +pub struct OpEngineValidator { chain_spec: Arc, } -impl OptimismEngineValidator { +impl OpEngineValidator { /// Instantiates a new validator. 
pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } @@ -111,7 +111,7 @@ pub fn validate_withdrawals_presence( Ok(()) } -impl EngineValidator for OptimismEngineValidator +impl EngineValidator for OpEngineValidator where Types: EngineTypes, { @@ -220,10 +220,10 @@ mod test { #[test] fn test_well_formed_attributes_pre_holocene() { - let validator = OptimismEngineValidator::new(get_chainspec(false)); + let validator = OpEngineValidator::new(get_chainspec(false)); let attributes = get_attributes(None, 1799999999); - let result = >::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes @@ -233,10 +233,10 @@ mod test { #[test] fn test_well_formed_attributes_holocene_no_eip1559_params() { - let validator = OptimismEngineValidator::new(get_chainspec(true)); + let validator = OpEngineValidator::new(get_chainspec(true)); let attributes = get_attributes(None, 1800000000); - let result = >::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes @@ -246,10 +246,10 @@ mod test { #[test] fn test_well_formed_attributes_holocene_eip1559_params_zero_denominator() { - let validator = OptimismEngineValidator::new(get_chainspec(true)); + let validator = OpEngineValidator::new(get_chainspec(true)); let attributes = get_attributes(Some(b64!("0000000000000008")), 1800000000); - let result = >::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes @@ -259,10 +259,10 @@ mod test { #[test] fn test_well_formed_attributes_holocene_valid() { - let validator = OptimismEngineValidator::new(get_chainspec(true)); + let validator = OpEngineValidator::new(get_chainspec(true)); let attributes = get_attributes(Some(b64!("0000000800000008")), 1800000000); - let result = >::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes @@ -272,10 +272,10 @@ mod test { #[test] fn test_well_formed_attributes_holocene_valid_all_zero() { - let validator = 
OptimismEngineValidator::new(get_chainspec(true)); + let validator = OpEngineValidator::new(get_chainspec(true)); let attributes = get_attributes(Some(b64!("0000000000000000")), 1800000000); - let result = >::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 7e6f1d0981c..14088e636f1 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -34,7 +34,7 @@ use reth_trie_db::MerklePatriciaTrie; use crate::{ args::RollupArgs, - engine::OptimismEngineValidator, + engine::OpEngineValidator, txpool::{OpTransactionPool, OpTransactionValidator}, OpEngineTypes, }; @@ -150,7 +150,7 @@ impl OptimismAddOns { impl NodeAddOns for OptimismAddOns where N: FullNodeComponents>, - OptimismEngineValidator: EngineValidator<::Engine>, + OpEngineValidator: EngineValidator<::Engine>, { type Handle = RpcHandle>; @@ -165,7 +165,7 @@ where impl RethRpcAddOns for OptimismAddOns where N: FullNodeComponents>, - OptimismEngineValidator: EngineValidator<::Engine>, + OpEngineValidator: EngineValidator<::Engine>, { type EthApi = OpEthApi; @@ -456,7 +456,7 @@ where } } -/// Builder for [`OptimismEngineValidator`]. +/// Builder for [`OpEngineValidator`]. 
#[derive(Debug, Default, Clone)] #[non_exhaustive] pub struct OptimismEngineValidatorBuilder; @@ -465,11 +465,11 @@ impl EngineValidatorBuilder for OptimismEngineValidatorBuilde where Types: NodeTypesWithEngine, Node: FullNodeComponents, - OptimismEngineValidator: EngineValidator, + OpEngineValidator: EngineValidator, { - type Validator = OptimismEngineValidator; + type Validator = OpEngineValidator; async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { - Ok(OptimismEngineValidator::new(ctx.config.chain.clone())) + Ok(OpEngineValidator::new(ctx.config.chain.clone())) } } From 4222cbe6826715a5b7b70e8505984ffb613b7078 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 5 Nov 2024 12:11:04 +0100 Subject: [PATCH 316/970] chore: switch op to new engine (#12329) --- crates/optimism/bin/src/main.rs | 11 +++++++---- crates/optimism/node/src/args.rs | 14 +++++++++++--- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index c6d3e32b7cf..6c440f43491 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -23,10 +23,13 @@ fn main() { if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { - let enable_engine2 = rollup_args.experimental; + if rollup_args.experimental { + tracing::warn!(target: "reth::cli", "Experimental engine is default now, and the --engine.experimental flag is deprecated. 
To enable the legacy functionality, use --engine.legacy."); + } + let use_legacy_engine = rollup_args.legacy; let sequencer_http_arg = rollup_args.sequencer_http.clone(); - match enable_engine2 { - true => { + match use_legacy_engine { + false => { let engine_tree_config = TreeConfig::default() .with_persistence_threshold(rollup_args.persistence_threshold) .with_memory_block_buffer_target(rollup_args.memory_block_buffer_target); @@ -46,7 +49,7 @@ fn main() { handle.node_exit_future.await } - false => { + true => { let handle = builder.node(OptimismNode::new(rollup_args.clone())).launch().await?; diff --git a/crates/optimism/node/src/args.rs b/crates/optimism/node/src/args.rs index 54be83dc510..b84e98d28b1 100644 --- a/crates/optimism/node/src/args.rs +++ b/crates/optimism/node/src/args.rs @@ -38,16 +38,23 @@ pub struct RollupArgs { #[arg(long = "rollup.discovery.v4", default_value = "false")] pub discovery_v4: bool, - /// Enable the engine2 experimental features on op-reth binary + /// Enable the experimental engine features on reth binary + /// + /// DEPRECATED: experimental engine is default now, use --engine.legacy to enable the legacy + /// functionality #[arg(long = "engine.experimental", default_value = "false")] pub experimental: bool, + /// Enable the legacy engine on reth binary + #[arg(long = "engine.legacy", default_value = "false")] + pub legacy: bool, + /// Configure persistence threshold for engine experimental. - #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + #[arg(long = "engine.persistence-threshold", conflicts_with = "legacy", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] pub persistence_threshold: u64, /// Configure the target number of blocks to keep in memory. 
- #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + #[arg(long = "engine.memory-block-buffer-target", conflicts_with = "legacy", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] pub memory_block_buffer_target: u64, } @@ -60,6 +67,7 @@ impl Default for RollupArgs { compute_pending_block: false, discovery_v4: false, experimental: false, + legacy: false, persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, } From 556995fc5d8df8f6614a3cd2a7fc152cac3d3efe Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 5 Nov 2024 12:23:20 +0100 Subject: [PATCH 317/970] chore: limit initial status size (#12324) --- crates/net/eth-wire/src/ethstream.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 74f3fab2be6..8ae599b6792 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -21,6 +21,9 @@ use tracing::{debug, trace}; // https://github.com/ethereum/go-ethereum/blob/30602163d5d8321fbc68afdcbbaf2362b2641bde/eth/protocols/eth/protocol.go#L50 pub const MAX_MESSAGE_SIZE: usize = 10 * 1024 * 1024; +/// [`MAX_STATUS_SIZE`] is the maximum cap on the size of the initial status message +pub(crate) const MAX_STATUS_SIZE: usize = 500 * 1024; + /// An un-authenticated [`EthStream`]. This is consumed and returns a [`EthStream`] after the /// `Status` handshake is completed. 
#[pin_project] @@ -97,7 +100,7 @@ where } }?; - if their_msg.len() > MAX_MESSAGE_SIZE { + if their_msg.len() > MAX_STATUS_SIZE { self.inner.disconnect(DisconnectReason::ProtocolBreach).await?; return Err(EthStreamError::MessageTooBig(their_msg.len())) } From 441ddbf0858e809556cbb6e85cde860df592c8b2 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 5 Nov 2024 12:37:56 +0100 Subject: [PATCH 318/970] primitives: rm more alloy block reexports (#12308) --- Cargo.lock | 3 ++- crates/optimism/node/src/txpool.rs | 2 +- crates/optimism/rpc/src/eth/pending_block.rs | 5 ++--- crates/payload/basic/src/lib.rs | 6 ++---- crates/primitives/src/block.rs | 8 +++++--- crates/primitives/src/lib.rs | 5 +---- crates/rpc/rpc-api/Cargo.toml | 1 - crates/rpc/rpc-api/src/debug.rs | 3 +-- crates/rpc/rpc-builder/tests/it/http.rs | 4 ++-- crates/rpc/rpc-eth-api/src/core.rs | 3 +-- crates/rpc/rpc-eth-api/src/helpers/state.rs | 6 +++--- crates/rpc/rpc-eth-types/src/gas_oracle.rs | 2 +- crates/rpc/rpc-eth-types/src/pending_block.rs | 4 ++-- crates/rpc/rpc-testing-util/src/trace.rs | 2 +- crates/rpc/rpc/src/debug.rs | 4 ++-- crates/rpc/rpc/src/engine.rs | 3 +-- crates/rpc/rpc/src/eth/core.rs | 5 +++-- crates/rpc/rpc/src/otterscan.rs | 3 +-- crates/transaction-pool/src/maintain.rs | 3 ++- examples/custom-inspector/Cargo.toml | 1 + examples/custom-inspector/src/main.rs | 2 +- examples/custom-payload-builder/Cargo.toml | 1 + examples/custom-payload-builder/src/generator.rs | 3 ++- 23 files changed, 38 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d40ebfb2c7..7c0c4f318b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2890,6 +2890,7 @@ dependencies = [ name = "example-custom-inspector" version = "0.0.0" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types", "clap", @@ -2913,6 +2914,7 @@ dependencies = [ name = "example-custom-payload-builder" version = "0.0.0" dependencies = [ + "alloy-eips", 
"alloy-primitives", "eyre", "futures-util", @@ -8700,7 +8702,6 @@ dependencies = [ "jsonrpsee", "reth-engine-primitives", "reth-network-peers", - "reth-primitives", "reth-rpc-eth-api", "serde", "serde_with", diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 011654909eb..b1255a987e9 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -74,7 +74,7 @@ where pub fn new(inner: EthTransactionValidator) -> Self { let this = Self::with_block_info(inner, OpL1BlockInfo::default()); if let Ok(Some(block)) = - this.inner.client().block_by_number_or_tag(reth_primitives::BlockNumberOrTag::Latest) + this.inner.client().block_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest) { // genesis block has no txs, so we can't extract L1 info, we set the block info to empty // so that we will accept txs into the pool before the first block diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 3b3b7845cc1..c90b3f7b794 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,12 +1,11 @@ //! Loads OP pending block for a RPC response. 
+use alloy_eips::BlockNumberOrTag; use alloy_primitives::{BlockNumber, B256}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_primitives::{ - revm_primitives::BlockEnv, BlockNumberOrTag, Header, Receipt, SealedBlockWithSenders, -}; +use reth_primitives::{revm_primitives::BlockEnv, Header, Receipt, SealedBlockWithSenders}; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, ReceiptProvider, StateProviderFactory, diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 7b1de980ce9..d6dcdf114b0 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -10,7 +10,7 @@ use crate::metrics::PayloadBuilderMetrics; use alloy_consensus::constants::EMPTY_WITHDRAWALS; -use alloy_eips::merge::SLOT_DURATION; +use alloy_eips::{merge::SLOT_DURATION, BlockNumberOrTag}; use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; @@ -20,9 +20,7 @@ use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJo use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; -use reth_primitives::{ - constants::RETH_CLIENT_VERSION, proofs, BlockNumberOrTag, SealedHeader, Withdrawals, -}; +use reth_primitives::{constants::RETH_CLIENT_VERSION, proofs, SealedHeader, Withdrawals}; use reth_provider::{ BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, }; diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index b9f43df5d7c..5f32728489c 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -2,7 +2,6 @@ use crate::{ GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, }; use alloc::vec::Vec; -pub use alloy_eips::eip1898::{BlockNumberOrTag, 
ForkBlock, RpcBlockHash}; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Address, Bytes, Sealable, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; @@ -904,8 +903,11 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { - use super::{BlockNumberOrTag::*, *}; - use alloy_eips::{eip1898::HexStringMissingPrefixError, BlockId}; + use super::*; + use alloy_eips::{ + eip1898::HexStringMissingPrefixError, BlockId, BlockNumberOrTag, BlockNumberOrTag::*, + RpcBlockHash, + }; use alloy_primitives::hex_literal::hex; use alloy_rlp::{Decodable, Encodable}; use std::str::FromStr; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index be592e1c167..9b121a56fa4 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -33,10 +33,7 @@ pub use reth_static_file_types as static_file; pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; -pub use block::{ - Block, BlockBody, BlockNumberOrTag, BlockWithSenders, ForkBlock, RpcBlockHash, SealedBlock, - SealedBlockWithSenders, -}; +pub use block::{Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockWithSenders}; #[cfg(feature = "reth-codec")] pub use compression::*; pub use constants::HOLESKY_GENESIS_HASH; diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 60146e8b2c2..75c06a2554c 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true reth-rpc-eth-api.workspace = true reth-engine-primitives.workspace = true reth-network-peers.workspace = true diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 162699c6ebc..d1837787d54 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,4 +1,4 @@ -use alloy_eips::BlockId; +use alloy_eips::{BlockId, BlockNumberOrTag}; use 
alloy_primitives::{Address, Bytes, B256}; use alloy_rpc_types::{Block, Bundle, StateContext}; use alloy_rpc_types_debug::ExecutionWitness; @@ -7,7 +7,6 @@ use alloy_rpc_types_trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockNumberOrTag; /// Debug rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 5c33a5d34df..b5faa71cc5e 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -2,7 +2,7 @@ //! Standalone http tests use crate::utils::{launch_http, launch_http_ws, launch_ws}; -use alloy_eips::BlockId; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{hex_literal::hex, Address, Bytes, TxHash, B256, B64, U256, U64}; use alloy_rpc_types_eth::{ transaction::TransactionRequest, Block, FeeHistory, Filter, Index, Log, @@ -19,7 +19,7 @@ use jsonrpsee::{ types::error::ErrorCode, }; use reth_network_peers::NodeRecord; -use reth_primitives::{BlockNumberOrTag, Receipt}; +use reth_primitives::Receipt; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, DebugApiClient, EthFilterApiClient, NetApiClient, OtterscanClient, TraceApiClient, diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 185297c2255..a89364a15db 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -1,7 +1,7 @@ //! Implementation of the [`jsonrpsee`] generated [`EthApiServer`] trait. Handles RPC requests for //! the `eth_` namespace. 
use alloy_dyn_abi::TypedData; -use alloy_eips::{eip2930::AccessListResult, BlockId}; +use alloy_eips::{eip2930::AccessListResult, BlockId, BlockNumberOrTag}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, B256, B64, U256, U64}; use alloy_rpc_types::{ @@ -13,7 +13,6 @@ use alloy_rpc_types::{ }; use alloy_rpc_types_eth::transaction::TransactionRequest; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockNumberOrTag; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index d980b9114b1..702572064c5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -30,7 +30,7 @@ pub trait EthState: LoadState + SpawnBlocking { /// Returns the number of transactions sent from an address at the given block identifier. /// - /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will + /// If this is [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this will /// look up the highest transaction in pool and return the next nonce (highest + 1). fn transaction_count( &self, @@ -184,7 +184,7 @@ pub trait LoadState: /// Returns the state at the given [`BlockId`] enum. /// - /// Note: if not [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this + /// Note: if not [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this /// will only return canonical state. See also fn state_at_block_id(&self, at: BlockId) -> Result { self.provider().state_by_block_id(at).map_err(Self::Error::from_eth_err) @@ -302,7 +302,7 @@ pub trait LoadState: /// Returns the number of transactions sent from an address at the given block identifier. 
/// - /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will + /// If this is [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this will /// look up the highest transaction in pool and return the next nonce (highest + 1). fn transaction_count( &self, diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 065ac1acc20..9da373376bd 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -2,11 +2,11 @@ //! previous blocks. use alloy_consensus::constants::GWEI_TO_WEI; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::{B256, U256}; use alloy_rpc_types::BlockId; use derive_more::{Deref, DerefMut, From, Into}; use itertools::Itertools; -use reth_primitives::BlockNumberOrTag; use reth_rpc_server_types::constants; use reth_storage_api::BlockReaderIdExt; use schnellru::{ByLength, LruMap}; diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index d3e7c4158ac..d8f413650a3 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,10 +4,10 @@ use std::time::Instant; -use alloy_eips::BlockId; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; -use reth_primitives::{BlockNumberOrTag, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. 
diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index efb1f3674e0..097d582df45 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -514,9 +514,9 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_eips::BlockNumberOrTag; use alloy_rpc_types_trace::filter::TraceFilterMode; use jsonrpsee::http_client::HttpClientBuilder; - use reth_primitives::BlockNumberOrTag; const fn assert_is_stream(_: &St) {} diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 6a73af69d92..7e4c8fb8230 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,4 +1,4 @@ -use alloy_eips::{eip2718::Encodable2718, BlockId}; +use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use alloy_rpc_types::{ @@ -19,7 +19,7 @@ use reth_evm::{ system_calls::SystemCaller, ConfigureEvmEnv, }; -use reth_primitives::{Block, BlockNumberOrTag, TransactionSignedEcRecovered}; +use reth_primitives::{Block, TransactionSignedEcRecovered}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, TransactionVariant, diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index 0ff90d39998..ac4de7c74e1 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -1,4 +1,4 @@ -use alloy_eips::BlockId; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256, U64}; use alloy_rpc_types::{ state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, @@ -6,7 +6,6 @@ use alloy_rpc_types::{ use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_serde::JsonStorageKey; use jsonrpsee::core::RpcResult as Result; -use reth_primitives::BlockNumberOrTag; use reth_rpc_api::{EngineEthApiServer, EthApiServer, 
EthFilterApiServer}; /// Re-export for convenience pub use reth_rpc_engine_api::EngineApi; diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 98ac9e9f409..c491ca21dfb 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -3,10 +3,10 @@ use std::sync::Arc; +use alloy_eips::BlockNumberOrTag; use alloy_network::Ethereum; use alloy_primitives::U256; use derive_more::Deref; -use reth_primitives::BlockNumberOrTag; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, SpawnBlocking}, @@ -400,13 +400,14 @@ impl EthApiInner Date: Tue, 5 Nov 2024 13:00:16 +0100 Subject: [PATCH 319/970] chore: 1.1.1 (#12334) --- Cargo.lock | 238 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 120 insertions(+), 120 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7c0c4f318b0..d3fcd59e79f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2605,7 +2605,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -5368,7 +5368,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.1.0" +version = "1.1.1" dependencies = [ "clap", "reth-cli-util", @@ -6296,7 +6296,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6368,7 +6368,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6399,7 +6399,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6425,7 +6425,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-genesis", @@ -6477,7 +6477,7 @@ 
dependencies = [ [[package]] name = "reth-bench" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -6512,7 +6512,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6550,7 +6550,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6563,7 +6563,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6592,7 +6592,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -6613,7 +6613,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-genesis", "clap", @@ -6626,7 +6626,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.1.0" +version = "1.1.1" dependencies = [ "ahash", "alloy-eips", @@ -6692,7 +6692,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.1.0" +version = "1.1.1" dependencies = [ "reth-tasks", "tokio", @@ -6701,7 +6701,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6719,7 +6719,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6741,7 +6741,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.1.0" +version = "1.1.1" dependencies = [ "convert_case", "proc-macro2", @@ -6752,7 +6752,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "eyre", @@ -6768,7 +6768,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = 
"1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6779,7 +6779,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6795,7 +6795,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6818,7 +6818,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -6858,7 +6858,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -6885,7 +6885,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -6914,7 +6914,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -6930,7 +6930,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6957,7 +6957,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6981,7 +6981,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7009,7 +7009,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7046,7 +7046,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7082,7 +7082,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.1.0" +version = 
"1.1.1" dependencies = [ "aes", "alloy-primitives", @@ -7112,7 +7112,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -7142,7 +7142,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "reth-execution-types", @@ -7154,7 +7154,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.1.0" +version = "1.1.1" dependencies = [ "futures", "pin-project", @@ -7182,7 +7182,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7230,7 +7230,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7262,7 +7262,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.1.0" +version = "1.1.1" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7274,7 +7274,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7309,7 +7309,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7332,7 +7332,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.1.0" +version = "1.1.1" dependencies = [ "clap", "eyre", @@ -7343,7 +7343,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7357,7 +7357,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7376,7 +7376,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" 
-version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7396,7 +7396,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7422,7 +7422,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "rayon", @@ -7432,7 +7432,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7459,7 +7459,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7482,7 +7482,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7497,7 +7497,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7514,7 +7514,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7558,7 +7558,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "eyre", @@ -7591,7 +7591,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7607,7 +7607,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.1.0" +version = "1.1.1" dependencies = [ "serde", "serde_json", @@ -7616,7 +7616,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7640,7 +7640,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.1.0" +version = "1.1.1" 
dependencies = [ "async-trait", "bytes", @@ -7662,7 +7662,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.1.0" +version = "1.1.1" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -7683,7 +7683,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.1.0" +version = "1.1.1" dependencies = [ "bindgen", "cc", @@ -7691,7 +7691,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.1.0" +version = "1.1.1" dependencies = [ "futures", "metrics", @@ -7702,14 +7702,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.1.0" +version = "1.1.1" dependencies = [ "futures-util", "if-addrs", @@ -7723,7 +7723,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7783,7 +7783,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -7805,7 +7805,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7825,7 +7825,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7841,7 +7841,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -7854,7 +7854,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.1.0" +version = "1.1.1" dependencies = [ "anyhow", "bincode", @@ -7872,7 +7872,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -7893,7 +7893,7 @@ dependencies = [ [[package]] name = 
"reth-node-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -7958,7 +7958,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8007,7 +8007,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-contract", @@ -8053,7 +8053,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8076,7 +8076,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.1.0" +version = "1.1.1" dependencies = [ "eyre", "http", @@ -8101,7 +8101,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "reth-chainspec", "reth-db-api", @@ -8113,7 +8113,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8133,7 +8133,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8179,7 +8179,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8195,7 +8195,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8222,7 +8222,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8233,7 +8233,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-genesis", @@ -8278,7 +8278,7 @@ dependencies = [ [[package]] 
name = "reth-optimism-payload-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8312,7 +8312,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8321,7 +8321,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8361,7 +8361,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.1.0" +version = "1.1.1" dependencies = [ "reth-codecs", "reth-db-api", @@ -8372,7 +8372,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -8393,7 +8393,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8415,7 +8415,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8425,7 +8425,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8469,7 +8469,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8497,7 +8497,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8546,7 +8546,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "assert_matches", @@ -8575,7 +8575,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -8595,7 
+8595,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8612,7 +8612,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8683,7 +8683,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -8709,7 +8709,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8729,7 +8729,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8779,7 +8779,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8815,7 +8815,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8857,7 +8857,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8900,7 +8900,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-rpc-types-engine", "http", @@ -8915,7 +8915,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8930,7 +8930,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8947,7 +8947,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8996,7 +8996,7 @@ 
dependencies = [ [[package]] name = "reth-stages-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "aquamarine", @@ -9024,7 +9024,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -9041,7 +9041,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "assert_matches", @@ -9063,7 +9063,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "clap", @@ -9074,7 +9074,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9093,7 +9093,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9105,7 +9105,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.1.0" +version = "1.1.1" dependencies = [ "auto_impl", "dyn-clone", @@ -9122,7 +9122,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9135,7 +9135,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.1.0" +version = "1.1.1" dependencies = [ "tokio", "tokio-stream", @@ -9144,7 +9144,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.1.0" +version = "1.1.1" dependencies = [ "clap", "eyre", @@ -9158,7 +9158,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9203,7 +9203,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9232,7 +9232,7 @@ dependencies = [ [[package]] name = 
"reth-trie-common" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9256,7 +9256,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9285,7 +9285,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9311,7 +9311,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", diff --git a/Cargo.toml b/Cargo.toml index 8c5cf7b0cd5..10d39b11b99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.1.0" +version = "1.1.1" edition = "2021" rust-version = "1.82" license = "MIT OR Apache-2.0" From 15c230bac20e2b1b3532c8b0d470e815fbc0cc22 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 5 Nov 2024 13:17:28 +0100 Subject: [PATCH 320/970] primitives: rm alloy `BlobTransactionValidationError` reexport (#12311) --- crates/primitives/src/lib.rs | 3 --- crates/primitives/src/transaction/mod.rs | 2 -- crates/primitives/src/transaction/sidecar.rs | 5 +---- crates/transaction-pool/src/error.rs | 3 ++- crates/transaction-pool/src/test_utils/mock.rs | 12 ++++++++---- crates/transaction-pool/src/traits.rs | 10 +++++++--- 6 files changed, 18 insertions(+), 17 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 9b121a56fa4..09610bf7458 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -51,9 +51,6 @@ pub use transaction::{ PooledTransactionsElementEcRecovered, }; -#[cfg(feature = "c-kzg")] -pub use transaction::BlobTransactionValidationError; - pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, InvalidTransactionError, Transaction, TransactionMeta, 
TransactionSigned, diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index adbd8f0d09c..aa6aaa2d83e 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -31,8 +31,6 @@ pub use error::{ }; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; -#[cfg(feature = "c-kzg")] -pub use sidecar::BlobTransactionValidationError; pub use sidecar::{BlobTransaction, BlobTransactionSidecar}; pub use compat::FillTxEnv; diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index aa473ef8a3f..e7ff7a9b508 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -9,9 +9,6 @@ use serde::{Deserialize, Serialize}; #[doc(inline)] pub use alloy_eips::eip4844::BlobTransactionSidecar; -#[cfg(feature = "c-kzg")] -pub use alloy_eips::eip4844::BlobTransactionValidationError; - /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// corresponding proofs. /// @@ -58,7 +55,7 @@ impl BlobTransaction { pub fn validate( &self, proof_settings: &c_kzg::KzgSettings, - ) -> Result<(), BlobTransactionValidationError> { + ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { self.transaction.validate_blob(proof_settings) } diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index a4766a89d5c..f71bf018807 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -1,7 +1,8 @@ //! Transaction pool errors +use alloy_eips::eip4844::BlobTransactionValidationError; use alloy_primitives::{Address, TxHash, U256}; -use reth_primitives::{BlobTransactionValidationError, InvalidTransactionError}; +use reth_primitives::InvalidTransactionError; /// Transaction pool result type. 
pub type PoolResult = Result; diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index c6143ff16c8..a272e8d00ed 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -11,7 +11,11 @@ use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID}, TxEip1559, TxEip2930, TxEip4844, TxLegacy, }; -use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2930::AccessList, eip4844::DATA_GAS_PER_BLOB}; +use alloy_eips::{ + eip1559::MIN_PROTOCOL_BASE_FEE, + eip2930::AccessList, + eip4844::{BlobTransactionValidationError, DATA_GAS_PER_BLOB}, +}; use alloy_primitives::{Address, Bytes, ChainId, Signature, TxHash, TxKind, B256, U256}; use paste::paste; use rand::{ @@ -20,8 +24,8 @@ use rand::{ }; use reth_primitives::{ transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, - BlobTransactionValidationError, PooledTransactionsElementEcRecovered, Transaction, - TransactionSigned, TransactionSignedEcRecovered, TxType, + PooledTransactionsElementEcRecovered, Transaction, TransactionSigned, + TransactionSignedEcRecovered, TxType, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -761,7 +765,7 @@ impl EthPoolTransaction for MockTransaction { &self, _blob: &BlobTransactionSidecar, _settings: &reth_primitives::kzg::KzgSettings, - ) -> Result<(), reth_primitives::BlobTransactionValidationError> { + ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { match &self { Self::Eip4844 { .. 
} => Ok(()), _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())), diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 709b43e7132..512e3e31f12 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -11,15 +11,19 @@ use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, Transaction as _, }; -use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList, eip4844::BlobAndProofV1}; +use alloy_eips::{ + eip2718::Encodable2718, + eip2930::AccessList, + eip4844::{BlobAndProofV1, BlobTransactionValidationError}, +}; use alloy_primitives::{Address, TxHash, TxKind, B256, U256}; use futures_util::{ready, Stream}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, - BlobTransactionValidationError, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, SealedBlock, Transaction, + TransactionSignedEcRecovered, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; From 5af551782c9084bcd61d15529b167b90bcc86c9c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 5 Nov 2024 15:04:52 +0100 Subject: [PATCH 321/970] feat: restructure op builder (#12229) --- crates/optimism/payload/src/builder.rs | 888 ++++++++++++++++--------- crates/payload/basic/src/lib.rs | 31 + crates/transaction-pool/src/traits.rs | 5 + 3 files changed, 613 insertions(+), 311 deletions(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 0cf45835a23..35e1d905a46 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,13 +1,15 @@ //! Optimism payload builder implementation. 
-use std::sync::Arc; + +use std::{fmt::Display, sync::Arc}; use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_eips::merge::BEACON_NONCE; -use alloy_primitives::{B64, U256}; +use alloy_primitives::{Address, Bytes, B64, U256}; +use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; use reth_chainspec::ChainSpecProvider; -use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; +use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes}; use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; @@ -16,18 +18,19 @@ use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ proofs, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Header, Receipt, TxType, + Block, BlockBody, Header, Receipt, SealedHeader, TransactionSigned, TxType, }; -use reth_provider::StateProviderFactory; +use reth_provider::{ProviderError, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, + noop::NoopTransactionPool, BestTransactions, BestTransactionsAttributes, BestTransactionsFor, + TransactionPool, }; use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, - DatabaseCommit, + Database, DatabaseCommit, }; use tracing::{debug, trace, warn}; @@ -71,7 +74,7 @@ impl OpPayloadBuilder { } impl OpPayloadBuilder where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvm
, { /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload /// (that has the `parent` as its parent). @@ -87,6 +90,62 @@ where }; self.evm_config.next_cfg_and_block_env(parent, next_attributes) } + + /// Constructs an Optimism payload from the transactions sent via the + /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in + /// the payload attributes, the transaction pool will be ignored and the only transactions + /// included in the payload will be those sent through the attributes. + /// + /// Given build arguments including an Optimism client, transaction pool, + /// and configuration, this function creates a transaction payload. Returns + /// a result indicating success with the payload or an error in case of failure. + fn build_payload( + &self, + args: BuildArguments, + ) -> Result, PayloadBuilderError> + where + Client: StateProviderFactory + ChainSpecProvider, + Pool: TransactionPool, + { + let (initialized_cfg, initialized_block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; + + let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; + + let ctx = OpPayloadBuilderCtx { + evm_config: self.evm_config.clone(), + chain_spec: client.chain_spec(), + config, + initialized_cfg, + initialized_block_env, + cancel, + best_payload, + }; + + let builder = OpBuilder { + pool, + // TODO(mattsse): make this configurable in the `OpPayloadBuilder` directly via an + // additional generic + best: best_txs::, + }; + + let state_provider = client.state_by_block_hash(ctx.parent().hash())?; + let state = StateProviderDatabase::new(state_provider); + + if ctx.attributes().no_tx_pool { + let db = State::builder().with_database(state).with_bundle_update().build(); + builder.build(db, ctx) + } else { + // sequencer mode we can reuse cachedreads from previous runs + let db = State::builder() + 
.with_database(cached_reads.as_db_mut(state)) + .with_bundle_update() + .build(); + builder.build(db, ctx) + } + .map(|out| out.with_cached_reads(cached_reads)) + } } /// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. @@ -103,10 +162,7 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self - .cfg_and_block_env(&args.config, &args.config.parent_header) - .map_err(PayloadBuilderError::other)?; - optimism_payload(&self.evm_config, args, cfg_env, block_env, self.compute_pending_block) + self.build_payload(args) } fn on_missing_payload( @@ -134,198 +190,534 @@ where cancel: Default::default(), best_payload: None, }; - let (cfg_env, block_env) = self - .cfg_and_block_env(&args.config, &args.config.parent_header) - .map_err(PayloadBuilderError::other)?; - optimism_payload(&self.evm_config, args, cfg_env, block_env, false)? - .into_payload() - .ok_or_else(|| PayloadBuilderError::MissingPayload) + self.build_payload(args)?.into_payload().ok_or_else(|| PayloadBuilderError::MissingPayload) } } -/// Constructs an Optimism transaction payload from the transactions sent through the -/// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in -/// the payload attributes, the transaction pool will be ignored and the only transactions -/// included in the payload will be those sent through the attributes. +/// The type that builds the payload. +/// +/// Payload building for optimism is composed of several steps. +/// The first steps are mandatory and defined by the protocol. +/// +/// 1. first all System calls are applied. +/// 2. After canyon the forced deployed `create2deployer` must be loaded +/// 3. all sequencer transactions are executed (part of the payload attributes) /// -/// Given build arguments including an Optimism client, transaction pool, -/// and configuration, this function creates a transaction payload. 
Returns -/// a result indicating success with the payload or an error in case of failure. -#[inline] -pub(crate) fn optimism_payload( - evm_config: &EvmConfig, - args: BuildArguments, - initialized_cfg: CfgEnvWithHandlerCfg, - initialized_block_env: BlockEnv, - _compute_pending_block: bool, -) -> Result, PayloadBuilderError> +/// Depending on whether the node acts as a sequencer and is allowed to include additional +/// transactions (`no_tx_pool == false`): +/// 4. include additional transactions +/// +/// And finally +/// 5. build the block: compute all roots (txs, state) +#[derive(Debug)] +pub struct OpBuilder { + /// The transaction pool + pool: Pool, + /// Yields the best transaction to include if transactions from the mempool are allowed. + // TODO(mattsse): convert this to a trait + best: Best, +} + +impl OpBuilder where - EvmConfig: ConfigureEvm
, - Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, + Best: FnOnce(Pool, BestTransactionsAttributes) -> BestTransactionsFor, { - let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; + /// Builds the payload on top of the state. + pub fn build( + self, + mut db: State, + ctx: OpPayloadBuilderCtx, + ) -> Result, PayloadBuilderError> + where + EvmConfig: ConfigureEvm
, + DB: Database + AsRef

, + P: StateRootProvider, + { + let Self { pool, best } = self; + debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); + + // 1. apply eip-4788 pre block contract call + ctx.apply_pre_beacon_root_contract_call(&mut db)?; + + // 2. ensure create2deployer is force deployed + ctx.ensure_create2_deployer(&mut db)?; + + // 3. execute sequencer transactions + let mut info = ctx.execute_sequencer_transactions(&mut db)?; + + // 4. if mem pool transactions are requested we execute them + if !ctx.attributes().no_tx_pool { + let best_txs = best(pool, ctx.best_transaction_attributes()); + if let Some(cancelled) = + ctx.execute_best_transactions::<_, Pool>(&mut info, &mut db, best_txs)? + { + return Ok(cancelled) + } - let chain_spec = client.chain_spec(); - let state_provider = client.state_by_block_hash(config.parent_header.hash())?; - let state = StateProviderDatabase::new(state_provider); - let mut db = - State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(); - let PayloadConfig { parent_header, attributes, mut extra_data } = config; + // check if the new payload is even more valuable + if !ctx.is_better_payload(info.total_fees) { + // can skip building the block + return Ok(BuildOutcomeKind::Aborted { fees: info.total_fees }) + } + } - debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); + let WithdrawalsOutcome { withdrawals_root, withdrawals } = + ctx.commit_withdrawals(&mut db)?; - let mut cumulative_gas_used = 0; - let block_gas_limit: u64 = attributes.gas_limit.unwrap_or_else(|| { - initialized_block_env.gas_limit.try_into().unwrap_or(chain_spec.max_gas_limit) - }); - let base_fee = initialized_block_env.basefee.to::(); + // merge all transitions into bundle state, this would apply the withdrawal 
balance changes + // and 4788 contract call + db.merge_transitions(BundleRetention::Reverts); - let mut executed_txs = Vec::with_capacity(attributes.transactions.len()); - let mut executed_senders = Vec::with_capacity(attributes.transactions.len()); + let block_number = ctx.block_number(); + let execution_outcome = ExecutionOutcome::new( + db.take_bundle(), + vec![info.receipts.clone()].into(), + block_number, + Vec::new(), + ); + let receipts_root = execution_outcome + .generic_receipts_root_slow(block_number, |receipts| { + calculate_receipt_root_no_memo_optimism( + receipts, + &ctx.chain_spec, + ctx.attributes().timestamp(), + ) + }) + .expect("Number is in range"); + let logs_bloom = + execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); + + // // calculate the state root + let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + let (state_root, trie_output) = { + db.database.as_ref().state_root_with_updates(hashed_state.clone()).inspect_err( + |err| { + warn!(target: "payload_builder", + parent_header=%ctx.parent().hash(), + %err, + "failed to calculate state root for payload" + ); + }, + )? + }; - let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( - base_fee, - initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), - )); + // create the block header + let transactions_root = proofs::calculate_transaction_root(&info.executed_transactions); + + // OP doesn't support blobs/EIP-4844. + // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // Need [Some] or [None] based on hardfork to match block hash. 
+ let (excess_blob_gas, blob_gas_used) = ctx.blob_fields(); + let extra_data = ctx.extra_data()?; + + let header = Header { + parent_hash: ctx.parent().hash(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: ctx.initialized_block_env.coinbase, + state_root, + transactions_root, + receipts_root, + withdrawals_root, + logs_bloom, + timestamp: ctx.attributes().payload_attributes.timestamp, + mix_hash: ctx.attributes().payload_attributes.prev_randao, + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(ctx.base_fee()), + number: ctx.parent().number + 1, + gas_limit: ctx.block_gas_limit(), + difficulty: U256::ZERO, + gas_used: info.cumulative_gas_used, + extra_data, + parent_beacon_block_root: ctx.attributes().payload_attributes.parent_beacon_block_root, + blob_gas_used, + excess_blob_gas, + requests_hash: None, + }; - let mut total_fees = U256::ZERO; + // seal the block + let block = Block { + header, + body: BlockBody { + transactions: info.executed_transactions, + ommers: vec![], + withdrawals, + }, + }; - let block_number = initialized_block_env.number.to::(); + let sealed_block = block.seal_slow(); + debug!(target: "payload_builder", ?sealed_block, "sealed built block"); - let is_regolith = - chain_spec.is_regolith_active_at_timestamp(attributes.payload_attributes.timestamp); + // create the executed block data + let executed = ExecutedBlock { + block: Arc::new(sealed_block.clone()), + senders: Arc::new(info.executed_senders), + execution_output: Arc::new(execution_outcome), + hashed_state: Arc::new(hashed_state), + trie: Arc::new(trie_output), + }; - // apply eip-4788 pre block contract call - let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); + let no_tx_pool = ctx.attributes().no_tx_pool; - system_caller - .pre_block_beacon_root_contract_call( - &mut db, - &initialized_cfg, - &initialized_block_env, - attributes.payload_attributes.parent_beacon_block_root, - ) - .map_err(|err| { - warn!(target: "payload_builder", - 
parent_header=%parent_header.hash(), - %err, - "failed to apply beacon root contract call for payload" - ); - PayloadBuilderError::Internal(err.into()) - })?; - - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. - reth_optimism_evm::ensure_create2_deployer( - chain_spec.clone(), - attributes.payload_attributes.timestamp, - &mut db, - ) - .map_err(|err| { - warn!(target: "payload_builder", %err, "missing create2 deployer, skipping block."); - PayloadBuilderError::other(OptimismPayloadBuilderError::ForceCreate2DeployerFail) - })?; - - let mut receipts = Vec::with_capacity(attributes.transactions.len()); - for sequencer_tx in &attributes.transactions { - // Check if the job was cancelled, if so we can exit early. - if cancel.is_cancelled() { - return Ok(BuildOutcome::Cancelled) + let payload = OpBuiltPayload::new( + ctx.payload_id(), + sealed_block, + info.total_fees, + ctx.chain_spec.clone(), + ctx.config.attributes, + Some(executed), + ); + + if no_tx_pool { + // if `no_tx_pool` is set only transactions from the payload attributes will be included + // in the payload. In other words, the payload is deterministic and we can + // freeze it once we've successfully built it. + Ok(BuildOutcomeKind::Freeze(payload)) + } else { + Ok(BuildOutcomeKind::Better { payload }) } + } +} + +fn best_txs( + pool: Pool, + attr: BestTransactionsAttributes, +) -> BestTransactionsFor { + pool.best_transactions_with_attributes(attr) +} - // A sequencer's block should never contain blob transactions. 
- if sequencer_tx.value().is_eip4844() { - return Err(PayloadBuilderError::other( - OptimismPayloadBuilderError::BlobTransactionRejected, - )) +/// This acts as the container for executed transactions and its byproducts (receipts, gas used) +#[derive(Default, Debug)] +pub struct ExecutionInfo { + /// All executed transactions (unrecovered). + pub executed_transactions: Vec, + /// The recovered senders for the executed transactions. + pub executed_senders: Vec

, + /// The transaction receipts + pub receipts: Vec>, + /// All gas used so far + pub cumulative_gas_used: u64, + /// Tracks fees from executed mempool transactions + pub total_fees: U256, +} + +impl ExecutionInfo { + /// Create a new instance with allocated slots. + pub fn with_capacity(capacity: usize) -> Self { + Self { + executed_transactions: Vec::with_capacity(capacity), + executed_senders: Vec::with_capacity(capacity), + receipts: Vec::with_capacity(capacity), + cumulative_gas_used: 0, + total_fees: U256::ZERO, } + } +} - // Convert the transaction to a [TransactionSignedEcRecovered]. This is - // purely for the purposes of utilizing the `evm_config.tx_env`` function. - // Deposit transactions do not have signatures, so if the tx is a deposit, this - // will just pull in its `from` address. - let sequencer_tx = sequencer_tx.value().clone().try_into_ecrecovered().map_err(|_| { - PayloadBuilderError::other(OptimismPayloadBuilderError::TransactionEcRecoverFailed) - })?; - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. - let depositor = (is_regolith && sequencer_tx.is_deposit()) - .then(|| { - db.load_cache_account(sequencer_tx.signer()) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| { - PayloadBuilderError::other(OptimismPayloadBuilderError::AccountLoadFailed( - sequencer_tx.signer(), - )) +/// Container type that holds all necessities to build a new payload. +#[derive(Debug)] +pub struct OpPayloadBuilderCtx { + /// The type that knows how to perform system calls and configure the evm. + pub evm_config: EvmConfig, + /// The chainspec + pub chain_spec: Arc, + /// How to build the payload. 
+ pub config: PayloadConfig, + /// Evm Settings + pub initialized_cfg: CfgEnvWithHandlerCfg, + /// Block config + pub initialized_block_env: BlockEnv, + /// Marker to check whether the job has been cancelled. + pub cancel: Cancelled, + /// The currently best payload. + pub best_payload: Option, +} + +impl OpPayloadBuilderCtx { + /// Returns the parent block the payload will be build on. + pub fn parent(&self) -> &SealedHeader { + &self.config.parent_header + } + + /// Returns the builder attributes. + pub const fn attributes(&self) -> &OpPayloadBuilderAttributes { + &self.config.attributes + } + + /// Returns the block gas limit to target. + pub fn block_gas_limit(&self) -> u64 { + self.attributes() + .gas_limit + .unwrap_or_else(|| self.initialized_block_env.gas_limit.saturating_to()) + } + + /// Returns the block number for the block. + pub fn block_number(&self) -> u64 { + self.initialized_block_env.number.to() + } + + /// Returns the current base fee + pub fn base_fee(&self) -> u64 { + self.initialized_block_env.basefee.to() + } + + /// Returns the current blob gas price. + pub fn get_blob_gasprice(&self) -> Option { + self.initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64) + } + + /// Returns the blob fields for the header. + /// + /// This will always return `Some(0)` after ecotone. + pub fn blob_fields(&self) -> (Option, Option) { + // OP doesn't support blobs/EIP-4844. + // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // Need [Some] or [None] based on hardfork to match block hash. + if self.is_ecotone_active() { + (Some(0), Some(0)) + } else { + (None, None) + } + } + + /// Returns the extra data for the block. 
+ /// + /// After holocene this extracts the extradata from the paylpad + pub fn extra_data(&self) -> Result { + if self.is_holocene_active() { + self.attributes() + .get_holocene_extra_data( + self.chain_spec.base_fee_params_at_timestamp( + self.attributes().payload_attributes.timestamp, + ), + ) + .map_err(PayloadBuilderError::other) + } else { + Ok(self.config.extra_data.clone()) + } + } + + /// Returns the current fee settings for transactions from the mempool + pub fn best_transaction_attributes(&self) -> BestTransactionsAttributes { + BestTransactionsAttributes::new(self.base_fee(), self.get_blob_gasprice()) + } + + /// Returns the unique id for this payload job. + pub fn payload_id(&self) -> PayloadId { + self.attributes().payload_id() + } + + /// Returns true if regolith is active for the payload. + pub fn is_regolith_active(&self) -> bool { + self.chain_spec.is_regolith_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if ecotone is active for the payload. + pub fn is_ecotone_active(&self) -> bool { + self.chain_spec.is_ecotone_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if canyon is active for the payload. + pub fn is_canyon_active(&self) -> bool { + self.chain_spec.is_canyon_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if holocene is active for the payload. + pub fn is_holocene_active(&self) -> bool { + self.chain_spec.is_holocene_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if the fees are higher than the previous payload. + pub fn is_better_payload(&self, total_fees: U256) -> bool { + is_better_payload(self.best_payload.as_ref(), total_fees) + } + + /// Commits the withdrawals from the payload attributes to the state. 
+ pub fn commit_withdrawals( + &self, + db: &mut State, + ) -> Result + where + DB: Database, + { + commit_withdrawals( + db, + &self.chain_spec, + self.attributes().payload_attributes.timestamp, + self.attributes().payload_attributes.withdrawals.clone(), + ) + } + + /// Ensure that the create2deployer is force-deployed at the canyon transition. Optimism + /// blocks will always have at least a single transaction in them (the L1 info transaction), + /// so we can safely assume that this will always be triggered upon the transition and that + /// the above check for empty blocks will never be hit on OP chains. + pub fn ensure_create2_deployer(&self, db: &mut State) -> Result<(), PayloadBuilderError> + where + DB: Database, + DB::Error: Display, + { + reth_optimism_evm::ensure_create2_deployer( + self.chain_spec.clone(), + self.attributes().payload_attributes.timestamp, + db, + ) + .map_err(|err| { + warn!(target: "payload_builder", %err, "missing create2 deployer, skipping block."); + PayloadBuilderError::other(OptimismPayloadBuilderError::ForceCreate2DeployerFail) + }) + } +} + +impl OpPayloadBuilderCtx +where + EvmConfig: ConfigureEvm
, +{ + /// apply eip-4788 pre block contract call + pub fn apply_pre_beacon_root_contract_call( + &self, + db: &mut DB, + ) -> Result<(), PayloadBuilderError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + SystemCaller::new(self.evm_config.clone(), self.chain_spec.clone()) + .pre_block_beacon_root_contract_call( + db, + &self.initialized_cfg, + &self.initialized_block_env, + self.attributes().payload_attributes.parent_beacon_block_root, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_header=%self.parent().hash(), + %err, + "failed to apply beacon root contract call for payload" + ); + PayloadBuilderError::Internal(err.into()) })?; - let env = EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - evm_config.tx_env(sequencer_tx.as_signed(), sequencer_tx.signer()), - ); + Ok(()) + } - let mut evm = evm_config.evm_with_env(&mut db, env); + /// Executes all sequencer transactions that are included in the payload attributes. + pub fn execute_sequencer_transactions( + &self, + db: &mut State, + ) -> Result + where + DB: Database, + { + let mut info = ExecutionInfo::with_capacity(self.attributes().transactions.len()); + + for sequencer_tx in &self.attributes().transactions { + // A sequencer's block should never contain blob transactions. + if sequencer_tx.value().is_eip4844() { + return Err(PayloadBuilderError::other( + OptimismPayloadBuilderError::BlobTransactionRejected, + )) + } - let ResultAndState { result, state } = match evm.transact() { - Ok(res) => res, - Err(err) => { - match err { - EVMError::Transaction(err) => { - trace!(target: "payload_builder", %err, ?sequencer_tx, "Error in sequencer transaction, skipping."); - continue - } - err => { - // this is an error that we should treat as fatal for this attempt - return Err(PayloadBuilderError::EvmExecutionError(err)) + // Convert the transaction to a [TransactionSignedEcRecovered]. 
This is + // purely for the purposes of utilizing the `evm_config.tx_env`` function. + // Deposit transactions do not have signatures, so if the tx is a deposit, this + // will just pull in its `from` address. + let sequencer_tx = + sequencer_tx.value().clone().try_into_ecrecovered().map_err(|_| { + PayloadBuilderError::other( + OptimismPayloadBuilderError::TransactionEcRecoverFailed, + ) + })?; + + // Cache the depositor account prior to the state transition for the deposit nonce. + // + // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces + // were not introduced in Bedrock. In addition, regular transactions don't have deposit + // nonces, so we don't need to touch the DB for those. + let depositor = (self.is_regolith_active() && sequencer_tx.is_deposit()) + .then(|| { + db.load_cache_account(sequencer_tx.signer()) + .map(|acc| acc.account_info().unwrap_or_default()) + }) + .transpose() + .map_err(|_| { + PayloadBuilderError::other(OptimismPayloadBuilderError::AccountLoadFailed( + sequencer_tx.signer(), + )) + })?; + + let env = EnvWithHandlerCfg::new_with_cfg_env( + self.initialized_cfg.clone(), + self.initialized_block_env.clone(), + self.evm_config.tx_env(sequencer_tx.as_signed(), sequencer_tx.signer()), + ); + + let mut evm = self.evm_config.evm_with_env(&mut *db, env); + + let ResultAndState { result, state } = match evm.transact() { + Ok(res) => res, + Err(err) => { + match err { + EVMError::Transaction(err) => { + trace!(target: "payload_builder", %err, ?sequencer_tx, "Error in sequencer transaction, skipping."); + continue + } + err => { + // this is an error that we should treat as fatal for this attempt + return Err(PayloadBuilderError::EvmExecutionError(err)) + } } } - } - }; + }; - // to release the db reference drop evm. 
- drop(evm); - // commit changes - db.commit(state); - - let gas_used = result.gas_used(); - - // add gas used by the transaction to cumulative gas used, before creating the receipt - cumulative_gas_used += gas_used; - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Some(Receipt { - tx_type: sequencer_tx.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs().into_iter().map(Into::into).collect(), - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. The state transition process - // ensures this is only set for post-Canyon deposit transactions. - deposit_receipt_version: chain_spec - .is_canyon_active_at_timestamp(attributes.payload_attributes.timestamp) - .then_some(1), - })); - - // append sender and transaction to the respective lists - executed_senders.push(sequencer_tx.signer()); - executed_txs.push(sequencer_tx.into_signed()); + // to release the db reference drop evm. + drop(evm); + // commit changes + db.commit(state); + + let gas_used = result.gas_used(); + + // add gas used by the transaction to cumulative gas used, before creating the receipt + info.cumulative_gas_used += gas_used; + + // Push transaction changeset and calculate header bloom filter for receipt. + info.receipts.push(Some(Receipt { + tx_type: sequencer_tx.tx_type(), + success: result.is_success(), + cumulative_gas_used: info.cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + deposit_nonce: depositor.map(|account| account.nonce), + // The deposit receipt version was introduced in Canyon to indicate an update to how + // receipt hashes should be computed when set. The state transition process + // ensures this is only set for post-Canyon deposit transactions. 
+ deposit_receipt_version: self.is_canyon_active().then_some(1), + })); + + // append sender and transaction to the respective lists + info.executed_senders.push(sequencer_tx.signer()); + info.executed_transactions.push(sequencer_tx.into_signed()); + } + + Ok(info) } - if !attributes.no_tx_pool { + /// Executes the given best transactions and updates the execution info + pub fn execute_best_transactions( + &self, + info: &mut ExecutionInfo, + db: &mut State, + mut best_txs: BestTransactionsFor, + ) -> Result>, PayloadBuilderError> + where + DB: Database, + Pool: TransactionPool, + { + let block_gas_limit = self.block_gas_limit(); + let base_fee = self.base_fee(); while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this transaction - if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { + if info.cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { // we can't fit this transaction into the block, so we need to mark it as // invalid which also removes all dependent transaction from // the iterator before we can continue @@ -340,20 +732,20 @@ where } // check if the job was cancelled, if so we can exit early - if cancel.is_cancelled() { - return Ok(BuildOutcome::Cancelled) + if self.cancel.is_cancelled() { + return Ok(Some(BuildOutcomeKind::Cancelled)) } // convert tx to a signed transaction let tx = pool_tx.to_recovered_transaction(); let env = EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - evm_config.tx_env(tx.as_signed(), tx.signer()), + self.initialized_cfg.clone(), + self.initialized_block_env.clone(), + self.evm_config.tx_env(tx.as_signed(), tx.signer()), ); // Configure the environment for the block. 
- let mut evm = evm_config.evm_with_env(&mut db, env); + let mut evm = self.evm_config.evm_with_env(&mut *db, env); let ResultAndState { result, state } = match evm.transact() { Ok(res) => res, @@ -388,13 +780,13 @@ where // add gas used by the transaction to cumulative gas used, before creating the // receipt - cumulative_gas_used += gas_used; + info.cumulative_gas_used += gas_used; // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Some(Receipt { + info.receipts.push(Some(Receipt { tx_type: tx.tx_type(), success: result.is_success(), - cumulative_gas_used, + cumulative_gas_used: info.cumulative_gas_used, logs: result.into_logs().into_iter().map(Into::into).collect(), deposit_nonce: None, deposit_receipt_version: None, @@ -404,140 +796,14 @@ where let miner_fee = tx .effective_tip_per_gas(Some(base_fee)) .expect("fee is always valid; execution succeeded"); - total_fees += U256::from(miner_fee) * U256::from(gas_used); + info.total_fees += U256::from(miner_fee) * U256::from(gas_used); // append sender and transaction to the respective lists - executed_senders.push(tx.signer()); - executed_txs.push(tx.into_signed()); + info.executed_senders.push(tx.signer()); + info.executed_transactions.push(tx.into_signed()); } - } - - // check if we have a better block, but only if we included transactions from the pool - if !attributes.no_tx_pool && !is_better_payload(best_payload.as_ref(), total_fees) { - // can skip building the block - return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads }) - } - - let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( - &mut db, - &chain_spec, - attributes.payload_attributes.timestamp, - attributes.payload_attributes.withdrawals.clone(), - )?; - - // merge all transitions into bundle state, this would apply the withdrawal balance changes - // and 4788 contract call - db.merge_transitions(BundleRetention::Reverts); - - let execution_outcome = ExecutionOutcome::new( - 
db.take_bundle(), - vec![receipts.clone()].into(), - block_number, - Vec::new(), - ); - let receipts_root = execution_outcome - .generic_receipts_root_slow(block_number, |receipts| { - calculate_receipt_root_no_memo_optimism(receipts, &chain_spec, attributes.timestamp()) - }) - .expect("Number is in range"); - let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); - - // calculate the state root - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); - let (state_root, trie_output) = { - db.database.inner().state_root_with_updates(hashed_state.clone()).inspect_err(|err| { - warn!(target: "payload_builder", - parent_header=%parent_header.hash(), - %err, - "failed to calculate state root for payload" - ); - })? - }; - - // create the block header - let transactions_root = proofs::calculate_transaction_root(&executed_txs); - - // OP doesn't support blobs/EIP-4844. - // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions - // Need [Some] or [None] based on hardfork to match block hash. 
- let (excess_blob_gas, blob_gas_used) = - if chain_spec.is_ecotone_active_at_timestamp(attributes.payload_attributes.timestamp) { - (Some(0), Some(0)) - } else { - (None, None) - }; - - let is_holocene = - chain_spec.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp); - - if is_holocene { - extra_data = attributes - .get_holocene_extra_data( - chain_spec.base_fee_params_at_timestamp(attributes.payload_attributes.timestamp), - ) - .map_err(PayloadBuilderError::other)?; - } - let header = Header { - parent_hash: parent_header.hash(), - ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: initialized_block_env.coinbase, - state_root, - transactions_root, - receipts_root, - withdrawals_root, - logs_bloom, - timestamp: attributes.payload_attributes.timestamp, - mix_hash: attributes.payload_attributes.prev_randao, - nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(base_fee), - number: parent_header.number + 1, - gas_limit: block_gas_limit, - difficulty: U256::ZERO, - gas_used: cumulative_gas_used, - extra_data, - parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, - blob_gas_used, - excess_blob_gas, - requests_hash: None, - }; - - // seal the block - let block = Block { - header, - body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals }, - }; - - let sealed_block = block.seal_slow(); - debug!(target: "payload_builder", ?sealed_block, "sealed built block"); - - // create the executed block data - let executed = ExecutedBlock { - block: Arc::new(sealed_block.clone()), - senders: Arc::new(executed_senders), - execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - trie: Arc::new(trie_output), - }; - - let no_tx_pool = attributes.no_tx_pool; - - let payload = OpBuiltPayload::new( - attributes.payload_attributes.id, - sealed_block, - total_fees, - chain_spec, - attributes, - Some(executed), - ); - - if no_tx_pool { - // if `no_tx_pool` is set only transactions from the 
payload attributes will be included in - // the payload. In other words, the payload is deterministic and we can freeze it once we've - // successfully built it. - Ok(BuildOutcome::Freeze(payload)) - } else { - Ok(BuildOutcome::Better { payload, cached_reads }) + Ok(None) } } diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index d6dcdf114b0..e57cc668d27 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -790,6 +790,37 @@ impl BuildOutcome { } } +/// The possible outcomes of a payload building attempt without reused [`CachedReads`] +#[derive(Debug)] +pub enum BuildOutcomeKind { + /// Successfully built a better block. + Better { + /// The new payload that was built. + payload: Payload, + }, + /// Aborted payload building because resulted in worse block wrt. fees. + Aborted { + /// The total fees associated with the attempted payload. + fees: U256, + }, + /// Build job was cancelled + Cancelled, + /// The payload is final and no further building should occur + Freeze(Payload), +} + +impl BuildOutcomeKind { + /// Attaches the [`CachedReads`] to the outcome. + pub fn with_cached_reads(self, cached_reads: CachedReads) -> BuildOutcome { + match self { + Self::Better { payload } => BuildOutcome::Better { payload, cached_reads }, + Self::Aborted { fees } => BuildOutcome::Aborted { fees, cached_reads }, + Self::Cancelled => BuildOutcome::Cancelled, + Self::Freeze(payload) => BuildOutcome::Freeze(payload), + } + } +} + /// A collection of arguments used for building payloads. 
/// /// This struct encapsulates the essential components and configuration required for the payload diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 512e3e31f12..b62b7ff2316 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -739,6 +739,11 @@ impl fmt::Display for CanonicalStateUpdate<'_> { } } +/// Alias to restrict the [`BestTransactions`] items to the pool's transaction type. +pub type BestTransactionsFor = Box< + dyn BestTransactions::Transaction>>>, +>; + /// An `Iterator` that only returns transactions that are ready to be executed. /// /// This makes no assumptions about the order of the transactions, but expects that _all_ From 0e83203658a58570028284c5cf32c3858974c0f8 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:40:10 +0100 Subject: [PATCH 322/970] primitives: rm alloy `BlobTransactionSidecar` reexport (#12310) --- bin/reth/src/commands/debug_cmd/build_block.rs | 7 +++---- crates/optimism/payload/src/payload.rs | 9 +++++---- crates/primitives/benches/validate_blob_tx.rs | 5 +++-- crates/primitives/src/lib.rs | 3 +-- crates/primitives/src/transaction/mod.rs | 2 +- crates/primitives/src/transaction/pooled.rs | 9 ++++----- crates/primitives/src/transaction/sidecar.rs | 4 +--- crates/transaction-pool/src/blobstore/disk.rs | 3 +-- crates/transaction-pool/src/blobstore/mem.rs | 6 ++---- crates/transaction-pool/src/blobstore/mod.rs | 3 +-- crates/transaction-pool/src/blobstore/noop.rs | 4 ++-- crates/transaction-pool/src/lib.rs | 4 ++-- crates/transaction-pool/src/noop.rs | 6 ++++-- crates/transaction-pool/src/pool/mod.rs | 4 ++-- crates/transaction-pool/src/test_utils/mock.rs | 7 +++---- crates/transaction-pool/src/traits.rs | 7 +++---- crates/transaction-pool/src/validate/mod.rs | 3 ++- 17 files changed, 40 insertions(+), 46 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs 
b/bin/reth/src/commands/debug_cmd/build_block.rs index 0559d473f62..30af4c61c53 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -1,6 +1,6 @@ //! Command for debugging block building. use alloy_consensus::TxEip4844; -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, eip4844::BlobTransactionSidecar}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::Decodable; use alloy_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; @@ -27,9 +27,8 @@ use reth_node_api::{ }; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use reth_primitives::{ - revm_primitives::KzgSettings, BlobTransaction, BlobTransactionSidecar, - PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, - TransactionSigned, + revm_primitives::KzgSettings, BlobTransaction, PooledTransactionsElement, SealedBlock, + SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 3a7d87acc4c..5acac70e914 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -3,7 +3,10 @@ //! 
Optimism builder support use crate::{builder::decode_eip_1559_params, error::EIP1559ParamError}; -use alloy_eips::{eip1559::BaseFeeParams, eip2718::Decodable2718, eip7685::Requests}; +use alloy_eips::{ + eip1559::BaseFeeParams, eip2718::Decodable2718, eip4844::BlobTransactionSidecar, + eip7685::Requests, +}; use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; @@ -15,9 +18,7 @@ use reth_chainspec::EthereumHardforks; use reth_optimism_chainspec::OpChainSpec; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives::{ - transaction::WithEncoded, BlobTransactionSidecar, SealedBlock, TransactionSigned, Withdrawals, -}; +use reth_primitives::{transaction::WithEncoded, SealedBlock, TransactionSigned, Withdrawals}; use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs index 50498a9420f..453381366e1 100644 --- a/crates/primitives/benches/validate_blob_tx.rs +++ b/crates/primitives/benches/validate_blob_tx.rs @@ -1,7 +1,9 @@ #![allow(missing_docs)] use alloy_consensus::TxEip4844; -use alloy_eips::eip4844::{env_settings::EnvKzgSettings, MAX_BLOBS_PER_BLOCK}; +use alloy_eips::eip4844::{ + env_settings::EnvKzgSettings, BlobTransactionSidecar, MAX_BLOBS_PER_BLOCK, +}; use alloy_primitives::hex; use criterion::{ criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, @@ -12,7 +14,6 @@ use proptest::{ test_runner::{RngAlgorithm, TestRng, TestRunner}, }; use proptest_arbitrary_interop::arb; -use reth_primitives::BlobTransactionSidecar; // constant seed to use for the rng const SEED: [u8; 32] = 
hex!("1337133713371337133713371337133713371337133713371337133713371337"); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 09610bf7458..c16c4d3f42f 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -47,8 +47,7 @@ pub use reth_primitives_traits::{ pub use static_file::StaticFileSegment; pub use transaction::{ - BlobTransaction, BlobTransactionSidecar, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, + BlobTransaction, PooledTransactionsElement, PooledTransactionsElementEcRecovered, }; pub use transaction::{ diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index aa6aaa2d83e..194ddf9c0ce 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -31,7 +31,7 @@ pub use error::{ }; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; -pub use sidecar::{BlobTransaction, BlobTransactionSidecar}; +pub use sidecar::BlobTransaction; pub use compat::FillTxEnv; pub use signature::{extract_chain_id, legacy_parity, recover_signer, recover_signer_unchecked}; diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 11da5d8385f..2d62bb3e685 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -6,10 +6,9 @@ use super::{ signature::{recover_signer, with_eip155_parity}, TxEip7702, }; -use crate::{ - BlobTransaction, BlobTransactionSidecar, Transaction, TransactionSigned, - TransactionSignedEcRecovered, -}; +use crate::{BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered}; +use alloy_eips::eip4844::BlobTransactionSidecar; + use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, @@ -546,7 +545,7 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { match 
Self::try_from(tx_signed) { Ok(Self::BlobTransaction(mut tx)) => { // Successfully converted to a BlobTransaction, now generate a sidecar. - tx.transaction.sidecar = crate::BlobTransactionSidecar::arbitrary(u)?; + tx.transaction.sidecar = alloy_eips::eip4844::BlobTransactionSidecar::arbitrary(u)?; Ok(Self::BlobTransaction(tx)) } Ok(tx) => Ok(tx), // Successfully converted, but not a BlobTransaction. diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index e7ff7a9b508..5bd647d5393 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -2,13 +2,11 @@ use crate::{Transaction, TransactionSigned}; use alloy_consensus::{constants::EIP4844_TX_TYPE_ID, TxEip4844WithSidecar}; +use alloy_eips::eip4844::BlobTransactionSidecar; use alloy_primitives::{Signature, TxHash}; use alloy_rlp::Header; use serde::{Deserialize, Serialize}; -#[doc(inline)] -pub use alloy_eips::eip4844::BlobTransactionSidecar; - /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// corresponding proofs. /// diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 987264853db..9d02276db85 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -1,10 +1,9 @@ //! 
A simple diskstore for blobs use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::{TxHash, B256}; use parking_lot::{Mutex, RwLock}; -use reth_primitives::BlobTransactionSidecar; use schnellru::{ByLength, LruMap}; use std::{collections::HashSet, fmt, fs, io, path::PathBuf, sync::Arc}; use tracing::{debug, trace}; diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index cea1837bdcd..0ab9c0d7af0 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -1,7 +1,5 @@ -use crate::blobstore::{ - BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize, BlobTransactionSidecar, -}; -use alloy_eips::eip4844::BlobAndProofV1; +use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::B256; use parking_lot::RwLock; use std::{collections::HashMap, sync::Arc}; diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index f8d37bfcc0f..f1612bcd022 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -1,11 +1,10 @@ //! Storage for blob data of EIP4844 transactions. 
-use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::B256; pub use disk::{DiskFileBlobStore, DiskFileBlobStoreConfig, OpenDiskFileBlobStore}; pub use mem::InMemoryBlobStore; pub use noop::NoopBlobStore; -use reth_primitives::BlobTransactionSidecar; use std::{ fmt, sync::{ diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index 0f293573556..943a6eeda95 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -1,5 +1,5 @@ -use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobTransactionSidecar}; -use alloy_eips::eip4844::BlobAndProofV1; +use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError}; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::B256; use std::sync::Arc; diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 02037599432..669cb69b0e8 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -151,12 +151,12 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use crate::{identifier::TransactionId, pool::PoolInner}; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::{Address, TxHash, B256, U256}; use aquamarine as _; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; -use reth_primitives::{BlobTransactionSidecar, PooledTransactionsElement}; +use reth_primitives::PooledTransactionsElement; use reth_storage_api::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 47a26ee29a3..cf2270978ab 100644 --- a/crates/transaction-pool/src/noop.rs +++ 
b/crates/transaction-pool/src/noop.rs @@ -16,10 +16,12 @@ use crate::{ PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; -use alloy_eips::{eip1559::ETHEREUM_BLOCK_GAS_LIMIT, eip4844::BlobAndProofV1}; +use alloy_eips::{ + eip1559::ETHEREUM_BLOCK_GAS_LIMIT, + eip4844::{BlobAndProofV1, BlobTransactionSidecar}, +}; use alloy_primitives::{Address, TxHash, B256, U256}; use reth_eth_wire_types::HandleMempoolData; -use reth_primitives::BlobTransactionSidecar; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 77446a52375..0841c0d3d28 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -86,9 +86,9 @@ use parking_lot::{Mutex, RwLock, RwLockReadGuard}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; +use alloy_eips::eip4844::BlobTransactionSidecar; use reth_primitives::{ - BlobTransaction, BlobTransactionSidecar, PooledTransactionsElement, TransactionSigned, - TransactionSignedEcRecovered, + BlobTransaction, PooledTransactionsElement, TransactionSigned, TransactionSignedEcRecovered, }; use std::{ collections::{HashMap, HashSet}, diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index a272e8d00ed..c97632c7dcd 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -14,7 +14,7 @@ use alloy_consensus::{ use alloy_eips::{ eip1559::MIN_PROTOCOL_BASE_FEE, eip2930::AccessList, - eip4844::{BlobTransactionValidationError, DATA_GAS_PER_BLOB}, + eip4844::{BlobTransactionSidecar, BlobTransactionValidationError, DATA_GAS_PER_BLOB}, }; use alloy_primitives::{Address, Bytes, ChainId, Signature, TxHash, 
TxKind, B256, U256}; use paste::paste; @@ -23,9 +23,8 @@ use rand::{ prelude::Distribution, }; use reth_primitives::{ - transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, - PooledTransactionsElementEcRecovered, Transaction, TransactionSigned, - TransactionSignedEcRecovered, TxType, + transaction::TryFromRecoveredTransactionError, PooledTransactionsElementEcRecovered, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index b62b7ff2316..6be25cb2ecc 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -14,16 +14,15 @@ use alloy_consensus::{ use alloy_eips::{ eip2718::Encodable2718, eip2930::AccessList, - eip4844::{BlobAndProofV1, BlobTransactionValidationError}, + eip4844::{BlobAndProofV1, BlobTransactionSidecar, BlobTransactionValidationError}, }; use alloy_primitives::{Address, TxHash, TxKind, B256, U256}; use futures_util::{ready, Stream}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ - kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, - PooledTransactionsElement, PooledTransactionsElementEcRecovered, SealedBlock, Transaction, - TransactionSignedEcRecovered, + kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, PooledTransactionsElement, + PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 4a82a1a148f..6a3b0b96e97 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -6,9 +6,10 @@ use crate::{ traits::{PoolTransaction, TransactionOrigin}, 
PriceBumpConfig, }; +use alloy_eips::eip4844::BlobTransactionSidecar; use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; -use reth_primitives::{BlobTransactionSidecar, SealedBlock, TransactionSignedEcRecovered}; +use reth_primitives::{SealedBlock, TransactionSignedEcRecovered}; use std::{fmt, future::Future, time::Instant}; mod constants; From 39a667bbfeeb1d6d0d8860814030df5abff680c4 Mon Sep 17 00:00:00 2001 From: greg <82421016+greged93@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:58:16 +0100 Subject: [PATCH 323/970] feat: graceful incoming connection closing (#12282) Co-authored-by: Matthias Seitz --- crates/net/network/src/session/mod.rs | 46 +++++++++++++++++++++++++++ crates/net/network/src/swarm.rs | 11 +++++-- 2 files changed, 54 insertions(+), 3 deletions(-) diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 712f076b47d..30b1cda9da9 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -110,6 +110,8 @@ pub struct SessionManager { active_session_rx: ReceiverStream, /// Additional `RLPx` sub-protocols to be used by the session manager. extra_protocols: RlpxSubProtocols, + /// Tracks the ongoing graceful disconnections attempts for incoming connections. + disconnections_counter: DisconnectionsCounter, /// Metrics for the session manager. metrics: SessionManagerMetrics, } @@ -151,6 +153,7 @@ impl SessionManager { active_session_tx: MeteredPollSender::new(active_session_tx, "network_active_session"), active_session_rx: ReceiverStream::new(active_session_rx), extra_protocols, + disconnections_counter: Default::default(), metrics: Default::default(), } } @@ -376,6 +379,35 @@ impl SessionManager { Some(session) } + /// Try to gracefully disconnect an incoming connection by initiating a ECIES connection and + /// sending a disconnect. 
If [`SessionManager`] is at capacity for ongoing disconnections, will + /// simply drop the incoming connection. + pub(crate) fn try_disconnect_incoming_connection( + &self, + stream: TcpStream, + reason: DisconnectReason, + ) { + if !self.disconnections_counter.has_capacity() { + // drop the connection if we don't have capacity for gracefully disconnecting + return + } + + let guard = self.disconnections_counter.clone(); + let secret_key = self.secret_key; + + self.spawn(async move { + trace!( + target: "net::session", + "gracefully disconnecting incoming connection" + ); + if let Ok(stream) = get_ecies_stream(stream, secret_key, Direction::Incoming).await { + let mut unauth = UnauthedP2PStream::new(stream); + let _ = unauth.send_disconnect(reason).await; + drop(guard); + } + }); + } + /// This polls all the session handles and returns [`SessionEvent`]. /// /// Active sessions are prioritized. @@ -615,6 +647,20 @@ impl SessionManager { } } +/// A counter for ongoing graceful disconnections attempts. +#[derive(Default, Debug, Clone)] +struct DisconnectionsCounter(Arc<()>); + +impl DisconnectionsCounter { + const MAX_CONCURRENT_GRACEFUL_DISCONNECTIONS: usize = 15; + + /// Returns true if the [`DisconnectionsCounter`] still has capacity + /// for an additional graceful disconnection. 
+ fn has_capacity(&self) -> bool { + Arc::strong_count(&self.0) <= Self::MAX_CONCURRENT_GRACEFUL_DISCONNECTIONS + } +} + /// Events produced by the [`SessionManager`] #[derive(Debug)] pub enum SessionEvent { diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 0be7ae1c1bb..c1fe9f9e231 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -8,7 +8,8 @@ use std::{ use futures::Stream; use reth_eth_wire::{ - capability::CapabilityMessage, errors::EthStreamError, Capabilities, EthVersion, Status, + capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, + EthVersion, Status, }; use reth_network_api::PeerRequestSender; use reth_network_peers::PeerId; @@ -32,7 +33,7 @@ use crate::{ /// [`SessionManager`]. Outgoing connections are either initiated on demand or triggered by the /// [`NetworkState`] and also delegated to the [`NetworkState`]. /// -/// Following diagram gives displays the dataflow contained in the [`Swarm`] +/// Following diagram displays the dataflow contained in the [`Swarm`] /// /// The [`ConnectionListener`] yields incoming [`TcpStream`]s from peers that are spawned as session /// tasks. After a successful `RLPx` authentication, the task is ready to accept ETH requests or @@ -70,7 +71,7 @@ impl Swarm { Self { incoming, sessions, state } } - /// Adds an additional protocol handler to the `RLPx` sub-protocol list. + /// Adds a protocol handler to the `RLPx` sub-protocol list. 
pub(crate) fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { self.sessions_mut().add_rlpx_sub_protocol(protocol); } @@ -201,6 +202,10 @@ impl Swarm { } InboundConnectionError::ExceedsCapacity => { trace!(target: "net", ?remote_addr, "No capacity for incoming connection"); + self.sessions.try_disconnect_incoming_connection( + stream, + DisconnectReason::TooManyPeers, + ); } } return None From ab037756e5f9afec363e50bdb2eadcfc5ecd761d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 5 Nov 2024 16:17:42 +0100 Subject: [PATCH 324/970] feat: make it configurable how txs are yielded (#12337) --- crates/optimism/node/src/node.rs | 27 +++++++++-- crates/optimism/payload/src/builder.rs | 67 ++++++++++++++++++-------- 2 files changed, 70 insertions(+), 24 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 14088e636f1..ae146a60885 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -21,6 +21,7 @@ use reth_node_builder::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; +use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{Block, Header}; @@ -286,7 +287,7 @@ where /// A basic optimism payload service builder #[derive(Debug, Default, Clone)] -pub struct OpPayloadBuilder { +pub struct OpPayloadBuilder { /// By default the pending block equals the latest block /// to save resources and not leak txs from the tx-pool, /// this flag enables computing of the pending block @@ -296,12 +297,30 @@ pub struct OpPayloadBuilder { /// will use the payload attributes from the latest block. Note /// that this flag is not yet functional. 
pub compute_pending_block: bool, + /// The type responsible for yielding the best transactions for the payload if mempool + /// transactions are allowed. + pub best_transactions: Txs, } impl OpPayloadBuilder { /// Create a new instance with the given `compute_pending_block` flag. pub const fn new(compute_pending_block: bool) -> Self { - Self { compute_pending_block } + Self { compute_pending_block, best_transactions: () } + } +} + +impl OpPayloadBuilder +where + Txs: OpPayloadTransactions, +{ + /// Configures the type responsible for yielding the transactions that should be included in the + /// payload. + pub fn with_transactions( + self, + best_transactions: T, + ) -> OpPayloadBuilder { + let Self { compute_pending_block, .. } = self; + OpPayloadBuilder { compute_pending_block, best_transactions } } /// A helper method to initialize [`PayloadBuilderService`] with the given EVM config. @@ -319,6 +338,7 @@ impl OpPayloadBuilder { Evm: ConfigureEvm
, { let payload_builder = reth_optimism_payload_builder::OpPayloadBuilder::new(evm_config) + .with_transactions(self.best_transactions) .set_compute_pending_block(self.compute_pending_block); let conf = ctx.payload_builder_config(); @@ -345,11 +365,12 @@ impl OpPayloadBuilder { } } -impl PayloadServiceBuilder for OpPayloadBuilder +impl PayloadServiceBuilder for OpPayloadBuilder where Node: FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, + Txs: OpPayloadTransactions, { async fn spawn_payload_service( self, diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 35e1d905a46..f0c6c04ce73 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -42,26 +42,41 @@ use op_alloy_consensus::DepositTransaction; /// Optimism's payload builder #[derive(Debug, Clone, PartialEq, Eq)] -pub struct OpPayloadBuilder { +pub struct OpPayloadBuilder { /// The rollup's compute pending block configuration option. // TODO(clabby): Implement this feature. pub compute_pending_block: bool, /// The type responsible for creating the evm. pub evm_config: EvmConfig, + /// The type responsible for yielding the best transactions for the payload if mempool + /// transactions are allowed. + pub best_transactions: Txs, } impl OpPayloadBuilder { /// `OpPayloadBuilder` constructor. pub const fn new(evm_config: EvmConfig) -> Self { - Self { compute_pending_block: true, evm_config } + Self { compute_pending_block: true, evm_config, best_transactions: () } } +} +impl OpPayloadBuilder { /// Sets the rollup's compute pending block configuration option. pub const fn set_compute_pending_block(mut self, compute_pending_block: bool) -> Self { self.compute_pending_block = compute_pending_block; self } + /// Configures the type responsible for yielding the transactions that should be included in the + /// payload. 
+ pub fn with_transactions( + self, + best_transactions: T, + ) -> OpPayloadBuilder { + let Self { compute_pending_block, evm_config, .. } = self; + OpPayloadBuilder { compute_pending_block, evm_config, best_transactions } + } + /// Enables the rollup's compute pending block configuration option. pub const fn compute_pending_block(self) -> Self { self.set_compute_pending_block(true) @@ -72,9 +87,10 @@ impl OpPayloadBuilder { self.compute_pending_block } } -impl OpPayloadBuilder +impl OpPayloadBuilder where EvmConfig: ConfigureEvm
, + Txs: OpPayloadTransactions, { /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload /// (that has the `parent` as its parent). @@ -123,12 +139,7 @@ where best_payload, }; - let builder = OpBuilder { - pool, - // TODO(mattsse): make this configurable in the `OpPayloadBuilder` directly via an - // additional generic - best: best_txs::, - }; + let builder = OpBuilder { pool, best: self.best_transactions.clone() }; let state_provider = client.state_by_block_hash(ctx.parent().hash())?; let state = StateProviderDatabase::new(state_provider); @@ -149,11 +160,12 @@ where } /// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. -impl PayloadBuilder for OpPayloadBuilder +impl PayloadBuilder for OpPayloadBuilder where Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, EvmConfig: ConfigureEvm
, + Txs: OpPayloadTransactions, { type Attributes = OpPayloadBuilderAttributes; type BuiltPayload = OpBuiltPayload; @@ -210,18 +222,17 @@ where /// And finally /// 5. build the block: compute all roots (txs, state) #[derive(Debug)] -pub struct OpBuilder { +pub struct OpBuilder { /// The transaction pool pool: Pool, /// Yields the best transaction to include if transactions from the mempool are allowed. - // TODO(mattsse): convert this to a trait - best: Best, + best: Txs, } -impl OpBuilder +impl OpBuilder where Pool: TransactionPool, - Best: FnOnce(Pool, BestTransactionsAttributes) -> BestTransactionsFor, + Txs: OpPayloadTransactions, { /// Builds the payload on top of the state. pub fn build( @@ -248,7 +259,7 @@ where // 4. if mem pool transactions are requested we execute them if !ctx.attributes().no_tx_pool { - let best_txs = best(pool, ctx.best_transaction_attributes()); + let best_txs = best.best_transactions(pool, ctx.best_transaction_attributes()); if let Some(cancelled) = ctx.execute_best_transactions::<_, Pool>(&mut info, &mut db, best_txs)? { @@ -379,11 +390,25 @@ where } } -fn best_txs( - pool: Pool, - attr: BestTransactionsAttributes, -) -> BestTransactionsFor { - pool.best_transactions_with_attributes(attr) +/// A type that returns a the [`BestTransactions`] that should be included in the pool. +pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { + /// Returns an iterator that yields the transaction in the order they should get included in the + /// new payload. 
+ fn best_transactions( + &self, + pool: Pool, + attr: BestTransactionsAttributes, + ) -> BestTransactionsFor; +} + +impl OpPayloadTransactions for () { + fn best_transactions( + &self, + pool: Pool, + attr: BestTransactionsAttributes, + ) -> BestTransactionsFor { + pool.best_transactions_with_attributes(attr) + } } /// This acts as the container for executed transactions and its byproducts (receipts, gas used) From 6ddb3eac97c63ed489ade8a92ad334690647708d Mon Sep 17 00:00:00 2001 From: Skanda Bhat Date: Tue, 5 Nov 2024 17:28:18 +0000 Subject: [PATCH 325/970] feat: mev_simBundle (#12218) Co-authored-by: Arsenii Kulikov --- crates/rpc/rpc/src/eth/sim_bundle.rs | 476 ++++++++++++++++++++++++++- 1 file changed, 465 insertions(+), 11 deletions(-) diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 46dbb45d962..67fd5181759 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -1,15 +1,71 @@ //! `Eth` Sim bundle implementation and helpers. 
-use std::sync::Arc; - -use alloy_rpc_types_mev::{SendBundleRequest, SimBundleOverrides, SimBundleResponse}; +use alloy_eips::BlockNumberOrTag; +use alloy_primitives::U256; +use alloy_rpc_types::BlockId; +use alloy_rpc_types_mev::{ + BundleItem, Inclusion, Privacy, RefundConfig, SendBundleRequest, SimBundleLogs, + SimBundleOverrides, SimBundleResponse, Validity, +}; use jsonrpsee::core::RpcResult; +use reth_chainspec::EthChainSpec; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_primitives::{ + revm_primitives::db::{DatabaseCommit, DatabaseRef}, + TransactionSigned, +}; +use reth_provider::{ChainSpecProvider, HeaderProvider}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_api::MevSimApiServer; -use reth_rpc_eth_api::helpers::{Call, EthTransactions, LoadPendingBlock}; -use reth_rpc_eth_types::EthApiError; +use reth_rpc_eth_api::{ + helpers::{Call, EthTransactions, LoadPendingBlock}, + FromEthApiError, RpcNodeCore, +}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_tasks::pool::BlockingTaskGuard; +use revm::{ + db::CacheDB, + primitives::{Address, EnvWithHandlerCfg, ResultAndState, SpecId, TxEnv}, +}; +use std::{sync::Arc, time::Duration}; use tracing::info; +/// Maximum bundle depth +const MAX_NESTED_BUNDLE_DEPTH: usize = 5; + +/// Maximum body size +const MAX_BUNDLE_BODY_SIZE: usize = 50; + +/// Default simulation timeout +const DEFAULT_SIM_TIMEOUT: Duration = Duration::from_secs(5); + +/// Maximum simulation timeout +const MAX_SIM_TIMEOUT: Duration = Duration::from_secs(30); + +/// Maximum payout cost +const SBUNDLE_PAYOUT_MAX_COST: u64 = 30_000; + +/// A flattened representation of a bundle item containing transaction and associated metadata. 
+#[derive(Clone, Debug)] +pub struct FlattenedBundleItem { + /// The signed transaction + pub tx: TransactionSigned, + /// The address that signed the transaction + pub signer: Address, + /// Whether the transaction is allowed to revert + pub can_revert: bool, + /// Item-level inclusion constraints + pub inclusion: Inclusion, + /// Optional validity constraints for the bundle item + pub validity: Option, + /// Optional privacy settings for the bundle item + pub privacy: Option, + /// Optional refund percent for the bundle item + pub refund_percent: Option, + /// Optional refund configs for the bundle item + pub refund_configs: Option>, +} + /// `Eth` sim bundle implementation. pub struct EthSimBundle { /// All nested fields bundled together. @@ -21,20 +77,370 @@ impl EthSimBundle { pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { Self { inner: Arc::new(EthSimBundleInner { eth_api, blocking_task_guard }) } } + + /// Access the underlying `Eth` API. + pub fn eth_api(&self) -> &Eth { + &self.inner.eth_api + } } impl EthSimBundle where Eth: EthTransactions + LoadPendingBlock + Call + 'static, { - /// Simulates a bundle of transactions. - pub async fn sim_bundle( + /// Flattens a potentially nested bundle into a list of individual transactions in a + /// `FlattenedBundleItem` with their associated metadata. This handles recursive bundle + /// processing up to `MAX_NESTED_BUNDLE_DEPTH` and `MAX_BUNDLE_BODY_SIZE`, preserving + /// inclusion, validity and privacy settings from parent bundles. 
+    fn parse_and_flatten_bundle(
+        &self,
+        request: &SendBundleRequest,
+    ) -> Result<Vec<FlattenedBundleItem>, EthApiError> {
+        let mut items = Vec::new();
+
+        // Stack for processing bundles
+        let mut stack = Vec::new();
+
+        // Start with initial bundle, index 0, and depth 1
+        stack.push((request, 0, 1));
+
+        while let Some((current_bundle, mut idx, depth)) = stack.pop() {
+            // Check max depth
+            if depth > MAX_NESTED_BUNDLE_DEPTH {
+                return Err(EthApiError::InvalidParams(EthSimBundleError::MaxDepth.to_string()));
+            }
+
+            // Determine inclusion, validity, and privacy
+            let inclusion = &current_bundle.inclusion;
+            let validity = &current_bundle.validity;
+            let privacy = &current_bundle.privacy;
+
+            // Validate inclusion parameters
+            let block_number = inclusion.block_number();
+            let max_block_number = inclusion.max_block_number().unwrap_or(block_number);
+
+            if max_block_number < block_number || block_number == 0 {
+                return Err(EthApiError::InvalidParams(
+                    EthSimBundleError::InvalidInclusion.to_string(),
+                ));
+            }
+
+            // Validate bundle body size
+            if current_bundle.bundle_body.len() > MAX_BUNDLE_BODY_SIZE {
+                return Err(EthApiError::InvalidParams(
+                    EthSimBundleError::BundleTooLarge.to_string(),
+                ));
+            }
+
+            // Validate validity and refund config
+            if let Some(validity) = &current_bundle.validity {
+                // Validate refund entries
+                if let Some(refunds) = &validity.refund {
+                    let mut total_percent = 0;
+                    for refund in refunds {
+                        if refund.body_idx as usize >= current_bundle.bundle_body.len() {
+                            return Err(EthApiError::InvalidParams(
+                                EthSimBundleError::InvalidValidity.to_string(),
+                            ));
+                        }
+                        if 100 - total_percent < refund.percent {
+                            return Err(EthApiError::InvalidParams(
+                                EthSimBundleError::InvalidValidity.to_string(),
+                            ));
+                        }
+                        total_percent += refund.percent;
+                    }
+                }
+
+                // Validate refund configs
+                if let Some(refund_configs) = &validity.refund_config {
+                    let mut total_percent = 0;
+                    for refund_config in refund_configs {
+                        if 100 - total_percent < refund_config.percent {
+                            return 
Err(EthApiError::InvalidParams(
+                                EthSimBundleError::InvalidValidity.to_string(),
+                            ));
+                        }
+                        total_percent += refund_config.percent;
+                    }
+                }
+            }
+
+            let body = &current_bundle.bundle_body;
+
+            // Process items in the current bundle
+            while idx < body.len() {
+                match &body[idx] {
+                    BundleItem::Tx { tx, can_revert } => {
+                        let recovered_tx =
+                            recover_raw_transaction(tx.clone()).map_err(EthApiError::from)?;
+                        let (tx, signer) = recovered_tx.into_components();
+                        let tx = tx.into_transaction();
+
+                        let refund_percent =
+                            validity.as_ref().and_then(|v| v.refund.as_ref()).and_then(|refunds| {
+                                refunds.iter().find_map(|refund| {
+                                    (refund.body_idx as usize == idx).then_some(refund.percent)
+                                })
+                            });
+                        let refund_configs =
+                            validity.as_ref().and_then(|v| v.refund_config.clone());
+
+                        // Create FlattenedBundleItem with current inclusion, validity, and privacy
+                        let flattened_item = FlattenedBundleItem {
+                            tx,
+                            signer,
+                            can_revert: *can_revert,
+                            inclusion: inclusion.clone(),
+                            validity: validity.clone(),
+                            privacy: privacy.clone(),
+                            refund_percent,
+                            refund_configs,
+                        };
+
+                        // Add to items
+                        items.push(flattened_item);
+
+                        idx += 1;
+                    }
+                    BundleItem::Bundle { bundle } => {
+                        // Push the current bundle and next index onto the stack to resume later
+                        stack.push((current_bundle, idx + 1, depth));
+
+                        // process the nested bundle next
+                        stack.push((bundle, 0, depth + 1));
+                        break;
+                    }
+                    BundleItem::Hash { hash: _ } => {
+                        // Hash-only items are not allowed
+                        return Err(EthApiError::InvalidParams(
+                            EthSimBundleError::InvalidBundle.to_string(),
+                        ));
+                    }
+                }
+            }
+        }
+
+        Ok(items)
+    }
+
+    async fn sim_bundle(
         &self,
         request: SendBundleRequest,
         overrides: SimBundleOverrides,
-    ) -> RpcResult<SimBundleResponse> {
-        info!("mev_simBundle called, request: {:?}, overrides: {:?}", request, overrides);
-        Err(EthApiError::Unsupported("mev_simBundle is not supported").into())
+        logs: bool,
+    ) -> Result<SimBundleResponse, EthApiError> {
+        let SimBundleOverrides {
+            parent_block,
+            block_number,
+            coinbase,
+            timestamp,
+            gas_limit,
+            
base_fee, + .. + } = overrides; + + // Parse and validate bundle + // Also, flatten the bundle here so that its easier to process + let flattened_bundle = self.parse_and_flatten_bundle(&request)?; + + let block_id = parent_block.unwrap_or(BlockId::Number(BlockNumberOrTag::Pending)); + let (cfg, mut block_env, current_block) = self.eth_api().evm_env_at(block_id).await?; + + let parent_header = RpcNodeCore::provider(&self.inner.eth_api) + .header_by_number(block_env.number.saturating_to::()) + .map_err(EthApiError::from_eth_err)? // Explicitly map the error + .ok_or_else(|| { + EthApiError::HeaderNotFound((block_env.number.saturating_to::()).into()) + })?; + + // apply overrides + if let Some(block_number) = block_number { + block_env.number = U256::from(block_number); + } + + if let Some(coinbase) = coinbase { + block_env.coinbase = coinbase; + } + + if let Some(timestamp) = timestamp { + block_env.timestamp = U256::from(timestamp); + } + + if let Some(gas_limit) = gas_limit { + block_env.gas_limit = U256::from(gas_limit); + } + + if let Some(base_fee) = base_fee { + block_env.basefee = U256::from(base_fee); + } else if cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { + if let Some(base_fee) = parent_header.next_block_base_fee( + RpcNodeCore::provider(&self.inner.eth_api) + .chain_spec() + .base_fee_params_at_block(block_env.number.saturating_to::()), + ) { + block_env.basefee = U256::from(base_fee); + } + } + + let eth_api = self.inner.eth_api.clone(); + + let sim_response = self + .inner + .eth_api + .spawn_with_state_at_block(current_block, move |state| { + // Setup environment + let current_block_number = current_block.as_u64().unwrap(); + let coinbase = block_env.coinbase; + let basefee = block_env.basefee; + let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, TxEnv::default()); + let db = CacheDB::new(StateProviderDatabase::new(state)); + + let initial_coinbase_balance = DatabaseRef::basic_ref(&db, coinbase) + 
.map_err(EthApiError::from_eth_err)?
+                    .map(|acc| acc.balance)
+                    .unwrap_or_default();
+
+                let mut coinbase_balance_before_tx = initial_coinbase_balance;
+                let mut total_gas_used = 0;
+                let mut total_profit = U256::ZERO;
+                let mut refundable_value = U256::ZERO;
+                let mut body_logs: Vec<SimBundleLogs> = Vec::new();
+
+                let mut evm = RpcNodeCore::evm_config(&eth_api).evm_with_env(db, env);
+
+                for item in &flattened_bundle {
+                    // Check inclusion constraints
+                    let block_number = item.inclusion.block_number();
+                    let max_block_number =
+                        item.inclusion.max_block_number().unwrap_or(block_number);
+
+                    if current_block_number < block_number ||
+                        current_block_number > max_block_number
+                    {
+                        return Err(EthApiError::InvalidParams(
+                            EthSimBundleError::InvalidInclusion.to_string(),
+                        )
+                        .into());
+                    }
+                    RpcNodeCore::evm_config(&eth_api).fill_tx_env(
+                        evm.tx_mut(),
+                        &item.tx,
+                        item.signer,
+                    );
+
+                    let ResultAndState { result, state } =
+                        evm.transact().map_err(EthApiError::from_eth_err)?;
+
+                    if !result.is_success() && !item.can_revert {
+                        return Err(EthApiError::InvalidParams(
+                            EthSimBundleError::BundleTransactionFailed.to_string(),
+                        )
+                        .into());
+                    }
+
+                    let gas_used = result.gas_used();
+                    total_gas_used += gas_used;
+
+                    // coinbase is always present in the result state
+                    let coinbase_balance_after_tx =
+                        state.get(&coinbase).map(|acc| acc.info.balance).unwrap_or_default();
+
+                    let coinbase_diff =
+                        coinbase_balance_after_tx.saturating_sub(coinbase_balance_before_tx);
+                    total_profit += coinbase_diff;
+
+                    // Add to refundable value if this tx does not have a refund percent
+                    if item.refund_percent.is_none() {
+                        refundable_value += coinbase_diff;
+                    }
+
+                    // Update coinbase balance before next tx
+                    coinbase_balance_before_tx = coinbase_balance_after_tx;
+
+                    // Collect logs if requested
+                    // TODO: since we are looping over iteratively, we are not collecting bundle
+                    // logs. We should collect bundle logs when we are processing the bundle items.
+ if logs { + let tx_logs = result.logs().to_vec(); + let sim_bundle_logs = + SimBundleLogs { tx_logs: Some(tx_logs), bundle_logs: None }; + body_logs.push(sim_bundle_logs); + } + + // Apply state changes + evm.context.evm.db.commit(state); + } + + // After processing all transactions, process refunds + for item in &flattened_bundle { + if let Some(refund_percent) = item.refund_percent { + // Get refund configurations + let refund_configs = item.refund_configs.clone().unwrap_or_else(|| { + vec![RefundConfig { address: item.signer, percent: 100 }] + }); + + // Calculate payout transaction fee + let payout_tx_fee = basefee * + U256::from(SBUNDLE_PAYOUT_MAX_COST) * + U256::from(refund_configs.len() as u64); + + // Add gas used for payout transactions + total_gas_used += SBUNDLE_PAYOUT_MAX_COST * refund_configs.len() as u64; + + // Calculate allocated refundable value (payout value) + let payout_value = + refundable_value * U256::from(refund_percent) / U256::from(100); + + if payout_tx_fee > payout_value { + return Err(EthApiError::InvalidParams( + EthSimBundleError::NegativeProfit.to_string(), + ) + .into()); + } + + // Subtract payout value from total profit + total_profit = total_profit.checked_sub(payout_value).ok_or( + EthApiError::InvalidParams( + EthSimBundleError::NegativeProfit.to_string(), + ), + )?; + + // Adjust refundable value + refundable_value = refundable_value.checked_sub(payout_value).ok_or( + EthApiError::InvalidParams( + EthSimBundleError::NegativeProfit.to_string(), + ), + )?; + } + } + + // Calculate mev gas price + let mev_gas_price = if total_gas_used != 0 { + total_profit / U256::from(total_gas_used) + } else { + U256::ZERO + }; + + Ok(SimBundleResponse { + success: true, + state_block: current_block_number, + error: None, + logs: Some(body_logs), + gas_used: total_gas_used, + mev_gas_price, + profit: total_profit, + refundable_value, + exec_error: None, + revert: None, + }) + }) + .await + .map_err(|_| { + 
EthApiError::InvalidParams(EthSimBundleError::BundleTimeout.to_string()) + })?; + + Ok(sim_response) } } @@ -48,7 +454,23 @@ where request: SendBundleRequest, overrides: SimBundleOverrides, ) -> RpcResult { - Self::sim_bundle(self, request, overrides).await + info!("mev_simBundle called, request: {:?}, overrides: {:?}", request, overrides); + + let override_timeout = overrides.timeout; + + let timeout = override_timeout + .map(Duration::from_secs) + .filter(|&custom_duration| custom_duration <= MAX_SIM_TIMEOUT) + .unwrap_or(DEFAULT_SIM_TIMEOUT); + + let bundle_res = + tokio::time::timeout(timeout, Self::sim_bundle(self, request, overrides, true)) + .await + .map_err(|_| { + EthApiError::InvalidParams(EthSimBundleError::BundleTimeout.to_string()) + })?; + + bundle_res.map_err(Into::into) } } @@ -74,3 +496,35 @@ impl Clone for EthSimBundle { Self { inner: Arc::clone(&self.inner) } } } + +/// [`EthSimBundle`] specific errors. +#[derive(Debug, thiserror::Error)] +pub enum EthSimBundleError { + /// Thrown when max depth is reached + #[error("max depth reached")] + MaxDepth, + /// Thrown when a bundle is unmatched + #[error("unmatched bundle")] + UnmatchedBundle, + /// Thrown when a bundle is too large + #[error("bundle too large")] + BundleTooLarge, + /// Thrown when validity is invalid + #[error("invalid validity")] + InvalidValidity, + /// Thrown when inclusion is invalid + #[error("invalid inclusion")] + InvalidInclusion, + /// Thrown when a bundle is invalid + #[error("invalid bundle")] + InvalidBundle, + /// Thrown when a bundle simulation times out + #[error("bundle simulation timed out")] + BundleTimeout, + /// Thrown when a transaction is reverted in a bundle + #[error("bundle transaction failed")] + BundleTransactionFailed, + /// Thrown when a bundle simulation returns negative profit + #[error("bundle simulation returned negative profit")] + NegativeProfit, +} From 3f02e18dc589f34a52da2fbe20c0b5a351442220 Mon Sep 17 00:00:00 2001 From: tedison 
<76473430+edisontim@users.noreply.github.com> Date: Wed, 6 Nov 2024 02:30:08 -0500 Subject: [PATCH 326/970] fix: add dev feature in error message to generate test vectors (#12084) --- crates/storage/db/benches/utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index 9700ef94b24..d4ae96e0006 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -32,7 +32,7 @@ where env!("CARGO_MANIFEST_DIR"), T::NAME )) - .expect("Test vectors not found. They can be generated from the workspace by calling `cargo run --bin reth -- test-vectors tables`."), + .expect("Test vectors not found. They can be generated from the workspace by calling `cargo run --bin reth --features dev -- test-vectors tables`."), )) .unwrap(); From 0c7700f2c7f87df7fb9c568005b2b977fd72bccb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Wed, 6 Nov 2024 14:58:45 +0700 Subject: [PATCH 327/970] feat(payload) : optimize new payload job by fetching only header hash instead of block (#12343) --- crates/payload/basic/src/lib.rs | 39 ++++++++++++-------------- crates/payload/primitives/src/error.rs | 3 ++ 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index e57cc668d27..9c90f7f83e0 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -10,7 +10,7 @@ use crate::metrics::PayloadBuilderMetrics; use alloy_consensus::constants::EMPTY_WITHDRAWALS; -use alloy_eips::{merge::SLOT_DURATION, BlockNumberOrTag}; +use alloy_eips::merge::SLOT_DURATION; use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; @@ -22,7 +22,7 @@ use reth_payload_primitives::{ }; use reth_primitives::{constants::RETH_CLIENT_VERSION, proofs, SealedHeader, Withdrawals}; use reth_provider::{ - 
BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, + BlockReaderIdExt, CanonStateNotification, ProviderError, StateProviderFactory, }; use reth_revm::cached::CachedReads; use reth_tasks::TaskSpawner; @@ -144,33 +144,30 @@ where &self, attributes: ::PayloadAttributes, ) -> Result { - let parent_block = if attributes.parent().is_zero() { - // use latest block if parent is zero: genesis block + let parent_header = if attributes.parent().is_zero() { + // Use latest header for genesis block case self.client - .block_by_number_or_tag(BlockNumberOrTag::Latest)? - .ok_or_else(|| PayloadBuilderError::MissingParentBlock(attributes.parent()))? - .seal_slow() + .latest_header() + .map_err(PayloadBuilderError::from)? + .ok_or_else(|| PayloadBuilderError::MissingParentHeader(B256::ZERO))? } else { - let block = self - .client - .find_block_by_hash(attributes.parent(), BlockSource::Any)? - .ok_or_else(|| PayloadBuilderError::MissingParentBlock(attributes.parent()))?; - - // we already know the hash, so we can seal it - block.seal(attributes.parent()) + // Fetch specific header by hash + self.client + .sealed_header_by_hash(attributes.parent()) + .map_err(PayloadBuilderError::from)? + .ok_or_else(|| PayloadBuilderError::MissingParentHeader(attributes.parent()))? 
}; - let hash = parent_block.hash(); - let parent_header = parent_block.header(); - let header = SealedHeader::new(parent_header.clone(), hash); - - let config = - PayloadConfig::new(Arc::new(header), self.config.extradata.clone(), attributes); + let config = PayloadConfig::new( + Arc::new(parent_header.clone()), + self.config.extradata.clone(), + attributes, + ); let until = self.job_deadline(config.attributes.timestamp()); let deadline = Box::pin(tokio::time::sleep_until(until)); - let cached_reads = self.maybe_pre_cached(hash); + let cached_reads = self.maybe_pre_cached(parent_header.hash()); let mut job = BasicPayloadJob { config, diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index 00df9e8d290..16446255c35 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -9,6 +9,9 @@ use tokio::sync::oneshot; /// Possible error variants during payload building. #[derive(Debug, thiserror::Error)] pub enum PayloadBuilderError { + /// Thrown when the parent header cannot be found + #[error("missing parent header: {0}")] + MissingParentHeader(B256), /// Thrown when the parent block is missing. 
#[error("missing parent block {0}")] MissingParentBlock(B256), From e34a88d2ccd7ff0a12394f4806989cc5db05976b Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Wed, 6 Nov 2024 02:30:15 -0600 Subject: [PATCH 328/970] renamed OptimismNode to OpNode (#12338) --- crates/node/builder/src/builder/mod.rs | 2 +- crates/optimism/bin/src/main.rs | 8 ++++---- crates/optimism/cli/src/lib.rs | 20 +++++++++----------- crates/optimism/node/src/lib.rs | 2 +- crates/optimism/node/src/node.rs | 10 +++++----- crates/optimism/node/tests/e2e/utils.rs | 4 ++-- crates/optimism/node/tests/it/builder.rs | 6 +++--- 7 files changed, 25 insertions(+), 27 deletions(-) diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 82d8d96f6f5..2e00b08f8a5 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -79,7 +79,7 @@ pub type RethFullAdapter = FullNodeTypesAdapter< /// configured components and can interact with the node. /// /// There are convenience functions for networks that come with a preset of types and components via -/// the [Node] trait, see `reth_node_ethereum::EthereumNode` or `reth_optimism_node::OptimismNode`. +/// the [`Node`] trait, see `reth_node_ethereum::EthereumNode` or `reth_optimism_node::OpNode`. /// /// The [`NodeBuilder::node`] function configures the node's types and components in one step. 
/// diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 6c440f43491..840da3bcf0b 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -5,7 +5,7 @@ use clap::Parser; use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher}; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; -use reth_optimism_node::{args::RollupArgs, node::OptimismAddOns, OptimismNode}; +use reth_optimism_node::{args::RollupArgs, node::OptimismAddOns, OpNode}; use reth_provider::providers::BlockchainProvider2; use tracing as _; @@ -34,8 +34,8 @@ fn main() { .with_persistence_threshold(rollup_args.persistence_threshold) .with_memory_block_buffer_target(rollup_args.memory_block_buffer_target); let handle = builder - .with_types_and_provider::>() - .with_components(OptimismNode::components(rollup_args)) + .with_types_and_provider::>() + .with_components(OpNode::components(rollup_args)) .with_add_ons(OptimismAddOns::new(sequencer_http_arg)) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( @@ -51,7 +51,7 @@ fn main() { } true => { let handle = - builder.node(OptimismNode::new(rollup_args.clone())).launch().await?; + builder.node(OpNode::new(rollup_args.clone())).launch().await?; handle.node_exit_future.await } diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 43d12616484..b3c7c86d1d1 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -47,7 +47,7 @@ use reth_node_core::{ version::{LONG_VERSION, SHORT_VERSION}, }; use reth_optimism_evm::OpExecutorProvider; -use reth_optimism_node::OptimismNode; +use reth_optimism_node::OpNode; use reth_tracing::FileWorkerGuard; use tracing::info; @@ -145,30 +145,28 @@ where runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) } Commands::Init(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } 
Commands::InitState(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::ImportOp(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::ImportReceiptsOp(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) - } + Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute::(ctx, OpExecutorProvider::optimism) + command.execute::(ctx, OpExecutorProvider::optimism) }), Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Recover(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx)) + runner.run_command_until_exit(|ctx| command.execute::(ctx)) } - Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), } diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index f2870d0b839..6419611067e 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -18,7 +18,7 @@ pub mod engine; pub use engine::OpEngineTypes; pub mod node; -pub use node::OptimismNode; +pub use node::OpNode; pub mod txpool; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index ae146a60885..2e1f71a5175 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -51,12 +51,12 
@@ impl NodePrimitives for OpPrimitives { /// Type configuration for a regular Optimism node. #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct OptimismNode { +pub struct OpNode { /// Additional Optimism args pub args: RollupArgs, } -impl OptimismNode { +impl OpNode { /// Creates a new instance of the Optimism node type. pub const fn new(args: RollupArgs) -> Self { Self { args } @@ -92,7 +92,7 @@ impl OptimismNode { } } -impl Node for OptimismNode +impl Node for OpNode where N: FullNodeTypes>, { @@ -119,13 +119,13 @@ where } } -impl NodeTypes for OptimismNode { +impl NodeTypes for OpNode { type Primitives = OpPrimitives; type ChainSpec = OpChainSpec; type StateCommitment = MerklePatriciaTrie; } -impl NodeTypesWithEngine for OptimismNode { +impl NodeTypesWithEngine for OpNode { type Engine = OpEngineTypes; } diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 16eb974914d..a8afab87ec2 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -6,14 +6,14 @@ use reth_e2e_test_utils::{ }; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_optimism_node::{ - node::OptimismAddOns, OpBuiltPayload, OpPayloadBuilderAttributes, OptimismNode, + node::OptimismAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes, }; use reth_payload_builder::EthPayloadBuilderAttributes; use std::sync::Arc; use tokio::sync::Mutex; /// Optimism Node Helper type -pub(crate) type OpNode = NodeHelperType>>; +pub(crate) type OpNode = NodeHelperType>>; pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index f1dde4c2c0a..3bd2da75557 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -4,7 +4,7 
@@ use reth_db::test_utils::create_test_rw_db; use reth_node_api::FullNodeComponents; use reth_node_builder::{NodeBuilder, NodeConfig}; use reth_optimism_chainspec::BASE_MAINNET; -use reth_optimism_node::{node::OptimismAddOns, OptimismNode}; +use reth_optimism_node::{node::OptimismAddOns, OpNode}; #[test] fn test_basic_setup() { @@ -13,8 +13,8 @@ fn test_basic_setup() { let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types::() - .with_components(OptimismNode::components(Default::default())) + .with_types::() + .with_components(OpNode::components(Default::default())) .with_add_ons(OptimismAddOns::new(None)) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); From 1a091a76432021e2a51187debb50cb5ecba10f3f Mon Sep 17 00:00:00 2001 From: Eniko Nagy <4188977+eenagy@users.noreply.github.com> Date: Wed, 6 Nov 2024 15:44:12 +0700 Subject: [PATCH 329/970] fix: allow SOURCE_DATE_EPOCH to be overridable (#12342) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ac4ad103858..b1908d7b109 100644 --- a/Makefile +++ b/Makefile @@ -62,7 +62,7 @@ install-op: ## Build and install the op-reth binary under `~/.cargo/bin`. build: ## Build the reth binary into `target` directory. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" -SOURCE_DATE_EPOCH := $(shell git log -1 --pretty=%ct) +SOURCE_DATE_EPOCH ?= $(shell git log -1 --pretty=%ct) .PHONY: reproducible reproducible: ## Build the reth binary into `target` directory with reproducible builds. 
Only works for x86_64-unknown-linux-gnu currently SOURCE_DATE_EPOCH=$(SOURCE_DATE_EPOCH) \ From f1ac9b87b8cc3bd1c59a9a463f3c3693c3d3f048 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 6 Nov 2024 10:45:00 +0100 Subject: [PATCH 330/970] refactor(rpc): simplifications with `DatabaseRef` calls (#12294) --- crates/rpc/rpc-eth-types/src/revm_utils.rs | 2 +- crates/rpc/rpc/src/eth/bundle.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-eth-types/src/revm_utils.rs b/crates/rpc/rpc-eth-types/src/revm_utils.rs index 7dc20c52421..ee3c6e7d9a7 100644 --- a/crates/rpc/rpc-eth-types/src/revm_utils.rs +++ b/crates/rpc/rpc-eth-types/src/revm_utils.rs @@ -265,7 +265,7 @@ where { // we need to fetch the account via the `DatabaseRef` to not update the state of the account, // which is modified via `Database::basic_ref` - let mut account_info = DatabaseRef::basic_ref(db, account)?.unwrap_or_default(); + let mut account_info = db.basic_ref(account)?.unwrap_or_default(); if let Some(nonce) = account_override.nonce { account_info.nonce = nonce; diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index ec1a43c7548..4d72efd1f8c 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -156,7 +156,8 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, TxEnv::default()); let db = CacheDB::new(StateProviderDatabase::new(state)); - let initial_coinbase = DatabaseRef::basic_ref(&db, coinbase) + let initial_coinbase = db + .basic_ref(coinbase) .map_err(Eth::Error::from_eth_err)? 
.map(|acc| acc.balance) .unwrap_or_default(); From 8ec3af65b4f1fea82c8429c68c3fbaf337941ddd Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 6 Nov 2024 10:45:22 +0100 Subject: [PATCH 331/970] test(tokio-util): add unit tests for `RateLimit` (#12297) --- crates/tokio-util/src/ratelimit.rs | 117 ++++++++++++++++++++++++++++- 1 file changed, 116 insertions(+), 1 deletion(-) diff --git a/crates/tokio-util/src/ratelimit.rs b/crates/tokio-util/src/ratelimit.rs index 16e403f10aa..33a9c5273d8 100644 --- a/crates/tokio-util/src/ratelimit.rs +++ b/crates/tokio-util/src/ratelimit.rs @@ -8,7 +8,7 @@ use std::{ }; use tokio::time::Sleep; -/// Given a [Rate] this type enforces a rate limit. +/// Given a [`Rate`] this type enforces a rate limit. #[derive(Debug)] pub struct RateLimit { rate: Rate, @@ -122,6 +122,7 @@ impl Rate { #[cfg(test)] mod tests { use super::*; + use tokio::time; #[tokio::test] async fn test_rate_limit() { @@ -157,4 +158,118 @@ mod tests { }) .await; } + + #[tokio::test] + async fn test_rate_limit_initialization() { + let rate = Rate::new(5, Duration::from_secs(1)); + let limit = RateLimit::new(rate); + + // Verify the limit is correctly set + assert_eq!(limit.limit(), 5); + } + + #[tokio::test] + async fn test_rate_limit_allows_within_limit() { + let mut limit = RateLimit::new(Rate::new(3, Duration::from_millis(1))); + + // Check that the rate limiter is ready initially + for _ in 0..3 { + poll_fn(|cx| { + // Should be ready within the limit + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + // Signal that a request has been made + limit.tick(); + } + + // After 3 requests, it should be pending (rate limit hit) + poll_fn(|cx| { + // Exceeded limit, should now be limited + assert!(limit.poll_ready(cx).is_pending()); + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + async fn test_rate_limit_enforces_wait_after_limit() { + let mut limit = RateLimit::new(Rate::new(2, 
Duration::from_millis(500))); + + // Consume the limit + for _ in 0..2 { + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + limit.tick(); + } + + // Should now be limited (pending) + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_pending()); + Poll::Ready(()) + }) + .await; + + // Wait until the rate period elapses + time::sleep(limit.rate.duration()).await; + + // Now it should be ready again after the wait + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + async fn test_wait_method_awaits_readiness() { + let mut limit = RateLimit::new(Rate::new(1, Duration::from_millis(500))); + + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + + limit.tick(); + + // The limit should now be exceeded + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_pending()); + Poll::Ready(()) + }) + .await; + + // The `wait` method should block until the rate period elapses + limit.wait().await; + + // After `wait`, it should now be ready + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + #[should_panic(expected = "RateLimit limited; poll_ready must be called first")] + async fn test_tick_panics_when_limited() { + let mut limit = RateLimit::new(Rate::new(1, Duration::from_secs(1))); + + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + + // Consume the limit + limit.tick(); + + // Attempting to tick again without poll_ready being ready should panic + limit.tick(); + } } From 4bac1530d7f403ae3bca58f736aba9a1911bab4c Mon Sep 17 00:00:00 2001 From: tedison <76473430+edisontim@users.noreply.github.com> Date: Wed, 6 Nov 2024 05:19:15 -0500 Subject: [PATCH 332/970] feat: add gas averages over time into grafana (#12237) --- etc/grafana/dashboards/overview.json | 96 ++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) diff 
--git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 25cc280fe03..a19d3be8cf6 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -3017,6 +3017,102 @@ "legendFormat": "Gas/s", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[1m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (1m)", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[5m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (5m)", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[10m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (10m)", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[30m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (30m)", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + 
"editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[1h])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (1h)", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[24h])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (24h)", + "range": true, + "refId": "G", + "useBackend": false } ], "title": "Execution throughput", From 4048117bcff47cd720e2bf16a01e031711017040 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Wed, 6 Nov 2024 04:20:33 -0600 Subject: [PATCH 333/970] Track time diff between `new_payload` and FCU (#12245) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/rpc/rpc-engine-api/Cargo.toml | 1 + crates/rpc/rpc-engine-api/src/engine_api.rs | 39 ++++++++++++++++++--- crates/rpc/rpc-engine-api/src/metrics.rs | 2 ++ 4 files changed, 39 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3fcd59e79f..0f6a62647a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8790,6 +8790,7 @@ dependencies = [ "jsonrpsee-core", "jsonrpsee-types", "metrics", + "parking_lot", "reth-beacon-consensus", "reth-chainspec", "reth-engine-primitives", diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 00503f2c1dd..62d1eea3225 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -45,6 +45,7 @@ jsonrpsee-types.workspace = true serde.workspace = true thiserror.workspace = true tracing.workspace = true +parking_lot.workspace = true [dev-dependencies] reth-ethereum-engine-primitives.workspace = true diff --git 
a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 20eeb390ac1..a017c50678f 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -10,6 +10,7 @@ use alloy_rpc_types_engine::{ }; use async_trait::async_trait; use jsonrpsee_core::RpcResult; +use parking_lot::Mutex; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::{EthereumHardforks, Hardforks}; use reth_engine_primitives::{EngineTypes, EngineValidator}; @@ -67,6 +68,8 @@ struct EngineApiInner>, } impl @@ -102,6 +105,7 @@ where capabilities, tx_pool, validator, + latest_new_payload_response: Mutex::new(None), }); Self { inner } } @@ -140,11 +144,13 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V1, payload_or_attrs)?; + Ok(self .inner .beacon_consensus .new_payload(payload, ExecutionPayloadSidecar::none()) - .await?) + .await + .inspect(|_| self.inner.on_new_payload_response())?) } /// See also @@ -164,7 +170,8 @@ where .inner .beacon_consensus .new_payload(payload, ExecutionPayloadSidecar::none()) - .await?) + .await + .inspect(|_| self.inner.on_new_payload_response())?) } /// See also @@ -194,7 +201,8 @@ where parent_beacon_block_root, }), ) - .await?) + .await + .inspect(|_| self.inner.on_new_payload_response())?) } /// See also @@ -225,7 +233,8 @@ where execution_requests, ), ) - .await?) + .await + .inspect(|_| self.inner.on_new_payload_response())?) 
} /// Sends a message to the beacon consensus engine to update the fork choice _without_ @@ -598,6 +607,8 @@ where state: ForkchoiceState, payload_attrs: Option, ) -> EngineApiResult { + self.inner.record_elapsed_time_on_fcu(); + if let Some(ref attrs) = payload_attrs { let attr_validation_res = self.inner.validator.ensure_well_formed_attributes(version, attrs); @@ -631,6 +642,26 @@ where } } +impl + EngineApiInner +where + EngineT: EngineTypes, +{ + /// Tracks the elapsed time between the new payload response and the received forkchoice update + /// request. + fn record_elapsed_time_on_fcu(&self) { + if let Some(start_time) = self.latest_new_payload_response.lock().take() { + let elapsed_time = start_time.elapsed(); + self.metrics.latency.new_payload_forkchoice_updated_time_diff.record(elapsed_time); + } + } + + /// Updates the timestamp for the latest new payload response. + fn on_new_payload_response(&self) { + self.latest_new_payload_response.lock().replace(Instant::now()); + } +} + #[async_trait] impl EngineApiServer for EngineApi diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 8d0106f9dd9..9325ce26778 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -34,6 +34,8 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) fork_choice_updated_v2: Histogram, /// Latency for `engine_forkchoiceUpdatedV3` pub(crate) fork_choice_updated_v3: Histogram, + /// Time diff between `engine_newPayloadV*` and the next FCU + pub(crate) new_payload_forkchoice_updated_time_diff: Histogram, /// Latency for `engine_getPayloadV1` pub(crate) get_payload_v1: Histogram, /// Latency for `engine_getPayloadV2` From 098fa7f6119b332eb64b3fa2e2b4bd9414dbf5c9 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 6 Nov 2024 12:38:29 +0100 Subject: [PATCH 334/970] feat(engine): use execute_with_state_hook in ExecutorMetrics (#12316) --- Cargo.lock | 12 ++ 
crates/engine/tree/src/tree/mod.rs | 10 +- crates/evm/Cargo.toml | 1 + crates/evm/src/metrics.rs | 243 ++++++++++++++++++++++++++--- 4 files changed, 239 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f6a62647a7..f70611f7a5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4863,7 +4863,9 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.0", + "indexmap 2.6.0", "metrics", + "ordered-float", "quanta", "sketches-ddsketch", ] @@ -5404,6 +5406,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "ordered-float" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d501f1a72f71d3c063a6bbc8f7271fa73aa09fe5d6283b6571e2ed176a2537" +dependencies = [ + "num-traits", +] + [[package]] name = "overload" version = "0.1.1" @@ -7440,6 +7451,7 @@ dependencies = [ "auto_impl", "futures-util", "metrics", + "metrics-util", "parking_lot", "reth-chainspec", "reth-consensus", diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index c3e922d11c9..36108e63bf8 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -44,6 +44,7 @@ use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; use reth_trie_parallel::parallel_root::{ParallelStateRoot, ParallelStateRootError}; +use revm_primitives::ResultAndState; use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, @@ -2187,7 +2188,14 @@ where let block = block.unseal(); let exec_time = Instant::now(); - let output = self.metrics.executor.execute_metered(executor, (&block, U256::MAX).into())?; + // TODO: create StateRootTask with the receiving end of a channel and + // pass the sending end of the channel to the state hook. 
+ let noop_state_hook = |_result_and_state: &ResultAndState| {}; + let output = self.metrics.executor.execute_metered( + executor, + (&block, U256::MAX).into(), + Box::new(noop_state_hook), + )?; trace!(target: "engine::tree", elapsed=?exec_time.elapsed(), ?block_number, "Executed block"); if let Err(err) = self.consensus.validate_block_post_execution( diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index a4ce3c3893b..c895110209b 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -40,6 +40,7 @@ parking_lot = { workspace = true, optional = true } parking_lot.workspace = true reth-ethereum-forks.workspace = true alloy-consensus.workspace = true +metrics-util = { workspace = true, features = ["debugging"] } [features] default = ["std"] diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index fbb2b858b15..3464bb96f4c 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -2,14 +2,41 @@ //! //! Block processing related to syncing should take care to update the metrics by using either //! [`ExecutorMetrics::execute_metered`] or [`ExecutorMetrics::metered_one`]. 
-use std::time::Instant; - +use crate::{execute::Executor, system_calls::OnStateHook}; use metrics::{Counter, Gauge, Histogram}; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput}; use reth_metrics::Metrics; use reth_primitives::BlockWithSenders; +use revm_primitives::ResultAndState; +use std::time::Instant; + +/// Wrapper struct that combines metrics and state hook +struct MeteredStateHook { + metrics: ExecutorMetrics, + inner_hook: Box, +} + +impl OnStateHook for MeteredStateHook { + fn on_state(&mut self, result_and_state: &ResultAndState) { + // Update the metrics for the number of accounts, storage slots and bytecodes loaded + let accounts = result_and_state.state.keys().len(); + let storage_slots = + result_and_state.state.values().map(|account| account.storage.len()).sum::(); + let bytecodes = result_and_state + .state + .values() + .filter(|account| !account.info.is_empty_code_hash()) + .collect::>() + .len(); + + self.metrics.accounts_loaded_histogram.record(accounts as f64); + self.metrics.storage_slots_loaded_histogram.record(storage_slots as f64); + self.metrics.bytecodes_loaded_histogram.record(bytecodes as f64); -use crate::execute::Executor; + // Call the original state hook + self.inner_hook.on_state(result_and_state); + } +} /// Executor metrics. // TODO(onbjerg): add sload/sstore @@ -65,10 +92,13 @@ impl ExecutorMetrics { /// /// Compared to [`Self::metered_one`], this method additionally updates metrics for the number /// of accounts, storage slots and bytecodes loaded and updated. + /// Execute the given block using the provided [`Executor`] and update metrics for the + /// execution. 
pub fn execute_metered<'a, E, DB, O, Error>( &self, executor: E, input: BlockExecutionInput<'a, BlockWithSenders>, + state_hook: Box, ) -> Result, Error> where E: Executor< @@ -78,29 +108,16 @@ impl ExecutorMetrics { Error = Error, >, { - let output = self.metered(input.block, || { - executor.execute_with_state_closure(input, |state: &revm::db::State| { - // Update the metrics for the number of accounts, storage slots and bytecodes - // loaded - let accounts = state.cache.accounts.len(); - let storage_slots = state - .cache - .accounts - .values() - .filter_map(|account| { - account.account.as_ref().map(|account| account.storage.len()) - }) - .sum::(); - let bytecodes = state.cache.contracts.len(); - - // Record all state present in the cache state as loaded even though some might have - // been newly created. - // TODO: Consider spitting these into loaded and newly created. - self.accounts_loaded_histogram.record(accounts as f64); - self.storage_slots_loaded_histogram.record(storage_slots as f64); - self.bytecodes_loaded_histogram.record(bytecodes as f64); - }) - })?; + // clone here is cheap, all the metrics are Option>. additionally + // they are gloally registered so that the data recorded in the hook will + // be accessible. 
+ let wrapper = MeteredStateHook { metrics: self.clone(), inner_hook: state_hook }; + + // Store reference to block for metered + let block = input.block; + + // Use metered to execute and track timing/gas metrics + let output = self.metered(block, || executor.execute_with_state_hook(input, wrapper))?; // Update the metrics for the number of accounts, storage slots and bytecodes updated let accounts = output.state.state.len(); @@ -123,3 +140,177 @@ impl ExecutorMetrics { self.metered(input.block, || f(input)) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::eip7685::Requests; + use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; + use revm::db::BundleState; + use revm_primitives::{ + Account, AccountInfo, AccountStatus, Bytes, EvmState, EvmStorage, EvmStorageSlot, + ExecutionResult, Output, SuccessReason, B256, U256, + }; + use std::sync::mpsc; + + /// A mock executor that simulates state changes + struct MockExecutor { + result_and_state: ResultAndState, + } + + impl Executor<()> for MockExecutor { + type Input<'a> + = BlockExecutionInput<'a, BlockWithSenders> + where + Self: 'a; + type Output = BlockExecutionOutput<()>; + type Error = std::convert::Infallible; + + fn execute(self, _input: Self::Input<'_>) -> Result { + Ok(BlockExecutionOutput { + state: BundleState::default(), + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + }) + } + fn execute_with_state_closure( + self, + _input: Self::Input<'_>, + _state: F, + ) -> Result + where + F: FnMut(&revm::State<()>), + { + Ok(BlockExecutionOutput { + state: BundleState::default(), + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + }) + } + fn execute_with_state_hook( + self, + _input: Self::Input<'_>, + mut hook: F, + ) -> Result + where + F: OnStateHook + 'static, + { + // Call hook with our mock state + hook.on_state(&self.result_and_state); + + Ok(BlockExecutionOutput { + state: BundleState::default(), + receipts: vec![], + requests: 
Requests::default(), + gas_used: 0, + }) + } + } + + struct ChannelStateHook { + output: i32, + sender: mpsc::Sender, + } + + impl OnStateHook for ChannelStateHook { + fn on_state(&mut self, _result_and_state: &ResultAndState) { + let _ = self.sender.send(self.output); + } + } + + fn setup_test_recorder() -> Snapshotter { + let recorder = DebuggingRecorder::new(); + let snapshotter = recorder.snapshotter(); + recorder.install().unwrap(); + snapshotter + } + + #[test] + fn test_executor_metrics_hook_metrics_recorded() { + let snapshotter = setup_test_recorder(); + let metrics = ExecutorMetrics::default(); + + let input = BlockExecutionInput { + block: &BlockWithSenders::default(), + total_difficulty: Default::default(), + }; + + let (tx, _rx) = mpsc::channel(); + let expected_output = 42; + let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); + + let result_and_state = ResultAndState { + result: ExecutionResult::Success { + reason: SuccessReason::Stop, + gas_used: 100, + output: Output::Call(Bytes::default()), + logs: vec![], + gas_refunded: 0, + }, + state: { + let mut state = EvmState::default(); + let storage = + EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); + state.insert( + Default::default(), + Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), + }, + storage, + status: AccountStatus::Loaded, + }, + ); + state + }, + }; + let executor = MockExecutor { result_and_state }; + let _result = metrics.execute_metered(executor, input, state_hook).unwrap(); + + let snapshot = snapshotter.snapshot().into_vec(); + + for metric in snapshot { + let metric_name = metric.0.key().name(); + if metric_name == "sync.execution.accounts_loaded_histogram" || + metric_name == "sync.execution.storage_slots_loaded_histogram" || + metric_name == "sync.execution.bytecodes_loaded_histogram" + { + if let DebugValue::Histogram(vs) = metric.3 { + 
assert!( + vs.iter().any(|v| v.into_inner() > 0.0), + "metric {metric_name} not recorded" + ); + } + } + } + } + + #[test] + fn test_executor_metrics_hook_called() { + let metrics = ExecutorMetrics::default(); + + let input = BlockExecutionInput { + block: &BlockWithSenders::default(), + total_difficulty: Default::default(), + }; + + let (tx, rx) = mpsc::channel(); + let expected_output = 42; + let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); + + let result_and_state = ResultAndState { + result: ExecutionResult::Revert { gas_used: 0, output: Default::default() }, + state: EvmState::default(), + }; + let executor = MockExecutor { result_and_state }; + let _result = metrics.execute_metered(executor, input, state_hook).unwrap(); + + let actual_output = rx.try_recv().unwrap(); + assert_eq!(actual_output, expected_output); + } +} From d8edf9c80ec2a339486c8b678642c90a59756a12 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 6 Nov 2024 14:20:51 +0100 Subject: [PATCH 335/970] chore(sdk): define `FullBlock` trait (#12326) --- crates/primitives-traits/src/block/mod.rs | 9 +++++---- crates/primitives-traits/src/lib.rs | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 395cf61df14..519987606ee 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -6,13 +6,14 @@ use alloc::{fmt, vec::Vec}; use alloy_consensus::BlockHeader; use alloy_primitives::{Address, Sealable, B256}; +use reth_codecs::Compact; use crate::BlockBody; -/// Helper trait, unifies behaviour required of a block header. -pub trait Header: BlockHeader + Sealable {} +/// Helper trait that unifies all behaviour required by block to support full node operations. 
+pub trait FullBlock: Block + Compact {} -impl Header for T where T: BlockHeader + Sealable {} +impl FullBlock for T where T: Block + Compact {} /// Abstraction of block data type. // todo: make sealable super-trait, depends on @@ -30,7 +31,7 @@ pub trait Block: + Into<(Self::Header, Self::Body)> { /// Header part of the block. - type Header: Header; + type Header: BlockHeader + Sealable; /// The block's body contains the transactions in the block. type Body: BlockBody; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 90a6935ae10..02f1664d799 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -31,7 +31,7 @@ mod integer_list; pub use integer_list::{IntegerList, IntegerListError}; pub mod block; -pub use block::{body::BlockBody, Block}; +pub use block::{body::BlockBody, Block, FullBlock}; mod withdrawal; pub use withdrawal::Withdrawals; From 660ca389e76da84aa20eec6255ebdf5d5993339c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 6 Nov 2024 14:21:17 +0100 Subject: [PATCH 336/970] chore(sdk): define `FullSignedTx` trait (#12327) --- crates/primitives-traits/src/lib.rs | 5 ++++- crates/primitives-traits/src/transaction/signed.rs | 6 ++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 02f1664d799..3316e713541 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -25,7 +25,10 @@ pub mod receipt; pub use receipt::Receipt; pub mod transaction; -pub use transaction::{signed::SignedTransaction, FullTransaction, Transaction}; +pub use transaction::{ + signed::{FullSignedTx, SignedTransaction}, + FullTransaction, Transaction, +}; mod integer_list; pub use integer_list::{IntegerList, IntegerListError}; diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 4c12437212a..555cc3851f8 
100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -2,12 +2,18 @@ use alloc::fmt; use core::hash::Hash; +use reth_codecs::Compact; use alloy_consensus::Transaction; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{keccak256, Address, Signature, TxHash, B256}; use revm_primitives::TxEnv; +/// Helper trait that unifies all behaviour required by block to support full node operations. +pub trait FullSignedTx: SignedTransaction + Compact {} + +impl FullSignedTx for T where T: SignedTransaction + Compact {} + /// A signed transaction. pub trait SignedTransaction: fmt::Debug From b5f8c72095d65dc13a986b785ecd1f627a94a582 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 6 Nov 2024 15:27:06 +0100 Subject: [PATCH 337/970] chore: misc trait bounds (#12347) --- crates/payload/basic/src/lib.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 9c90f7f83e0..867125d808b 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -14,16 +14,14 @@ use alloy_eips::merge::SLOT_DURATION; use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_chainspec::EthereumHardforks; use reth_evm::state_change::post_block_withdrawals_balance_increments; use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator}; use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; use reth_primitives::{constants::RETH_CLIENT_VERSION, proofs, SealedHeader, Withdrawals}; -use reth_provider::{ - BlockReaderIdExt, CanonStateNotification, ProviderError, StateProviderFactory, -}; +use reth_provider::{BlockReaderIdExt, CanonStateNotification, StateProviderFactory}; use 
reth_revm::cached::CachedReads; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; @@ -991,12 +989,16 @@ impl WithdrawalsOutcome { /// Returns the withdrawals root. /// /// Returns `None` values pre shanghai -pub fn commit_withdrawals>( +pub fn commit_withdrawals( db: &mut State, chain_spec: &ChainSpec, timestamp: u64, withdrawals: Withdrawals, -) -> Result { +) -> Result +where + DB: Database, + ChainSpec: EthereumHardforks, +{ if !chain_spec.is_shanghai_active_at_timestamp(timestamp) { return Ok(WithdrawalsOutcome::pre_shanghai()) } @@ -1023,7 +1025,7 @@ pub fn commit_withdrawals>( /// /// This compares the total fees of the blocks, higher is better. #[inline(always)] -pub fn is_better_payload(best_payload: Option, new_fees: U256) -> bool { +pub fn is_better_payload(best_payload: Option<&T>, new_fees: U256) -> bool { if let Some(best_payload) = best_payload { new_fees > best_payload.fees() } else { From 12b0637485725ab72f9b2fafccc707249d9d2d11 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 6 Nov 2024 15:35:47 +0100 Subject: [PATCH 338/970] refactor(trie): small refactor in `HashedPostState::from_reverts` (#12319) Co-authored-by: Matthias Seitz --- crates/trie/db/src/state.rs | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 4d46183dfda..0d2171604d5 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -7,16 +7,12 @@ use reth_db_api::{ transaction::DbTx, }; use reth_execution_errors::StateRootError; -use reth_primitives::Account; use reth_storage_errors::db::DatabaseError; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory, updates::TrieUpdates, HashedPostState, HashedStorage, StateRoot, StateRootProgress, TrieInput, }; -use std::{ - collections::{hash_map, HashMap}, - ops::RangeInclusive, -}; +use 
std::{collections::HashMap, ops::RangeInclusive}; use tracing::debug; /// Extends [`StateRoot`] with operations specific for working with a database transaction. @@ -222,13 +218,11 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> impl DatabaseHashedPostState for HashedPostState { fn from_reverts(tx: &TX, from: BlockNumber) -> Result { // Iterate over account changesets and record value before first occurring account change. - let mut accounts = HashMap::>::default(); + let mut accounts = HashMap::new(); let mut account_changesets_cursor = tx.cursor_read::()?; for entry in account_changesets_cursor.walk_range(from..)? { let (_, AccountBeforeTx { address, info }) = entry?; - if let hash_map::Entry::Vacant(entry) = accounts.entry(address) { - entry.insert(info); - } + accounts.entry(address).or_insert(info); } // Iterate over storage changesets and record value before first occurring storage change. @@ -239,9 +233,7 @@ impl DatabaseHashedPostState for HashedPostState { { let (BlockNumberAddress((_, address)), storage) = entry?; let account_storage = storages.entry(address).or_default(); - if let hash_map::Entry::Vacant(entry) = account_storage.entry(storage.key) { - entry.insert(storage.value); - } + account_storage.entry(storage.key).or_insert(storage.value); } let hashed_accounts = From 38fdc93a126ab2233266a355d5cb0dd048f1fe66 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 6 Nov 2024 18:50:25 +0400 Subject: [PATCH 339/970] feat: bump alloy (#12215) Co-authored-by: Matthias Seitz --- Cargo.lock | 216 ++++++------ Cargo.toml | 78 ++--- bin/reth-bench/src/bench/context.rs | 7 +- bin/reth-bench/src/bench/new_payload_fcu.rs | 9 +- bin/reth-bench/src/bench/new_payload_only.rs | 5 +- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/consensus/common/src/validation.rs | 5 +- crates/consensus/debug-client/src/client.rs | 18 +- .../debug-client/src/providers/rpc.rs | 6 +- crates/ethereum/node/tests/e2e/p2p.rs | 3 +- crates/ethereum/node/tests/e2e/rpc.rs | 
7 +- crates/net/eth-wire-types/src/blocks.rs | 10 +- crates/net/eth-wire-types/src/transactions.rs | 30 +- .../network/tests/it/big_pooled_txs_req.rs | 2 +- crates/net/network/tests/it/requests.rs | 4 +- crates/net/network/tests/it/txgossip.rs | 2 +- crates/optimism/chainspec/src/lib.rs | 8 +- crates/optimism/evm/src/execute.rs | 4 +- crates/optimism/node/src/txpool.rs | 2 +- crates/optimism/rpc/src/eth/transaction.rs | 63 ++-- crates/primitives-traits/src/header/sealed.rs | 7 + .../src/transaction/signed.rs | 2 +- crates/primitives/Cargo.toml | 2 + crates/primitives/src/alloy_compat.rs | 331 ++++++------------ crates/primitives/src/transaction/mod.rs | 154 +++----- crates/primitives/src/transaction/pooled.rs | 58 +-- crates/primitives/src/transaction/sidecar.rs | 108 +----- .../primitives/src/transaction/signature.rs | 94 ++--- crates/primitives/src/transaction/util.rs | 24 +- crates/rpc/rpc-eth-api/src/helpers/block.rs | 6 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 30 +- crates/rpc/rpc-eth-api/src/helpers/signer.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 4 +- crates/rpc/rpc-eth-api/src/types.rs | 5 +- crates/rpc/rpc-eth-types/src/simulate.rs | 9 +- crates/rpc/rpc-types-compat/src/block.rs | 95 +---- crates/rpc/rpc-types-compat/src/proof.rs | 6 +- .../rpc-types-compat/src/transaction/mod.rs | 46 +-- crates/rpc/rpc/src/eth/helpers/signer.rs | 8 +- crates/rpc/rpc/src/eth/helpers/types.rs | 97 ++--- crates/rpc/rpc/src/eth/pubsub.rs | 2 +- crates/rpc/rpc/src/otterscan.rs | 8 +- .../codecs/src/alloy/authorization_list.rs | 3 +- crates/storage/codecs/src/alloy/signature.rs | 6 +- .../provider/src/providers/consistent.rs | 4 +- .../src/providers/database/provider.rs | 2 +- .../src/providers/static_file/manager.rs | 2 +- .../storage/provider/src/test_utils/blocks.rs | 8 +- crates/storage/provider/src/writer/mod.rs | 10 +- .../transaction-pool/src/test_utils/mock.rs | 4 +- crates/transaction-pool/src/traits.rs | 2 +- 
testing/testing-utils/src/generators.rs | 16 +- 52 files changed, 589 insertions(+), 1047 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f70611f7a5c..fba1e61e037 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ed961a48297c732a5d97ee321aa8bb5009ecadbcb077d8bec90cb54e651629" +checksum = "7109d565c7157ee2c10beea7911a71130aa6c3cb6dfeaf66905a98f69b96a754" dependencies = [ "alloy-eips", "alloy-primitives", @@ -124,15 +124,16 @@ dependencies = [ "auto_impl", "c-kzg", "derive_more 1.0.0", + "rand 0.8.5", "serde", "serde_with", ] [[package]] name = "alloy-contract" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "460ab80ce4bda1c80bcf96fe7460520476f2c7b734581c6567fac2708e2a60ef" +checksum = "08f16c29a39afa238e35ee4ba06ca2e1c3a4764c2096e94c66730688a0471be7" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -150,9 +151,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5228b189b18b85761340dc9eaac0141148a8503657b36f9bc3a869413d987ca" +checksum = "85132f2698b520fab3f54beed55a44389f7006a7b557a0261e1e69439dcc1572" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -181,9 +182,9 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.3.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ffc577390ce50234e02d841214b3dc0bea6aaaae8e04bbf3cb82e9a45da9eb" +checksum = "69fb9fd842fdf10a524bbf2c4de6942ad869c1c8c3d128a1b09e67ed5f7cedbd" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -197,9 +198,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.5.4" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b69e06cf9c37be824b9d26d6d101114fdde6af0c87de2828b414c05c4b3daa71" +checksum = "711de3f04cf728259ff149f725df12a8595b6b10baefafb0a0447201c72d76de" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -218,9 +219,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dde15e14944a88bd6a57d325e9a49b75558746fe16aaccc79713ae50a6a9574c" +checksum = "76b8fa6253466bd6f4b5ba3d725d350f7a05de494dd1b8d01537eafe934667e9" dependencies = [ "alloy-primitives", "alloy-serde", @@ -229,9 +230,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31a0f0d51db8a1a30a4d98a9f90e090a94c8f44cb4d9eafc7e03aa6d00aae984" +checksum = "ded610181f3dad5810f6ff12d1a99994cf9b42d2fcb7709029352398a5da5ae6" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -241,9 +242,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af5979e0d5a7bf9c7eb79749121e8256e59021af611322aee56e77e20776b4b3" +checksum = "b9278d6d554510136d9e0e4e51de4f5a9a4baffc8975f29e9acd01e12b2e045c" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -255,9 +256,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "204237129086ce5dc17a58025e93739b01b45313841f98fa339eb1d780511e57" +checksum = "85d356983dea86089b05674d5ef88a7168a5c34a523ef62e2e3c8a9847ce0822" dependencies = [ "alloy-consensus", "alloy-eips", @@ -271,14 +272,16 @@ dependencies = [ "async-trait", "auto_impl", "futures-utils-wasm", + "serde", + "serde_json", "thiserror", ] [[package]] name = "alloy-network-primitives" 
-version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514f70ee2a953db21631cd817b13a1571474ec77ddc03d47616d5e8203489fde" +checksum = "18fbc9778e7989877465888383a7533c7318a9200d7229336bcc2b0277df36ba" dependencies = [ "alloy-consensus", "alloy-eips", @@ -289,9 +292,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27444ea67d360508753022807cdd0b49a95c878924c9c5f8f32668b7d7768245" +checksum = "6fe5fd811738d37c56318378802b7bc3cc44e4d12b532641374309a10a04c515" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -306,9 +309,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8edae627382349b56cd6a7a2106f4fd69b243a9233e560c55c2e03cabb7e1d3c" +checksum = "fd58d377699e6cfeab52c4a9d28bdc4ef37e2bd235ff2db525071fe37a2e9af5" dependencies = [ "alloy-rlp", "arbitrary", @@ -338,9 +341,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4814d141ede360bb6cd1b4b064f1aab9de391e7c4d0d4d50ac89ea4bc1e25fbd" +checksum = "fe3189c8cf3c3e9185862ac0d0b2a9d6bf00e4395746c7ec36307a4db0d5d486" dependencies = [ "alloy-chains", "alloy-consensus", @@ -379,9 +382,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ba46eb69ddf7a9925b81f15229cb74658e6eebe5dd30a5b74e2cd040380573" +checksum = "51358f866bcb93b8440c08086557e415c455cdc8d63754fa339611a3e215b038" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -420,9 +423,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.5.4" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc2bd1e7403463a5f2c61e955bcc9d3072b63aa177442b0f9aa6a6d22a941e3" +checksum = "d847913cea3fcd64fb1fe1247fafe15aab4060a2d24e535bbffcaa4670de9a79" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -445,9 +448,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eea9bf1abdd506f985a53533f5ac01296bcd6102c5e139bbc5d40bc468d2c916" +checksum = "37ef8fd215cf81ddb0565815e1592a87377fa1e259db8ca4e683e6659fdf5c08" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -458,9 +461,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea02c25541fb19eaac4278aa5c41d2d7e0245898887e54a74bfc0f3103e99415" +checksum = "647f703e27edad1f9c97455a6434378ea70b4ca9ae95f5e1559acf354c69bc14" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -470,20 +473,21 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2382fc63fb0cf3e02818d547b80cb66cc49a31f8803d0c328402b2008bc13650" +checksum = "6805b1626b084c231b2ec70c05090d45ce914d22e47f6cd4e8426f43098bbdf1" dependencies = [ "alloy-primitives", + "alloy-rpc-types-eth", "alloy-serde", "serde", ] [[package]] name = "alloy-rpc-types-beacon" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45357a642081c8ce235c0ad990c4e9279f5f18a723545076b38cfcc05cc25234" +checksum = "7ed50d4f427bcb5bc561b3e6f45238158db6592deabcfbecb03c7ca9dadafe98" dependencies = [ "alloy-eips", "alloy-primitives", @@ -495,9 +499,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.5.4" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5afe3ab1038f90faf56304aa0adf1e6a8c9844615d8f83967f932f3a70390b1" +checksum = "e00a212581221f03d18c4239a1b985d695205a9518468a0b11ef64a143dd0724" dependencies = [ "alloy-primitives", "serde", @@ -505,9 +509,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "886d22d41992287a235af2f3af4299b5ced2bcafb81eb835572ad35747476946" +checksum = "7312fb85ef76428f8e20f50d1505494be9d081ffdb5cbf6a25c153c7b530994c" dependencies = [ "alloy-consensus", "alloy-eips", @@ -526,9 +530,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b034779a4850b4b03f5be5ea674a1cf7d746b2da762b34d1860ab45e48ca27" +checksum = "eba7afa617e7942ba5df88ca063a99e9f51e67df2de816fd52513e64926145a3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -547,9 +551,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3246948dfa5f5060a9abe04233d741ea656ef076b12958f3242416ce9f375058" +checksum = "8b479e525a57388821d05c99732b3f6195128d8b74c9372329287f5e0d47d0aa" dependencies = [ "alloy-eips", "alloy-primitives", @@ -560,9 +564,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e5fb6c5c401321f802f69dcdb95b932f30f8158f6798793f914baac5995628e" +checksum = "563105a7fb420d44bd30bfe043f5bba8b6fe78432d8da99f4148aa7226d90d69" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -574,9 +578,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.5.4" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ad066b49c3b1b5f64cdd2399177a19926a6a15db2dbf11e2098de621f9e7480" +checksum = "f0da9410a730ced6e30cd349e6d9f39bc9e37ca1bb58a39691e276d7a4061631" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -586,9 +590,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028e72eaa9703e4882344983cfe7636ce06d8cce104a78ea62fd19b46659efc4" +checksum = "5c9f13d8c9180dcced875f91f1876e428941cec151fc501637f68ad30d088d89" dependencies = [ "alloy-primitives", "arbitrary", @@ -598,9 +602,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "592c185d7100258c041afac51877660c7bf6213447999787197db4842f0e938e" +checksum = "796f951bcd6a00f9fb53265676eed9fab6feb37d1eb912b70fc2654be5e5a560" dependencies = [ "alloy-primitives", "async-trait", @@ -612,9 +616,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6614f02fc1d5b079b2a4a5320018317b506fd0a6d67c1fd5542a71201724986c" +checksum = "56e2a3fb629bbe89cfba73699a4be64d6dc3bd73691f2e43f2a35448294ffbf9" dependencies = [ "alloy-consensus", "alloy-network", @@ -630,9 +634,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841eabaa4710f719fddbc24c95d386eae313f07e6da4babc25830ee37945be0c" +checksum = "8a1b42ac8f45e2f49f4bcdd72cbfde0bb148f5481d403774ffa546e48b83efc1" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -644,9 +648,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "0.8.10" +version = "0.8.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6672337f19d837b9f7073c45853aeb528ed9f7dd6a4154ce683e9e5cb7794014" +checksum = "06318f1778e57f36333e850aa71bd1bb5e560c10279e236622faae0470c50412" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -662,9 +666,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dff37dd20bfb118b777c96eda83b2067f4226d2644c5cfa00187b3bc01770ba" +checksum = "eaebb9b0ad61a41345a22c9279975c0cdd231b97947b10d7aad1cf0a7181e4a5" dependencies = [ "const-hex", "dunce", @@ -677,9 +681,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b853d42292dbb159671a3edae3b2750277ff130f32b726fe07dc2b17aa6f2b5" +checksum = "12c71028bfbfec210e24106a542aad3def7caf1a70e2c05710e92a98481980d3" dependencies = [ "serde", "winnow", @@ -687,9 +691,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa828bb1b9a6dc52208fbb18084fb9ce2c30facc2bfda6a5d922349b4990354f" +checksum = "374d7fb042d68ddfe79ccb23359de3007f6d4d53c13f703b64fb0db422132111" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -700,9 +704,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be77579633ebbc1266ae6fd7694f75c408beb1aeb6865d0b18f22893c265a061" +checksum = "4d315ab988e06f6b12038a3d7811957da28a8b378ff0d084b0819ebae1746ead" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -720,9 +724,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "91fd1a5d0827939847983b46f2f79510361f901dc82f8e3c38ac7397af142c6e" +checksum = "166449a8d8867be0978d6fa85aa56b89a27988b09e57bfd1f3b9962a9c8d5bae" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -735,9 +739,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8073d1186bfeeb8fbdd1292b6f1a0731f3aed8e21e1463905abfae0b96a887a6" +checksum = "716b21dce8e7ea29a5d459ed4b6d29a13a71d2b766fd84ac15e68623662b5d87" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -754,9 +758,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f27837bb4a1d6c83a28231c94493e814882f0e9058648a97e908a5f3fc9fcf" +checksum = "b577974182a4e6d9b1a1ecd5a7fd48da9d239a1e214e2368d37b3179e86cd8c3" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -5270,9 +5274,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26c3b35b7b3e36d15e0563eebffe13c1d9ca16b7aaffcb6a64354633547e16b" +checksum = "ae4582945fa96ae0ed78babcac6e41f025460e30ed0c9781aaeedf878fc2b527" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5288,9 +5292,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccacc2efed3d60d98ea581bddb885df1c6c62a592e55de049cfefd94116112cd" +checksum = "d1ece4a037c56536d8b517d045cef9cc07364c578709c184d33817108309c31e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5302,23 +5306,24 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.5.2" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ff6fc0f94702ea0f4d8466bffdc990067ae6df9213465df9b7957f74f1e5461" +checksum = "05e9b64d15a7bf27a06c16eb286349aa2d4e3173260a8ab1fe73bd2c13c89769" dependencies = [ "alloy-consensus", "alloy-network", "alloy-primitives", "alloy-rpc-types-eth", + "alloy-signer", "op-alloy-consensus", "op-alloy-rpc-types", ] [[package]] name = "op-alloy-protocol" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5f8e6ec6b91c6aaeb20860b455a52fd8e300acfe5d534e96e9073a24f853e74" +checksum = "aa989d1ea8deced466b0edd7a447264b1f934fd740ab895d32b8544dcce3b151" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5336,9 +5341,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bae9bf91b620e1e2c2291562e5998bc1247bd8ada011773e1997b31a95de99" +checksum = "ca6e53039829ff0b3482d8dd02cb2de45d5c7b889023c7e4588a43ea7451664a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5347,6 +5352,7 @@ dependencies = [ "alloy-rpc-types-eth", "alloy-serde", "arbitrary", + "derive_more 1.0.0", "op-alloy-consensus", "serde", "serde_json", @@ -5354,10 +5360,11 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b52ee59c86537cff83e8c7f2a6aa287a94f3608bb40c06d442aafd0c2e807a4" +checksum = "283b19e1e7fef1ca9078df39f45a48609cacf856b7b441ed6cf19301ed162cca" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "alloy-serde", @@ -8442,6 +8449,7 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-genesis", + "alloy-network", "alloy-primitives", "alloy-rlp", "alloy-rpc-types", @@ -9344,9 +9352,9 @@ dependencies = [ [[package]] name = "revm" -version = "17.1.0" +version = "18.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "055bee6a81aaeee8c2389ae31f0d4de87f44df24f4444a1116f9755fd87a76ad" +checksum = "15689a3c6a8d14b647b4666f2e236ef47b5a5133cdfd423f545947986fff7013" dependencies = [ "auto_impl", "cfg-if", @@ -9359,9 +9367,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e29c662f7887f3b659d4b0fd234673419a8fcbeaa1ecc29bf7034c0a75cc8ea" +checksum = "747291a18ad6726a08dd73f8b6a6b3a844db582ecae2063ccf0a04880c44f482" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9378,9 +9386,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "13.0.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fac2034454f8bc69dc7d3c94cdb1b57559e27f5ef0518771f1787de543d7d6a1" +checksum = "74e3f11d0fed049a4a10f79820c59113a79b38aed4ebec786a79d5c667bfeb51" dependencies = [ "revm-primitives", "serde", @@ -9388,9 +9396,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "14.0.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a88c8c7c5f9b988a9e65fc0990c6ce859cdb74114db705bd118a96d22d08027" +checksum = "e381060af24b750069a2b2d2c54bba273d84e8f5f9e8026fc9262298e26cc336" dependencies = [ "aurora-engine-modexp", "blst", @@ -9408,9 +9416,9 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "13.0.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d11fa1e195b0bebaf3fb18596f314a13ba3a4cb1fdd16d3465934d812fd921e" +checksum = "3702f132bb484f4f0d0ca4f6fbde3c82cfd745041abbedd6eda67730e1868ef0" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10388,9 +10396,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "16320d4a2021ba1a32470b3759676114a918885e9800e68ad60f2c67969fba62" +checksum = "edf42e81491fb8871b74df3d222c64ae8cbc1269ea509fa768a3ed3e1b0ac8cb" dependencies = [ "paste", "proc-macro2", @@ -10522,18 +10530,18 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d171f59dbaa811dbbb1aee1e73db92ec2b122911a48e1390dfe327a821ddede" +checksum = "3b3c6efbfc763e64eb85c11c25320f0737cb7364c4b6336db90aa9ebe27a0bbd" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b08be0f17bd307950653ce45db00cd31200d82b624b36e181337d9c7d92765b5" +checksum = "b607164372e89797d78b8e23a6d67d5d1038c1c65efd52e1389ef8b77caba2a6" dependencies = [ "proc-macro2", "quote", @@ -11437,9 +11445,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.2.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7ed9d8b15c7fb594d72bfb4b5a276f3d2029333cd93a932f376f5937f6f80ee" +checksum = "bb4f099acbc1043cc752b91615b24b02d7f6fcd975bd781fed9f50b3c3e15bf7" dependencies = [ "futures", "js-sys", diff --git a/Cargo.toml b/Cargo.toml index 10d39b11b99..b8734afe664 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -418,60 +418,60 @@ reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } # revm -revm = { version = "17.0.0", features = ["std"], default-features = false } -revm-inspectors = "0.10.0" -revm-primitives = { version = "13.0.0", features = [ +revm = { version = "18.0.0", features = ["std"], default-features = false } +revm-inspectors = "0.11.0" +revm-primitives = { version = "14.0.0", features = [ "std", ], default-features = false } # eth alloy-chains = "0.1.32" -alloy-dyn-abi = "0.8.0" 
-alloy-primitives = { version = "0.8.9", default-features = false } +alloy-dyn-abi = "0.8.11" +alloy-primitives = { version = "0.8.11", default-features = false } alloy-rlp = "0.3.4" -alloy-sol-types = "0.8.0" +alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.5.4", default-features = false } -alloy-contract = { version = "0.5.4", default-features = false } -alloy-eips = { version = "0.5.4", default-features = false } -alloy-genesis = { version = "0.5.4", default-features = false } -alloy-json-rpc = { version = "0.5.4", default-features = false } -alloy-network = { version = "0.5.4", default-features = false } -alloy-network-primitives = { version = "0.5.4", default-features = false } -alloy-node-bindings = { version = "0.5.4", default-features = false } -alloy-provider = { version = "0.5.4", features = [ +alloy-consensus = { version = "0.6.0", default-features = false } +alloy-contract = { version = "0.6.0", default-features = false } +alloy-eips = { version = "0.6.0", default-features = false } +alloy-genesis = { version = "0.6.0", default-features = false } +alloy-json-rpc = { version = "0.6.0", default-features = false } +alloy-network = { version = "0.6.0", default-features = false } +alloy-network-primitives = { version = "0.6.0", default-features = false } +alloy-node-bindings = { version = "0.6.0", default-features = false } +alloy-provider = { version = "0.6.0", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.5.4", default-features = false } -alloy-rpc-client = { version = "0.5.4", default-features = false } -alloy-rpc-types = { version = "0.5.4", features = [ +alloy-pubsub = { version = "0.6.0", default-features = false } +alloy-rpc-client = { version = "0.6.0", default-features = false } +alloy-rpc-types = { version = "0.6.0", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.5.4", default-features = false } 
-alloy-rpc-types-anvil = { version = "0.5.4", default-features = false } -alloy-rpc-types-beacon = { version = "0.5.4", default-features = false } -alloy-rpc-types-debug = { version = "0.5.4", default-features = false } -alloy-rpc-types-engine = { version = "0.5.4", default-features = false } -alloy-rpc-types-eth = { version = "0.5.4", default-features = false } -alloy-rpc-types-mev = { version = "0.5.4", default-features = false } -alloy-rpc-types-trace = { version = "0.5.4", default-features = false } -alloy-rpc-types-txpool = { version = "0.5.4", default-features = false } -alloy-serde = { version = "0.5.4", default-features = false } -alloy-signer = { version = "0.5.4", default-features = false } -alloy-signer-local = { version = "0.5.4", default-features = false } -alloy-transport = { version = "0.5.4" } -alloy-transport-http = { version = "0.5.4", features = [ +alloy-rpc-types-admin = { version = "0.6.0", default-features = false } +alloy-rpc-types-anvil = { version = "0.6.0", default-features = false } +alloy-rpc-types-beacon = { version = "0.6.0", default-features = false } +alloy-rpc-types-debug = { version = "0.6.0", default-features = false } +alloy-rpc-types-engine = { version = "0.6.0", default-features = false } +alloy-rpc-types-eth = { version = "0.6.0", default-features = false } +alloy-rpc-types-mev = { version = "0.6.0", default-features = false } +alloy-rpc-types-trace = { version = "0.6.0", default-features = false } +alloy-rpc-types-txpool = { version = "0.6.0", default-features = false } +alloy-serde = { version = "0.6.0", default-features = false } +alloy-signer = { version = "0.6.0", default-features = false } +alloy-signer-local = { version = "0.6.0", default-features = false } +alloy-transport = { version = "0.6.0" } +alloy-transport-http = { version = "0.6.0", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.5.4", default-features = false } -alloy-transport-ws = { version = "0.5.4", 
default-features = false } +alloy-transport-ipc = { version = "0.6.0", default-features = false } +alloy-transport-ws = { version = "0.6.0", default-features = false } # op -op-alloy-rpc-types = "0.5" -op-alloy-rpc-types-engine = "0.5" -op-alloy-network = "0.5" -op-alloy-consensus = "0.5" +op-alloy-rpc-types = "0.6" +op-alloy-rpc-types-engine = "0.6" +op-alloy-network = "0.6" +op-alloy-consensus = "0.6" # misc aquamarine = "0.6" @@ -633,4 +633,4 @@ tracy-client = "0.17.3" #op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } #op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } #op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } \ No newline at end of file +#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } diff --git a/bin/reth-bench/src/bench/context.rs b/bin/reth-bench/src/bench/context.rs index 5f8936934c6..59533bc6e97 100644 --- a/bin/reth-bench/src/bench/context.rs +++ b/bin/reth-bench/src/bench/context.rs @@ -74,14 +74,17 @@ impl BenchContext { let first_block = match benchmark_mode { BenchMode::Continuous => { // fetch Latest block - block_provider.get_block_by_number(BlockNumberOrTag::Latest, true).await?.unwrap() + block_provider + .get_block_by_number(BlockNumberOrTag::Latest, true.into()) + .await? + .unwrap() } BenchMode::Range(ref mut range) => { match range.next() { Some(block_number) => { // fetch first block in range block_provider - .get_block_by_number(block_number.into(), true) + .get_block_by_number(block_number.into(), true.into()) .await? 
.unwrap() } diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index ca5359fb8c2..dd2f863e2c9 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -43,16 +43,17 @@ impl Command { let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { - let block_res = block_provider.get_block_by_number(next_block.into(), true).await; + let block_res = + block_provider.get_block_by_number(next_block.into(), true.into()).await; let block = block_res.unwrap().unwrap(); let block_hash = block.header.hash; - let block = Block::try_from(block.inner).unwrap().seal(block_hash); + let block = Block::try_from(block).unwrap().seal(block_hash); let head_block_hash = block.hash(); let safe_block_hash = block_provider - .get_block_by_number(block.number.saturating_sub(32).into(), false); + .get_block_by_number(block.number.saturating_sub(32).into(), false.into()); let finalized_block_hash = block_provider - .get_block_by_number(block.number.saturating_sub(64).into(), false); + .get_block_by_number(block.number.saturating_sub(64).into(), false.into()); let (safe, finalized) = tokio::join!(safe_block_hash, finalized_block_hash,); diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 85342d1af76..68b2f76527d 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -43,10 +43,11 @@ impl Command { let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { - let block_res = block_provider.get_block_by_number(next_block.into(), true).await; + let block_res = + block_provider.get_block_by_number(next_block.into(), true.into()).await; let block = block_res.unwrap().unwrap(); let block_hash = block.header.hash; - let block = 
Block::try_from(block.inner).unwrap().seal(block_hash); + let block = Block::try_from(block).unwrap().seal(block_hash); next_block += 1; sender.send(block).await.unwrap(); diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 1674081fe70..65e55d7d9f8 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1377,7 +1377,7 @@ mod tests { use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; use alloy_eips::eip1559::INITIAL_BASE_FEE; use alloy_genesis::{Genesis, GenesisAccount}; - use alloy_primitives::{keccak256, Address, Sealable, Signature, B256}; + use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, Sealable, B256}; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index d4dea07dcda..46a9e4d1572 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -277,7 +277,8 @@ mod tests { use alloy_consensus::{TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; use alloy_primitives::{ - hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, Parity, Sealable, Signature, U256, + hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, PrimitiveSignature as Signature, + Sealable, U256, }; use mockall::mock; use rand::Rng; @@ -403,7 +404,7 @@ mod tests { blob_versioned_hashes: std::iter::repeat_with(|| rng.gen()).take(num_blobs).collect(), }); - let signature = Signature::new(U256::default(), U256::default(), Parity::Parity(true)); + let signature = Signature::new(U256::default(), U256::default(), true); TransactionSigned::from_transaction_and_signature(request, signature) } diff --git a/crates/consensus/debug-client/src/client.rs 
b/crates/consensus/debug-client/src/client.rs index a6a59a6a380..b6993a41b90 100644 --- a/crates/consensus/debug-client/src/client.rs +++ b/crates/consensus/debug-client/src/client.rs @@ -1,4 +1,4 @@ -use alloy_consensus::TxEnvelope; +use alloy_consensus::Transaction; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_rpc_types::{Block, BlockTransactions}; @@ -184,18 +184,19 @@ pub fn block_to_execution_payload_v3(block: Block) -> ExecutionNewPayload { // https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#specification let versioned_hashes = transactions .iter() - .flat_map(|tx| tx.blob_versioned_hashes.clone().unwrap_or_default()) + .flat_map(|tx| tx.blob_versioned_hashes().unwrap_or_default()) + .copied() .collect(); let payload: ExecutionPayloadV3 = ExecutionPayloadV3 { payload_inner: ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { parent_hash: block.header.parent_hash, - fee_recipient: block.header.miner, + fee_recipient: block.header.beneficiary, state_root: block.header.state_root, receipts_root: block.header.receipts_root, logs_bloom: block.header.logs_bloom, - prev_randao: block.header.mix_hash.unwrap(), + prev_randao: block.header.mix_hash, block_number: block.header.number, gas_limit: block.header.gas_limit, gas_used: block.header.gas_used, @@ -205,15 +206,10 @@ pub fn block_to_execution_payload_v3(block: Block) -> ExecutionNewPayload { block_hash: block.header.hash, transactions: transactions .into_iter() - .map(|tx| { - let envelope: TxEnvelope = tx.try_into().unwrap(); - let mut buffer: Vec = vec![]; - envelope.encode_2718(&mut buffer); - buffer.into() - }) + .map(|tx| tx.inner.encoded_2718().into()) .collect(), }, - withdrawals: block.withdrawals.clone().unwrap_or_default(), + withdrawals: block.withdrawals.clone().unwrap_or_default().into_inner(), }, blob_gas_used: block.header.blob_gas_used.unwrap(), excess_blob_gas: block.header.excess_blob_gas.unwrap(), diff --git 
a/crates/consensus/debug-client/src/providers/rpc.rs b/crates/consensus/debug-client/src/providers/rpc.rs index a8cd15c105a..5312bd55b3f 100644 --- a/crates/consensus/debug-client/src/providers/rpc.rs +++ b/crates/consensus/debug-client/src/providers/rpc.rs @@ -30,9 +30,9 @@ impl BlockProvider for RpcBlockProvider { .expect("failed to subscribe on new blocks") .into_stream(); - while let Some(block) = stream.next().await { + while let Some(header) = stream.next().await { let full_block = ws_provider - .get_block_by_hash(block.header.hash, BlockTransactionsKind::Full) + .get_block_by_hash(header.hash, BlockTransactionsKind::Full) .await .expect("failed to get block") .expect("block not found"); @@ -49,7 +49,7 @@ impl BlockProvider for RpcBlockProvider { .await .expect("failed to create WS provider"); let block: Block = ws_provider - .get_block_by_number(BlockNumberOrTag::Number(block_number), true) + .get_block_by_number(BlockNumberOrTag::Number(block_number), true.into()) .await? .ok_or_else(|| eyre::eyre!("block not found by number {}", block_number))?; Ok(block) diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index 180b88bbd5a..5b2a6654fbb 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -157,7 +157,8 @@ async fn e2e_test_send_transactions() -> eyre::Result<()> { assert_eq!(second_provider.get_block_number().await?, 0); - let head = provider.get_block_by_number(Default::default(), false).await?.unwrap().header.hash; + let head = + provider.get_block_by_number(Default::default(), false.into()).await?.unwrap().header.hash; second_node.engine_api.update_forkchoice(head, head).await?; let start = std::time::Instant::now(); diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index 1f7ac32e048..94ae997eed6 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -69,7 +69,7 @@ async fn 
test_fee_history() -> eyre::Result<()> { let receipt = builder.get_receipt().await?; assert!(receipt.status()); - let block = provider.get_block_by_number(1.into(), false).await?.unwrap(); + let block = provider.get_block_by_number(1.into(), false.into()).await?.unwrap(); assert_eq!(block.header.gas_used as u128, receipt.gas_used,); assert_eq!(block.header.base_fee_per_gas.unwrap(), expected_first_base_fee as u64); @@ -89,7 +89,7 @@ async fn test_fee_history() -> eyre::Result<()> { let fee_history = provider.get_fee_history(block_count, latest_block.into(), &[]).await?; let mut prev_header = provider - .get_block_by_number((latest_block + 1 - block_count).into(), false) + .get_block_by_number((latest_block + 1 - block_count).into(), false.into()) .await? .unwrap() .header; @@ -101,7 +101,8 @@ async fn test_fee_history() -> eyre::Result<()> { chain_spec.base_fee_params_at_block(block), ); - let header = provider.get_block_by_number(block.into(), false).await?.unwrap().header; + let header = + provider.get_block_by_number(block.into(), false.into()).await?.unwrap().header; assert_eq!(header.base_fee_per_gas.unwrap(), expected_base_fee as u64); assert_eq!( diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index ce23dfe707f..c24fc45022f 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -114,7 +114,7 @@ mod tests { }; use alloy_consensus::TxLegacy; use alloy_eips::BlockHashOrNumber; - use alloy_primitives::{hex, Parity, Signature, TxKind, U256}; + use alloy_primitives::{hex, PrimitiveSignature as Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{Header, Transaction, TransactionSigned}; use std::str::FromStr; @@ -373,7 +373,7 @@ mod tests { }), Signature::new( U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(), 
U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(), - Parity::Parity(false), + false, ), ), TransactionSigned::from_transaction_and_signature(Transaction::Legacy(TxLegacy { @@ -387,7 +387,7 @@ mod tests { }), Signature::new( U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), - Parity::Parity(false), + false, ), ), ], @@ -446,7 +446,7 @@ mod tests { Signature::new( U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(), U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(), - Parity::Eip155(37), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -462,7 +462,7 @@ mod tests { Signature::new( U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), - Parity::Eip155(37), + false, ), ), ], diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index 7c66f657a1d..97d18001f13 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -78,7 +78,7 @@ impl FromIterator for PooledTransactions { mod tests { use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; use alloy_consensus::{TxEip1559, TxLegacy}; - use alloy_primitives::{hex, Parity, Signature, TxKind, U256}; + use alloy_primitives::{hex, PrimitiveSignature as Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_primitives::{PooledTransactionsElement, Transaction, TransactionSigned}; @@ -142,7 +142,7 @@ mod tests { "0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10", ) .unwrap(), - Parity::Parity(false), + 
false, ), ), TransactionSigned::from_transaction_and_signature( @@ -164,7 +164,7 @@ mod tests { "0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb", ) .unwrap(), - Parity::Parity(false), + false, ), ), ]; @@ -208,7 +208,7 @@ mod tests { "0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10", ) .unwrap(), - Parity::Eip155(37), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -230,7 +230,7 @@ mod tests { "0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb", ) .unwrap(), - Parity::Eip155(37), + false, ), ), ]; @@ -275,7 +275,7 @@ mod tests { "0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860", ) .unwrap(), - Parity::Eip155(44), + true, ), ), TransactionSigned::from_transaction_and_signature( @@ -299,7 +299,7 @@ mod tests { "0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469", ) .unwrap(), - Parity::Parity(true), + true, ), ), TransactionSigned::from_transaction_and_signature( @@ -321,7 +321,7 @@ mod tests { "0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88", ) .unwrap(), - Parity::Eip155(43), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -343,7 +343,7 @@ mod tests { "0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da", ) .unwrap(), - Parity::Eip155(43), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -365,7 +365,7 @@ mod tests { "0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18", ) .unwrap(), - Parity::Eip155(43), + false, ), ), ]; @@ -414,7 +414,7 @@ mod tests { "0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860", ) .unwrap(), - Parity::Parity(true), + true, ), ), TransactionSigned::from_transaction_and_signature( @@ -438,7 +438,7 @@ mod tests { "0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469", ) .unwrap(), - Parity::Parity(true), + true, ), ), TransactionSigned::from_transaction_and_signature( @@ -460,7 +460,7 @@ mod tests { 
"0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88", ) .unwrap(), - Parity::Parity(false), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -482,7 +482,7 @@ mod tests { "0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da", ) .unwrap(), - Parity::Parity(false), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -504,7 +504,7 @@ mod tests { "0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18", ) .unwrap(), - Parity::Parity(false), + false, ), ), ]; diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 29b62708eee..4d65e3f63ba 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Signature, B256}; +use alloy_primitives::{PrimitiveSignature as Signature, B256}; use reth_eth_wire::{GetPooledTransactions, PooledTransactions}; use reth_network::{ test_utils::{NetworkEventStream, Testnet}, diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 8c00302f7b4..58e46e3fb09 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use alloy_consensus::TxEip2930; -use alloy_primitives::{Bytes, Parity, Signature, TxKind, U256}; +use alloy_primitives::{Bytes, PrimitiveSignature as Signature, TxKind, U256}; use rand::Rng; use reth_eth_wire::HeadersDirection; use reth_network::{ @@ -31,7 +31,7 @@ pub fn rng_transaction(rng: &mut impl rand::RngCore) -> TransactionSigned { input: Bytes::from(vec![1, 2]), access_list: Default::default(), }); - let signature = Signature::new(U256::default(), U256::default(), Parity::Parity(true)); + let signature = Signature::new(U256::default(), U256::default(), true); TransactionSigned::from_transaction_and_signature(request, signature) } diff --git 
a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index f08a2b2eb96..2e2ee4a031a 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use alloy_consensus::TxLegacy; -use alloy_primitives::{Signature, U256}; +use alloy_primitives::{PrimitiveSignature as Signature, U256}; use futures::StreamExt; use rand::thread_rng; use reth_network::{test_utils::Testnet, NetworkEvent, NetworkEventListenerProvider}; diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 3248625d604..a60a9a22abc 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -20,7 +20,7 @@ mod op_sepolia; use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; use alloy_genesis::Genesis; -use alloy_primitives::{Bytes, Parity, Signature, B256, U256}; +use alloy_primitives::{Bytes, B256, U256}; pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; use derive_more::{Constructor, Deref, Display, From, Into}; @@ -256,12 +256,6 @@ pub fn decode_holocene_1559_params(extra_data: Bytes) -> Result<(u32, u32), Deco Ok((u32::from_be_bytes(denominator), u32::from_be_bytes(elasticity))) } -/// Returns the signature for the optimism deposit transactions, which don't include a -/// signature. 
-pub fn optimism_deposit_tx_signature() -> Signature { - Signature::new(U256::ZERO, U256::ZERO, Parity::Parity(false)) -} - impl EthChainSpec for OpChainSpec { fn chain(&self) -> alloy_chains::Chain { self.inner.chain() diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index d64f4bd5ea5..3702f13a47d 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -297,7 +297,9 @@ mod tests { use super::*; use crate::OpChainSpec; use alloy_consensus::TxEip1559; - use alloy_primitives::{b256, Address, Signature, StorageKey, StorageValue}; + use alloy_primitives::{ + b256, Address, PrimitiveSignature as Signature, StorageKey, StorageValue, + }; use op_alloy_consensus::TxDeposit; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index b1255a987e9..0edfeec7322 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -231,7 +231,7 @@ pub struct OpL1BlockInfo { mod tests { use crate::txpool::OpTransactionValidator; use alloy_eips::eip2718::Encodable2718; - use alloy_primitives::{Signature, TxKind, U256}; + use alloy_primitives::{PrimitiveSignature as Signature, TxKind, U256}; use op_alloy_consensus::TxDeposit; use reth_chainspec::MAINNET; use reth_primitives::{Transaction, TransactionSigned, TransactionSignedEcRecovered}; diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 6b5954391d9..b019ce0e97f 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,14 +1,13 @@ //! Loads and formats OP transaction RPC response. 
-use alloy_consensus::Transaction as _; +use alloy_consensus::Signed; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types::TransactionInfo; -use op_alloy_consensus::DepositTransaction; +use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; use reth_node_api::FullNodeComponents; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; -use reth_rpc::eth::EthTxBuilder; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, FromEthApiError, FullEthApiTypes, RpcNodeCore, TransactionCompat, @@ -83,14 +82,25 @@ where tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, ) -> Self::Transaction { - let signed_tx = tx.clone().into_signed(); - let hash = tx.hash; - - let mut inner = EthTxBuilder.fill(tx, tx_info); - - if signed_tx.is_deposit() { - inner.gas_price = Some(signed_tx.max_fee_per_gas()) - } + let from = tx.signer(); + let TransactionSigned { transaction, signature, hash } = tx.into_signed(); + + let inner = match transaction { + reth_primitives::Transaction::Legacy(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip2930(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip1559(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip4844(_) => unreachable!(), + reth_primitives::Transaction::Eip7702(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Deposit(tx) => OpTxEnvelope::Deposit(tx), + }; let deposit_receipt_version = self .inner @@ -100,22 +110,29 @@ where .flatten() .and_then(|receipt| receipt.deposit_receipt_version); + let TransactionInfo { block_hash, block_number, index: transaction_index, .. 
} = tx_info; + Transaction { - inner, - source_hash: signed_tx.source_hash(), - mint: signed_tx.mint(), - // only include is_system_tx if true: - is_system_tx: (signed_tx.is_deposit() && signed_tx.is_system_transaction()) - .then_some(true), + inner: alloy_rpc_types::Transaction { + inner, + block_hash, + block_number, + transaction_index, + from, + }, deposit_receipt_version, } } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - tx.inner.input = tx.inner.input.slice(..4); - } - - fn tx_type(tx: &Self::Transaction) -> u8 { - tx.inner.transaction_type.unwrap_or_default() + let input = match &mut tx.inner.inner { + OpTxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Eip2930(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Legacy(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Eip7702(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Deposit(tx) => &mut tx.input, + _ => return, + }; + *input = input.slice(..4); } } diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 7119a37e742..7552ece31f1 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,4 +1,5 @@ use super::Header; +use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; use alloy_primitives::{keccak256, BlockHash, Sealable}; #[cfg(any(test, feature = "test-utils"))] @@ -132,6 +133,12 @@ impl SealedHeader { } } +impl From> for Sealed { + fn from(value: SealedHeader) -> Self { + Self::new_unchecked(value.header, value.hash) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 555cc3851f8..c40403865df 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ 
-6,7 +6,7 @@ use reth_codecs::Compact; use alloy_consensus::Transaction; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; -use alloy_primitives::{keccak256, Address, Signature, TxHash, B256}; +use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, TxHash, B256}; use revm_primitives::TxEnv; /// Helper trait that unifies all behaviour required by block to support full node operations. diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 5e761f41fe2..04d96aa369a 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -22,6 +22,7 @@ reth-codecs = { workspace = true, optional = true } # ethereum alloy-consensus.workspace = true +alloy-network = { workspace = true, optional = true } alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-rpc-types = { workspace = true, optional = true } @@ -138,6 +139,7 @@ alloy-compat = [ "dep:alloy-rpc-types", "dep:alloy-serde", "dep:op-alloy-rpc-types", + "dep:alloy-network", ] test-utils = [ "reth-primitives-traits/test-utils", diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index d86bd04c7b9..462b27f9c73 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -1,26 +1,20 @@ //! Common conversions from alloy types. 
-use crate::{ - transaction::extract_chain_id, Block, BlockBody, Transaction, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, -}; +use crate::{Block, BlockBody, Transaction, TransactionSigned}; use alloc::{string::ToString, vec::Vec}; -use alloy_consensus::{ - constants::EMPTY_TRANSACTIONS, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxLegacy, -}; -use alloy_primitives::{Parity, Signature, TxKind}; -use alloy_rlp::Error as RlpError; +use alloy_consensus::{constants::EMPTY_TRANSACTIONS, Header, TxEnvelope}; +use alloy_network::{AnyHeader, AnyRpcBlock, AnyRpcTransaction, AnyTxEnvelope}; use alloy_serde::WithOtherFields; use op_alloy_rpc_types as _; -impl TryFrom>> for Block { +impl TryFrom for Block { type Error = alloy_rpc_types::ConversionError; - fn try_from( - block: alloy_rpc_types::Block>, - ) -> Result { + fn try_from(block: AnyRpcBlock) -> Result { use alloy_rpc_types::ConversionError; + let block = block.inner; + let transactions = { let transactions: Result, ConversionError> = match block .transactions @@ -35,241 +29,134 @@ impl TryFrom> for Transaction { +impl TryFrom for TransactionSigned { type Error = alloy_rpc_types::ConversionError; - fn try_from(tx: WithOtherFields) -> Result { - use alloy_eips::eip2718::Eip2718Error; + fn try_from(tx: AnyRpcTransaction) -> Result { use alloy_rpc_types::ConversionError; - #[cfg(feature = "optimism")] - let WithOtherFields { inner: tx, other } = tx; - #[cfg(not(feature = "optimism"))] let WithOtherFields { inner: tx, other: _ } = tx; - match tx.transaction_type.map(TryInto::try_into).transpose().map_err(|_| { - ConversionError::Eip2718Error(Eip2718Error::UnexpectedType( - tx.transaction_type.unwrap(), - )) - })? 
{ - None | Some(TxType::Legacy) => { - // legacy - if tx.max_fee_per_gas.is_some() || tx.max_priority_fee_per_gas.is_some() { - return Err(ConversionError::Eip2718Error( - RlpError::Custom("EIP-1559 fields are present in a legacy transaction") - .into(), - )) - } - - // extract the chain id if possible - let chain_id = match tx.chain_id { - Some(chain_id) => Some(chain_id), - None => { - if let Some(signature) = tx.signature { - // TODO: make this error conversion better. This is needed because - // sometimes rpc providers return legacy transactions without a chain id - // explicitly in the response, however those transactions may also have - // a chain id in the signature from eip155 - extract_chain_id(signature.v.to()) - .map_err(|err| ConversionError::Eip2718Error(err.into()))? - .1 - } else { - return Err(ConversionError::MissingChainId) - } - } - }; - - Ok(Self::Legacy(TxLegacy { - chain_id, - nonce: tx.nonce, - gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, - gas_limit: tx.gas, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - input: tx.input, - })) + let (transaction, signature, hash) = match tx.inner { + AnyTxEnvelope::Ethereum(TxEnvelope::Legacy(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Legacy(tx), signature, hash) } - Some(TxType::Eip2930) => { - // eip2930 - Ok(Self::Eip2930(TxEip2930 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - gas_limit: tx.gas, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - input: tx.input, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, - })) + AnyTxEnvelope::Ethereum(TxEnvelope::Eip2930(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip2930(tx), signature, hash) } - Some(TxType::Eip1559) => { - // EIP-1559 - Ok(Self::Eip1559(TxEip1559 { - chain_id: 
tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - .max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - .max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx.gas, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - input: tx.input, - })) + AnyTxEnvelope::Ethereum(TxEnvelope::Eip1559(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip1559(tx), signature, hash) } - Some(TxType::Eip4844) => { - // EIP-4844 - Ok(Self::Eip4844(TxEip4844 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - .max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - .max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx.gas, - to: tx.to.unwrap_or_default(), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - input: tx.input, - blob_versioned_hashes: tx - .blob_versioned_hashes - .ok_or(ConversionError::MissingBlobVersionedHashes)?, - max_fee_per_blob_gas: tx - .max_fee_per_blob_gas - .ok_or(ConversionError::MissingMaxFeePerBlobGas)?, - })) + AnyTxEnvelope::Ethereum(TxEnvelope::Eip4844(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip4844(tx.into()), signature, hash) } - Some(TxType::Eip7702) => { - // this is currently unsupported as it is not present in alloy due to missing rpc - // specs - Err(ConversionError::Custom("Unimplemented".to_string())) - /* - // EIP-7702 - Ok(Transaction::Eip7702(TxEip7702 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - .max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - 
.max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - authorization_list: tx - .authorization_list - .ok_or(ConversionError::MissingAuthorizationList)?, - input: tx.input, - }))*/ + AnyTxEnvelope::Ethereum(TxEnvelope::Eip7702(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip7702(tx), signature, hash) } #[cfg(feature = "optimism")] - Some(TxType::Deposit) => { - let fields = other - .deserialize_into::() - .map_err(|e| ConversionError::Custom(e.to_string()))?; - Ok(Self::Deposit(op_alloy_consensus::TxDeposit { - source_hash: fields - .source_hash - .ok_or_else(|| ConversionError::Custom("MissingSourceHash".to_string()))?, - from: tx.from, - to: TxKind::from(tx.to), - mint: fields.mint.filter(|n| *n != 0), - value: tx.value, - gas_limit: tx.gas, - is_system_transaction: fields.is_system_tx.unwrap_or(false), - input: tx.input, - })) - } - } - } -} + AnyTxEnvelope::Unknown(alloy_network::UnknownTxEnvelope { hash, inner }) => { + use alloy_consensus::Transaction as _; -impl TryFrom> for TransactionSigned { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: WithOtherFields) -> Result { - use alloy_rpc_types::ConversionError; - - let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; - let transaction: Transaction = tx.try_into()?; - let y_parity = if let Some(y_parity) = signature.y_parity { - y_parity.0 - } else { - match transaction.tx_type() { - // If the transaction type is Legacy, adjust the v component of the - // signature according to the Ethereum specification - TxType::Legacy => { - extract_chain_id(signature.v.to()) - .map_err(|_| ConversionError::InvalidSignature)? 
- .0 + if inner.ty() == crate::TxType::Deposit { + let fields: op_alloy_rpc_types::OpTransactionFields = inner + .fields + .clone() + .deserialize_into::() + .map_err(|e| ConversionError::Custom(e.to_string()))?; + ( + Transaction::Deposit(op_alloy_consensus::TxDeposit { + source_hash: fields.source_hash.ok_or_else(|| { + ConversionError::Custom("MissingSourceHash".to_string()) + })?, + from: tx.from, + to: revm_primitives::TxKind::from(inner.to()), + mint: fields.mint.filter(|n| *n != 0), + value: inner.value(), + gas_limit: inner.gas_limit(), + is_system_transaction: fields.is_system_tx.unwrap_or(false), + input: inner.input().clone(), + }), + op_alloy_consensus::TxDeposit::signature(), + hash, + ) + } else { + return Err(ConversionError::Custom("unknown transaction type".to_string())) } - _ => !signature.v.is_zero(), } + _ => return Err(ConversionError::Custom("unknown transaction type".to_string())), }; - let mut parity = Parity::Parity(y_parity); - - if matches!(transaction.tx_type(), TxType::Legacy) { - if let Some(chain_id) = transaction.chain_id() { - parity = parity.with_chain_id(chain_id) - } - } - - Ok(Self::from_transaction_and_signature( - transaction, - Signature::new(signature.r, signature.s, parity), - )) - } -} - -impl TryFrom> for TransactionSignedEcRecovered { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: WithOtherFields) -> Result { - use alloy_rpc_types::ConversionError; - - let transaction: TransactionSigned = tx.try_into()?; - - transaction.try_into_ecrecovered().map_err(|_| ConversionError::InvalidSignature) - } -} - -impl TryFrom> for TransactionSignedNoHash { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: WithOtherFields) -> Result { - Ok(Self { - signature: tx.signature.ok_or(Self::Error::MissingSignature)?.try_into()?, - transaction: tx.try_into()?, - }) + Ok(Self { transaction, signature, hash }) } } @@ -278,7 +165,7 @@ impl TryFrom> for TransactionSigne mod tests { use super::*; use 
alloy_primitives::{address, Address, B256, U256}; - use alloy_rpc_types::Transaction as AlloyTransaction; + use revm_primitives::TxKind; #[test] fn optimism_deposit_tx_conversion_no_mint() { @@ -302,10 +189,11 @@ mod tests { "v": "0x0", "value": "0x0" }"#; - let alloy_tx: WithOtherFields = + let alloy_tx: WithOtherFields> = serde_json::from_str(input).expect("failed to deserialize"); - let reth_tx: Transaction = alloy_tx.try_into().expect("failed to convert"); + let TransactionSigned { transaction: reth_tx, .. } = + alloy_tx.try_into().expect("failed to convert"); if let Transaction::Deposit(deposit_tx) = reth_tx { assert_eq!( deposit_tx.source_hash, @@ -352,10 +240,11 @@ mod tests { "v": "0x0", "value": "0x239c2e16a5ca590000" }"#; - let alloy_tx: WithOtherFields = + let alloy_tx: WithOtherFields> = serde_json::from_str(input).expect("failed to deserialize"); - let reth_tx: Transaction = alloy_tx.try_into().expect("failed to convert"); + let TransactionSigned { transaction: reth_tx, .. } = + alloy_tx.try_into().expect("failed to convert"); if let Transaction::Deposit(deposit_tx) = reth_tx { assert_eq!( diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 194ddf9c0ce..208474fc6c4 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -3,7 +3,8 @@ #[cfg(any(test, feature = "reth-codec"))] use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; use alloy_consensus::{ - SignableTransaction, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, + transaction::RlpEcdsaTx, SignableTransaction, Transaction as _, TxEip1559, TxEip2930, + TxEip4844, TxEip7702, TxLegacy, }; use alloy_eips::{ eip1898::BlockHashOrNumber, @@ -11,7 +12,9 @@ use alloy_eips::{ eip2930::AccessList, eip7702::SignedAuthorization, }; -use alloy_primitives::{keccak256, Address, Bytes, ChainId, Signature, TxHash, TxKind, B256, U256}; +use alloy_primitives::{ + keccak256, 
Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, +}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use core::mem; use derive_more::{AsRef, Deref}; @@ -22,7 +25,7 @@ use once_cell::sync::Lazy as LazyLock; use op_alloy_consensus::DepositTransaction; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; -use signature::{decode_with_eip155_chain_id, with_eip155_parity}; +use signature::decode_with_eip155_chain_id; #[cfg(feature = "std")] use std::sync::LazyLock; @@ -34,7 +37,7 @@ pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered pub use sidecar::BlobTransaction; pub use compat::FillTxEnv; -pub use signature::{extract_chain_id, legacy_parity, recover_signer, recover_signer_unchecked}; +pub use signature::{recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; pub use variant::TransactionSignedVariant; @@ -383,38 +386,37 @@ impl Transaction { /// This encodes the transaction _without_ the signature, and is only suitable for creating a /// hash intended for signing. 
- pub fn encode_without_signature(&self, out: &mut dyn bytes::BufMut) { - Encodable::encode(self, out); + pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + match self { + Self::Legacy(tx) => tx.encode_for_signing(out), + Self::Eip2930(tx) => tx.encode_for_signing(out), + Self::Eip1559(tx) => tx.encode_for_signing(out), + Self::Eip4844(tx) => tx.encode_for_signing(out), + Self::Eip7702(tx) => tx.encode_for_signing(out), + #[cfg(feature = "optimism")] + Self::Deposit(_) => {} + } } - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash that for eip2718 does not require rlp header - pub fn encode_with_signature( - &self, - signature: &Signature, - out: &mut dyn bytes::BufMut, - with_header: bool, - ) { + /// Produces EIP-2718 encoding of the transaction + pub fn eip2718_encode(&self, signature: &Signature, out: &mut dyn bytes::BufMut) { match self { Self::Legacy(legacy_tx) => { // do nothing w/ with_header - legacy_tx.encode_with_signature_fields( - &with_eip155_parity(signature, legacy_tx.chain_id), - out, - ) + legacy_tx.eip2718_encode(signature, out); } Self::Eip2930(access_list_tx) => { - access_list_tx.encode_with_signature(signature, out, with_header) + access_list_tx.eip2718_encode(signature, out); } Self::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encode_with_signature(signature, out, with_header) + dynamic_fee_tx.eip2718_encode(signature, out); } - Self::Eip4844(blob_tx) => blob_tx.encode_with_signature(signature, out, with_header), + Self::Eip4844(blob_tx) => blob_tx.eip2718_encode(signature, out), Self::Eip7702(set_code_tx) => { - set_code_tx.encode_with_signature(signature, out, with_header) + set_code_tx.eip2718_encode(signature, out); } #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.encode_inner(out, with_header), + Self::Deposit(deposit_tx) => deposit_tx.eip2718_encode(out), } } @@ -649,46 +651,6 @@ impl Default for Transaction { } } -impl Encodable for 
Transaction { - /// This encodes the transaction _without_ the signature, and is only suitable for creating a - /// hash intended for signing. - fn encode(&self, out: &mut dyn bytes::BufMut) { - match self { - Self::Legacy(legacy_tx) => { - legacy_tx.encode_for_signing(out); - } - Self::Eip2930(access_list_tx) => { - access_list_tx.encode_for_signing(out); - } - Self::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encode_for_signing(out); - } - Self::Eip4844(blob_tx) => { - blob_tx.encode_for_signing(out); - } - Self::Eip7702(set_code_tx) => { - set_code_tx.encode_for_signing(out); - } - #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => { - deposit_tx.encode_inner(out, true); - } - } - } - - fn length(&self) -> usize { - match self { - Self::Legacy(legacy_tx) => legacy_tx.payload_len_for_signature(), - Self::Eip2930(access_list_tx) => access_list_tx.payload_len_for_signature(), - Self::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.payload_len_for_signature(), - Self::Eip4844(blob_tx) => blob_tx.payload_len_for_signature(), - Self::Eip7702(set_code_tx) => set_code_tx.payload_len_for_signature(), - #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.encoded_len(true), - } - } -} - impl alloy_consensus::Transaction for Transaction { fn chain_id(&self) -> Option { match self { @@ -891,7 +853,7 @@ impl TransactionSignedNoHash { pub fn hash(&self) -> B256 { // pre-allocate buffer for the transaction let mut buf = Vec::with_capacity(128 + self.transaction.input().len()); - self.transaction.encode_with_signature(&self.signature, &mut buf, false); + self.transaction.eip2718_encode(&self.signature, &mut buf); keccak256(&buf) } @@ -925,7 +887,7 @@ impl TransactionSignedNoHash { /// This makes it possible to import pre bedrock transactions via the sender recovery stage. pub fn encode_and_recover_unchecked(&self, buffer: &mut Vec) -> Option
{ buffer.clear(); - self.transaction.encode_without_signature(buffer); + self.transaction.encode_for_signing(buffer); // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. @@ -1034,7 +996,7 @@ impl reth_codecs::Compact for TransactionSignedNoHash { let bitflags = buf.get_u8() as usize; let sig_bit = bitflags & 1; - let (mut signature, buf) = Signature::from_compact(buf, sig_bit); + let (signature, buf) = Signature::from_compact(buf, sig_bit); let zstd_bit = bitflags >> 3; let (transaction, buf) = if zstd_bit != 0 { @@ -1063,10 +1025,6 @@ impl reth_codecs::Compact for TransactionSignedNoHash { Transaction::from_compact(buf, transaction_type) }; - if matches!(transaction, Transaction::Legacy(_)) { - signature = signature.with_parity(legacy_parity(&signature, transaction.chain_id())) - } - (Self { signature, transaction }, buf) } } @@ -1581,28 +1539,24 @@ impl Encodable2718 for TransactionSigned { fn encode_2718_len(&self) -> usize { match &self.transaction { - Transaction::Legacy(legacy_tx) => legacy_tx.encoded_len_with_signature( - &with_eip155_parity(&self.signature, legacy_tx.chain_id), - ), + Transaction::Legacy(legacy_tx) => legacy_tx.eip2718_encoded_length(&self.signature), Transaction::Eip2930(access_list_tx) => { - access_list_tx.encoded_len_with_signature(&self.signature, false) + access_list_tx.eip2718_encoded_length(&self.signature) } Transaction::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encoded_len_with_signature(&self.signature, false) - } - Transaction::Eip4844(blob_tx) => { - blob_tx.encoded_len_with_signature(&self.signature, false) + dynamic_fee_tx.eip2718_encoded_length(&self.signature) } + Transaction::Eip4844(blob_tx) => blob_tx.eip2718_encoded_length(&self.signature), Transaction::Eip7702(set_code_tx) => { - set_code_tx.encoded_len_with_signature(&self.signature, false) + set_code_tx.eip2718_encoded_length(&self.signature) } #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => 
deposit_tx.encoded_len(false), + Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), } } fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - self.transaction.encode_with_signature(&self.signature, out, false) + self.transaction.eip2718_encode(&self.signature, out) } } @@ -1611,24 +1565,24 @@ impl Decodable2718 for TransactionSigned { match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? { TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), TxType::Eip2930 => { - let (tx, signature, hash) = TxEip2930::decode_signed_fields(buf)?.into_parts(); + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) } TxType::Eip1559 => { - let (tx, signature, hash) = TxEip1559::decode_signed_fields(buf)?.into_parts(); + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) } TxType::Eip7702 => { - let (tx, signature, hash) = TxEip7702::decode_signed_fields(buf)?.into_parts(); + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) } TxType::Eip4844 => { - let (tx, signature, hash) = TxEip4844::decode_signed_fields(buf)?.into_parts(); + let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) } #[cfg(feature = "optimism")] TxType::Deposit => Ok(Self::from_transaction_and_signature( - Transaction::Deposit(TxDeposit::decode(buf)?), + Transaction::Deposit(TxDeposit::rlp_decode(buf)?), TxDeposit::signature(), )), } @@ -1647,22 +1601,12 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { let secp = secp256k1::Secp256k1::new(); let key_pair = secp256k1::Keypair::new(&secp, &mut rand::thread_rng()); - let mut signature = crate::sign_message( + let signature = crate::sign_message( 
B256::from_slice(&key_pair.secret_bytes()[..]), transaction.signature_hash(), ) .unwrap(); - signature = if matches!(transaction, Transaction::Legacy(_)) { - if let Some(chain_id) = transaction.chain_id() { - signature.with_chain_id(chain_id) - } else { - signature.with_parity(alloy_primitives::Parity::NonEip155(bool::arbitrary(u)?)) - } - } else { - signature.with_parity_bool() - }; - #[cfg(feature = "optimism")] // Both `Some(0)` and `None` values are encoded as empty string byte. This introduces // ambiguity in roundtrip tests. Patch the mint value of deposit transaction here, so that @@ -1810,7 +1754,7 @@ pub mod serde_bincode_compat { transaction::serde_bincode_compat::{TxEip1559, TxEip2930, TxEip7702, TxLegacy}, TxEip4844, }; - use alloy_primitives::{Signature, TxHash}; + use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; #[cfg(feature = "optimism")] use op_alloy_consensus::serde_bincode_compat::TxDeposit; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -2018,7 +1962,7 @@ mod tests { use alloy_consensus::Transaction as _; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{ - address, b256, bytes, hex, Address, Bytes, Parity, Signature, B256, U256, + address, b256, bytes, hex, Address, Bytes, PrimitiveSignature as Signature, B256, U256, }; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_chainspec::MIN_TRANSACTION_GAS; @@ -2127,7 +2071,7 @@ mod tests { .unwrap(), U256::from_str("0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18") .unwrap(), - Parity::Eip155(43), + false, ); let hash = b256!("a517b206d2223278f860ea017d3626cacad4f52ff51030dc9a96b432f17f8d34"); test_decode_and_encode(&bytes, transaction, signature, Some(hash)); @@ -2147,7 +2091,7 @@ mod tests { .unwrap(), U256::from_str("0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da") .unwrap(), - Parity::Eip155(43), + false, ); test_decode_and_encode(&bytes, transaction, signature, None); @@ 
-2166,7 +2110,7 @@ mod tests { .unwrap(), U256::from_str("0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88") .unwrap(), - Parity::Eip155(43), + false, ); test_decode_and_encode(&bytes, transaction, signature, None); @@ -2187,7 +2131,7 @@ mod tests { .unwrap(), U256::from_str("0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469") .unwrap(), - Parity::Parity(true), + true, ); test_decode_and_encode(&bytes, transaction, signature, None); @@ -2206,7 +2150,7 @@ mod tests { .unwrap(), U256::from_str("0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860") .unwrap(), - Parity::Eip155(44), + true, ); test_decode_and_encode(&bytes, transaction, signature, None); } @@ -2370,7 +2314,7 @@ mod tests { .unwrap(), U256::from_str("0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18") .unwrap(), - Parity::Eip155(43), + false, ); let inputs: Vec> = vec![ diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 2d62bb3e685..0d48dd5a443 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,21 +1,17 @@ //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a //! response to `GetPooledTransactions`. 
-use super::{ - error::TransactionConversionError, - signature::{recover_signer, with_eip155_parity}, - TxEip7702, -}; +use super::{error::TransactionConversionError, signature::recover_signer, TxEip7702}; use crate::{BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered}; use alloy_eips::eip4844::BlobTransactionSidecar; use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, - transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, + transaction::{RlpEcdsaTx, TxEip1559, TxEip2930, TxEip4844, TxLegacy}, SignableTransaction, TxEip4844WithSidecar, }; use alloy_eips::eip2718::{Decodable2718, Eip2718Result, Encodable2718}; -use alloy_primitives::{Address, Signature, TxHash, B256}; +use alloy_primitives::{Address, PrimitiveSignature as Signature, TxHash, B256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; use derive_more::{AsRef, Deref}; @@ -402,59 +398,39 @@ impl Encodable2718 for PooledTransactionsElement { fn encode_2718_len(&self) -> usize { match self { Self::Legacy { transaction, signature, .. } => { - // method computes the payload len with a RLP header - transaction.encoded_len_with_signature(&with_eip155_parity( - signature, - transaction.chain_id, - )) + transaction.eip2718_encoded_length(signature) } Self::Eip2930 { transaction, signature, .. } => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) + transaction.eip2718_encoded_length(signature) } Self::Eip1559 { transaction, signature, .. } => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) + transaction.eip2718_encoded_length(signature) } Self::Eip7702 { transaction, signature, .. 
} => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) + transaction.eip2718_encoded_length(signature) } - Self::BlobTransaction(blob_tx) => { - // the encoding does not use a header, so we set `with_header` to false - blob_tx.payload_len_with_type(false) + Self::BlobTransaction(BlobTransaction { transaction, signature, .. }) => { + transaction.eip2718_encoded_length(signature) } } } fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - // The encoding of `tx-data` depends on the transaction type. Refer to these docs for more - // information on the exact format: - // - Legacy: TxLegacy::encode_with_signature - // - EIP-2930: TxEip2930::encode_with_signature - // - EIP-1559: TxEip1559::encode_with_signature - // - EIP-4844: BlobTransaction::encode_with_type_inner - // - EIP-7702: TxEip7702::encode_with_signature match self { - Self::Legacy { transaction, signature, .. } => transaction - .encode_with_signature_fields( - &with_eip155_parity(signature, transaction.chain_id), - out, - ), + Self::Legacy { transaction, signature, .. } => { + transaction.eip2718_encode(signature, out) + } Self::Eip2930 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) + transaction.eip2718_encode(signature, out) } Self::Eip1559 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) + transaction.eip2718_encode(signature, out) } Self::Eip7702 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) + transaction.eip2718_encode(signature, out) } - Self::BlobTransaction(blob_tx) => { - // The inner encoding is used with `with_header` set to true, making the final - // encoding: - // `tx_type || rlp([transaction_payload_body, blobs, commitments, proofs]))` - blob_tx.encode_with_type_inner(out, false); + Self::BlobTransaction(BlobTransaction { transaction, signature, .. 
}) => { + transaction.eip2718_encode(signature, out) } } } diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 5bd647d5393..48a02f4e740 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,10 +1,9 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] use crate::{Transaction, TransactionSigned}; -use alloy_consensus::{constants::EIP4844_TX_TYPE_ID, TxEip4844WithSidecar}; +use alloy_consensus::{transaction::RlpEcdsaTx, TxEip4844WithSidecar}; use alloy_eips::eip4844::BlobTransactionSidecar; -use alloy_primitives::{Signature, TxHash}; -use alloy_rlp::Header; +use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their @@ -69,107 +68,6 @@ impl BlobTransaction { (transaction, self.transaction.sidecar) } - /// Encodes the [`BlobTransaction`] fields as RLP, with a tx type. If `with_header` is `false`, - /// the following will be encoded: - /// `tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs])` - /// - /// If `with_header` is `true`, the following will be encoded: - /// `rlp(tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs]))` - /// - /// NOTE: The header will be a byte string header, not a list header. 
- pub(crate) fn encode_with_type_inner(&self, out: &mut dyn bytes::BufMut, with_header: bool) { - // Calculate the length of: - // `tx_type || rlp([transaction_payload_body, blobs, commitments, proofs])` - // - // to construct and encode the string header - if with_header { - Header { - list: false, - // add one for the tx type - payload_length: 1 + self.payload_len(), - } - .encode(out); - } - - out.put_u8(EIP4844_TX_TYPE_ID); - - // Now we encode the inner blob transaction: - self.encode_inner(out); - } - - /// Encodes the [`BlobTransaction`] fields as RLP, with the following format: - /// `rlp([transaction_payload_body, blobs, commitments, proofs])` - /// - /// where `transaction_payload_body` is a list: - /// `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]` - /// - /// Note: this should be used only when implementing other RLP encoding methods, and does not - /// represent the full RLP encoding of the blob transaction. - pub(crate) fn encode_inner(&self, out: &mut dyn bytes::BufMut) { - self.transaction.encode_with_signature_fields(&self.signature, out); - } - - /// Outputs the length of the RLP encoding of the blob transaction, including the tx type byte, - /// optionally including the length of a wrapping string header. 
If `with_header` is `false`, - /// the length of the following will be calculated: - /// `tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs])` - /// - /// If `with_header` is `true`, the length of the following will be calculated: - /// `rlp(tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs]))` - pub(crate) fn payload_len_with_type(&self, with_header: bool) -> usize { - if with_header { - // Construct a header and use that to calculate the total length - let wrapped_header = Header { - list: false, - // add one for the tx type byte - payload_length: 1 + self.payload_len(), - }; - - // The total length is now the length of the header plus the length of the payload - // (which includes the tx type byte) - wrapped_header.length() + wrapped_header.payload_length - } else { - // Just add the length of the tx type to the payload length - 1 + self.payload_len() - } - } - - /// Outputs the length of the RLP encoding of the blob transaction with the following format: - /// `rlp([transaction_payload_body, blobs, commitments, proofs])` - /// - /// where `transaction_payload_body` is a list: - /// `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]` - /// - /// Note: this should be used only when implementing other RLP encoding length methods, and - /// does not represent the full RLP encoding of the blob transaction. - pub(crate) fn payload_len(&self) -> usize { - // The `transaction_payload_body` length is the length of the fields, plus the length of - // its list header. - let tx_header = Header { - list: true, - payload_length: self.transaction.tx.fields_len() + self.signature.rlp_vrs_len(), - }; - - let tx_length = tx_header.length() + tx_header.payload_length; - - // The payload length is the length of the `tranascation_payload_body` list, plus the - // length of the blobs, commitments, and proofs. 
- let payload_length = tx_length + self.transaction.sidecar.rlp_encoded_fields_length(); - - // We use the calculated payload len to construct the first list header, which encompasses - // everything in the tx - the length of the second, inner list header is part of - // payload_length - let blob_tx_header = Header { list: true, payload_length }; - - // The final length is the length of: - // * the outer blob tx header + - // * the inner tx header + - // * the inner tx fields + - // * the signature fields + - // * the sidecar fields - blob_tx_header.length() + blob_tx_header.payload_length - } - /// Decodes a [`BlobTransaction`] from RLP. This expects the encoding to be: /// `rlp([transaction_payload_body, blobs, commitments, proofs])` /// @@ -180,7 +78,7 @@ impl BlobTransaction { /// represent the full RLP decoding of the `PooledTransactionsElement` type. pub(crate) fn decode_inner(data: &mut &[u8]) -> alloy_rlp::Result { let (transaction, signature, hash) = - TxEip4844WithSidecar::decode_signed_fields(data)?.into_parts(); + TxEip4844WithSidecar::rlp_decode_signed(data)?.into_parts(); Ok(Self { transaction, hash, signature }) } diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index b73206e6e77..ef4fab0fccb 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -1,6 +1,7 @@ use crate::transaction::util::secp256k1; -use alloy_primitives::{Address, Parity, Signature, B256, U256}; -use alloy_rlp::{Decodable, Error as RlpError}; +use alloy_consensus::transaction::from_eip155_value; +use alloy_primitives::{Address, PrimitiveSignature as Signature, B256, U256}; +use alloy_rlp::Decodable; /// The order of the secp256k1 curve, divided by two. Signatures that should be checked according /// to EIP-2 should have an S value less than or equal to this. 
@@ -14,25 +15,23 @@ const SECP256K1N_HALF: U256 = U256::from_be_bytes([ pub(crate) fn decode_with_eip155_chain_id( buf: &mut &[u8], ) -> alloy_rlp::Result<(Signature, Option)> { - let v: Parity = Decodable::decode(buf)?; + let v: u64 = Decodable::decode(buf)?; let r: U256 = Decodable::decode(buf)?; let s: U256 = Decodable::decode(buf)?; - #[cfg(not(feature = "optimism"))] - if matches!(v, Parity::Parity(_)) { - return Err(alloy_rlp::Error::Custom("invalid parity for legacy transaction")); - } - - #[cfg(feature = "optimism")] - // pre bedrock system transactions were sent from the zero address as legacy - // transactions with an empty signature - // - // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if matches!(v, Parity::Parity(false)) && r.is_zero() && s.is_zero() { - return Ok((Signature::new(r, s, Parity::Parity(false)), None)) - } + let Some((parity, chain_id)) = from_eip155_value(v) else { + // pre bedrock system transactions were sent from the zero address as legacy + // transactions with an empty signature + // + // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock + #[cfg(feature = "optimism")] + if v == 0 && r.is_zero() && s.is_zero() { + return Ok((Signature::new(r, s, false), None)) + } + return Err(alloy_rlp::Error::Custom("invalid parity for legacy transaction")) + }; - Ok((Signature::new(r, s, v), v.chain_id())) + Ok((Signature::new(r, s, parity), chain_id)) } /// Recover signer from message hash, _without ensuring that the signature has a low `s` @@ -46,7 +45,7 @@ pub fn recover_signer_unchecked(signature: &Signature, hash: B256) -> Option()); sig[32..64].copy_from_slice(&signature.s().to_be_bytes::<32>()); - sig[64] = signature.v().y_parity_byte(); + sig[64] = signature.v() as u8; // NOTE: we are removing error from underlying crypto library as it will restrain primitive // errors and we care only if recovery is passing or not. 
@@ -66,68 +65,15 @@ pub fn recover_signer(signature: &Signature, hash: B256) -> Option
{ recover_signer_unchecked(signature, hash) } -/// Returns [Parity] value based on `chain_id` for legacy transaction signature. -#[allow(clippy::missing_const_for_fn)] -pub fn legacy_parity(signature: &Signature, chain_id: Option) -> Parity { - if let Some(chain_id) = chain_id { - Parity::Parity(signature.v().y_parity()).with_chain_id(chain_id) - } else { - #[cfg(feature = "optimism")] - // pre bedrock system transactions were sent from the zero address as legacy - // transactions with an empty signature - // - // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if *signature == op_alloy_consensus::TxDeposit::signature() { - return Parity::Parity(false) - } - Parity::NonEip155(signature.v().y_parity()) - } -} - -/// Returns a signature with the given chain ID applied to the `v` value. -pub(crate) fn with_eip155_parity(signature: &Signature, chain_id: Option) -> Signature { - Signature::new(signature.r(), signature.s(), legacy_parity(signature, chain_id)) -} - -/// Outputs (`odd_y_parity`, `chain_id`) from the `v` value. -/// This doesn't check validity of the `v` value for optimism. 
-#[inline] -pub const fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option)> { - if v < 35 { - // non-EIP-155 legacy scheme, v = 27 for even y-parity, v = 28 for odd y-parity - if v != 27 && v != 28 { - return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) - } - Ok((v == 28, None)) - } else { - // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 - let odd_y_parity = ((v - 35) % 2) != 0; - let chain_id = (v - 35) >> 1; - Ok((odd_y_parity, Some(chain_id))) - } -} - #[cfg(test)] mod tests { use crate::transaction::signature::{ - legacy_parity, recover_signer, recover_signer_unchecked, SECP256K1N_HALF, + recover_signer, recover_signer_unchecked, SECP256K1N_HALF, }; use alloy_eips::eip2718::Decodable2718; - use alloy_primitives::{hex, Address, Parity, Signature, B256, U256}; + use alloy_primitives::{hex, Address, PrimitiveSignature as Signature, B256, U256}; use std::str::FromStr; - #[test] - fn test_legacy_parity() { - // Select 1 as an arbitrary nonzero value for R and S, as v() always returns 0 for (0, 0). 
- let signature = Signature::new(U256::from(1), U256::from(1), Parity::Parity(false)); - assert_eq!(Parity::NonEip155(false), legacy_parity(&signature, None)); - assert_eq!(Parity::Eip155(37), legacy_parity(&signature, Some(1))); - - let signature = Signature::new(U256::from(1), U256::from(1), Parity::Parity(true)); - assert_eq!(Parity::NonEip155(true), legacy_parity(&signature, None)); - assert_eq!(Parity::Eip155(38), legacy_parity(&signature, Some(1))); - } - #[test] fn test_recover_signer() { let signature = Signature::new( @@ -139,7 +85,7 @@ mod tests { "46948507304638947509940763649030358759909902576025900602547168820602576006531", ) .unwrap(), - Parity::Parity(false), + false, ); let hash = B256::from_str("daf5a779ae972f972197303d7b574746c7ef83eadac0f2791ad23db92e4c8e53") diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index ff2c2e0dab5..7964cc1c5f0 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Address, Signature}; +use alloy_primitives::{Address, PrimitiveSignature as Signature}; #[cfg(feature = "secp256k1")] pub(crate) mod secp256k1 { @@ -18,7 +18,7 @@ mod impl_secp256k1 { ecdsa::{RecoverableSignature, RecoveryId}, Message, PublicKey, SecretKey, SECP256K1, }; - use alloy_primitives::{keccak256, Parity, B256, U256}; + use alloy_primitives::{keccak256, B256, U256}; /// Recovers the address of the sender using secp256k1 pubkey recovery. 
/// @@ -44,7 +44,7 @@ mod impl_secp256k1 { let signature = Signature::new( U256::try_from_be_slice(&data[..32]).expect("The slice has at most 32 bytes"), U256::try_from_be_slice(&data[32..64]).expect("The slice has at most 32 bytes"), - Parity::Parity(rec_id.to_i32() != 0), + rec_id.to_i32() != 0, ); Ok(signature) } @@ -62,7 +62,7 @@ mod impl_secp256k1 { #[cfg_attr(feature = "secp256k1", allow(unused, unreachable_pub))] mod impl_k256 { use super::*; - use alloy_primitives::{keccak256, Parity, B256, U256}; + use alloy_primitives::{keccak256, B256}; pub(crate) use k256::ecdsa::Error; use k256::ecdsa::{RecoveryId, SigningKey, VerifyingKey}; @@ -92,15 +92,7 @@ mod impl_k256 { /// Returns the corresponding signature. pub fn sign_message(secret: B256, message: B256) -> Result { let sec = SigningKey::from_slice(secret.as_ref())?; - let (sig, rec_id) = sec.sign_prehash_recoverable(&message.0)?; - let (r, s) = sig.split_bytes(); - - let signature = Signature::new( - U256::try_from_be_slice(&r).expect("The slice has at most 32 bytes"), - U256::try_from_be_slice(&s).expect("The slice has at most 32 bytes"), - Parity::Parity(rec_id.is_y_odd()), - ); - Ok(signature) + sec.sign_prehash_recoverable(&message.0).map(Into::into) } /// Converts a public key into an ethereum address by hashing the encoded public key with @@ -131,7 +123,7 @@ mod tests { let mut sig: [u8; 65] = [0; 65]; sig[0..32].copy_from_slice(&signature.r().to_be_bytes::<32>()); sig[32..64].copy_from_slice(&signature.s().to_be_bytes::<32>()); - sig[64] = signature.v().y_parity_byte(); + sig[64] = signature.v() as u8; assert_eq!(recover_signer_unchecked(&sig, &hash), Ok(signer)); } @@ -189,14 +181,14 @@ mod tests { sig[0..32].copy_from_slice(&secp256k1_signature.r().to_be_bytes::<32>()); sig[32..64].copy_from_slice(&secp256k1_signature.s().to_be_bytes::<32>()); - sig[64] = secp256k1_signature.v().y_parity_byte(); + sig[64] = secp256k1_signature.v() as u8; let secp256k1_recovered = 
impl_secp256k1::recover_signer_unchecked(&sig, &hash).expect("secp256k1 recover"); assert_eq!(secp256k1_recovered, secp256k1_signer); sig[0..32].copy_from_slice(&k256_signature.r().to_be_bytes::<32>()); sig[32..64].copy_from_slice(&k256_signature.s().to_be_bytes::<32>()); - sig[64] = k256_signature.v().y_parity_byte(); + sig[64] = k256_signature.v() as u8; let k256_recovered = impl_k256::recover_signer_unchecked(&sig, &hash).expect("k256 recover"); assert_eq!(k256_recovered, k256_signer); diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index bb8fd08ed87..a9794af004a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -3,11 +3,11 @@ use std::sync::Arc; use alloy_eips::BlockId; -use alloy_rpc_types::{Header, Index}; +use alloy_rpc_types::{Block, Header, Index}; use futures::Future; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; -use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; +use reth_rpc_types_compat::block::from_block; use crate::{node::RpcNodeCoreExt, FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; @@ -189,7 +189,7 @@ pub trait EthBlocks: LoadBlock { } .unwrap_or_default(); - Ok(uncles.into_iter().nth(index.into()).map(uncle_block_from_header)) + Ok(uncles.into_iter().nth(index.into()).map(Block::uncle_from_header)) } } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index b90577c1486..10148fbe78b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -5,6 +5,7 @@ use crate::{ AsEthApiError, FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcBlock, RpcNodeCore, }; +use alloy_consensus::BlockHeader; use alloy_eips::{eip1559::calc_next_block_base_fee, eip2930::AccessListResult}; use 
alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use alloy_rpc_types::{ @@ -125,9 +126,9 @@ pub trait EthCall: Call + LoadPendingBlock { let base_fee = if let Some(latest) = blocks.last() { let header = &latest.inner.header; calc_next_block_base_fee( - header.gas_used, - header.gas_limit, - header.base_fee_per_gas.unwrap_or_default(), + header.gas_used(), + header.gas_limit(), + header.base_fee_per_gas().unwrap_or_default(), base_fee_params, ) } else { @@ -192,19 +193,20 @@ pub trait EthCall: Call + LoadPendingBlock { results.push((env.tx.caller, res.result)); } - let block = simulate::build_block( - results, - transactions, - &block_env, - parent_hash, - total_difficulty, - return_full_transactions, - &db, - this.tx_resp_builder(), - )?; + let block: SimulatedBlock> = + simulate::build_block( + results, + transactions, + &block_env, + parent_hash, + total_difficulty, + return_full_transactions, + &db, + this.tx_resp_builder(), + )?; parent_hash = block.inner.header.hash; - gas_used += block.inner.header.gas_used; + gas_used += block.inner.header.gas_used(); blocks.push(block); } diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index 36e9277400f..dc8beab38a0 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -1,7 +1,7 @@ //! An abstraction over ethereum signers. 
use alloy_dyn_abi::TypedData; -use alloy_primitives::{Address, Signature}; +use alloy_primitives::{Address, PrimitiveSignature as Signature}; use alloy_rpc_types_eth::TransactionRequest; use dyn_clone::DynClone; use reth_primitives::TransactionSigned; diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 702572064c5..f3796b4b9bb 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -74,7 +74,7 @@ pub trait EthState: LoadState + SpawnBlocking { self.spawn_blocking_io(move |this| { Ok(B256::new( this.state_at_block_id_or_latest(block_id)? - .storage(address, index.0) + .storage(address, index.as_b256()) .map_err(Self::Error::from_eth_err)? .unwrap_or_default() .to_be_bytes(), @@ -118,7 +118,7 @@ pub trait EthState: LoadState + SpawnBlocking { self.spawn_blocking_io(move |this| { let state = this.state_at_block_id(block_id)?; - let storage_keys = keys.iter().map(|key| key.0).collect::>(); + let storage_keys = keys.iter().map(|key| key.as_b256()).collect::>(); let proof = state .proof(Default::default(), address, &storage_keys) .map_err(Self::Error::from_eth_err)?; diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 1d176dd1e86..620f4523d21 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -1,6 +1,9 @@ //! Trait for specifying `eth` network dependent API types. -use std::{error::Error, fmt}; +use std::{ + error::Error, + fmt::{self}, +}; use alloy_network::Network; use alloy_rpc_types::Block; diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 20952413c13..d881b854a79 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,7 +1,7 @@ //! 
Utilities for serving `eth_simulateV1` use alloy_consensus::{Transaction as _, TxEip4844Variant, TxType, TypedTransaction}; -use alloy_primitives::{Parity, Signature}; +use alloy_primitives::PrimitiveSignature as Signature; use alloy_rpc_types::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, Block, BlockTransactionsKind, @@ -133,8 +133,7 @@ where }; // Create an empty signature for the transaction. - let signature = - Signature::new(Default::default(), Default::default(), Parity::Parity(false)); + let signature = Signature::new(Default::default(), Default::default(), false); let tx = match tx { TypedTransaction::Legacy(tx) => { @@ -170,7 +169,7 @@ where } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. -#[expect(clippy::too_many_arguments)] +#[expect(clippy::complexity)] pub fn build_block( results: Vec<(Address, ExecutionResult)>, transactions: Vec, @@ -306,6 +305,6 @@ pub fn build_block( let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; - let block = from_block(block, total_difficulty, txs_kind, None, tx_resp_builder)?; + let block = from_block::(block, total_difficulty, txs_kind, None, tx_resp_builder)?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index a954e05e4f6..3b297ba0bc3 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -1,13 +1,12 @@ //! Compatibility functions for rpc `Block` type. 
+use alloy_consensus::Sealed; use alloy_primitives::{B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types::{ Block, BlockError, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; -use reth_primitives::{ - Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, SealedHeader, Withdrawals, -}; +use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders, Withdrawals}; use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; @@ -100,64 +99,6 @@ pub fn from_block_full( )) } -/// Converts from a [`reth_primitives::SealedHeader`] to a [`alloy-rpc-types::Header`] -/// -/// # Note -/// -/// This does not set the `totalDifficulty` field. -pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) -> Header { - let (header, hash) = primitive_header.split(); - let PrimitiveHeader { - parent_hash, - ommers_hash, - beneficiary, - state_root, - transactions_root, - receipts_root, - logs_bloom, - difficulty, - number, - gas_limit, - gas_used, - timestamp, - mix_hash, - nonce, - base_fee_per_gas, - extra_data, - withdrawals_root, - blob_gas_used, - excess_blob_gas, - parent_beacon_block_root, - requests_hash, - } = header; - - Header { - hash, - parent_hash, - uncles_hash: ommers_hash, - miner: beneficiary, - state_root, - transactions_root, - receipts_root, - withdrawals_root, - number, - gas_used, - gas_limit, - extra_data, - logs_bloom, - timestamp, - difficulty, - mix_hash: Some(mix_hash), - nonce: Some(nonce), - base_fee_per_gas, - blob_gas_used, - excess_blob_gas, - parent_beacon_block_root, - total_difficulty: None, - requests_hash, - } -} - #[inline] fn from_block_with_transactions( block_length: usize, @@ -166,31 +107,19 @@ fn from_block_with_transactions( total_difficulty: U256, transactions: BlockTransactions, ) -> Block { - let uncles = block.body.ommers.into_iter().map(|h| h.hash_slow()).collect(); - let mut header = from_primitive_with_hash(SealedHeader::new(block.header, 
block_hash)); - header.total_difficulty = Some(total_difficulty); - - let withdrawals = header + let withdrawals = block + .header .withdrawals_root .is_some() - .then(|| block.body.withdrawals.map(Withdrawals::into_inner)) + .then(|| block.body.withdrawals.map(Withdrawals::into_inner).map(Into::into)) .flatten(); - Block { header, uncles, transactions, size: Some(U256::from(block_length)), withdrawals } -} + let uncles = block.body.ommers.into_iter().map(|h| h.hash_slow()).collect(); + let header = Header::from_consensus( + Sealed::new_unchecked(block.header, block_hash), + Some(total_difficulty), + Some(U256::from(block_length)), + ); -/// Build an RPC block response representing -/// an Uncle from its header. -pub fn uncle_block_from_header(header: PrimitiveHeader) -> Block { - let hash = header.hash_slow(); - let uncle_block = PrimitiveBlock { header, ..Default::default() }; - let size = Some(U256::from(uncle_block.length())); - let rpc_header = from_primitive_with_hash(SealedHeader::new(uncle_block.header, hash)); - Block { - uncles: vec![], - header: rpc_header, - transactions: BlockTransactions::Uncle, - withdrawals: None, - size, - } + Block { header, uncles, transactions, withdrawals } } diff --git a/crates/rpc/rpc-types-compat/src/proof.rs b/crates/rpc/rpc-types-compat/src/proof.rs index 19bc76f3d7b..7bdf629e96a 100644 --- a/crates/rpc/rpc-types-compat/src/proof.rs +++ b/crates/rpc/rpc-types-compat/src/proof.rs @@ -6,7 +6,11 @@ use reth_trie_common::{AccountProof, StorageProof}; /// Creates a new rpc storage proof from a primitive storage proof type. pub fn from_primitive_storage_proof(proof: StorageProof) -> EIP1186StorageProof { - EIP1186StorageProof { key: JsonStorageKey(proof.key), value: proof.value, proof: proof.proof } + EIP1186StorageProof { + key: JsonStorageKey::Hash(proof.key), + value: proof.value, + proof: proof.proof, + } } /// Creates a new rpc account proof from a primitive account proof type. 
diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 16742144f25..27f0b0288d5 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -1,7 +1,4 @@ //! Compatibility functions for rpc `Transaction` type. -mod signature; - -pub use signature::*; use std::fmt; @@ -10,7 +7,7 @@ use alloy_rpc_types::{ request::{TransactionInput, TransactionRequest}, TransactionInfo, }; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered, TxType}; +use reth_primitives::TransactionSignedEcRecovered; use serde::{Deserialize, Serialize}; /// Create a new rpc transaction result for a mined transaction, using the given block hash, @@ -44,36 +41,8 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { + Sync + Unpin + Clone - + Default + fmt::Debug; - /// - /// Formats gas price and max fee per gas for RPC transaction response w.r.t. network specific - /// transaction type. - fn gas_price(signed_tx: &TransactionSigned, base_fee: Option) -> GasPrice { - #[allow(unreachable_patterns)] - match signed_tx.tx_type() { - TxType::Legacy | TxType::Eip2930 => { - GasPrice { gas_price: Some(signed_tx.max_fee_per_gas()), max_fee_per_gas: None } - } - TxType::Eip1559 | TxType::Eip4844 | TxType::Eip7702 => { - // the gas price field for EIP1559 is set to `min(tip, gasFeeCap - baseFee) + - // baseFee` - let gas_price = base_fee - .and_then(|base_fee| { - signed_tx.effective_tip_per_gas(base_fee).map(|tip| tip + base_fee as u128) - }) - .unwrap_or_else(|| signed_tx.max_fee_per_gas()); - - GasPrice { - gas_price: Some(gas_price), - max_fee_per_gas: Some(signed_tx.max_fee_per_gas()), - } - } - _ => GasPrice::default(), - } - } - /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. 
fn fill(&self, tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo) -> Self::Transaction; @@ -82,19 +51,6 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { // todo: remove in favour of using constructor on `TransactionResponse` or similar // . fn otterscan_api_truncate_input(tx: &mut Self::Transaction); - - /// Returns the transaction type. - // todo: remove when alloy TransactionResponse trait it updated. - fn tx_type(tx: &Self::Transaction) -> u8; -} - -/// Gas price and max fee per gas for a transaction. Helper type to format transaction RPC response. -#[derive(Debug, Default)] -pub struct GasPrice { - /// Gas price for transaction. - pub gas_price: Option, - /// Max fee per gas for transaction. - pub max_fee_per_gas: Option, } /// Convert [`TransactionSignedEcRecovered`] to [`TransactionRequest`] diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index c6c60312730..e7e9c64447b 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -6,7 +6,7 @@ use crate::EthApi; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; -use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; +use alloy_primitives::{eip191_hash_message, Address, PrimitiveSignature as Signature, B256}; use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; @@ -109,7 +109,7 @@ impl EthSigner for DevSigner { #[cfg(test)] mod tests { - use alloy_primitives::{Bytes, Parity, U256}; + use alloy_primitives::{Bytes, U256}; use alloy_rpc_types_eth::TransactionInput; use revm_primitives::TxKind; @@ -205,7 +205,7 @@ mod tests { 16, ) .unwrap(), - Parity::Parity(false), + false, ); assert_eq!(sig, expected) } @@ -227,7 +227,7 @@ mod tests { 16, ) .unwrap(), - Parity::Parity(true), + true, ); assert_eq!(sig, expected) 
} diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 0998c057e29..b86e32f046f 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -1,14 +1,10 @@ //! L1 `eth` API types. -use alloy_consensus::Transaction as _; +use alloy_consensus::{Signed, TxEip4844Variant, TxEnvelope}; use alloy_network::{Ethereum, Network}; -use alloy_primitives::{Address, TxKind}; use alloy_rpc_types::{Transaction, TransactionInfo}; -use reth_primitives::TransactionSignedEcRecovered; -use reth_rpc_types_compat::{ - transaction::{from_primitive_signature, GasPrice}, - TransactionCompat, -}; +use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_rpc_types_compat::TransactionCompat; /// Builds RPC transaction response for l1. #[derive(Debug, Clone, Copy)] @@ -25,65 +21,46 @@ where tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, ) -> Self::Transaction { - let signer = tx.signer(); - let signed_tx = tx.into_signed(); + let from = tx.signer(); + let TransactionSigned { transaction, signature, hash } = tx.into_signed(); - let to: Option
= match signed_tx.kind() { - TxKind::Create => None, - TxKind::Call(to) => Some(Address(*to)), + let inner = match transaction { + reth_primitives::Transaction::Legacy(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip2930(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip1559(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip4844(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip7702(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + #[allow(unreachable_patterns)] + _ => unreachable!(), }; - let TransactionInfo { - base_fee, block_hash, block_number, index: transaction_index, .. - } = tx_info; + let TransactionInfo { block_hash, block_number, index: transaction_index, .. } = tx_info; - let GasPrice { gas_price, max_fee_per_gas } = - Self::gas_price(&signed_tx, base_fee.map(|fee| fee as u64)); - - let input = signed_tx.input().to_vec().into(); - let chain_id = signed_tx.chain_id(); - let blob_versioned_hashes = signed_tx.blob_versioned_hashes().map(|hs| hs.to_vec()); - let access_list = signed_tx.access_list().cloned(); - let authorization_list = signed_tx.authorization_list().map(|l| l.to_vec()); - - let signature = from_primitive_signature( - *signed_tx.signature(), - signed_tx.tx_type(), - signed_tx.chain_id(), - ); - - Transaction { - hash: signed_tx.hash(), - nonce: signed_tx.nonce(), - from: signer, - to, - value: signed_tx.value(), - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas: signed_tx.max_priority_fee_per_gas(), - signature: Some(signature), - gas: signed_tx.gas_limit(), - input, - chain_id, - access_list, - transaction_type: Some(signed_tx.tx_type() as u8), - // These fields are set to None because they are not stored as part of the - // transaction - block_hash, - block_number, - transaction_index, - // EIP-4844 fields - 
max_fee_per_blob_gas: signed_tx.max_fee_per_blob_gas(), - blob_versioned_hashes, - authorization_list, - } + Transaction { inner, block_hash, block_number, transaction_index, from } } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - tx.input = tx.input.slice(..4); - } - - fn tx_type(tx: &Self::Transaction) -> u8 { - tx.transaction_type.unwrap_or(0) + let input = match &mut tx.inner { + TxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, + TxEnvelope::Eip2930(tx) => &mut tx.tx_mut().input, + TxEnvelope::Legacy(tx) => &mut tx.tx_mut().input, + TxEnvelope::Eip4844(tx) => match tx.tx_mut() { + TxEip4844Variant::TxEip4844(tx) => &mut tx.input, + TxEip4844Variant::TxEip4844WithSidecar(tx) => &mut tx.tx.input, + }, + TxEnvelope::Eip7702(tx) => &mut tx.tx_mut().input, + _ => return, + }; + *input = input.slice(..4); } } diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 663ec0b99d6..922694cdba6 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -329,7 +329,7 @@ where self.chain_events.canonical_state_stream().flat_map(|new_chain| { let headers = new_chain.committed().headers().collect::>(); futures::stream::iter( - headers.into_iter().map(reth_rpc_types_compat::block::from_primitive_with_hash), + headers.into_iter().map(|h| Header::from_consensus(h.into(), None, None)), ) }) } diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 54ddaaaad50..0585ef76459 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -165,7 +165,7 @@ where } /// Handler for `ots_getBlockDetails` - async fn get_block_details(&self, block_number: u64) -> RpcResult { + async fn get_block_details(&self, block_number: u64) -> RpcResult> { let block_id = block_number.into(); let block = self.eth.block_by_number(block_id, true); let block_id = block_id.into(); @@ -178,7 +178,7 @@ where } /// Handler for `getBlockDetailsByHash` - async fn 
get_block_details_by_hash(&self, block_hash: B256) -> RpcResult { + async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult> { let block = self.eth.block_by_hash(block_hash, true); let block_id = block_hash.into(); let receipts = self.eth.block_receipts(block_id); @@ -195,7 +195,7 @@ where block_number: u64, page_number: usize, page_size: usize, - ) -> RpcResult>> { + ) -> RpcResult, Header>> { let block_id = block_number.into(); // retrieve full block and its receipts let block = self.eth.block_by_number(block_id, true); @@ -239,7 +239,7 @@ where let timestamp = Some(block.header.timestamp); let receipts = receipts .drain(page_start..page_end) - .zip(transactions.iter().map(Eth::TransactionCompat::tx_type)) + .zip(transactions.iter().map(Transaction::ty)) .map(|(receipt, tx_ty)| { let inner = OtsReceipt { status: receipt.status(), diff --git a/crates/storage/codecs/src/alloy/authorization_list.rs b/crates/storage/codecs/src/alloy/authorization_list.rs index e17c0fb32a1..15285f36047 100644 --- a/crates/storage/codecs/src/alloy/authorization_list.rs +++ b/crates/storage/codecs/src/alloy/authorization_list.rs @@ -85,12 +85,11 @@ mod tests { nonce: 1, } .into_signed( - alloy_primitives::Signature::from_rs_and_parity( + alloy_primitives::PrimitiveSignature::new( b256!("1fd474b1f9404c0c5df43b7620119ffbc3a1c3f942c73b6e14e9f55255ed9b1d").into(), b256!("29aca24813279a901ec13b5f7bb53385fa1fc627b946592221417ff74a49600d").into(), false, ) - .unwrap(), ); let mut compacted_authorization = Vec::::new(); let len = authorization.to_compact(&mut compacted_authorization); diff --git a/crates/storage/codecs/src/alloy/signature.rs b/crates/storage/codecs/src/alloy/signature.rs index 0cc4774d0f8..b8fd19cf35a 100644 --- a/crates/storage/codecs/src/alloy/signature.rs +++ b/crates/storage/codecs/src/alloy/signature.rs @@ -1,7 +1,7 @@ //! 
Compact implementation for [`Signature`] use crate::Compact; -use alloy_primitives::{Parity, Signature, U256}; +use alloy_primitives::{PrimitiveSignature as Signature, U256}; impl Compact for Signature { fn to_compact(&self, buf: &mut B) -> usize @@ -10,7 +10,7 @@ impl Compact for Signature { { buf.put_slice(&self.r().as_le_bytes()); buf.put_slice(&self.s().as_le_bytes()); - self.v().y_parity() as usize + self.v() as usize } fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { @@ -19,6 +19,6 @@ impl Compact for Signature { let r = U256::from_le_slice(&buf[0..32]); let s = U256::from_le_slice(&buf[32..64]); buf.advance(64); - (Self::new(r, s, Parity::Parity(identifier != 0)), buf) + (Self::new(r, s, identifier != 0), buf) } } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 0784bdb6346..786a2f2b108 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1388,7 +1388,7 @@ impl StorageChangeSetReader for ConsistentProvider { .bundle .reverts .clone() - .into_plain_state_reverts() + .to_plain_state_reverts() .storage .into_iter() .flatten() @@ -1441,7 +1441,7 @@ impl ChangeSetReader for ConsistentProvider { .bundle .reverts .clone() - .into_plain_state_reverts() + .to_plain_state_reverts() .accounts .into_iter() .flatten() diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index d47532e712d..266f98aae37 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1704,7 +1704,7 @@ impl> TransactionsProviderE rlp_buf: &mut Vec, ) -> Result<(B256, TxNumber), Box> { let (tx_id, tx) = entry.map_err(|e| Box::new(e.into()))?; - tx.transaction.encode_with_signature(&tx.signature, rlp_buf, false); + tx.transaction.eip2718_encode(&tx.signature, rlp_buf); 
Ok((keccak256(rlp_buf), tx_id)) } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 70c1e38f6ac..9ccaf051463 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1671,6 +1671,6 @@ fn calculate_hash( rlp_buf: &mut Vec, ) -> Result<(B256, TxNumber), Box> { let (tx_id, tx) = entry; - tx.transaction.encode_with_signature(&tx.signature, rlp_buf, false); + tx.transaction.eip2718_encode(&tx.signature, rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 19f885e27a8..8439aef1609 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -2,12 +2,12 @@ use crate::{DatabaseProviderRW, ExecutionOutcome}; use alloy_consensus::{TxLegacy, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{ - b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, Parity, Sealable, - TxKind, B256, U256, + b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, Sealable, TxKind, B256, + U256, }; use alloy_eips::eip4895::Withdrawal; -use alloy_primitives::Signature; +use alloy_primitives::PrimitiveSignature as Signature; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_node_types::NodeTypes; @@ -99,7 +99,7 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo "29056683545955299640297374067888344259176096769870751649153779895496107008675", ) .unwrap(), - Parity::NonEip155(false), + false, ), transaction: Transaction::Legacy(TxLegacy { gas_price: 10, diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 5b16b2da4e5..37092a5dd51 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ 
b/crates/storage/provider/src/writer/mod.rs @@ -523,7 +523,7 @@ where is_value_known: OriginalValuesKnown, ) -> ProviderResult<()> { let (plain_state, reverts) = - execution_outcome.bundle.into_plain_state_and_reverts(is_value_known); + execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); self.database().write_state_reverts(reverts, execution_outcome.first_block)?; @@ -664,8 +664,8 @@ mod tests { let mut revm_bundle_state = state.take_bundle(); // Write plain state and reverts separately. - let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); - let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); + let reverts = revm_bundle_state.take_all_reverts().to_plain_state_reverts(); + let plain_state = revm_bundle_state.to_plain_state(OriginalValuesKnown::Yes); assert!(plain_state.storage.is_empty()); assert!(plain_state.contracts.is_empty()); provider.write_state_changes(plain_state).expect("Could not write plain state to DB"); @@ -722,8 +722,8 @@ mod tests { let mut revm_bundle_state = state.take_bundle(); // Write plain state and reverts separately. - let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); - let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); + let reverts = revm_bundle_state.take_all_reverts().to_plain_state_reverts(); + let plain_state = revm_bundle_state.to_plain_state(OriginalValuesKnown::Yes); // Account B selfdestructed so flag for it should be present. 
assert_eq!( plain_state.storage, diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index c97632c7dcd..92f74665279 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -16,7 +16,9 @@ use alloy_eips::{ eip2930::AccessList, eip4844::{BlobTransactionSidecar, BlobTransactionValidationError, DATA_GAS_PER_BLOB}, }; -use alloy_primitives::{Address, Bytes, ChainId, Signature, TxHash, TxKind, B256, U256}; +use alloy_primitives::{ + Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, +}; use paste::paste; use rand::{ distributions::{Uniform, WeightedIndex}, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 6be25cb2ecc..aa99e7af615 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1506,7 +1506,7 @@ mod tests { use super::*; use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; use alloy_eips::eip4844::DATA_GAS_PER_BLOB; - use alloy_primitives::Signature; + use alloy_primitives::PrimitiveSignature as Signature; use reth_primitives::TransactionSigned; #[test] diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 83fcf4484a0..57c9acedfea 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -2,7 +2,7 @@ use alloy_consensus::{Transaction as _, TxLegacy}; use alloy_eips::eip4895::Withdrawal; -use alloy_primitives::{Address, BlockNumber, Bytes, Parity, Sealable, TxKind, B256, U256}; +use alloy_primitives::{Address, BlockNumber, Bytes, Sealable, TxKind, B256, U256}; pub use rand::Rng; use rand::{ distributions::uniform::SampleRange, rngs::StdRng, seq::SliceRandom, thread_rng, SeedableRng, @@ -148,17 +148,9 @@ pub fn sign_tx_with_random_key_pair(rng: &mut R, tx: Transaction) -> Tra /// Signs the [Transaction] with the 
given key pair. pub fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionSigned { - let mut signature = + let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); - if matches!(tx, Transaction::Legacy(_)) { - signature = if let Some(chain_id) = tx.chain_id() { - signature.with_chain_id(chain_id) - } else { - signature.with_parity(Parity::NonEip155(signature.v().y_parity())) - } - } - TransactionSigned::from_transaction_and_signature(tx, signature) } @@ -464,7 +456,7 @@ mod tests { use super::*; use alloy_consensus::TxEip1559; use alloy_eips::eip2930::AccessList; - use alloy_primitives::{hex, Parity, Signature}; + use alloy_primitives::{hex, PrimitiveSignature as Signature}; use reth_primitives::public_key_to_address; use std::str::FromStr; @@ -538,7 +530,7 @@ mod tests { "46948507304638947509940763649030358759909902576025900602547168820602576006531", ) .unwrap(), - Parity::Parity(false), + false, ); assert_eq!(expected, signature); } From c2e8e2f4f95d2b82b491ed9f1b8676f5f944d855 Mon Sep 17 00:00:00 2001 From: Jun Song <87601811+syjn99@users.noreply.github.com> Date: Thu, 7 Nov 2024 01:53:45 +0900 Subject: [PATCH 340/970] Add `queued_outgoing_messages` panel for grafana (#12306) --- etc/grafana/dashboards/reth-mempool.json | 104 ++++++++++++++++++++++- 1 file changed, 103 insertions(+), 1 deletion(-) diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index ebb693184a5..bba5dbd0e22 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -1493,6 +1493,108 @@ "title": "Incoming Gossip and Requests", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Measures the message send rate (MPS) for queued outgoing messages. 
Outgoing messages are added to the queue before being sent to other peers, and this metric helps track the rate of message dispatch.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "mps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 29 + }, + "id": 219, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_queued_outgoing_messages{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Queued Messages per Second", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Queued Outgoing Messages", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -2931,7 +3033,7 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, + "x": 0, "y": 69 }, "id": 214, From dc0a6007e3cd4bb75a6d6e6152cc097a2fcc8f91 Mon Sep 
17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 7 Nov 2024 01:24:18 +0700 Subject: [PATCH 341/970] fix(pool-args): `saturating_mul` max sizes to avoid overflow (#12350) --- crates/node/core/src/args/txpool.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 538315101ad..2e691aaa963 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -125,19 +125,19 @@ impl RethTransactionPoolConfig for TxPoolArgs { }, pending_limit: SubPoolLimit { max_txs: self.pending_max_count, - max_size: self.pending_max_size * 1024 * 1024, + max_size: self.pending_max_size.saturating_mul(1024 * 1024), }, basefee_limit: SubPoolLimit { max_txs: self.basefee_max_count, - max_size: self.basefee_max_size * 1024 * 1024, + max_size: self.basefee_max_size.saturating_mul(1024 * 1024), }, queued_limit: SubPoolLimit { max_txs: self.queued_max_count, - max_size: self.queued_max_size * 1024 * 1024, + max_size: self.queued_max_size.saturating_mul(1024 * 1024), }, blob_limit: SubPoolLimit { max_txs: self.queued_max_count, - max_size: self.queued_max_size * 1024 * 1024, + max_size: self.queued_max_size.saturating_mul(1024 * 1024), }, max_account_slots: self.max_account_slots, price_bumps: PriceBumpConfig { From e084bed089667d5da471db8638c31aaeef52966a Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Wed, 6 Nov 2024 13:22:00 -0600 Subject: [PATCH 342/970] renamed OptimismAddOns to OpAddOns (#12348) --- crates/optimism/bin/src/main.rs | 4 ++-- crates/optimism/node/src/node.rs | 17 ++++++++--------- crates/optimism/node/tests/e2e/utils.rs | 4 ++-- crates/optimism/node/tests/it/builder.rs | 4 ++-- 4 files changed, 14 insertions(+), 15 deletions(-) diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 840da3bcf0b..6494298ba39 100644 --- 
a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -5,7 +5,7 @@ use clap::Parser; use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher}; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; -use reth_optimism_node::{args::RollupArgs, node::OptimismAddOns, OpNode}; +use reth_optimism_node::{args::RollupArgs, node::OpAddOns, OpNode}; use reth_provider::providers::BlockchainProvider2; use tracing as _; @@ -36,7 +36,7 @@ fn main() { let handle = builder .with_types_and_provider::>() .with_components(OpNode::components(rollup_args)) - .with_add_ons(OptimismAddOns::new(sequencer_http_arg)) + .with_add_ons(OpAddOns::new(sequencer_http_arg)) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( builder.task_executor().clone(), diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 2e1f71a5175..0eb024fe231 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -105,9 +105,8 @@ where OpConsensusBuilder, >; - type AddOns = OptimismAddOns< - NodeAdapter>::Components>, - >; + type AddOns = + OpAddOns>::Components>>; fn components_builder(&self) -> Self::ComponentsBuilder { let Self { args } = self; @@ -115,7 +114,7 @@ where } fn add_ons(&self) -> Self::AddOns { - OptimismAddOns::new(self.args.sequencer_http.clone()) + OpAddOns::new(self.args.sequencer_http.clone()) } } @@ -131,24 +130,24 @@ impl NodeTypesWithEngine for OpNode { /// Add-ons w.r.t. optimism. #[derive(Debug)] -pub struct OptimismAddOns( +pub struct OpAddOns( pub RpcAddOns, OptimismEngineValidatorBuilder>, ); -impl Default for OptimismAddOns { +impl Default for OpAddOns { fn default() -> Self { Self::new(None) } } -impl OptimismAddOns { +impl OpAddOns { /// Create a new instance with the given `sequencer_http` URL. 
pub fn new(sequencer_http: Option) -> Self { Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http), Default::default())) } } -impl NodeAddOns for OptimismAddOns +impl NodeAddOns for OpAddOns where N: FullNodeComponents>, OpEngineValidator: EngineValidator<::Engine>, @@ -163,7 +162,7 @@ where } } -impl RethRpcAddOns for OptimismAddOns +impl RethRpcAddOns for OpAddOns where N: FullNodeComponents>, OpEngineValidator: EngineValidator<::Engine>, diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index a8afab87ec2..c3b6acddc5a 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -6,14 +6,14 @@ use reth_e2e_test_utils::{ }; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_optimism_node::{ - node::OptimismAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes, + node::OpAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes, }; use reth_payload_builder::EthPayloadBuilderAttributes; use std::sync::Arc; use tokio::sync::Mutex; /// Optimism Node Helper type -pub(crate) type OpNode = NodeHelperType>>; +pub(crate) type OpNode = NodeHelperType>>; pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index 3bd2da75557..67cac17d398 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -4,7 +4,7 @@ use reth_db::test_utils::create_test_rw_db; use reth_node_api::FullNodeComponents; use reth_node_builder::{NodeBuilder, NodeConfig}; use reth_optimism_chainspec::BASE_MAINNET; -use reth_optimism_node::{node::OptimismAddOns, OpNode}; +use reth_optimism_node::{node::OpAddOns, OpNode}; #[test] fn test_basic_setup() { @@ -15,7 +15,7 @@ fn test_basic_setup() { 
.with_database(db) .with_types::() .with_components(OpNode::components(Default::default())) - .with_add_ons(OptimismAddOns::new(None)) + .with_add_ons(OpAddOns::new(None)) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); Ok(()) From 2c5ba732b7fe6b653c973157bf88106be22b8c5f Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 6 Nov 2024 20:27:33 +0100 Subject: [PATCH 343/970] feat(engine): integrate executor with StateRootTask (#12335) Co-authored-by: Roman Krasiuk --- Cargo.lock | 1 + crates/engine/tree/Cargo.toml | 1 + crates/engine/tree/src/tree/mod.rs | 15 ++++-- crates/engine/tree/src/tree/root.rs | 84 +++++++++++++++++++++++++---- 4 files changed, 86 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fba1e61e037..dcd29b36a93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7209,6 +7209,7 @@ dependencies = [ "assert_matches", "futures", "metrics", + "pin-project", "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 293883c036e..2ce18aa0e7d 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -43,6 +43,7 @@ revm-primitives.workspace = true # common futures.workspace = true +pin-project.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } tokio-stream.workspace = true thiserror.workspace = true diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 36108e63bf8..11dd95e5583 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -57,9 +57,8 @@ use std::{ time::Instant, }; use tokio::sync::{ - mpsc::{UnboundedReceiver, UnboundedSender}, - oneshot, - oneshot::error::TryRecvError, + mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, + oneshot::{self, error::TryRecvError}, }; use tracing::*; @@ -612,7 +611,7 @@ where remove_above_state: VecDeque::new(), }; - let (tx, outgoing) = 
tokio::sync::mpsc::unbounded_channel(); + let (tx, outgoing) = unbounded_channel(); let state = EngineApiTreeState::new( config.block_buffer_limit(), config.max_invalid_header_cache_length(), @@ -2188,6 +2187,7 @@ where let block = block.unseal(); let exec_time = Instant::now(); + // TODO: create StateRootTask with the receiving end of a channel and // pass the sending end of the channel to the state hook. let noop_state_hook = |_result_and_state: &ResultAndState| {}; @@ -2198,6 +2198,7 @@ where )?; trace!(target: "engine::tree", elapsed=?exec_time.elapsed(), ?block_number, "Executed block"); + if let Err(err) = self.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&output.receipts, &output.requests), @@ -2218,6 +2219,8 @@ where let root_time = Instant::now(); let mut state_root_result = None; + // TODO: switch to calculate state root using `StateRootTask`. + // We attempt to compute state root in parallel if we are currently not persisting anything // to database. This is safe, because the database state cannot change until we // finish parallel computation. It is important that nothing is being persisted as @@ -2305,6 +2308,9 @@ where parent_hash: B256, hashed_state: &HashedPostState, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { + // TODO: when we switch to calculate state root using `StateRootTask` this + // method can be still useful to calculate the required `TrieInput` to + // create the task. let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; let mut input = TrieInput::default(); @@ -2607,7 +2613,6 @@ mod tests { str::FromStr, sync::mpsc::{channel, Sender}, }; - use tokio::sync::mpsc::unbounded_channel; /// This is a test channel that allows you to `release` any value that is in the channel. 
/// diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 48b2eccdf14..dc039d418eb 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,5 +1,7 @@ //! State root task related functionality. +use futures::Stream; +use pin_project::pin_project; use reth_provider::providers::ConsistentDbView; use reth_trie::{updates::TrieUpdates, TrieInput}; use reth_trie_parallel::parallel_root::ParallelStateRootError; @@ -7,31 +9,55 @@ use revm_primitives::{EvmState, B256}; use std::{ future::Future, pin::Pin, - sync::Arc, + sync::{mpsc, Arc}, task::{Context, Poll}, }; use tokio_stream::wrappers::UnboundedReceiverStream; +use tracing::debug; + +/// Result of the state root calculation +pub(crate) type StateRootResult = Result<(B256, TrieUpdates), ParallelStateRootError>; + +/// Handle to a spawned state root task. +#[derive(Debug)] +#[allow(dead_code)] +pub(crate) struct StateRootHandle { + /// Channel for receiving the final result. + rx: mpsc::Receiver, +} + +#[allow(dead_code)] +impl StateRootHandle { + /// Waits for the state root calculation to complete. + pub(crate) fn wait_for_result(self) -> StateRootResult { + self.rx.recv().expect("state root task was dropped without sending result") + } +} /// Standalone task that receives a transaction state stream and updates relevant /// data structures to calculate state root. /// -/// It is responsile of initializing a blinded sparse trie and subscribe to +/// It is responsible of initializing a blinded sparse trie and subscribe to /// transaction state stream. As it receives transaction execution results, it /// fetches the proofs for relevant accounts from the database and reveal them /// to the tree. /// Then it updates relevant leaves according to the result of the transaction. -#[allow(dead_code)] +#[pin_project] pub(crate) struct StateRootTask { /// View over the state in the database. consistent_view: ConsistentDbView, /// Incoming state updates. 
+ #[pin] state_stream: UnboundedReceiverStream, /// Latest trie input. input: Arc, } #[allow(dead_code)] -impl StateRootTask { +impl StateRootTask +where + Factory: Send + 'static, +{ /// Creates a new `StateRootTask`. pub(crate) const fn new( consistent_view: ConsistentDbView, @@ -41,20 +67,58 @@ impl StateRootTask { Self { consistent_view, state_stream, input } } + /// Spawns the state root task and returns a handle to await its result. + pub(crate) fn spawn(self) -> StateRootHandle { + let (tx, rx) = mpsc::channel(); + + // Spawn the task that will process state updates and calculate the root + tokio::spawn(async move { + debug!(target: "engine::tree", "Starting state root task"); + let result = self.await; + let _ = tx.send(result); + }); + + StateRootHandle { rx } + } + /// Handles state updates. - pub(crate) fn on_state_update(&self, _update: EvmState) { + fn on_state_update( + _view: &ConsistentDbView, + _input: &Arc, + _state: EvmState, + ) { // TODO: calculate hashed state update and dispatch proof gathering for it. 
} } -impl Future for StateRootTask { - type Output = Result<(B256, TrieUpdates), ParallelStateRootError>; +impl Future for StateRootTask +where + Factory: Send + 'static, +{ + type Output = StateRootResult; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + // Process all items until the stream is closed + loop { + match this.state_stream.as_mut().poll_next(cx) { + Poll::Ready(Some(state)) => { + Self::on_state_update(this.consistent_view, this.input, state); + } + Poll::Ready(None) => { + // stream closed, return final result + return Poll::Ready(Ok((B256::default(), TrieUpdates::default()))); + } + Poll::Pending => { + return Poll::Pending; + } + } + } - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { // TODO: - // * poll incoming state updates stream // * keep track of proof calculation // * keep track of intermediate root computation - Poll::Pending + // * return final state root result } } From fe2b02828d62aa430a180e6b7407ffaee11de538 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 6 Nov 2024 21:30:49 +0100 Subject: [PATCH 344/970] feat: use 1559 functions directly (#12356) --- Cargo.lock | 25 ++++++++++---------- Cargo.toml | 8 +++---- crates/optimism/chainspec/src/lib.rs | 6 ++--- crates/optimism/node/src/engine.rs | 16 ++++++------- crates/optimism/payload/src/builder.rs | 11 +-------- crates/optimism/payload/src/error.rs | 14 ----------- crates/optimism/payload/src/payload.rs | 32 ++++---------------------- crates/optimism/rpc/src/eth/receipt.rs | 2 +- 8 files changed, 33 insertions(+), 81 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dcd29b36a93..cdc31489b34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5274,9 +5274,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ae4582945fa96ae0ed78babcac6e41f025460e30ed0c9781aaeedf878fc2b527" +checksum = "dabf6e7d7d63b2c6ed746b24d334e16388c6c3921bc50172440c72aa923c6b4a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5292,9 +5292,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1ece4a037c56536d8b517d045cef9cc07364c578709c184d33817108309c31e" +checksum = "f901aa077832e22820c644d63d2e5e48601eed0f06e40f2a26d1b2a89bd17dec" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5306,9 +5306,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e9b64d15a7bf27a06c16eb286349aa2d4e3173260a8ab1fe73bd2c13c89769" +checksum = "8b2e7db7997b12c1f364a3bd54b35338357f44c8e2e533a81ebf625104d80110" dependencies = [ "alloy-consensus", "alloy-network", @@ -5321,9 +5321,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa989d1ea8deced466b0edd7a447264b1f934fd740ab895d32b8544dcce3b151" +checksum = "b9226c7618f45f1d1e1f1112230818d5cfa719da9f5ca05fa28eaeb44d024181" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5341,9 +5341,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6e53039829ff0b3482d8dd02cb2de45d5c7b889023c7e4588a43ea7451664a" +checksum = "d60079165fe9a4be99b04865d8746c6c9c7b505be2fdce8982f677ca18c3cc10" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5360,9 +5360,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"283b19e1e7fef1ca9078df39f45a48609cacf856b7b441ed6cf19301ed162cca" +checksum = "59a5b505325e343b299b1c574b2b8542f6ac3101e0d92a1c909b2d7dd74665f1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5370,6 +5370,7 @@ dependencies = [ "alloy-serde", "derive_more 1.0.0", "ethereum_ssz", + "op-alloy-consensus", "op-alloy-protocol", "serde", "snap", diff --git a/Cargo.toml b/Cargo.toml index b8734afe664..8ff8c1eb7fe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -468,10 +468,10 @@ alloy-transport-ipc = { version = "0.6.0", default-features = false } alloy-transport-ws = { version = "0.6.0", default-features = false } # op -op-alloy-rpc-types = "0.6" -op-alloy-rpc-types-engine = "0.6" -op-alloy-network = "0.6" -op-alloy-consensus = "0.6" +op-alloy-rpc-types = "0.6.2" +op-alloy-rpc-types-engine = "0.6.2" +op-alloy-network = "0.6.2" +op-alloy-consensus = "0.6.2" # misc aquamarine = "0.6" diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index a60a9a22abc..aa59e9ab3f8 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -436,14 +436,14 @@ impl From for OpChainSpec { #[derive(Default, Debug)] struct OpGenesisInfo { - optimism_chain_info: op_alloy_rpc_types::genesis::OpChainInfo, + optimism_chain_info: op_alloy_rpc_types::OpChainInfo, base_fee_params: BaseFeeParamsKind, } impl OpGenesisInfo { fn extract_from(genesis: &Genesis) -> Self { let mut info = Self { - optimism_chain_info: op_alloy_rpc_types::genesis::OpChainInfo::extract_from( + optimism_chain_info: op_alloy_rpc_types::OpChainInfo::extract_from( &genesis.config.extra_fields, ) .unwrap_or_default(), @@ -852,7 +852,7 @@ mod tests { #[test] fn parse_genesis_optimism_with_variable_base_fee_params() { - use op_alloy_rpc_types::genesis::OpBaseFeeInfo; + use op_alloy_rpc_types::OpBaseFeeInfo; let geth_genesis = r#" { diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index eb356e86e1d..e337a23551e 100644 --- 
a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -15,9 +15,7 @@ use reth_node_api::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; -use reth_optimism_payload_builder::{ - builder::decode_eip_1559_params, OpBuiltPayload, OpPayloadBuilderAttributes, -}; +use reth_optimism_payload_builder::{OpBuiltPayload, OpPayloadBuilderAttributes}; /// The types used in the optimism beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] @@ -151,12 +149,12 @@ where if self.chain_spec.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp) { - let Some(eip_1559_params) = attributes.eip_1559_params else { - return Err(EngineObjectValidationError::InvalidParams( - "MissingEip1559ParamsInPayloadAttributes".to_string().into(), - )) - }; - let (elasticity, denominator) = decode_eip_1559_params(eip_1559_params); + let (elasticity, denominator) = + attributes.decode_eip_1559_params().ok_or_else(|| { + EngineObjectValidationError::InvalidParams( + "MissingEip1559ParamsInPayloadAttributes".to_string().into(), + ) + })?; if elasticity != 0 && denominator == 0 { return Err(EngineObjectValidationError::InvalidParams( "Eip1559ParamsDenominatorZero".to_string().into(), diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index f0c6c04ce73..76c48c09bbb 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -4,7 +4,7 @@ use std::{fmt::Display, sync::Arc}; use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_eips::merge::BEACON_NONCE; -use alloy_primitives::{Address, Bytes, B64, U256}; +use alloy_primitives::{Address, Bytes, U256}; use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; @@ -831,12 +831,3 @@ where Ok(None) } } - -/// Extracts the Holocene 1599 parameters from the encoded form: -/// -pub fn 
decode_eip_1559_params(eip_1559_params: B64) -> (u32, u32) { - let denominator: [u8; 4] = eip_1559_params.0[..4].try_into().expect("sufficient length"); - let elasticity: [u8; 4] = eip_1559_params.0[4..8].try_into().expect("sufficient length"); - - (u32::from_be_bytes(elasticity), u32::from_be_bytes(denominator)) -} diff --git a/crates/optimism/payload/src/error.rs b/crates/optimism/payload/src/error.rs index ce5f584a1ce..2016fdc6dd9 100644 --- a/crates/optimism/payload/src/error.rs +++ b/crates/optimism/payload/src/error.rs @@ -21,17 +21,3 @@ pub enum OptimismPayloadBuilderError { #[error("blob transaction included in sequencer block")] BlobTransactionRejected, } - -/// Error type for EIP-1559 parameters -#[derive(Debug, thiserror::Error)] -pub enum EIP1559ParamError { - /// No EIP-1559 parameters provided - #[error("No EIP-1559 parameters provided")] - NoEIP1559Params, - /// Denominator overflow - #[error("Denominator overflow")] - DenominatorOverflow, - /// Elasticity overflow - #[error("Elasticity overflow")] - ElasticityOverflow, -} diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 5acac70e914..f3576407909 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -1,8 +1,5 @@ //! Payload related types -//! Optimism builder support - -use crate::{builder::decode_eip_1559_params, error::EIP1559ParamError}; use alloy_eips::{ eip1559::BaseFeeParams, eip2718::Decodable2718, eip4844::BlobTransactionSidecar, eip7685::Requests, @@ -10,6 +7,7 @@ use alloy_eips::{ use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; +use op_alloy_consensus::eip1559::{decode_holocene_extra_data, EIP1559ParamError}; /// Re-export for use in downstream arguments. 
pub use op_alloy_rpc_types_engine::OpPayloadAttributes; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; @@ -46,31 +44,9 @@ impl OpPayloadBuilderAttributes { &self, default_base_fee_params: BaseFeeParams, ) -> Result { - let eip_1559_params = self.eip_1559_params.ok_or(EIP1559ParamError::NoEIP1559Params)?; - - let mut extra_data = [0u8; 9]; - // If eip 1559 params aren't set, use the canyon base fee param constants - // otherwise use them - if eip_1559_params.is_zero() { - // Try casting max_change_denominator to u32 - let max_change_denominator: u32 = (default_base_fee_params.max_change_denominator) - .try_into() - .map_err(|_| EIP1559ParamError::DenominatorOverflow)?; - - // Try casting elasticity_multiplier to u32 - let elasticity_multiplier: u32 = (default_base_fee_params.elasticity_multiplier) - .try_into() - .map_err(|_| EIP1559ParamError::ElasticityOverflow)?; - - // Copy the values safely - extra_data[1..5].copy_from_slice(&max_change_denominator.to_be_bytes()); - extra_data[5..9].copy_from_slice(&elasticity_multiplier.to_be_bytes()); - } else { - let (elasticity, denominator) = decode_eip_1559_params(eip_1559_params); - extra_data[1..5].copy_from_slice(&denominator.to_be_bytes()); - extra_data[5..9].copy_from_slice(&elasticity.to_be_bytes()); - } - Ok(Bytes::copy_from_slice(&extra_data)) + self.eip_1559_params + .map(|params| decode_holocene_extra_data(params, default_base_fee_params)) + .ok_or(EIP1559ParamError::NoEIP1559Params)? 
} } diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index f8e6b7fc21e..3563d4ae45d 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -5,7 +5,7 @@ use alloy_rpc_types::{Log, TransactionReceipt}; use op_alloy_consensus::{ DepositTransaction, OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope, }; -use op_alloy_rpc_types::{receipt::L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; +use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::RethL1BlockInfo; From 302ed291e4b8b56198e4e2d85113e9ff78cc9dd3 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 7 Nov 2024 03:33:49 +0700 Subject: [PATCH 345/970] perf(`EthBuiltPayload`): `Arc` `SealedBlock` (#12351) --- .../ethereum/engine-primitives/src/payload.rs | 19 +++++++++++-------- crates/ethereum/payload/src/lib.rs | 4 ++-- crates/payload/builder/src/lib.rs | 5 +++-- crates/payload/builder/src/test_utils.rs | 3 ++- 4 files changed, 18 insertions(+), 13 deletions(-) diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index ed377d003dd..21c544f16ba 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -13,7 +13,7 @@ use reth_primitives::{SealedBlock, Withdrawals}; use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; -use std::convert::Infallible; +use std::{convert::Infallible, sync::Arc}; /// Contains the built payload. 
/// @@ -25,7 +25,7 @@ pub struct EthBuiltPayload { /// Identifier of the payload pub(crate) id: PayloadId, /// The built block - pub(crate) block: SealedBlock, + pub(crate) block: Arc, /// Block execution data for the payload, if any. pub(crate) executed_block: Option, /// The fees of the block @@ -45,7 +45,7 @@ impl EthBuiltPayload { /// Caution: This does not set any [`BlobTransactionSidecar`]. pub const fn new( id: PayloadId, - block: SealedBlock, + block: Arc, fees: U256, executed_block: Option, requests: Option, @@ -59,7 +59,7 @@ impl EthBuiltPayload { } /// Returns the built block(sealed) - pub const fn block(&self) -> &SealedBlock { + pub fn block(&self) -> &SealedBlock { &self.block } @@ -127,7 +127,7 @@ impl BuiltPayload for &EthBuiltPayload { // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: EthBuiltPayload) -> Self { - block_to_payload_v1(value.block) + block_to_payload_v1(Arc::unwrap_or_clone(value.block)) } } @@ -136,7 +136,10 @@ impl From for ExecutionPayloadEnvelopeV2 { fn from(value: EthBuiltPayload) -> Self { let EthBuiltPayload { block, fees, .. } = value; - Self { block_value: fees, execution_payload: convert_block_to_payload_field_v2(block) } + Self { + block_value: fees, + execution_payload: convert_block_to_payload_field_v2(Arc::unwrap_or_clone(block)), + } } } @@ -145,7 +148,7 @@ impl From for ExecutionPayloadEnvelopeV3 { let EthBuiltPayload { block, fees, sidecars, .. } = value; Self { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // @@ -166,7 +169,7 @@ impl From for ExecutionPayloadEnvelopeV4 { let EthBuiltPayload { block, fees, sidecars, requests, .. 
} = value; Self { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index ed33292ef98..403f35d8eff 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -439,12 +439,12 @@ where body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals }, }; - let sealed_block = block.seal_slow(); + let sealed_block = Arc::new(block.seal_slow()); debug!(target: "payload_builder", ?sealed_block, "sealed built block"); // create the executed block data let executed = ExecutedBlock { - block: Arc::new(sealed_block.clone()), + block: sealed_block.clone(), senders: Arc::new(executed_senders), execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 2c46a4a9e16..57a040a4bb4 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -26,6 +26,7 @@ //! ``` //! use std::future::Future; //! use std::pin::Pin; +//! use std::sync::Arc; //! use std::task::{Context, Poll}; //! use alloy_primitives::U256; //! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, PayloadKind}; @@ -56,7 +57,7 @@ //! //! fn best_payload(&self) -> Result { //! // NOTE: some fields are omitted here for brevity -//! let payload = Block { +//! let block = Block { //! header: Header { //! parent_hash: self.attributes.parent, //! timestamp: self.attributes.timestamp, @@ -65,7 +66,7 @@ //! }, //! ..Default::default() //! }; -//! let payload = EthBuiltPayload::new(self.attributes.id, payload.seal_slow(), U256::ZERO, None, None); +//! 
let payload = EthBuiltPayload::new(self.attributes.id, Arc::new(block.seal_slow()), U256::ZERO, None, None); //! Ok(payload) //! } //! diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 676e60d912f..746853b74f0 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -13,6 +13,7 @@ use reth_provider::CanonStateNotification; use std::{ future::Future, pin::Pin, + sync::Arc, task::{Context, Poll}, }; @@ -86,7 +87,7 @@ impl PayloadJob for TestPayloadJob { fn best_payload(&self) -> Result { Ok(EthBuiltPayload::new( self.attr.payload_id(), - Block::default().seal_slow(), + Arc::new(Block::default().seal_slow()), U256::ZERO, Some(ExecutedBlock::default()), Some(Default::default()), From 2d945292acee7a65460e24de81756b8a34ce0e80 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Wed, 6 Nov 2024 15:22:34 -0600 Subject: [PATCH 346/970] renamed OptimismEngineValidatorBuilder to OpEngineValidatorBuilder (#12359) --- crates/optimism/node/src/node.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 0eb024fe231..541c9bcd483 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -130,9 +130,7 @@ impl NodeTypesWithEngine for OpNode { /// Add-ons w.r.t. optimism. #[derive(Debug)] -pub struct OpAddOns( - pub RpcAddOns, OptimismEngineValidatorBuilder>, -); +pub struct OpAddOns(pub RpcAddOns, OpEngineValidatorBuilder>); impl Default for OpAddOns { fn default() -> Self { @@ -479,9 +477,9 @@ where /// Builder for [`OpEngineValidator`]. 
#[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct OptimismEngineValidatorBuilder; +pub struct OpEngineValidatorBuilder; -impl EngineValidatorBuilder for OptimismEngineValidatorBuilder +impl EngineValidatorBuilder for OpEngineValidatorBuilder where Types: NodeTypesWithEngine, Node: FullNodeComponents, From 8e8a1a827d240166d93d00423deae677059b49c4 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 7 Nov 2024 02:56:16 -0600 Subject: [PATCH 347/970] renamed OptimismPayloadBuilderError to OpPayloadBuilderError (#12364) --- crates/optimism/payload/src/builder.rs | 12 +++++------- crates/optimism/payload/src/error.rs | 2 +- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 76c48c09bbb..f7fdf157ce3 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -35,7 +35,7 @@ use revm::{ use tracing::{debug, trace, warn}; use crate::{ - error::OptimismPayloadBuilderError, + error::OpPayloadBuilderError, payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, }; use op_alloy_consensus::DepositTransaction; @@ -589,7 +589,7 @@ impl OpPayloadBuilderCtx { ) .map_err(|err| { warn!(target: "payload_builder", %err, "missing create2 deployer, skipping block."); - PayloadBuilderError::other(OptimismPayloadBuilderError::ForceCreate2DeployerFail) + PayloadBuilderError::other(OpPayloadBuilderError::ForceCreate2DeployerFail) }) } } @@ -640,7 +640,7 @@ where // A sequencer's block should never contain blob transactions. if sequencer_tx.value().is_eip4844() { return Err(PayloadBuilderError::other( - OptimismPayloadBuilderError::BlobTransactionRejected, + OpPayloadBuilderError::BlobTransactionRejected, )) } @@ -650,9 +650,7 @@ where // will just pull in its `from` address. 
let sequencer_tx = sequencer_tx.value().clone().try_into_ecrecovered().map_err(|_| { - PayloadBuilderError::other( - OptimismPayloadBuilderError::TransactionEcRecoverFailed, - ) + PayloadBuilderError::other(OpPayloadBuilderError::TransactionEcRecoverFailed) })?; // Cache the depositor account prior to the state transition for the deposit nonce. @@ -667,7 +665,7 @@ where }) .transpose() .map_err(|_| { - PayloadBuilderError::other(OptimismPayloadBuilderError::AccountLoadFailed( + PayloadBuilderError::other(OpPayloadBuilderError::AccountLoadFailed( sequencer_tx.signer(), )) })?; diff --git a/crates/optimism/payload/src/error.rs b/crates/optimism/payload/src/error.rs index 2016fdc6dd9..8a254e9835c 100644 --- a/crates/optimism/payload/src/error.rs +++ b/crates/optimism/payload/src/error.rs @@ -2,7 +2,7 @@ /// Optimism specific payload building errors. #[derive(Debug, thiserror::Error)] -pub enum OptimismPayloadBuilderError { +pub enum OpPayloadBuilderError { /// Thrown when a transaction fails to convert to a /// [`reth_primitives::TransactionSignedEcRecovered`]. 
#[error("failed to convert deposit transaction to TransactionSignedEcRecovered")] From 29a9e9779757e569186937ba573aae2f737c0817 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 7 Nov 2024 19:00:48 +0900 Subject: [PATCH 348/970] feat: add `PrimitiveSignature` to `test-vectors compact` tests (#12366) --- crates/cli/commands/src/test_vectors/compact.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index 8def25fa39b..c2995170057 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{hex, private::getrandom::getrandom, TxKind}; +use alloy_primitives::{hex, private::getrandom::getrandom, PrimitiveSignature, TxKind}; use arbitrary::Arbitrary; use eyre::{Context, Result}; use proptest::{ @@ -126,7 +126,7 @@ compact_types!( ], // These types require an extra identifier which is usually stored elsewhere (eg. parent type). 
identifier: [ - // Signature todo we for v we only store parity(true || false), while v can take more values + PrimitiveSignature, Transaction, TxType, TxKind From d31e1d601d2f97bdb6b7c7f39829456e6865fe43 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 7 Nov 2024 10:59:17 +0100 Subject: [PATCH 349/970] chore(sdk): Add `NodePrimitives::Receipt` (#12357) --- crates/ethereum/node/src/node.rs | 3 ++- crates/node/types/src/lib.rs | 3 +++ crates/optimism/node/src/node.rs | 3 ++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 1942f8a9e56..17a952a58d3 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -26,7 +26,7 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Header}; +use reth_primitives::{Block, Header, Receipt}; use reth_provider::CanonStateSubscriptions; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; @@ -44,6 +44,7 @@ pub struct EthPrimitives; impl NodePrimitives for EthPrimitives { type Block = Block; + type Receipt = Receipt; } /// Type configuration for a regular Ethereum node. diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 38e194bd4fb..6c3ed9ca46e 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -24,10 +24,13 @@ use reth_trie_db::StateCommitment; pub trait NodePrimitives { /// Block primitive. type Block; + /// A receipt. + type Receipt; } impl NodePrimitives for () { type Block = reth_primitives::Block; + type Receipt = (); } /// The type that configures the essential types of an Ethereum-like node. 
diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 541c9bcd483..e39fdfc27a8 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -24,7 +24,7 @@ use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Header}; +use reth_primitives::{Block, Header, Receipt}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -46,6 +46,7 @@ pub struct OpPrimitives; impl NodePrimitives for OpPrimitives { type Block = Block; + type Receipt = Receipt; } /// Type configuration for a regular Optimism node. From 037dffeb15b96dc1b9f2851d96973fc15d0e08f8 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:51:05 +0700 Subject: [PATCH 350/970] perf(`OpBuiltPayload`): `Arc` `SealedBlock` (#12361) --- crates/optimism/payload/src/builder.rs | 4 ++-- crates/optimism/payload/src/payload.rs | 17 ++++++++++------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index f7fdf157ce3..8c1b8faa56c 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -356,12 +356,12 @@ where }, }; - let sealed_block = block.seal_slow(); + let sealed_block = Arc::new(block.seal_slow()); debug!(target: "payload_builder", ?sealed_block, "sealed built block"); // create the executed block data let executed = ExecutedBlock { - block: Arc::new(sealed_block.clone()), + block: sealed_block.clone(), senders: Arc::new(info.executed_senders), execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), diff --git a/crates/optimism/payload/src/payload.rs 
b/crates/optimism/payload/src/payload.rs index f3576407909..37224716c75 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -135,7 +135,7 @@ pub struct OpBuiltPayload { /// Identifier of the payload pub(crate) id: PayloadId, /// The built block - pub(crate) block: SealedBlock, + pub(crate) block: Arc, /// Block execution data for the payload, if any. pub(crate) executed_block: Option, /// The fees of the block @@ -155,7 +155,7 @@ impl OpBuiltPayload { /// Initializes the payload with the given initial block. pub const fn new( id: PayloadId, - block: SealedBlock, + block: Arc, fees: U256, chain_spec: Arc, attributes: OpPayloadBuilderAttributes, @@ -170,7 +170,7 @@ impl OpBuiltPayload { } /// Returns the built block(sealed) - pub const fn block(&self) -> &SealedBlock { + pub fn block(&self) -> &SealedBlock { &self.block } @@ -224,7 +224,7 @@ impl BuiltPayload for &OpBuiltPayload { // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: OpBuiltPayload) -> Self { - block_to_payload_v1(value.block) + block_to_payload_v1(Arc::unwrap_or_clone(value.block)) } } @@ -233,7 +233,10 @@ impl From for ExecutionPayloadEnvelopeV2 { fn from(value: OpBuiltPayload) -> Self { let OpBuiltPayload { block, fees, .. 
} = value; - Self { block_value: fees, execution_payload: convert_block_to_payload_field_v2(block) } + Self { + block_value: fees, + execution_payload: convert_block_to_payload_field_v2(Arc::unwrap_or_clone(block)), + } } } @@ -248,7 +251,7 @@ impl From for OpExecutionPayloadEnvelopeV3 { B256::ZERO }; Self { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // @@ -275,7 +278,7 @@ impl From for OpExecutionPayloadEnvelopeV4 { B256::ZERO }; Self { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // From 6d05788de2c23b98245d62ea64eeff0063da5e44 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:52:25 +0700 Subject: [PATCH 351/970] perf(`default_ethereum_payload`): reuse `Evm` between txs (#12365) --- crates/ethereum/payload/src/lib.rs | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 403f35d8eff..87ceb4200b1 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -41,7 +41,7 @@ use revm::{ primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, DatabaseCommit, }; -use revm_primitives::calc_excess_blob_gas; +use revm_primitives::{calc_excess_blob_gas, TxEnv}; use std::sync::Arc; use tracing::{debug, trace, warn}; @@ -212,6 +212,13 @@ where PayloadBuilderError::Internal(err.into()) })?; + let env = EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + TxEnv::default(), + ); + let mut evm = evm_config.evm_with_env(&mut db, env); + let mut receipts = Vec::new(); while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this 
transaction @@ -246,14 +253,8 @@ where } } - let env = EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - evm_config.tx_env(tx.as_signed(), tx.signer()), - ); - - // Configure the environment for the block. - let mut evm = evm_config.evm_with_env(&mut db, env); + // Configure the environment for the tx. + *evm.tx_mut() = evm_config.tx_env(tx.as_signed(), tx.signer()); let ResultAndState { result, state } = match evm.transact() { Ok(res) => res, @@ -279,10 +280,9 @@ where } } }; - // drop evm so db is released. - drop(evm); + // commit changes - db.commit(state); + evm.db_mut().commit(state); // add to the total blob gas used if the transaction successfully executed if let Some(blob_tx) = tx.transaction.as_eip4844() { @@ -321,6 +321,9 @@ where executed_txs.push(tx.into_signed()); } + // Release db + drop(evm); + // check if we have a better block if !is_better_payload(best_payload.as_ref(), total_fees) { // can skip building the block From 581cef330358e3ffbeaadd433bc8c0499f824380 Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 7 Nov 2024 18:53:11 +0800 Subject: [PATCH 352/970] feat(rpc/admin): return deposit_contract_address for the admin_NodeInfo RPC (#12362) Signed-off-by: jsvisa --- crates/rpc/rpc/src/admin.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 311719a04ed..0358aa3a8d4 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -115,6 +115,7 @@ where .get_final_paris_total_difficulty() .is_some(), terminal_total_difficulty: self.chain_spec.fork(EthereumHardfork::Paris).ttd(), + deposit_contract_address: self.chain_spec.deposit_contract().map(|dc| dc.address), ..self.chain_spec.genesis().config.clone() }; From f0a1f919ff4275adacb377af079fe221e8ddf330 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 7 Nov 2024 04:58:59 -0600 Subject: [PATCH 353/970] New panel for 
new_payload_forkchoice_updated_time_diff metric (#12363) --- etc/grafana/dashboards/overview.json | 74 ++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index a19d3be8cf6..39ccdffe34f 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -5162,6 +5162,80 @@ "title": "Pipeline runs", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Latency histogram for the engine_newPayload to Forkchoice Update", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "mode": "none" + } + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 188 + }, + "id": 213, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_forkchoice_updated_time_diff{instance=~\"$instance\"}", + "legendFormat": "new_payload_forkchoice_updated", + "range": true, + "refId": "A" + } + ], + "title": "Engine API newPayload Forkchoice Update Latency", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", From 581a2f1d477afe9a525ba2279b0af78410bf87bf Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 7 Nov 2024 15:05:55 +0400 
Subject: [PATCH 354/970] refactor(rpc): unify system caller invocations (#12360) --- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 66 ++++++---- crates/rpc/rpc/src/debug.rs | 131 ++++++-------------- 2 files changed, 75 insertions(+), 122 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index fa70b2df2ef..29bde519960 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -1,6 +1,6 @@ //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods. -use std::sync::Arc; +use std::{fmt::Display, sync::Arc}; use crate::{FromEvmError, RpcNodeCore}; use alloy_primitives::B256; @@ -16,7 +16,9 @@ use reth_rpc_eth_types::{ }; use revm::{db::CacheDB, Database, DatabaseCommit, GetInspector, Inspector}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; -use revm_primitives::{EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState}; +use revm_primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState, +}; use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; @@ -187,26 +189,13 @@ pub trait Trace: LoadState> { // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in let parent_block = block.parent_hash; - let parent_beacon_block_root = block.parent_beacon_block_root; let this = self.clone(); self.spawn_with_state_at_block(parent_block.into(), move |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); let block_txs = block.transactions_with_sender(); - // apply relevant system calls - SystemCaller::new(this.evm_config().clone(), this.provider().chain_spec()) - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - parent_beacon_block_root, - ) - .map_err(|_| { - EthApiError::EvmCustom( - "failed to apply 4788 beacon root system 
call".to_string(), - ) - })?; + this.apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; // replay all transactions prior to the targeted transaction this.replay_transactions_until( @@ -333,17 +322,7 @@ pub trait Trace: LoadState> { let mut db = CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); - // apply relevant system calls - SystemCaller::new(this.evm_config().clone(), this.provider().chain_spec()) - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - block.header().parent_beacon_block_root, - ) - .map_err(|_| { - EthApiError::EvmCustom("failed to apply 4788 system call".to_string()) - })?; + this.apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; // prepare transactions, we do everything upfront to reduce time spent with open // state @@ -470,4 +449,37 @@ pub trait Trace: LoadState> { { self.trace_block_until_with_inspector(block_id, block, None, insp_setup, f) } + + /// Applies chain-specific state transitions required before executing a block. + /// + /// Note: This should only be called when tracing an entire block vs individual transactions. + /// When tracing transaction on top of an already committed block state, those transitions are + /// already applied. 
+ fn apply_pre_execution_changes + DatabaseCommit>( + &self, + block: &SealedBlockWithSenders, + db: &mut DB, + cfg: &CfgEnvWithHandlerCfg, + block_env: &BlockEnv, + ) -> Result<(), Self::Error> { + let mut system_caller = + SystemCaller::new(self.evm_config().clone(), self.provider().chain_spec()); + // apply relevant system calls + system_caller + .pre_block_beacon_root_contract_call( + db, + cfg, + block_env, + block.header.parent_beacon_block_root, + ) + .map_err(|_| EthApiError::EvmCustom("failed to apply 4788 system call".to_string()))?; + + system_caller + .pre_block_blockhashes_contract_call(db, cfg, block_env, block.header.parent_hash) + .map_err(|_| { + EthApiError::EvmCustom("failed to apply blockhashes system call".to_string()) + })?; + + Ok(()) + } } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 7e4c8fb8230..5b7e161691c 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -16,10 +16,9 @@ use jsonrpsee::core::RpcResult; use reth_chainspec::EthereumHardforks; use reth_evm::{ execute::{BlockExecutorProvider, Executor}, - system_calls::SystemCaller, ConfigureEvmEnv, }; -use reth_primitives::{Block, TransactionSignedEcRecovered}; +use reth_primitives::{Block, SealedBlockWithSenders}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, TransactionVariant, @@ -93,54 +92,30 @@ where /// Trace the entire block asynchronously async fn trace_block( &self, - at: BlockId, - transactions: Vec, + block: Arc, cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, opts: GethDebugTracingOptions, - parent_beacon_block_root: Option, ) -> Result, Eth::Error> { - if transactions.is_empty() { - // nothing to trace - return Ok(Vec::new()) - } - // replay all transactions of the block let this = self.clone(); self.eth_api() - .spawn_with_state_at_block(at, move |state| { - let block_hash = at.as_block_hash(); - let mut results = 
Vec::with_capacity(transactions.len()); + .spawn_with_state_at_block(block.parent_hash.into(), move |state| { + let mut results = Vec::with_capacity(block.body.transactions.len()); let mut db = CacheDB::new(StateProviderDatabase::new(state)); - // apply relevant system calls - SystemCaller::new( - this.eth_api().evm_config().clone(), - this.eth_api().provider().chain_spec(), - ) - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - parent_beacon_block_root, - ) - .map_err(|_| { - EthApiError::EvmCustom( - "failed to apply 4788 beacon root system call".to_string(), - ) - })?; + this.eth_api().apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; - let mut transactions = transactions.into_iter().enumerate().peekable(); + let mut transactions = block.transactions_with_sender().enumerate().peekable(); let mut inspector = None; - while let Some((index, tx)) = transactions.next() { + while let Some((index, (signer, tx))) = transactions.next() { let tx_hash = tx.hash; let env = EnvWithHandlerCfg { env: Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - RpcNodeCore::evm_config(this.eth_api()) - .tx_env(tx.as_signed(), tx.signer()), + RpcNodeCore::evm_config(this.eth_api()).tx_env(tx, *signer), ), handler_cfg: cfg.handler_cfg, }; @@ -149,7 +124,7 @@ where env, &mut db, Some(TransactionContext { - block_hash, + block_hash: Some(block.hash()), tx_hash: Some(tx_hash), tx_index: Some(index), }), @@ -186,45 +161,38 @@ where .map_err(Eth::Error::from_eth_err)?; let (cfg, block_env) = self.eth_api().evm_env_for_raw_block(&block.header).await?; - // we trace on top the block's parent block - let parent = block.parent_hash; - - // we need the beacon block root for a system call - let parent_beacon_block_root = block.parent_beacon_block_root; // Depending on EIP-2 we need to recover the transactions differently - let transactions = - if self.inner.provider.chain_spec().is_homestead_active_at_block(block.number) { - block - .body - .transactions - 
.into_iter() - .map(|tx| { - tx.into_ecrecovered() - .ok_or(EthApiError::InvalidTransactionSignature) - .map_err(Eth::Error::from_eth_err) - }) - .collect::, Eth::Error>>()? - } else { - block - .body - .transactions - .into_iter() - .map(|tx| { - tx.into_ecrecovered_unchecked() - .ok_or(EthApiError::InvalidTransactionSignature) - .map_err(Eth::Error::from_eth_err) - }) - .collect::, Eth::Error>>()? - }; + let senders = if self.inner.provider.chain_spec().is_homestead_active_at_block(block.number) + { + block + .body + .transactions + .iter() + .map(|tx| { + tx.recover_signer() + .ok_or(EthApiError::InvalidTransactionSignature) + .map_err(Eth::Error::from_eth_err) + }) + .collect::, Eth::Error>>()? + } else { + block + .body + .transactions + .iter() + .map(|tx| { + tx.recover_signer_unchecked() + .ok_or(EthApiError::InvalidTransactionSignature) + .map_err(Eth::Error::from_eth_err) + }) + .collect::, Eth::Error>>()? + }; self.trace_block( - parent.into(), - transactions, + Arc::new(block.with_senders_unchecked(senders).seal_slow()), cfg, block_env, opts, - parent_beacon_block_root, ) .await } @@ -248,19 +216,8 @@ where )?; let block = block.ok_or(EthApiError::HeaderNotFound(block_id))?; - // we need to get the state of the parent block because we're replaying this block on top of - // its parent block's state - let state_at = block.parent_hash; - self.trace_block( - state_at.into(), - (*block).clone().into_transactions_ecrecovered().collect(), - cfg, - block_env, - opts, - block.parent_beacon_block_root, - ) - .await + self.trace_block(block, cfg, block_env, opts).await } /// Trace the transaction according to the provided options. 
@@ -281,7 +238,6 @@ where // block the transaction is included in let state_at: BlockId = block.parent_hash.into(); let block_hash = block.hash(); - let parent_beacon_block_root = block.parent_beacon_block_root; let this = self.clone(); self.eth_api() @@ -293,22 +249,7 @@ where let mut db = CacheDB::new(StateProviderDatabase::new(state)); - // apply relevant system calls - SystemCaller::new( - this.eth_api().evm_config().clone(), - this.eth_api().provider().chain_spec(), - ) - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - parent_beacon_block_root, - ) - .map_err(|_| { - EthApiError::EvmCustom( - "failed to apply 4788 beacon root system call".to_string(), - ) - })?; + this.eth_api().apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; // replay all transactions prior to the targeted transaction let index = this.eth_api().replay_transactions_until( From eab1a725771dc16e0facccdc4046c91783c66d57 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 7 Nov 2024 19:05:28 +0700 Subject: [PATCH 355/970] perf(`OpPayloadBuilderCtx`): reuse `Evm` between txs (#12369) --- crates/optimism/payload/src/builder.rs | 46 ++++++++++++++------------ 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 8c1b8faa56c..dc6084f4881 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -29,7 +29,7 @@ use reth_transaction_pool::{ use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, - primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, + primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState, TxEnv}, Database, DatabaseCommit, }; use tracing::{debug, trace, warn}; @@ -636,6 +636,13 @@ where { let mut info = ExecutionInfo::with_capacity(self.attributes().transactions.len()); + let env 
= EnvWithHandlerCfg::new_with_cfg_env( + self.initialized_cfg.clone(), + self.initialized_block_env.clone(), + TxEnv::default(), + ); + let mut evm = self.evm_config.evm_with_env(&mut *db, env); + for sequencer_tx in &self.attributes().transactions { // A sequencer's block should never contain blob transactions. if sequencer_tx.value().is_eip4844() { @@ -660,7 +667,8 @@ where // nonces, so we don't need to touch the DB for those. let depositor = (self.is_regolith_active() && sequencer_tx.is_deposit()) .then(|| { - db.load_cache_account(sequencer_tx.signer()) + evm.db_mut() + .load_cache_account(sequencer_tx.signer()) .map(|acc| acc.account_info().unwrap_or_default()) }) .transpose() @@ -670,13 +678,7 @@ where )) })?; - let env = EnvWithHandlerCfg::new_with_cfg_env( - self.initialized_cfg.clone(), - self.initialized_block_env.clone(), - self.evm_config.tx_env(sequencer_tx.as_signed(), sequencer_tx.signer()), - ); - - let mut evm = self.evm_config.evm_with_env(&mut *db, env); + *evm.tx_mut() = self.evm_config.tx_env(sequencer_tx.as_signed(), sequencer_tx.signer()); let ResultAndState { result, state } = match evm.transact() { Ok(res) => res, @@ -694,10 +696,8 @@ where } }; - // to release the db reference drop evm. 
- drop(evm); // commit changes - db.commit(state); + evm.db_mut().commit(state); let gas_used = result.gas_used(); @@ -738,6 +738,14 @@ where { let block_gas_limit = self.block_gas_limit(); let base_fee = self.base_fee(); + + let env = EnvWithHandlerCfg::new_with_cfg_env( + self.initialized_cfg.clone(), + self.initialized_block_env.clone(), + TxEnv::default(), + ); + let mut evm = self.evm_config.evm_with_env(&mut *db, env); + while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this transaction if info.cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { @@ -761,14 +769,9 @@ where // convert tx to a signed transaction let tx = pool_tx.to_recovered_transaction(); - let env = EnvWithHandlerCfg::new_with_cfg_env( - self.initialized_cfg.clone(), - self.initialized_block_env.clone(), - self.evm_config.tx_env(tx.as_signed(), tx.signer()), - ); - // Configure the environment for the block. - let mut evm = self.evm_config.evm_with_env(&mut *db, env); + // Configure the environment for the tx. + *evm.tx_mut() = self.evm_config.tx_env(tx.as_signed(), tx.signer()); let ResultAndState { result, state } = match evm.transact() { Ok(res) => res, @@ -794,10 +797,9 @@ where } } }; - // drop evm so db is released. 
- drop(evm); + // commit changes - db.commit(state); + evm.db_mut().commit(state); let gas_used = result.gas_used(); From cf72b6f38d03a6f8bae5360329a9ceb3d8474fb5 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 7 Nov 2024 21:06:53 +0900 Subject: [PATCH 356/970] chore: move helper methods from `DatabaseProvider` to `DBProvider` as defaults (#12367) --- crates/blockchain-tree/src/blockchain_tree.rs | 4 +- crates/blockchain-tree/src/chain.rs | 3 +- crates/cli/commands/src/db/checksum.rs | 2 +- crates/storage/db-common/src/db_tool/mod.rs | 2 +- .../provider/src/providers/database/mod.rs | 3 +- .../src/providers/database/provider.rs | 166 +++--------------- .../storage/provider/src/test_utils/blocks.rs | 2 +- .../storage-api/src/database_provider.rs | 119 ++++++++++++- 8 files changed, 153 insertions(+), 148 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 65e55d7d9f8..64705e5ccb5 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -23,8 +23,8 @@ use reth_primitives::{ use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - ChainSpecProvider, ChainSplit, ChainSplitTarget, DisplayBlocksChain, HeaderProvider, - ProviderError, StaticFileProviderFactory, + ChainSpecProvider, ChainSplit, ChainSplitTarget, DBProvider, DisplayBlocksChain, + HeaderProvider, ProviderError, StaticFileProviderFactory, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 393e525d5ae..09ba5c3f851 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -18,7 +18,8 @@ use 
reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView, ProviderNodeTypes}, - FullExecutionDataProvider, ProviderError, StateRootProvider, TryIntoHistoricalStateProvider, + DBProvider, FullExecutionDataProvider, ProviderError, StateRootProvider, + TryIntoHistoricalStateProvider, }; use reth_revm::database::StateProviderDatabase; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; diff --git a/crates/cli/commands/src/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs index 60ec09c9606..9aa48e0e865 100644 --- a/crates/cli/commands/src/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -6,7 +6,7 @@ use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables}; use reth_db_api::{cursor::DbCursorRO, table::Table, transaction::DbTx}; use reth_db_common::DbTool; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; -use reth_provider::providers::ProviderNodeTypes; +use reth_provider::{providers::ProviderNodeTypes, DBProvider}; use std::{ hash::{BuildHasher, Hasher}, sync::Arc, diff --git a/crates/storage/db-common/src/db_tool/mod.rs b/crates/storage/db-common/src/db_tool/mod.rs index 67a5dd62762..3420f2089fd 100644 --- a/crates/storage/db-common/src/db_tool/mod.rs +++ b/crates/storage/db-common/src/db_tool/mod.rs @@ -12,7 +12,7 @@ use reth_db_api::{ }; use reth_fs_util as fs; use reth_node_types::NodeTypesWithDB; -use reth_provider::{providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory}; +use reth_provider::{providers::ProviderNodeTypes, ChainSpecProvider, DBProvider, ProviderFactory}; use std::{path::Path, rc::Rc, sync::Arc}; use tracing::info; diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index bd0466ff973..38918f52c23 100644 --- 
a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -621,7 +621,8 @@ mod tests { use crate::{ providers::{StaticFileProvider, StaticFileWriter}, test_utils::{blocks::TEST_BLOCK, create_test_provider_factory, MockNodeTypesWithDB}, - BlockHashReader, BlockNumReader, BlockWriter, HeaderSyncGapProvider, TransactionsProvider, + BlockHashReader, BlockNumReader, BlockWriter, DBProvider, HeaderSyncGapProvider, + TransactionsProvider, }; use alloy_primitives::{TxNumber, B256, U256}; use assert_matches::assert_matches; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 266f98aae37..c76f77572ab 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -24,7 +24,6 @@ use reth_db::{ cursor::DbDupCursorRW, tables, BlockNumberList, PlainAccountState, PlainStorageState, }; use reth_db_api::{ - common::KeyValue, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, database::Database, models::{ @@ -63,7 +62,7 @@ use std::{ cmp::Ordering, collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, fmt::Debug, - ops::{Bound, Deref, DerefMut, Range, RangeBounds, RangeInclusive}, + ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::{mpsc, Arc}, time::{Duration, Instant}, }; @@ -145,7 +144,7 @@ impl DatabaseProvider { } } -impl DatabaseProvider { +impl DatabaseProvider { /// State provider for latest block pub fn latest<'a>(&'a self) -> ProviderResult> { trace!(target: "providers::db", "Returning latest state provider"); @@ -364,7 +363,7 @@ where Ok(Vec::new()) } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. 
pub const fn new( tx: TX, @@ -395,75 +394,6 @@ impl DatabaseProvider { &self.chain_spec } - /// Disables long-lived read transaction safety guarantees for leaks prevention and - /// observability improvements. - /// - /// CAUTION: In most of the cases, you want the safety guarantees for long read transactions - /// enabled. Use this only if you're sure that no write transaction is open in parallel, meaning - /// that Reth as a node is offline and not progressing. - pub fn disable_long_read_transaction_safety(mut self) -> Self { - self.tx.disable_long_read_transaction_safety(); - self - } - - /// Return full table as Vec - pub fn table(&self) -> Result>, DatabaseError> - where - T::Key: Default + Ord, - { - self.tx - .cursor_read::()? - .walk(Some(T::Key::default()))? - .collect::, DatabaseError>>() - } - - /// Return a list of entries from the table, based on the given range. - #[inline] - pub fn get( - &self, - range: impl RangeBounds, - ) -> Result>, DatabaseError> { - self.tx.cursor_read::()?.walk_range(range)?.collect::, _>>() - } - - /// Iterates over read only values in the given table and collects them into a vector. - /// - /// Early-returns if the range is empty, without opening a cursor transaction. - fn cursor_read_collect>( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - let capacity = match range_size_hint(&range) { - Some(0) | None => return Ok(Vec::new()), - Some(capacity) => capacity, - }; - let mut cursor = self.tx.cursor_read::()?; - self.cursor_collect_with_capacity(&mut cursor, range, capacity) - } - - /// Iterates over read only values in the given table and collects them into a vector. 
- fn cursor_collect>( - &self, - cursor: &mut impl DbCursorRO, - range: impl RangeBounds, - ) -> ProviderResult> { - let capacity = range_size_hint(&range).unwrap_or(0); - self.cursor_collect_with_capacity(cursor, range, capacity) - } - - fn cursor_collect_with_capacity>( - &self, - cursor: &mut impl DbCursorRO, - range: impl RangeBounds, - capacity: usize, - ) -> ProviderResult> { - let mut items = Vec::with_capacity(capacity); - for entry in cursor.walk_range(range)? { - items.push(entry?.1); - } - Ok(items) - } - fn transactions_by_tx_range_with_cursor( &self, range: impl RangeBounds, @@ -852,44 +782,12 @@ impl DatabaseProvider { } } -impl DatabaseProvider { +impl DatabaseProvider { /// Commit database transaction. pub fn commit(self) -> ProviderResult { Ok(self.tx.commit()?) } - /// Remove list of entries from the table. Returns the number of entries removed. - #[inline] - pub fn remove( - &self, - range: impl RangeBounds, - ) -> Result { - let mut entries = 0; - let mut cursor_write = self.tx.cursor_write::()?; - let mut walker = cursor_write.walk_range(range)?; - while walker.next().transpose()?.is_some() { - walker.delete_current()?; - entries += 1; - } - Ok(entries) - } - - /// Return a list of entries from the table, and remove them, based on the given range. - #[inline] - pub fn take( - &self, - range: impl RangeBounds, - ) -> Result>, DatabaseError> { - let mut cursor_write = self.tx.cursor_write::()?; - let mut walker = cursor_write.walk_range(range)?; - let mut items = Vec::new(); - while let Some(i) = walker.next().transpose()? { - walker.delete_current()?; - items.push(i) - } - Ok(items) - } - /// Remove requested block transactions, without returning them. 
/// /// This will remove block data for the given range from the following tables: @@ -1299,7 +1197,7 @@ impl ChangeSetReader for DatabaseProvider { } } -impl HeaderSyncGapProvider for DatabaseProvider { +impl HeaderSyncGapProvider for DatabaseProvider { fn sync_gap( &self, tip: watch::Receiver, @@ -1343,7 +1241,7 @@ impl HeaderSyncGapProvider for DatabaseProvider { } } -impl> HeaderProvider +impl> HeaderProvider for DatabaseProvider { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { @@ -1443,7 +1341,7 @@ impl> HeaderProvider } } -impl BlockHashReader for DatabaseProvider { +impl BlockHashReader for DatabaseProvider { fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, @@ -1470,7 +1368,7 @@ impl BlockHashReader for DatabaseProvider { } } -impl BlockNumReader for DatabaseProvider { +impl BlockNumReader for DatabaseProvider { fn chain_info(&self) -> ProviderResult { let best_number = self.best_block_number()?; let best_hash = self.block_hash(best_number)?.unwrap_or_default(); @@ -1501,7 +1399,9 @@ impl BlockNumReader for DatabaseProvider { } } -impl> BlockReader for DatabaseProvider { +impl> BlockReader + for DatabaseProvider +{ fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) @@ -1676,7 +1576,7 @@ impl> BlockReader for Datab } } -impl> TransactionsProviderExt +impl> TransactionsProviderExt for DatabaseProvider { /// Recovers transaction hashes by walking through `Transactions` table and @@ -1746,7 +1646,7 @@ impl> TransactionsProviderE } // Calculates the hash of the given transaction -impl> TransactionsProvider +impl> TransactionsProvider for DatabaseProvider { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { @@ -1906,7 +1806,7 @@ impl> TransactionsProvider } } -impl> ReceiptProvider +impl> ReceiptProvider for DatabaseProvider { fn receipt(&self, id: TxNumber) -> 
ProviderResult> { @@ -1954,7 +1854,7 @@ impl> ReceiptProvider } } -impl> WithdrawalsProvider +impl> WithdrawalsProvider for DatabaseProvider { fn withdrawals_by_block( @@ -1984,7 +1884,7 @@ impl> WithdrawalsProvider } } -impl> EvmEnvProvider +impl> EvmEnvProvider for DatabaseProvider { fn fill_env_at( @@ -2110,7 +2010,7 @@ impl StageCheckpointWriter for DatabaseProvider StorageReader for DatabaseProvider { +impl StorageReader for DatabaseProvider { fn plain_state_storages( &self, addresses_with_keys: impl IntoIterator)>, @@ -2173,7 +2073,7 @@ impl StorageReader for DatabaseProvider { } } -impl StateChangeWriter for DatabaseProvider { +impl StateChangeWriter for DatabaseProvider { fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2550,7 +2450,7 @@ impl StateChangeWriter for DatabaseProvider TrieWriter for DatabaseProvider { +impl TrieWriter for DatabaseProvider { /// Writes trie updates. Returns the number of entries modified. fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { if trie_updates.is_empty() { @@ -2600,7 +2500,7 @@ impl TrieWriter for DatabaseProvider { } } -impl StorageTrieWriter for DatabaseProvider { +impl StorageTrieWriter for DatabaseProvider { /// Writes storage trie updates from the given storage trie map. First sorts the storage trie /// updates by the hashed address, writing in sorted order. 
fn write_storage_trie_updates( @@ -2637,7 +2537,7 @@ impl StorageTrieWriter for DatabaseProvider HashingWriter for DatabaseProvider { +impl HashingWriter for DatabaseProvider { fn unwind_account_hashing<'a>( &self, changesets: impl Iterator, @@ -2862,7 +2762,7 @@ impl HashingWriter for DatabaseProvider } } -impl HistoryWriter for DatabaseProvider { +impl HistoryWriter for DatabaseProvider { fn unwind_account_history_indices<'a>( &self, changesets: impl Iterator, @@ -2996,7 +2896,7 @@ impl HistoryWriter for DatabaseProvider } } -impl StateReader for DatabaseProvider { +impl StateReader for DatabaseProvider { fn get_state(&self, block: BlockNumber) -> ProviderResult> { self.get_state(block..=block) } @@ -3417,7 +3317,7 @@ impl + } } -impl PruneCheckpointReader for DatabaseProvider { +impl PruneCheckpointReader for DatabaseProvider { fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -3444,7 +3344,7 @@ impl PruneCheckpointWriter for DatabaseProvider StatsReader for DatabaseProvider { +impl StatsReader for DatabaseProvider { fn count_entries(&self) -> ProviderResult { let db_entries = self.tx.entries::()?; let static_file_entries = match self.static_file_provider.count_entries::() { @@ -3457,7 +3357,7 @@ impl StatsReader for DatabaseProvider { } } -impl ChainStateBlockReader for DatabaseProvider { +impl ChainStateBlockReader for DatabaseProvider { fn last_finalized_block_number(&self) -> ProviderResult> { let mut finalized_blocks = self .tx @@ -3592,17 +3492,3 @@ fn recover_block_senders( Ok(()) } - -fn range_size_hint(range: &impl RangeBounds) -> Option { - let start = match range.start_bound().cloned() { - Bound::Included(start) => start, - Bound::Excluded(start) => start.checked_add(1)?, - Bound::Unbounded => 0, - }; - let end = match range.end_bound().cloned() { - Bound::Included(end) => end.saturating_add(1), - Bound::Excluded(end) => end, - Bound::Unbounded => return None, - }; - end.checked_sub(start).map(|x| x as _) -} diff --git 
a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 8439aef1609..2c9c108139c 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -1,5 +1,5 @@ //! Dummy blocks and data for tests -use crate::{DatabaseProviderRW, ExecutionOutcome}; +use crate::{DBProvider, DatabaseProviderRW, ExecutionOutcome}; use alloy_consensus::{TxLegacy, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{ b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, Sealable, TxKind, B256, diff --git a/crates/storage/storage-api/src/database_provider.rs b/crates/storage/storage-api/src/database_provider.rs index 6a463ed01e9..20aebce88fe 100644 --- a/crates/storage/storage-api/src/database_provider.rs +++ b/crates/storage/storage-api/src/database_provider.rs @@ -1,6 +1,14 @@ -use reth_db_api::{database::Database, transaction::DbTx}; +use reth_db_api::{ + common::KeyValue, + cursor::DbCursorRO, + database::Database, + table::Table, + transaction::{DbTx, DbTxMut}, + DatabaseError, +}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderResult; +use std::ops::{Bound, RangeBounds}; /// Database provider. pub trait DBProvider: Send + Sync + Sized + 'static { @@ -34,6 +42,101 @@ pub trait DBProvider: Send + Sync + Sized + 'static { /// Returns a reference to prune modes. fn prune_modes_ref(&self) -> &PruneModes; + + /// Return full table as Vec + fn table(&self) -> Result>, DatabaseError> + where + T::Key: Default + Ord, + { + self.tx_ref() + .cursor_read::()? + .walk(Some(T::Key::default()))? + .collect::, DatabaseError>>() + } + + /// Return a list of entries from the table, based on the given range. 
+ #[inline] + fn get( + &self, + range: impl RangeBounds, + ) -> Result>, DatabaseError> { + self.tx_ref().cursor_read::()?.walk_range(range)?.collect::, _>>() + } + + /// Iterates over read only values in the given table and collects them into a vector. + /// + /// Early-returns if the range is empty, without opening a cursor transaction. + fn cursor_read_collect>( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + let capacity = match range_size_hint(&range) { + Some(0) | None => return Ok(Vec::new()), + Some(capacity) => capacity, + }; + let mut cursor = self.tx_ref().cursor_read::()?; + self.cursor_collect_with_capacity(&mut cursor, range, capacity) + } + + /// Iterates over read only values in the given table and collects them into a vector. + fn cursor_collect>( + &self, + cursor: &mut impl DbCursorRO, + range: impl RangeBounds, + ) -> ProviderResult> { + let capacity = range_size_hint(&range).unwrap_or(0); + self.cursor_collect_with_capacity(cursor, range, capacity) + } + + /// Iterates over read only values in the given table and collects them into a vector with + /// capacity. + fn cursor_collect_with_capacity>( + &self, + cursor: &mut impl DbCursorRO, + range: impl RangeBounds, + capacity: usize, + ) -> ProviderResult> { + let mut items = Vec::with_capacity(capacity); + for entry in cursor.walk_range(range)? { + items.push(entry?.1); + } + Ok(items) + } + + /// Remove list of entries from the table. Returns the number of entries removed. + #[inline] + fn remove(&self, range: impl RangeBounds) -> Result + where + Self::Tx: DbTxMut, + { + let mut entries = 0; + let mut cursor_write = self.tx_ref().cursor_write::()?; + let mut walker = cursor_write.walk_range(range)?; + while walker.next().transpose()?.is_some() { + walker.delete_current()?; + entries += 1; + } + Ok(entries) + } + + /// Return a list of entries from the table, and remove them, based on the given range. 
+ #[inline] + fn take( + &self, + range: impl RangeBounds, + ) -> Result>, DatabaseError> + where + Self::Tx: DbTxMut, + { + let mut cursor_write = self.tx_ref().cursor_write::()?; + let mut walker = cursor_write.walk_range(range)?; + let mut items = Vec::new(); + while let Some(i) = walker.next().transpose()? { + walker.delete_current()?; + items.push(i) + } + Ok(items) + } } /// Database provider factory. @@ -54,3 +157,17 @@ pub trait DatabaseProviderFactory: Send + Sync { /// Create new read-write database provider. fn database_provider_rw(&self) -> ProviderResult; } + +fn range_size_hint(range: &impl RangeBounds) -> Option { + let start = match range.start_bound().cloned() { + Bound::Included(start) => start, + Bound::Excluded(start) => start.checked_add(1)?, + Bound::Unbounded => 0, + }; + let end = match range.end_bound().cloned() { + Bound::Included(end) => end.saturating_add(1), + Bound::Excluded(end) => end, + Bound::Unbounded => return None, + }; + end.checked_sub(start).map(|x| x as _) +} From 473e172ac4cd9bc160708c6fb1bacf44a2eab355 Mon Sep 17 00:00:00 2001 From: wheval Date: Thu, 7 Nov 2024 13:37:40 +0100 Subject: [PATCH 357/970] chore: change "Reporting a Vulnerability" to "Report a Vulnerability" (#12372) --- SECURITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index bea27ad1140..7521d62e959 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,5 +1,5 @@ # Security Policy -## Reporting a Vulnerability +## Report a Vulnerability Contact [security@ithaca.xyz](mailto:security@ithaca.xyz). 
From 764371d246f2e06133a183ee79167b25c18991e4 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:34:24 +0100 Subject: [PATCH 358/970] test(eth-engine): add unit tests for `payload_id` method (#12295) --- .../ethereum/engine-primitives/src/payload.rs | 99 +++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 21c544f16ba..3fe4e76e63f 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -304,10 +304,109 @@ pub(crate) fn payload_id(parent: &B256, attributes: &PayloadAttributes) -> Paylo #[cfg(test)] mod tests { use super::*; + use alloy_eips::eip4895::Withdrawal; + use alloy_primitives::B64; + use std::str::FromStr; #[test] fn attributes_serde() { let attributes = r#"{"timestamp":"0x1235","prevRandao":"0xf343b00e02dc34ec0124241f74f32191be28fb370bb48060f5fa4df99bda774c","suggestedFeeRecipient":"0x0000000000000000000000000000000000000000","withdrawals":null,"parentBeaconBlockRoot":null}"#; let _attributes: PayloadAttributes = serde_json::from_str(attributes).unwrap(); } + + #[test] + fn test_payload_id_basic() { + // Create a parent block and payload attributes + let parent = + B256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a") + .unwrap(); + let attributes = PayloadAttributes { + timestamp: 0x5, + prev_randao: B256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + suggested_fee_recipient: Address::from_str( + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + ) + .unwrap(), + withdrawals: None, + parent_beacon_block_root: None, + }; + + // Verify that the generated payload ID matches the expected value + assert_eq!( + payload_id(&parent, &attributes), + PayloadId(B64::from_str("0xa247243752eb10b4").unwrap()) + ); + } + + #[test] + fn 
test_payload_id_with_withdrawals() { + // Set up the parent and attributes with withdrawals + let parent = + B256::from_str("0x9876543210abcdef9876543210abcdef9876543210abcdef9876543210abcdef") + .unwrap(); + let attributes = PayloadAttributes { + timestamp: 1622553200, + prev_randao: B256::from_slice(&[1; 32]), + suggested_fee_recipient: Address::from_str( + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b", + ) + .unwrap(), + withdrawals: Some(vec![ + Withdrawal { + index: 1, + validator_index: 123, + address: Address::from([0xAA; 20]), + amount: 10, + }, + Withdrawal { + index: 2, + validator_index: 456, + address: Address::from([0xBB; 20]), + amount: 20, + }, + ]), + parent_beacon_block_root: None, + }; + + // Verify that the generated payload ID matches the expected value + assert_eq!( + payload_id(&parent, &attributes), + PayloadId(B64::from_str("0xedddc2f84ba59865").unwrap()) + ); + } + + #[test] + fn test_payload_id_with_parent_beacon_block_root() { + // Set up the parent and attributes with a parent beacon block root + let parent = + B256::from_str("0x9876543210abcdef9876543210abcdef9876543210abcdef9876543210abcdef") + .unwrap(); + let attributes = PayloadAttributes { + timestamp: 1622553200, + prev_randao: B256::from_str( + "0x123456789abcdef123456789abcdef123456789abcdef123456789abcdef1234", + ) + .unwrap(), + suggested_fee_recipient: Address::from_str( + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + ) + .unwrap(), + withdrawals: None, + parent_beacon_block_root: Some( + B256::from_str( + "0x2222222222222222222222222222222222222222222222222222222222222222", + ) + .unwrap(), + ), + }; + + // Verify that the generated payload ID matches the expected value + assert_eq!( + payload_id(&parent, &attributes), + PayloadId(B64::from_str("0x0fc49cd532094cce").unwrap()) + ); + } } From 9596b1c08b2c37469843dd90dfad8717c60e9582 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Nov 2024 15:53:26 +0100 Subject: [PATCH 359/970] chore: rm reth-provider dep (#12376) 
--- Cargo.lock | 1 - crates/payload/builder/Cargo.toml | 6 +----- crates/payload/builder/src/service.rs | 2 +- crates/payload/builder/src/test_utils.rs | 3 +-- crates/payload/builder/src/traits.rs | 2 +- 5 files changed, 4 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cdc31489b34..7ce5daf705f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8405,7 +8405,6 @@ dependencies = [ "reth-metrics", "reth-payload-primitives", "reth-primitives", - "reth-provider", "revm", "tokio", "tokio-stream", diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 08399b6f9c6..7a536cdbcfa 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -14,10 +14,9 @@ workspace = true [dependencies] # reth reth-primitives = { workspace = true, optional = true } -reth-provider.workspace = true +reth-chain-state.workspace = true reth-payload-primitives.workspace = true reth-ethereum-engine-primitives.workspace = true -reth-chain-state = { workspace = true, optional = true } # alloy alloy-primitives = { workspace = true, optional = true } @@ -38,16 +37,13 @@ tracing.workspace = true [dev-dependencies] reth-primitives.workspace = true -reth-chain-state.workspace = true alloy-primitives.workspace = true revm.workspace = true [features] test-utils = [ "alloy-primitives", - "reth-chain-state", "reth-chain-state/test-utils", "reth-primitives/test-utils", - "reth-provider/test-utils", "revm/test-utils" ] diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 853c69e90d8..43beaf82c38 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -9,11 +9,11 @@ use crate::{ }; use alloy_rpc_types::engine::PayloadId; use futures_util::{future::FutureExt, Stream, StreamExt}; +use reth_chain_state::CanonStateNotification; use reth_payload_primitives::{ BuiltPayload, Events, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, 
PayloadEvents, PayloadKind, PayloadTypes, }; -use reth_provider::CanonStateNotification; use std::{ fmt, future::Future, diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 746853b74f0..780df5c8463 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -6,10 +6,9 @@ use crate::{ }; use alloy_primitives::U256; -use reth_chain_state::ExecutedBlock; +use reth_chain_state::{CanonStateNotification, ExecutedBlock}; use reth_payload_primitives::{PayloadBuilderError, PayloadKind, PayloadTypes}; use reth_primitives::Block; -use reth_provider::CanonStateNotification; use std::{ future::Future, pin::Pin, diff --git a/crates/payload/builder/src/traits.rs b/crates/payload/builder/src/traits.rs index 62dadeb45d7..ba8486b6907 100644 --- a/crates/payload/builder/src/traits.rs +++ b/crates/payload/builder/src/traits.rs @@ -1,9 +1,9 @@ //! Trait abstractions used by the payload crate. +use reth_chain_state::CanonStateNotification; use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; -use reth_provider::CanonStateNotification; use std::future::Future; /// A type that can build a payload. 
From 37e1f77047b6790f19de500e32e5908a1ca73ac3 Mon Sep 17 00:00:00 2001 From: greg <82421016+greged93@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:58:09 +0100 Subject: [PATCH 360/970] chore: remove unused deconstruction (#12377) Signed-off-by: Gregory Edison --- crates/stages/stages/src/stages/execution.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 47cd9d0445a..88d5f830378 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -8,7 +8,7 @@ use reth_evm::{ execute::{BatchExecutor, BlockExecutorProvider}, metrics::ExecutorMetrics, }; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; use reth_primitives::{Header, SealedHeader, StaticFileSegment}; use reth_primitives_traits::format_gas_throughput; @@ -325,8 +325,7 @@ where // prepare execution output for writing let time = Instant::now(); - let ExecutionOutcome { bundle, receipts, requests, first_block } = executor.finalize(); - let state = ExecutionOutcome::new(bundle, receipts, first_block, requests); + let state = executor.finalize(); let write_preparation_duration = time.elapsed(); // log the gas per second for the range we just executed From 190a1d8bb4df777cf19a90ce072e9c4435a6bcfb Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 8 Nov 2024 00:21:53 +0900 Subject: [PATCH 361/970] feat(trie): reveal storage slots and calculate storage root in sparse trie (#12145) --- crates/trie/sparse/src/state.rs | 129 ++++++++++++++++++++++++-------- 1 file changed, 96 insertions(+), 33 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index cfb17ef36ff..126e05e8582 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,3 +1,5 @@ +use 
std::iter::Peekable; + use crate::{SparseStateTrieError, SparseStateTrieResult, SparseTrie}; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -12,10 +14,8 @@ pub struct SparseStateTrie { /// Sparse account trie. pub(crate) state: SparseTrie, /// Sparse storage tries. - #[allow(dead_code)] pub(crate) storages: HashMap, /// Collection of revealed account and storage keys. - #[allow(dead_code)] pub(crate) revealed: HashMap>, } @@ -35,7 +35,7 @@ impl SparseStateTrie { self.revealed.get(account).map_or(false, |slots| slots.contains(slot)) } - /// Reveal unknown trie paths from provided leaf path and its proof. + /// Reveal unknown trie paths from provided leaf path and its proof for the account. /// NOTE: This method does not extensively validate the proof. pub fn reveal_account( &mut self, @@ -44,22 +44,12 @@ impl SparseStateTrie { ) -> SparseStateTrieResult<()> { let mut proof = proof.into_iter().peekable(); - // reveal root and initialize the trie if not already - let Some((path, node)) = proof.next() else { return Ok(()) }; - if !path.is_empty() { - return Err(SparseStateTrieError::InvalidRootNode { path, node }) - } - - // Decode root node and perform sanity check. - let root_node = TrieNode::decode(&mut &node[..])?; - if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() { - return Err(SparseStateTrieError::InvalidRootNode { path, node }) - } + let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. let trie = self.state.reveal_root(root_node)?; - // add the remaining proof nodes + // Reveal the remaining proof nodes. for (path, bytes) in proof { let node = TrieNode::decode(&mut &bytes[..])?; trie.reveal_node(path, node)?; @@ -71,6 +61,55 @@ impl SparseStateTrie { Ok(()) } + /// Reveal unknown trie paths from provided leaf path and its proof for the storage slot. + /// NOTE: This method does not extensively validate the proof. 
+ pub fn reveal_storage_slot( + &mut self, + account: B256, + slot: B256, + proof: impl IntoIterator, + ) -> SparseStateTrieResult<()> { + let mut proof = proof.into_iter().peekable(); + + let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; + + // Reveal root node if it wasn't already. + let trie = self.storages.entry(account).or_default().reveal_root(root_node)?; + + // Reveal the remaining proof nodes. + for (path, bytes) in proof { + let node = TrieNode::decode(&mut &bytes[..])?; + trie.reveal_node(path, node)?; + } + + // Mark leaf path as revealed. + self.revealed.entry(account).or_default().insert(slot); + + Ok(()) + } + + /// Validates the root node of the proof and returns it if it exists and is valid. + fn validate_proof>( + &self, + proof: &mut Peekable, + ) -> SparseStateTrieResult> { + let mut proof = proof.into_iter().peekable(); + + // Validate root node. + let Some((path, node)) = proof.next() else { return Ok(None) }; + if !path.is_empty() { + return Err(SparseStateTrieError::InvalidRootNode { path, node }) + } + + // Decode root node and perform sanity check. + let root_node = TrieNode::decode(&mut &node[..])?; + if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() { + return Err(SparseStateTrieError::InvalidRootNode { path, node }) + } + + Ok(Some(root_node)) + } + /// Update the leaf node. pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseStateTrieResult<()> { self.state.update_leaf(path, value)?; @@ -81,6 +120,11 @@ impl SparseStateTrie { pub fn root(&mut self) -> Option { self.state.root() } + + /// Returns storage sparse trie root if the trie has been revealed. 
+ pub fn storage_root(&mut self, account: B256) -> Option { + self.storages.get_mut(&account).and_then(|trie| trie.root()) + } } #[cfg(test)] @@ -93,7 +137,30 @@ mod tests { use reth_trie_common::proof::ProofRetainer; #[test] - fn sparse_trie_reveal_empty() { + fn validate_proof_first_node_not_root() { + let sparse = SparseStateTrie::default(); + let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; + assert_matches!( + sparse.validate_proof(&mut proof.into_iter().peekable()), + Err(SparseStateTrieError::InvalidRootNode { .. }) + ); + } + + #[test] + fn validate_proof_invalid_proof_with_empty_root() { + let sparse = SparseStateTrie::default(); + let proof = [ + (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), + (Nibbles::from_nibbles([0x1]), Bytes::new()), + ]; + assert_matches!( + sparse.validate_proof(&mut proof.into_iter().peekable()), + Err(SparseStateTrieError::InvalidRootNode { .. }) + ); + } + + #[test] + fn reveal_account_empty() { let retainer = ProofRetainer::from_iter([Nibbles::default()]); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); hash_builder.root(); @@ -107,25 +174,21 @@ mod tests { } #[test] - fn reveal_first_node_not_root() { - let mut sparse = SparseStateTrie::default(); - let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; - assert_matches!( - sparse.reveal_account(Default::default(), proof), - Err(SparseStateTrieError::InvalidRootNode { .. 
}) - ); - } + fn reveal_storage_slot_empty() { + let retainer = ProofRetainer::from_iter([Nibbles::default()]); + let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + hash_builder.root(); + let proofs = hash_builder.take_proof_nodes(); + assert_eq!(proofs.len(), 1); - #[test] - fn reveal_invalid_proof_with_empty_root() { let mut sparse = SparseStateTrie::default(); - let proof = [ - (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), - (Nibbles::from_nibbles([0x1]), Bytes::new()), - ]; - assert_matches!( - sparse.reveal_account(Default::default(), proof), - Err(SparseStateTrieError::InvalidRootNode { .. }) + assert!(sparse.storages.is_empty()); + sparse + .reveal_storage_slot(Default::default(), Default::default(), proofs.into_inner()) + .unwrap(); + assert_eq!( + sparse.storages, + HashMap::from_iter([(Default::default(), SparseTrie::revealed_empty())]) ); } } From cf095a7536d9a21a1c16cfb9dac2654a1889f1e8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Nov 2024 16:42:37 +0100 Subject: [PATCH 362/970] chore: reorder super (#12380) --- crates/consensus/auto-seal/src/lib.rs | 3 +-- crates/etl/src/lib.rs | 3 +-- crates/net/discv5/src/config.rs | 6 ++---- crates/net/discv5/src/filter.rs | 6 ++---- crates/net/network/src/config.rs | 3 +-- crates/net/network/src/transactions/fetcher.rs | 9 +++------ crates/net/network/src/transactions/validation.rs | 1 - crates/net/peers/src/node_record.rs | 3 +-- crates/rpc/ipc/src/client/mod.rs | 3 +-- crates/transaction-pool/src/pool/txpool.rs | 5 ++--- crates/trie/trie/src/state.rs | 3 +-- 11 files changed, 15 insertions(+), 30 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index ad7e66acc0e..a2f9fa4fa03 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -438,11 +438,10 @@ impl StorageInner { #[cfg(test)] mod tests { + use super::*; use reth_chainspec::{ChainHardforks, ChainSpec, 
EthereumHardfork, ForkCondition}; use reth_primitives::Transaction; - use super::*; - #[test] fn test_block_hash() { let mut storage = StorageInner::default(); diff --git a/crates/etl/src/lib.rs b/crates/etl/src/lib.rs index d30f432f9c1..46d41d704d0 100644 --- a/crates/etl/src/lib.rs +++ b/crates/etl/src/lib.rs @@ -281,9 +281,8 @@ impl EtlFile { #[cfg(test)] mod tests { - use alloy_primitives::{TxHash, TxNumber}; - use super::*; + use alloy_primitives::{TxHash, TxNumber}; #[test] fn etl_hashes() { diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 203ef76134b..4a534afbef5 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -477,11 +477,9 @@ impl BootNode { #[cfg(test)] mod test { - use std::net::SocketAddrV4; - - use alloy_primitives::hex; - use super::*; + use alloy_primitives::hex; + use std::net::SocketAddrV4; const MULTI_ADDRESSES: &str = "/ip4/184.72.129.189/udp/30301/p2p/16Uiu2HAmSG2hdLwyQHQmG4bcJBgD64xnW63WMTLcrNq6KoZREfGb,/ip4/3.231.11.52/udp/30301/p2p/16Uiu2HAmMy4V8bi3XP7KDfSLQcLACSvTLroRRwEsTyFUKo8NCkkp,/ip4/54.198.153.150/udp/30301/p2p/16Uiu2HAmSVsb7MbRf1jg3Dvd6a3n5YNqKQwn1fqHCFgnbqCsFZKe,/ip4/3.220.145.177/udp/30301/p2p/16Uiu2HAm74pBDGdQ84XCZK27GRQbGFFwQ7RsSqsPwcGmCR3Cwn3B,/ip4/3.231.138.188/udp/30301/p2p/16Uiu2HAmMnTiJwgFtSVGV14ZNpwAvS1LUoF4pWWeNtURuV6C3zYB"; const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &[ diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index 325544de6c1..a83345a9a5e 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -89,13 +89,11 @@ impl MustNotIncludeKeys { #[cfg(test)] mod tests { + use super::*; + use crate::NetworkStackId; use alloy_rlp::Bytes; use discv5::enr::{CombinedKey, Enr}; - use crate::NetworkStackId; - - use super::*; - #[test] fn must_not_include_key_filter() { // rig test diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 
72627f5b657..96aef249d9f 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -631,14 +631,13 @@ impl NetworkMode { #[cfg(test)] mod tests { - use std::sync::Arc; - use super::*; use rand::thread_rng; use reth_chainspec::{Chain, MAINNET}; use reth_dns_discovery::tree::LinkEntry; use reth_primitives::ForkHash; use reth_provider::test_utils::NoopProvider; + use std::sync::Arc; fn builder() -> NetworkConfigBuilder { let secret_key = SecretKey::new(&mut thread_rng()); diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 3e856951552..00a9158233b 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -1343,16 +1343,13 @@ struct TxFetcherSearchDurations { #[cfg(test)] mod test { - use std::{collections::HashSet, str::FromStr}; - + use super::*; + use crate::transactions::tests::{default_cache, new_mock_session}; use alloy_primitives::{hex, B256}; use alloy_rlp::Decodable; use derive_more::IntoIterator; use reth_primitives::TransactionSigned; - - use crate::transactions::tests::{default_cache, new_mock_session}; - - use super::*; + use std::{collections::HashSet, str::FromStr}; #[derive(IntoIterator)] struct TestValidAnnouncementData(Vec<(TxHash, Option<(u8, usize)>)>); diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index 4038f23e85c..7bfe07761a2 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -336,7 +336,6 @@ impl FilterAnnouncement for EthMessageFilter { #[cfg(test)] mod test { use super::*; - use alloy_primitives::B256; use reth_eth_wire::{NewPooledTransactionHashes66, NewPooledTransactionHashes68}; use std::{collections::HashMap, str::FromStr}; diff --git a/crates/net/peers/src/node_record.rs b/crates/net/peers/src/node_record.rs index d6836d88193..ed48e242c1d 100644 --- 
a/crates/net/peers/src/node_record.rs +++ b/crates/net/peers/src/node_record.rs @@ -231,12 +231,11 @@ impl TryFrom<&Enr> for NodeRecord { #[cfg(test)] mod tests { + use super::*; use alloy_rlp::Decodable; use rand::{thread_rng, Rng, RngCore}; use std::net::Ipv6Addr; - use super::*; - #[test] fn test_mapped_ipv6() { let mut rng = thread_rng(); diff --git a/crates/rpc/ipc/src/client/mod.rs b/crates/rpc/ipc/src/client/mod.rs index 8f2fe0255c7..48f58e77a4a 100644 --- a/crates/rpc/ipc/src/client/mod.rs +++ b/crates/rpc/ipc/src/client/mod.rs @@ -136,10 +136,9 @@ pub enum IpcError { #[cfg(test)] mod tests { - use interprocess::local_socket::ListenerOptions; - use super::*; use crate::server::dummy_name; + use interprocess::local_socket::ListenerOptions; #[tokio::test] async fn test_connect() { diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 8679a4318be..1258f4a270a 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1936,15 +1936,14 @@ impl SenderInfo { #[cfg(test)] mod tests { - use alloy_primitives::address; - use reth_primitives::TxType; - use super::*; use crate::{ test_utils::{MockOrdering, MockTransaction, MockTransactionFactory, MockTransactionSet}, traits::TransactionOrigin, SubPoolLimit, }; + use alloy_primitives::address; + use reth_primitives::TxType; #[test] fn test_insert_blob() { diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 2af48dfff79..eca126744e9 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -347,6 +347,7 @@ impl HashedStorageSorted { #[cfg(test)] mod tests { + use super::*; use alloy_primitives::Bytes; use revm::{ db::{ @@ -356,8 +357,6 @@ mod tests { primitives::{AccountInfo, Bytecode}, }; - use super::*; - #[test] fn hashed_state_wiped_extension() { let hashed_address = B256::default(); From b1642f966f8932baf40d52947e56c0e0825fe994 Mon Sep 17 00:00:00 2001 From: Kunal 
Arora <55632507+aroralanuk@users.noreply.github.com> Date: Thu, 7 Nov 2024 21:03:31 +0530 Subject: [PATCH 363/970] feat(payload): add support for stack of payload builders (#11004) Co-authored-by: Matthias Seitz --- crates/payload/basic/src/lib.rs | 18 ++ crates/payload/basic/src/stack.rs | 270 ++++++++++++++++++++++++++++++ 2 files changed, 288 insertions(+) create mode 100644 crates/payload/basic/src/stack.rs diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 867125d808b..0fc63a5a149 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -42,6 +42,9 @@ use tokio::{ use tracing::{debug, trace, warn}; mod metrics; +mod stack; + +pub use stack::PayloadBuilderStack; /// The [`PayloadJobGenerator`] that creates [`BasicPayloadJob`]s. #[derive(Debug)] @@ -783,6 +786,21 @@ impl BuildOutcome { pub const fn is_cancelled(&self) -> bool { matches!(self, Self::Cancelled) } + + /// Applies a fn on the current payload. + pub(crate) fn map_payload(self, f: F) -> BuildOutcome

+ where + F: FnOnce(Payload) -> P, + { + match self { + Self::Better { payload, cached_reads } => { + BuildOutcome::Better { payload: f(payload), cached_reads } + } + Self::Aborted { fees, cached_reads } => BuildOutcome::Aborted { fees, cached_reads }, + Self::Cancelled => BuildOutcome::Cancelled, + Self::Freeze(payload) => BuildOutcome::Freeze(f(payload)), + } + } } /// The possible outcomes of a payload building attempt without reused [`CachedReads`] diff --git a/crates/payload/basic/src/stack.rs b/crates/payload/basic/src/stack.rs new file mode 100644 index 00000000000..722399ab278 --- /dev/null +++ b/crates/payload/basic/src/stack.rs @@ -0,0 +1,270 @@ +use crate::{ + BuildArguments, BuildOutcome, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, + PayloadConfig, +}; + +use alloy_primitives::{Address, B256, U256}; +use reth_payload_builder::PayloadId; +use reth_payload_primitives::BuiltPayload; +use reth_primitives::{SealedBlock, Withdrawals}; + +use alloy_eips::eip7685::Requests; +use std::{error::Error, fmt}; + +/// hand rolled Either enum to handle two builder types +#[derive(Debug, Clone)] +pub enum Either { + /// left variant + Left(L), + /// right variant + Right(R), +} + +impl fmt::Display for Either +where + L: fmt::Display, + R: fmt::Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Left(l) => write!(f, "Left: {}", l), + Self::Right(r) => write!(f, "Right: {}", r), + } + } +} + +impl Error for Either +where + L: Error + 'static, + R: Error + 'static, +{ + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + Self::Left(l) => Some(l), + Self::Right(r) => Some(r), + } + } +} + +impl PayloadBuilderAttributes for Either +where + L: PayloadBuilderAttributes, + R: PayloadBuilderAttributes, + L::Error: Error + 'static, + R::Error: Error + 'static, +{ + type RpcPayloadAttributes = Either; + type Error = Either; + + fn try_new( + parent: B256, + rpc_payload_attributes: 
Self::RpcPayloadAttributes, + version: u8, + ) -> Result { + match rpc_payload_attributes { + Either::Left(attr) => { + L::try_new(parent, attr, version).map(Either::Left).map_err(Either::Left) + } + Either::Right(attr) => { + R::try_new(parent, attr, version).map(Either::Right).map_err(Either::Right) + } + } + } + + fn payload_id(&self) -> PayloadId { + match self { + Self::Left(l) => l.payload_id(), + Self::Right(r) => r.payload_id(), + } + } + + fn parent(&self) -> B256 { + match self { + Self::Left(l) => l.parent(), + Self::Right(r) => r.parent(), + } + } + + fn timestamp(&self) -> u64 { + match self { + Self::Left(l) => l.timestamp(), + Self::Right(r) => r.timestamp(), + } + } + + fn parent_beacon_block_root(&self) -> Option { + match self { + Self::Left(l) => l.parent_beacon_block_root(), + Self::Right(r) => r.parent_beacon_block_root(), + } + } + + fn suggested_fee_recipient(&self) -> Address { + match self { + Self::Left(l) => l.suggested_fee_recipient(), + Self::Right(r) => r.suggested_fee_recipient(), + } + } + + fn prev_randao(&self) -> B256 { + match self { + Self::Left(l) => l.prev_randao(), + Self::Right(r) => r.prev_randao(), + } + } + + fn withdrawals(&self) -> &Withdrawals { + match self { + Self::Left(l) => l.withdrawals(), + Self::Right(r) => r.withdrawals(), + } + } +} + +/// this structure enables the chaining of multiple `PayloadBuilder` implementations, +/// creating a hierarchical fallback system. It's designed to be nestable, allowing +/// for complex builder arrangements like `Stack, C>` with different +#[derive(Debug)] +pub struct PayloadBuilderStack { + left: L, + right: R, +} + +impl PayloadBuilderStack { + /// Creates a new `PayloadBuilderStack` with the given left and right builders. 
+ pub const fn new(left: L, right: R) -> Self { + Self { left, right } + } +} + +impl Clone for PayloadBuilderStack +where + L: Clone, + R: Clone, +{ + fn clone(&self) -> Self { + Self::new(self.left.clone(), self.right.clone()) + } +} + +impl BuiltPayload for Either +where + L: BuiltPayload, + R: BuiltPayload, +{ + fn block(&self) -> &SealedBlock { + match self { + Self::Left(l) => l.block(), + Self::Right(r) => r.block(), + } + } + + fn fees(&self) -> U256 { + match self { + Self::Left(l) => l.fees(), + Self::Right(r) => r.fees(), + } + } + + fn requests(&self) -> Option { + match self { + Self::Left(l) => l.requests(), + Self::Right(r) => r.requests(), + } + } +} + +impl PayloadBuilder for PayloadBuilderStack +where + L: PayloadBuilder + Unpin + 'static, + R: PayloadBuilder + Unpin + 'static, + Client: Clone, + Pool: Clone, + L::Attributes: Unpin + Clone, + R::Attributes: Unpin + Clone, + L::BuiltPayload: Unpin + Clone, + R::BuiltPayload: Unpin + Clone, + <>::Attributes as PayloadBuilderAttributes>::Error: 'static, + <>::Attributes as PayloadBuilderAttributes>::Error: 'static, +{ + type Attributes = Either; + type BuiltPayload = Either; + + fn try_build( + &self, + args: BuildArguments, + ) -> Result, PayloadBuilderError> { + match args.config.attributes { + Either::Left(ref left_attr) => { + let left_args: BuildArguments = + BuildArguments { + client: args.client.clone(), + pool: args.pool.clone(), + cached_reads: args.cached_reads.clone(), + config: PayloadConfig { + parent_header: args.config.parent_header.clone(), + extra_data: args.config.extra_data.clone(), + attributes: left_attr.clone(), + }, + cancel: args.cancel.clone(), + best_payload: args.best_payload.clone().and_then(|payload| { + if let Either::Left(p) = payload { + Some(p) + } else { + None + } + }), + }; + + self.left.try_build(left_args).map(|out| out.map_payload(Either::Left)) + } + Either::Right(ref right_attr) => { + let right_args = BuildArguments { + client: args.client.clone(), + pool: 
args.pool.clone(), + cached_reads: args.cached_reads.clone(), + config: PayloadConfig { + parent_header: args.config.parent_header.clone(), + extra_data: args.config.extra_data.clone(), + attributes: right_attr.clone(), + }, + cancel: args.cancel.clone(), + best_payload: args.best_payload.clone().and_then(|payload| { + if let Either::Right(p) = payload { + Some(p) + } else { + None + } + }), + }; + + self.right.try_build(right_args).map(|out| out.map_payload(Either::Right)) + } + } + } + + fn build_empty_payload( + &self, + client: &Client, + config: PayloadConfig, + ) -> Result { + match config.attributes { + Either::Left(left_attr) => { + let left_config = PayloadConfig { + attributes: left_attr, + parent_header: config.parent_header.clone(), + extra_data: config.extra_data.clone(), + }; + self.left.build_empty_payload(client, left_config).map(Either::Left) + } + Either::Right(right_attr) => { + let right_config = PayloadConfig { + parent_header: config.parent_header.clone(), + extra_data: config.extra_data.clone(), + attributes: right_attr, + }; + self.right.build_empty_payload(client, right_config).map(Either::Right) + } + } + } +} From e911fe9ff03528aef9498a94c4ba7467fdeeb6b6 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 7 Nov 2024 10:30:54 -0600 Subject: [PATCH 364/970] renamed OptimismBlockExecutionError to OpBlockExecutionError (#12383) --- crates/optimism/evm/src/error.rs | 8 +++---- crates/optimism/evm/src/execute.rs | 8 +++---- crates/optimism/evm/src/l1.rs | 36 +++++++++++++++--------------- crates/optimism/evm/src/lib.rs | 2 +- crates/optimism/rpc/src/error.rs | 4 ++-- 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index 71f8709e1ad..db042950674 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -5,7 +5,7 @@ use reth_evm::execute::BlockExecutionError; /// Optimism Block Executor 
Errors #[derive(Debug, Clone, PartialEq, Eq, derive_more::Display)] -pub enum OptimismBlockExecutionError { +pub enum OpBlockExecutionError { /// Error when trying to parse L1 block info #[display("could not get L1 block info from L2 block: {message}")] L1BlockInfoError { @@ -23,10 +23,10 @@ pub enum OptimismBlockExecutionError { AccountLoadFailed(alloy_primitives::Address), } -impl core::error::Error for OptimismBlockExecutionError {} +impl core::error::Error for OpBlockExecutionError {} -impl From for BlockExecutionError { - fn from(err: OptimismBlockExecutionError) -> Self { +impl From for BlockExecutionError { + fn from(err: OpBlockExecutionError) -> Self { Self::other(err) } } diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 3702f13a47d..8d701cda423 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,6 +1,6 @@ //! Optimism block execution strategy. -use crate::{l1::ensure_create2_deployer, OpEvmConfig, OptimismBlockExecutionError}; +use crate::{l1::ensure_create2_deployer, OpBlockExecutionError, OpEvmConfig}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; use alloy_eips::eip7685::Requests; @@ -144,7 +144,7 @@ where // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) - .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; + .map_err(|_| OpBlockExecutionError::ForceCreate2DeployerFail)?; Ok(()) } @@ -178,7 +178,7 @@ where // An optimism block should never contain blob transactions. 
if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()) + return Err(OpBlockExecutionError::BlobTransactionRejected.into()) } // Cache the depositor account prior to the state transition for the deposit nonce. @@ -193,7 +193,7 @@ where .map(|acc| acc.account_info().unwrap_or_default()) }) .transpose() - .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; + .map_err(|_| OpBlockExecutionError::AccountLoadFailed(*sender))?; self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index e0668ab0204..cdd33510c92 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,6 +1,6 @@ //! Optimism-specific implementation and utilities for the executor -use crate::OptimismBlockExecutionError; +use crate::OpBlockExecutionError; use alloc::{string::ToString, sync::Arc}; use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256}; use reth_chainspec::ChainSpec; @@ -31,17 +31,17 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// transaction in the L2 block. /// /// Returns an error if the L1 info transaction is not found, if the block is empty. 
-pub fn extract_l1_info(body: &BlockBody) -> Result { +pub fn extract_l1_info(body: &BlockBody) -> Result { let l1_info_tx_data = body .transactions .first() - .ok_or_else(|| OptimismBlockExecutionError::L1BlockInfoError { + .ok_or_else(|| OpBlockExecutionError::L1BlockInfoError { message: "could not find l1 block info tx in the L2 block".to_string(), }) .map(|tx| tx.input())?; if l1_info_tx_data.len() < 4 { - return Err(OptimismBlockExecutionError::L1BlockInfoError { + return Err(OpBlockExecutionError::L1BlockInfoError { message: "invalid l1 block info transaction calldata in the L2 block".to_string(), }) } @@ -52,7 +52,7 @@ pub fn extract_l1_info(body: &BlockBody) -> Result Result { +pub fn parse_l1_info(input: &[u8]) -> Result { // If the first 4 bytes of the calldata are the L1BlockInfoEcotone selector, then we parse the // calldata as an Ecotone hardfork L1BlockInfo transaction. Otherwise, we parse it as a // Bedrock hardfork L1BlockInfo transaction. @@ -64,7 +64,7 @@ pub fn parse_l1_info(input: &[u8]) -> Result Result { +pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result { // The setL1BlockValues tx calldata must be exactly 260 bytes long, considering that // we already removed the first 4 bytes (the function selector). 
Detailed breakdown: // 32 bytes for the block number @@ -76,23 +76,23 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result -pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { +pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { if data.len() != 160 { - return Err(OptimismBlockExecutionError::L1BlockInfoError { + return Err(OpBlockExecutionError::L1BlockInfoError { message: "unexpected l1 block info tx calldata length found".to_string(), }) } @@ -142,22 +142,22 @@ pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result Date: Thu, 7 Nov 2024 17:56:35 +0100 Subject: [PATCH 365/970] chore: improve deps for payload prims (#12374) --- .config/zepter.yaml | 1 + Cargo.lock | 2 +- crates/consensus/beacon/Cargo.toml | 2 +- crates/optimism/payload/Cargo.toml | 2 +- crates/payload/primitives/Cargo.toml | 7 +++++-- crates/payload/primitives/src/payload.rs | 5 +++-- crates/payload/primitives/src/traits.rs | 11 ++++------- 7 files changed, 16 insertions(+), 14 deletions(-) diff --git a/.config/zepter.yaml b/.config/zepter.yaml index 8c6425f4ff0..f5d320b4af9 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -16,6 +16,7 @@ workflows: # Do not try to add a new section into `[features]` of `A` only because `B` expose that feature. There are edge-cases where this is still needed, but we can add them manually. "--left-side-feature-missing=ignore", # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. 
+ "--left-side-outside-workspace=ignore", # Auxillary flags: "--offline", diff --git a/Cargo.lock b/Cargo.lock index 7ce5daf705f..34a6fe0f98a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8417,7 +8417,7 @@ version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types-engine", "async-trait", "op-alloy-rpc-types-engine", "pin-project", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 1abc09b2a44..d3aa5124668 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -32,7 +32,7 @@ reth-chainspec = { workspace = true, optional = true } # ethereum alloy-primitives.workspace = true -alloy-rpc-types-engine.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["std"] } alloy-eips.workspace = true # async diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 4b8e64f2dba..839355b2158 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -22,7 +22,7 @@ reth-rpc-types-compat.workspace = true reth-evm.workspace = true reth-execution-types.workspace = true reth-payload-builder.workspace = true -reth-payload-primitives.workspace = true +reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true reth-trie.workspace = true reth-chain-state.workspace = true diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index ad8ce63a7e9..951108e7da3 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -22,8 +22,8 @@ reth-chain-state.workspace = true # alloy alloy-eips.workspace = true alloy-primitives.workspace = true -alloy-rpc-types = { workspace = true, features = ["engine"] } -op-alloy-rpc-types-engine.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["serde"] } +op-alloy-rpc-types-engine = { workspace = true, optional = 
true } # async async-trait.workspace = true @@ -35,3 +35,6 @@ pin-project.workspace = true serde.workspace = true thiserror.workspace = true tracing.workspace = true + +[features] +op = ["dep:op-alloy-rpc-types-engine"] \ No newline at end of file diff --git a/crates/payload/primitives/src/payload.rs b/crates/payload/primitives/src/payload.rs index fc685559e08..bcf48cea834 100644 --- a/crates/payload/primitives/src/payload.rs +++ b/crates/payload/primitives/src/payload.rs @@ -1,6 +1,7 @@ use crate::{MessageValidationKind, PayloadAttributes}; +use alloy_eips::eip4895::Withdrawal; use alloy_primitives::B256; -use alloy_rpc_types::engine::ExecutionPayload; +use alloy_rpc_types_engine::ExecutionPayload; /// Either an [`ExecutionPayload`] or a types that implements the [`PayloadAttributes`] trait. /// @@ -39,7 +40,7 @@ where Attributes: PayloadAttributes, { /// Return the withdrawals for the payload or attributes. - pub fn withdrawals(&self) -> Option<&Vec> { + pub fn withdrawals(&self) -> Option<&Vec> { match self { Self::ExecutionPayload { payload, .. 
} => payload.withdrawals(), Self::PayloadAttributes(attributes) => attributes.withdrawals(), diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index a78dc8c1322..7ae558b9945 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,11 +1,7 @@ use crate::{PayloadEvents, PayloadKind, PayloadTypes}; -use alloy_eips::eip7685::Requests; +use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; use alloy_primitives::{Address, B256, U256}; -use alloy_rpc_types::{ - engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}, - Withdrawal, -}; -use op_alloy_rpc_types_engine::OpPayloadAttributes; +use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use reth_chain_state::ExecutedBlock; use reth_primitives::{SealedBlock, Withdrawals}; use tokio::sync::oneshot; @@ -146,7 +142,8 @@ impl PayloadAttributes for EthPayloadAttributes { } } -impl PayloadAttributes for OpPayloadAttributes { +#[cfg(feature = "op")] +impl PayloadAttributes for op_alloy_rpc_types_engine::OpPayloadAttributes { fn timestamp(&self) -> u64 { self.payload_attributes.timestamp } From 29da7d744a52f9d77d27cc78705dc30e265f20bf Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 7 Nov 2024 23:31:17 +0400 Subject: [PATCH 366/970] fix: `eth_getProof` response (#12370) --- crates/rpc/rpc-eth-api/src/helpers/state.rs | 2 +- crates/rpc/rpc-types-compat/src/proof.rs | 25 ++++++++++++++------- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index f3796b4b9bb..6a34967058b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -122,7 +122,7 @@ pub trait EthState: LoadState + SpawnBlocking { let proof = state .proof(Default::default(), address, &storage_keys) .map_err(Self::Error::from_eth_err)?; - 
Ok(from_primitive_account_proof(proof)) + Ok(from_primitive_account_proof(proof, keys)) }) .await }) diff --git a/crates/rpc/rpc-types-compat/src/proof.rs b/crates/rpc/rpc-types-compat/src/proof.rs index 7bdf629e96a..34128801f8d 100644 --- a/crates/rpc/rpc-types-compat/src/proof.rs +++ b/crates/rpc/rpc-types-compat/src/proof.rs @@ -5,16 +5,18 @@ use alloy_rpc_types_eth::{EIP1186AccountProofResponse, EIP1186StorageProof}; use reth_trie_common::{AccountProof, StorageProof}; /// Creates a new rpc storage proof from a primitive storage proof type. -pub fn from_primitive_storage_proof(proof: StorageProof) -> EIP1186StorageProof { - EIP1186StorageProof { - key: JsonStorageKey::Hash(proof.key), - value: proof.value, - proof: proof.proof, - } +pub fn from_primitive_storage_proof( + proof: StorageProof, + slot: JsonStorageKey, +) -> EIP1186StorageProof { + EIP1186StorageProof { key: slot, value: proof.value, proof: proof.proof } } /// Creates a new rpc account proof from a primitive account proof type. 
-pub fn from_primitive_account_proof(proof: AccountProof) -> EIP1186AccountProofResponse { +pub fn from_primitive_account_proof( + proof: AccountProof, + slots: Vec, +) -> EIP1186AccountProofResponse { let info = proof.info.unwrap_or_default(); EIP1186AccountProofResponse { address: proof.address, @@ -23,6 +25,13 @@ pub fn from_primitive_account_proof(proof: AccountProof) -> EIP1186AccountProofR nonce: info.nonce, storage_hash: proof.storage_root, account_proof: proof.proof, - storage_proof: proof.storage_proofs.into_iter().map(from_primitive_storage_proof).collect(), + storage_proof: proof + .storage_proofs + .into_iter() + .filter_map(|proof| { + let input_slot = slots.iter().find(|s| s.as_b256() == proof.key)?; + Some(from_primitive_storage_proof(proof, *input_slot)) + }) + .collect(), } } From d19032fca1a49e0d826cc1f1dd8475e25493ecb5 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 8 Nov 2024 01:29:49 +0400 Subject: [PATCH 367/970] chore: remove auto-seal consensus (#12385) --- .github/assets/check_wasm.sh | 1 - Cargo.lock | 34 -- Cargo.toml | 2 - crates/consensus/auto-seal/Cargo.toml | 57 -- crates/consensus/auto-seal/src/client.rs | 132 ----- crates/consensus/auto-seal/src/lib.rs | 690 ----------------------- crates/consensus/auto-seal/src/mode.rs | 166 ------ crates/consensus/auto-seal/src/task.rs | 221 -------- crates/ethereum/node/Cargo.toml | 1 - crates/ethereum/node/src/node.rs | 7 +- crates/ethereum/node/tests/e2e/dev.rs | 12 - crates/node/builder/Cargo.toml | 1 - crates/node/builder/src/launch/common.rs | 11 +- crates/node/builder/src/launch/engine.rs | 9 +- crates/node/builder/src/launch/mod.rs | 46 +- crates/optimism/node/Cargo.toml | 2 - crates/optimism/node/src/node.rs | 6 +- docs/repo/layout.md | 1 - 18 files changed, 11 insertions(+), 1388 deletions(-) delete mode 100644 crates/consensus/auto-seal/Cargo.toml delete mode 100644 crates/consensus/auto-seal/src/client.rs delete mode 100644 crates/consensus/auto-seal/src/lib.rs delete mode 
100644 crates/consensus/auto-seal/src/mode.rs delete mode 100644 crates/consensus/auto-seal/src/task.rs diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 3b2f2d6b7e6..c34f82d2e31 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -11,7 +11,6 @@ exclude_crates=( # The following are not working yet, but known to be fixable reth-exex-types # https://github.com/paradigmxyz/reth/issues/9946 # The following require investigation if they can be fixed - reth-auto-seal-consensus reth-basic-payload-builder reth-beacon-consensus reth-bench diff --git a/Cargo.lock b/Cargo.lock index 34a6fe0f98a..60bf29c2a07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6385,37 +6385,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-auto-seal-consensus" -version = "1.1.1" -dependencies = [ - "alloy-eips", - "alloy-primitives", - "alloy-rpc-types-engine", - "futures-util", - "reth-beacon-consensus", - "reth-chainspec", - "reth-consensus", - "reth-engine-primitives", - "reth-evm", - "reth-execution-errors", - "reth-execution-types", - "reth-network-p2p", - "reth-network-peers", - "reth-optimism-consensus", - "reth-primitives", - "reth-provider", - "reth-revm", - "reth-stages-api", - "reth-tokio-util", - "reth-transaction-pool", - "reth-trie", - "revm-primitives", - "tokio", - "tokio-stream", - "tracing", -] - [[package]] name = "reth-basic-payload-builder" version = "1.1.1" @@ -7924,7 +7893,6 @@ dependencies = [ "futures", "jsonrpsee", "rayon", - "reth-auto-seal-consensus", "reth-beacon-consensus", "reth-blockchain-tree", "reth-chain-state", @@ -8043,7 +8011,6 @@ dependencies = [ "futures", "rand 0.8.5", "reth", - "reth-auto-seal-consensus", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", @@ -8266,7 +8233,6 @@ dependencies = [ "op-alloy-rpc-types-engine", "parking_lot", "reth", - "reth-auto-seal-consensus", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", diff --git a/Cargo.toml 
b/Cargo.toml index 8ff8c1eb7fe..dc783f071a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,6 @@ members = [ "crates/cli/runner/", "crates/cli/util/", "crates/config/", - "crates/consensus/auto-seal/", "crates/consensus/beacon/", "crates/consensus/common/", "crates/consensus/consensus/", @@ -299,7 +298,6 @@ overflow-checks = true # reth op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } -reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-bench = { path = "bin/reth-bench" } diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml deleted file mode 100644 index f2bfb43bcce..00000000000 --- a/crates/consensus/auto-seal/Cargo.toml +++ /dev/null @@ -1,57 +0,0 @@ -[package] -name = "reth-auto-seal-consensus" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -description = "A consensus impl for local testing purposes" - -[lints] -workspace = true - -[dependencies] -# reth -reth-chainspec.workspace = true -reth-beacon-consensus.workspace = true -reth-primitives.workspace = true -reth-execution-errors.workspace = true -reth-execution-types.workspace = true -reth-network-p2p.workspace = true -reth-provider.workspace = true -reth-stages-api.workspace = true -reth-revm.workspace = true -reth-transaction-pool.workspace = true -reth-evm.workspace = true -reth-engine-primitives.workspace = true -reth-consensus.workspace = true -reth-network-peers.workspace = true -reth-tokio-util.workspace = true -reth-trie.workspace = true - -# ethereum -alloy-eips.workspace = true -alloy-primitives.workspace = true -revm-primitives.workspace = true -alloy-rpc-types-engine.workspace = true - -# optimism -reth-optimism-consensus = { workspace = true, optional = true } - -# async 
-futures-util.workspace = true -tokio = { workspace = true, features = ["sync", "time"] } -tokio-stream.workspace = true -tracing.workspace = true - -[features] -optimism = [ - "reth-provider/optimism", - "reth-optimism-consensus", - "reth-beacon-consensus/optimism", - "reth-execution-types/optimism", - "reth-optimism-consensus?/optimism", - "reth-primitives/optimism", - "revm-primitives/optimism" -] diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs deleted file mode 100644 index 0083192d7df..00000000000 --- a/crates/consensus/auto-seal/src/client.rs +++ /dev/null @@ -1,132 +0,0 @@ -//! This includes download client implementations for auto sealing miners. - -use crate::Storage; -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::B256; -use reth_network_p2p::{ - bodies::client::{BodiesClient, BodiesFut}, - download::DownloadClient, - headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest}, - priority::Priority, -}; -use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, Header}; -use std::fmt::Debug; -use tracing::{trace, warn}; - -/// A download client that polls the miner for transactions and assembles blocks to be returned in -/// the download process. -/// -/// When polled, the miner will assemble blocks when miners produce ready transactions and store the -/// blocks in memory. -#[derive(Debug, Clone)] -pub struct AutoSealClient { - storage: Storage, -} - -impl AutoSealClient { - pub(crate) const fn new(storage: Storage) -> Self { - Self { storage } - } - - async fn fetch_headers(&self, request: HeadersRequest) -> Vec

{ - trace!(target: "consensus::auto", ?request, "received headers request"); - - let storage = self.storage.read().await; - let HeadersRequest { start, limit, direction } = request; - let mut headers = Vec::new(); - - let mut block: BlockHashOrNumber = match start { - BlockHashOrNumber::Hash(start) => start.into(), - BlockHashOrNumber::Number(num) => { - if let Some(hash) = storage.block_hash(num) { - hash.into() - } else { - warn!(target: "consensus::auto", num, "no matching block found"); - return headers - } - } - }; - - for _ in 0..limit { - // fetch from storage - if let Some(header) = storage.header_by_hash_or_number(block) { - match direction { - HeadersDirection::Falling => block = header.parent_hash.into(), - HeadersDirection::Rising => { - let next = header.number + 1; - block = next.into() - } - } - headers.push(header); - } else { - break - } - } - - trace!(target: "consensus::auto", ?headers, "returning headers"); - - headers - } - - async fn fetch_bodies(&self, hashes: Vec) -> Vec { - trace!(target: "consensus::auto", ?hashes, "received bodies request"); - let storage = self.storage.read().await; - let mut bodies = Vec::new(); - for hash in hashes { - if let Some(body) = storage.bodies.get(&hash).cloned() { - bodies.push(body); - } else { - break - } - } - - trace!(target: "consensus::auto", ?bodies, "returning bodies"); - - bodies - } -} - -impl HeadersClient for AutoSealClient { - type Output = HeadersFut; - - fn get_headers_with_priority( - &self, - request: HeadersRequest, - _priority: Priority, - ) -> Self::Output { - let this = self.clone(); - Box::pin(async move { - let headers = this.fetch_headers(request).await; - Ok(WithPeerId::new(PeerId::random(), headers)) - }) - } -} - -impl BodiesClient for AutoSealClient { - type Output = BodiesFut; - - fn get_block_bodies_with_priority( - &self, - hashes: Vec, - _priority: Priority, - ) -> Self::Output { - let this = self.clone(); - Box::pin(async move { - let bodies = this.fetch_bodies(hashes).await; 
- Ok(WithPeerId::new(PeerId::random(), bodies)) - }) - } -} - -impl DownloadClient for AutoSealClient { - fn report_bad_message(&self, _peer_id: PeerId) { - warn!("Reported a bad message on a miner, we should never produce bad blocks"); - // noop - } - - fn num_connected_peers(&self) -> usize { - // no such thing as connected peers when we are mining ourselves - 1 - } -} diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs deleted file mode 100644 index a2f9fa4fa03..00000000000 --- a/crates/consensus/auto-seal/src/lib.rs +++ /dev/null @@ -1,690 +0,0 @@ -//! A [Consensus] implementation for local testing purposes -//! that automatically seals blocks. -//! -//! The Mining task polls a [`MiningMode`], and will return a list of transactions that are ready to -//! be mined. -//! -//! These downloaders poll the miner, assemble the block, and return transactions that are ready to -//! be mined. - -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use alloy_eips::{eip1898::BlockHashOrNumber, eip7685::Requests}; -use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; -use reth_engine_primitives::EngineTypes; -use reth_execution_errors::{ - BlockExecutionError, BlockValidationError, InternalBlockExecutionError, -}; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{ - proofs, Block, BlockBody, BlockWithSenders, Header, SealedBlock, SealedHeader, - TransactionSigned, Withdrawals, -}; -use 
reth_provider::{BlockReaderIdExt, StateProviderFactory, StateRootProvider}; -use reth_revm::database::StateProviderDatabase; -use reth_transaction_pool::TransactionPool; -use reth_trie::HashedPostState; -use revm_primitives::calc_excess_blob_gas; -use std::{ - collections::HashMap, - fmt::Debug, - sync::Arc, - time::{SystemTime, UNIX_EPOCH}, -}; -use tokio::sync::{mpsc::UnboundedSender, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use tracing::trace; - -mod client; -mod mode; -mod task; - -pub use crate::client::AutoSealClient; -pub use mode::{FixedBlockTimeMiner, MiningMode, ReadyTransactionMiner}; -use reth_evm::execute::{BlockExecutorProvider, Executor}; -pub use task::MiningTask; - -/// A consensus implementation intended for local development and testing purposes. -#[derive(Debug, Clone)] -#[allow(dead_code)] -pub struct AutoSealConsensus { - /// Configuration - chain_spec: Arc, -} - -impl AutoSealConsensus { - /// Create a new instance of [`AutoSealConsensus`] - pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } - } -} - -impl Consensus for AutoSealConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_header_against_parent( - &self, - _header: &SealedHeader, - _parent: &SealedHeader, - ) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_header_with_total_difficulty( - &self, - _header: &Header, - _total_difficulty: U256, - ) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_block_post_execution( - &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - Ok(()) - } -} - -/// Builder type for configuring the setup -#[derive(Debug)] -pub struct AutoSealBuilder { - client: Client, - consensus: AutoSealConsensus, - pool: Pool, - mode: MiningMode, - storage: Storage, - to_engine: UnboundedSender>, - 
evm_config: EvmConfig, -} - -// === impl AutoSealBuilder === - -impl - AutoSealBuilder -where - Client: BlockReaderIdExt, - Pool: TransactionPool, - Engine: EngineTypes, - ChainSpec: EthChainSpec, -{ - /// Creates a new builder instance to configure all parts. - pub fn new( - chain_spec: Arc, - client: Client, - pool: Pool, - to_engine: UnboundedSender>, - mode: MiningMode, - evm_config: EvmConfig, - ) -> Self { - let latest_header = client.latest_header().ok().flatten().unwrap_or_else(|| { - SealedHeader::new(chain_spec.genesis_header().clone(), chain_spec.genesis_hash()) - }); - - Self { - storage: Storage::new(latest_header), - client, - consensus: AutoSealConsensus::new(chain_spec), - pool, - mode, - to_engine, - evm_config, - } - } - - /// Sets the [`MiningMode`] it operates in, default is [`MiningMode::Auto`] - pub fn mode(mut self, mode: MiningMode) -> Self { - self.mode = mode; - self - } - - /// Consumes the type and returns all components - #[track_caller] - pub fn build( - self, - ) -> ( - AutoSealConsensus, - AutoSealClient, - MiningTask, - ) { - let Self { client, consensus, pool, mode, storage, to_engine, evm_config } = self; - let auto_client = AutoSealClient::new(storage.clone()); - let task = MiningTask::new( - Arc::clone(&consensus.chain_spec), - mode, - to_engine, - storage, - client, - pool, - evm_config, - ); - (consensus, auto_client, task) - } -} - -/// In memory storage -#[derive(Debug, Clone, Default)] -pub(crate) struct Storage { - inner: Arc>, -} - -// == impl Storage === - -impl Storage { - /// Initializes the [Storage] with the given best block. This should be initialized with the - /// highest block in the chain, if there is a chain already stored on-disk. 
- fn new(best_block: SealedHeader) -> Self { - let (header, best_hash) = best_block.split(); - let mut storage = StorageInner { - best_hash, - total_difficulty: header.difficulty, - best_block: header.number, - ..Default::default() - }; - storage.headers.insert(header.number, header); - storage.bodies.insert(best_hash, BlockBody::default()); - Self { inner: Arc::new(RwLock::new(storage)) } - } - - /// Returns the write lock of the storage - pub(crate) async fn write(&self) -> RwLockWriteGuard<'_, StorageInner> { - self.inner.write().await - } - - /// Returns the read lock of the storage - pub(crate) async fn read(&self) -> RwLockReadGuard<'_, StorageInner> { - self.inner.read().await - } -} - -/// In-memory storage for the chain the auto seal engine is building. -#[derive(Default, Debug)] -pub(crate) struct StorageInner { - /// Headers buffered for download. - pub(crate) headers: HashMap, - /// A mapping between block hash and number. - pub(crate) hash_to_number: HashMap, - /// Bodies buffered for download. - pub(crate) bodies: HashMap, - /// Tracks best block - pub(crate) best_block: u64, - /// Tracks hash of best block - pub(crate) best_hash: B256, - /// The total difficulty of the chain until this block - pub(crate) total_difficulty: U256, -} - -// === impl StorageInner === - -impl StorageInner { - /// Returns the block hash for the given block number if it exists. - pub(crate) fn block_hash(&self, num: u64) -> Option { - self.hash_to_number.iter().find_map(|(k, v)| num.eq(v).then_some(*k)) - } - - /// Returns the matching header if it exists. - pub(crate) fn header_by_hash_or_number( - &self, - hash_or_num: BlockHashOrNumber, - ) -> Option
{ - let num = match hash_or_num { - BlockHashOrNumber::Hash(hash) => self.hash_to_number.get(&hash).copied()?, - BlockHashOrNumber::Number(num) => num, - }; - self.headers.get(&num).cloned() - } - - /// Inserts a new header+body pair - pub(crate) fn insert_new_block(&mut self, mut header: Header, body: BlockBody) { - header.number = self.best_block + 1; - header.parent_hash = self.best_hash; - - self.best_hash = header.hash_slow(); - self.best_block = header.number; - self.total_difficulty += header.difficulty; - - trace!(target: "consensus::auto", num=self.best_block, hash=?self.best_hash, "inserting new block"); - self.headers.insert(header.number, header); - self.bodies.insert(self.best_hash, body); - self.hash_to_number.insert(self.best_hash, self.best_block); - } - - /// Fills in pre-execution header fields based on the current best block and given - /// transactions. - pub(crate) fn build_header_template( - &self, - timestamp: u64, - transactions: &[TransactionSigned], - ommers: &[Header], - withdrawals: Option<&Withdrawals>, - requests: Option<&Requests>, - chain_spec: &ChainSpec, - ) -> Header - where - ChainSpec: EthChainSpec + EthereumHardforks, - { - // check previous block for base fee - let base_fee_per_gas = self.headers.get(&self.best_block).and_then(|parent| { - parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) - }); - - let blob_gas_used = chain_spec.is_cancun_active_at_timestamp(timestamp).then(|| { - transactions - .iter() - .filter_map(|tx| tx.transaction.as_eip4844()) - .map(|blob_tx| blob_tx.blob_gas()) - .sum::() - }); - - let mut header = Header { - parent_hash: self.best_hash, - ommers_hash: proofs::calculate_ommers_root(ommers), - transactions_root: proofs::calculate_transaction_root(transactions), - withdrawals_root: withdrawals.map(|w| proofs::calculate_withdrawals_root(w)), - difficulty: U256::from(2), - number: self.best_block + 1, - gas_limit: chain_spec.max_gas_limit(), - timestamp, - base_fee_per_gas, - 
blob_gas_used, - requests_hash: requests.map(|r| r.requests_hash()), - ..Default::default() - }; - - if chain_spec.is_cancun_active_at_timestamp(timestamp) { - let parent = self.headers.get(&self.best_block); - header.parent_beacon_block_root = - parent.and_then(|parent| parent.parent_beacon_block_root); - header.blob_gas_used = Some(0); - - let (parent_excess_blob_gas, parent_blob_gas_used) = match parent { - Some(parent) if chain_spec.is_cancun_active_at_timestamp(parent.timestamp) => ( - parent.excess_blob_gas.unwrap_or_default(), - parent.blob_gas_used.unwrap_or_default(), - ), - _ => (0, 0), - }; - header.excess_blob_gas = - Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) - } - - header - } - - /// Builds and executes a new block with the given transactions, on the provided executor. - /// - /// This returns the header of the executed block, as well as the poststate from execution. - #[allow(clippy::too_many_arguments)] - pub(crate) fn build_and_execute( - &mut self, - transactions: Vec, - ommers: Vec
, - provider: &Provider, - chain_spec: Arc, - executor: &Executor, - ) -> Result<(SealedHeader, ExecutionOutcome), BlockExecutionError> - where - Executor: BlockExecutorProvider, - Provider: StateProviderFactory, - ChainSpec: EthChainSpec + EthereumHardforks, - { - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); - - // if shanghai is active, include empty withdrawals - let withdrawals = - chain_spec.is_shanghai_active_at_timestamp(timestamp).then_some(Withdrawals::default()); - // if prague is active, include empty requests - let requests = - chain_spec.is_prague_active_at_timestamp(timestamp).then_some(Requests::default()); - - let header = self.build_header_template( - timestamp, - &transactions, - &ommers, - withdrawals.as_ref(), - requests.as_ref(), - &chain_spec, - ); - - let block = Block { - header, - body: BlockBody { - transactions, - ommers: ommers.clone(), - withdrawals: withdrawals.clone(), - }, - } - .with_recovered_senders() - .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; - - trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - - let mut db = StateProviderDatabase::new( - provider.latest().map_err(InternalBlockExecutionError::LatestBlock)?, - ); - - // execute the block - let block_execution_output = - executor.executor(&mut db).execute((&block, U256::ZERO).into())?; - let gas_used = block_execution_output.gas_used; - let execution_outcome = ExecutionOutcome::from((block_execution_output, block.number)); - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); - - // todo(onbjerg): we should not pass requests around as this is building a block, which - // means we need to extract the requests from the execution output and compute the requests - // root here - - let Block { mut header, body, .. 
} = block.block; - let body = BlockBody { transactions: body.transactions, ommers, withdrawals }; - - trace!(target: "consensus::auto", ?execution_outcome, ?header, ?body, "executed block, calculating state root and completing header"); - - // now we need to update certain header fields with the results of the execution - header.state_root = db.state_root(hashed_state)?; - header.gas_used = gas_used; - - let receipts = execution_outcome.receipts_by_block(header.number); - - // update logs bloom - let receipts_with_bloom = - receipts.iter().map(|r| r.as_ref().unwrap().bloom_slow()).collect::>(); - header.logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | *r); - - // update receipts root - header.receipts_root = { - #[cfg(feature = "optimism")] - let receipts_root = execution_outcome - .generic_receipts_root_slow(header.number, |receipts| { - reth_optimism_consensus::calculate_receipt_root_no_memo_optimism( - receipts, - &chain_spec, - header.timestamp, - ) - }) - .expect("Receipts is present"); - - #[cfg(not(feature = "optimism"))] - let receipts_root = - execution_outcome.receipts_root_slow(header.number).expect("Receipts is present"); - - receipts_root - }; - trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root"); - - // finally insert into storage - self.insert_new_block(header.clone(), body); - - // set new header with hash that should have been updated by insert_new_block - let new_header = SealedHeader::new(header, self.best_hash); - - Ok((new_header, execution_outcome)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_chainspec::{ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition}; - use reth_primitives::Transaction; - - #[test] - fn test_block_hash() { - let mut storage = StorageInner::default(); - - // Define two block hashes and their corresponding block numbers. 
- let block_hash_1: BlockHash = B256::random(); - let block_number_1: BlockNumber = 1; - let block_hash_2: BlockHash = B256::random(); - let block_number_2: BlockNumber = 2; - - // Insert the block number and hash pairs into the `hash_to_number` map. - storage.hash_to_number.insert(block_hash_1, block_number_1); - storage.hash_to_number.insert(block_hash_2, block_number_2); - - // Verify that `block_hash` returns the correct block hash for the given block number. - assert_eq!(storage.block_hash(block_number_1), Some(block_hash_1)); - assert_eq!(storage.block_hash(block_number_2), Some(block_hash_2)); - - // Test that `block_hash` returns `None` for a non-existent block number. - let block_number_3: BlockNumber = 3; - assert_eq!(storage.block_hash(block_number_3), None); - } - - #[test] - fn test_header_by_hash_or_number() { - let mut storage = StorageInner::default(); - - // Define block numbers, headers, and hashes. - let block_number_1: u64 = 1; - let block_number_2: u64 = 2; - let header_1 = Header { number: block_number_1, ..Default::default() }; - let header_2 = Header { number: block_number_2, ..Default::default() }; - let block_hash_1: BlockHash = B256::random(); - let block_hash_2: BlockHash = B256::random(); - - // Insert headers and hash-to-number mappings. - storage.headers.insert(block_number_1, header_1.clone()); - storage.headers.insert(block_number_2, header_2.clone()); - storage.hash_to_number.insert(block_hash_1, block_number_1); - storage.hash_to_number.insert(block_hash_2, block_number_2); - - // Test header retrieval by block number. - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Number(block_number_1)), - Some(header_1.clone()) - ); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Number(block_number_2)), - Some(header_2.clone()) - ); - - // Test header retrieval by block hash. 
- assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block_hash_1)), - Some(header_1) - ); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block_hash_2)), - Some(header_2) - ); - - // Test non-existent block number and hash. - assert_eq!(storage.header_by_hash_or_number(BlockHashOrNumber::Number(999)), None); - let non_existent_hash: BlockHash = B256::random(); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(non_existent_hash)), - None - ); - } - - #[test] - fn test_insert_new_block() { - let mut storage = StorageInner::default(); - - // Define headers and block bodies. - let header_1 = Header { difficulty: U256::from(100), ..Default::default() }; - let body_1 = BlockBody::default(); - let header_2 = Header { difficulty: U256::from(200), ..Default::default() }; - let body_2 = BlockBody::default(); - - // Insert the first block. - storage.insert_new_block(header_1.clone(), body_1.clone()); - let best_block_1 = storage.best_block; - let best_hash_1 = storage.best_hash; - - // Verify the block was inserted correctly. - assert_eq!( - storage.headers.get(&best_block_1), - Some(&Header { number: 1, ..header_1.clone() }) - ); - assert_eq!(storage.bodies.get(&best_hash_1), Some(&body_1)); - assert_eq!(storage.hash_to_number.get(&best_hash_1), Some(&best_block_1)); - - // Insert the second block. - storage.insert_new_block(header_2.clone(), body_2.clone()); - let best_block_2 = storage.best_block; - let best_hash_2 = storage.best_hash; - - // Verify the second block was inserted correctly. - assert_eq!( - storage.headers.get(&best_block_2), - Some(&Header { - number: 2, - parent_hash: Header { number: 1, ..header_1 }.hash_slow(), - ..header_2 - }) - ); - assert_eq!(storage.bodies.get(&best_hash_2), Some(&body_2)); - assert_eq!(storage.hash_to_number.get(&best_hash_2), Some(&best_block_2)); - - // Check that the total difficulty was updated. 
- assert_eq!(storage.total_difficulty, header_1.difficulty + header_2.difficulty); - } - - #[test] - fn test_build_basic_header_template() { - let mut storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - - let best_block_number = 1; - let best_block_hash = B256::random(); - let timestamp = 1_600_000_000; - - // Set up best block information - storage.best_block = best_block_number; - storage.best_hash = best_block_hash; - - // Build header template - let header = storage.build_header_template( - timestamp, - &[], // no transactions - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify basic fields - assert_eq!(header.parent_hash, best_block_hash); - assert_eq!(header.number, best_block_number + 1); - assert_eq!(header.timestamp, timestamp); - assert_eq!(header.gas_limit, chain_spec.max_gas_limit); - } - - #[test] - fn test_ommers_and_transactions_roots() { - let storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - let timestamp = 1_600_000_000; - - // Setup ommers and transactions - let ommers = vec![Header::default()]; - let transactions = vec![TransactionSigned::default()]; - - // Build header template - let header = storage.build_header_template( - timestamp, - &transactions, - &ommers, - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify ommers and transactions roots - assert_eq!(header.ommers_hash, proofs::calculate_ommers_root(&ommers)); - assert_eq!(header.transactions_root, proofs::calculate_transaction_root(&transactions)); - } - - // Test base fee calculation from the parent block - #[test] - fn test_base_fee_calculation() { - let mut storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - let timestamp = 1_600_000_000; - - // Set up the parent header with base fee - let base_fee = Some(100); - let parent_header = Header { base_fee_per_gas: base_fee, ..Default::default() }; - 
storage.headers.insert(storage.best_block, parent_header); - - // Build header template - let header = storage.build_header_template( - timestamp, - &[], // no transactions - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify base fee is correctly propagated - assert_eq!(header.base_fee_per_gas, base_fee); - } - - // Test blob gas and excess blob gas calculation when Cancun is active - #[test] - fn test_blob_gas_calculation_cancun() { - let storage = StorageInner::default(); - let chain_spec = ChainSpec { - hardforks: ChainHardforks::new(vec![( - EthereumHardfork::Cancun.boxed(), - ForkCondition::Timestamp(25), - )]), - ..Default::default() - }; - let timestamp = 26; - - // Set up a transaction with blob gas - let blob_tx = TransactionSigned { - transaction: Transaction::Eip4844(Default::default()), - ..Default::default() - }; - let transactions = vec![blob_tx]; - - // Build header template - let header = storage.build_header_template( - timestamp, - &transactions, - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify that the header has the correct fields including blob gas - assert_eq!( - header, - Header { - parent_hash: B256::ZERO, - ommers_hash: proofs::calculate_ommers_root(&[]), - transactions_root: proofs::calculate_transaction_root(&transactions), - withdrawals_root: None, - difficulty: U256::from(2), - number: 1, - gas_limit: chain_spec.max_gas_limit, - timestamp, - base_fee_per_gas: None, - blob_gas_used: Some(0), - requests_hash: None, - excess_blob_gas: Some(0), - ..Default::default() - } - ); - } -} diff --git a/crates/consensus/auto-seal/src/mode.rs b/crates/consensus/auto-seal/src/mode.rs deleted file mode 100644 index 82750c8e47b..00000000000 --- a/crates/consensus/auto-seal/src/mode.rs +++ /dev/null @@ -1,166 +0,0 @@ -//! The mode the auto seal miner is operating in. 
- -use alloy_primitives::TxHash; -use futures_util::{stream::Fuse, StreamExt}; -use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; -use std::{ - fmt, - pin::Pin, - sync::Arc, - task::{Context, Poll}, - time::Duration, -}; -use tokio::{sync::mpsc::Receiver, time::Interval}; -use tokio_stream::{wrappers::ReceiverStream, Stream}; - -/// Mode of operations for the `Miner` -#[derive(Debug)] -pub enum MiningMode { - /// A miner that does nothing - None, - /// A miner that listens for new transactions that are ready. - /// - /// Either one transaction will be mined per block, or any number of transactions will be - /// allowed - Auto(ReadyTransactionMiner), - /// A miner that constructs a new block every `interval` tick - FixedBlockTime(FixedBlockTimeMiner), -} - -// === impl MiningMode === - -impl MiningMode { - /// Creates a new instant mining mode that listens for new transactions and tries to build - /// non-empty blocks as soon as transactions arrive. - pub fn instant(max_transactions: usize, listener: Receiver) -> Self { - Self::Auto(ReadyTransactionMiner { - max_transactions, - has_pending_txs: None, - rx: ReceiverStream::new(listener).fuse(), - }) - } - - /// Creates a new interval miner that builds a block ever `duration`. - pub fn interval(duration: Duration) -> Self { - Self::FixedBlockTime(FixedBlockTimeMiner::new(duration)) - } - - /// polls the Pool and returns those transactions that should be put in a block, if any. 
- pub(crate) fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - match self { - Self::None => Poll::Pending, - Self::Auto(miner) => miner.poll(pool, cx), - Self::FixedBlockTime(miner) => miner.poll(pool, cx), - } - } -} - -impl fmt::Display for MiningMode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let kind = match self { - Self::None => "None", - Self::Auto(_) => "Auto", - Self::FixedBlockTime(_) => "FixedBlockTime", - }; - write!(f, "{kind}") - } -} - -/// A miner that's supposed to create a new block every `interval`, mining all transactions that are -/// ready at that time. -/// -/// The default blocktime is set to 6 seconds -#[derive(Debug)] -pub struct FixedBlockTimeMiner { - /// The interval this fixed block time miner operates with - interval: Interval, -} - -// === impl FixedBlockTimeMiner === - -impl FixedBlockTimeMiner { - /// Creates a new instance with an interval of `duration` - pub(crate) fn new(duration: Duration) -> Self { - let start = tokio::time::Instant::now() + duration; - Self { interval: tokio::time::interval_at(start, duration) } - } - - fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - if self.interval.poll_tick(cx).is_ready() { - // drain the pool - return Poll::Ready(pool.best_transactions().collect()) - } - Poll::Pending - } -} - -impl Default for FixedBlockTimeMiner { - fn default() -> Self { - Self::new(Duration::from_secs(6)) - } -} - -/// A miner that Listens for new ready transactions -pub struct ReadyTransactionMiner { - /// how many transactions to mine per block - max_transactions: usize, - /// stores whether there are pending transactions (if known) - has_pending_txs: Option, - /// Receives hashes of transactions that are ready - rx: Fuse>, -} - -// === impl ReadyTransactionMiner === - -impl ReadyTransactionMiner { - fn poll( - &mut self, - pool: &Pool, 
- cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - // drain the notification stream - while let Poll::Ready(Some(_hash)) = Pin::new(&mut self.rx).poll_next(cx) { - self.has_pending_txs = Some(true); - } - - if self.has_pending_txs == Some(false) { - return Poll::Pending - } - - let transactions = pool.best_transactions().take(self.max_transactions).collect::>(); - - // there are pending transactions if we didn't drain the pool - self.has_pending_txs = Some(transactions.len() >= self.max_transactions); - - if transactions.is_empty() { - return Poll::Pending - } - - Poll::Ready(transactions) - } -} - -impl fmt::Debug for ReadyTransactionMiner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ReadyTransactionMiner") - .field("max_transactions", &self.max_transactions) - .finish_non_exhaustive() - } -} diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs deleted file mode 100644 index 75ddda90861..00000000000 --- a/crates/consensus/auto-seal/src/task.rs +++ /dev/null @@ -1,221 +0,0 @@ -use crate::{mode::MiningMode, Storage}; -use alloy_rpc_types_engine::ForkchoiceState; -use futures_util::{future::BoxFuture, FutureExt}; -use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; -use reth_evm::execute::BlockExecutorProvider; -use reth_provider::{CanonChainTracker, StateProviderFactory}; -use reth_stages_api::PipelineEvent; -use reth_tokio_util::EventStream; -use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; -use std::{ - collections::VecDeque, - future::Future, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use tracing::{debug, error, warn}; - -/// A Future that listens for new ready transactions and puts new blocks into storage -pub struct MiningTask 
{ - /// The configured chain spec - chain_spec: Arc, - /// The client used to interact with the state - client: Client, - /// The active miner - miner: MiningMode, - /// Single active future that inserts a new block into `storage` - insert_task: Option>>>, - /// Shared storage to insert new blocks - storage: Storage, - /// Pool where transactions are stored - pool: Pool, - /// backlog of sets of transactions ready to be mined - queued: VecDeque::Transaction>>>>, - // TODO: ideally this would just be a sender of hashes - to_engine: UnboundedSender>, - /// The pipeline events to listen on - pipe_line_events: Option>, - /// The type used for block execution - block_executor: Executor, -} - -// === impl MiningTask === - -impl - MiningTask -{ - /// Creates a new instance of the task - #[allow(clippy::too_many_arguments)] - pub(crate) fn new( - chain_spec: Arc, - miner: MiningMode, - to_engine: UnboundedSender>, - storage: Storage, - client: Client, - pool: Pool, - block_executor: Executor, - ) -> Self { - Self { - chain_spec, - client, - miner, - insert_task: None, - storage, - pool, - to_engine, - queued: Default::default(), - pipe_line_events: None, - block_executor, - } - } - - /// Sets the pipeline events to listen on. 
- pub fn set_pipeline_events(&mut self, events: EventStream) { - self.pipe_line_events = Some(events); - } -} - -impl Future - for MiningTask -where - Client: StateProviderFactory + CanonChainTracker + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, - Engine: EngineTypes, - Executor: BlockExecutorProvider, - ChainSpec: EthChainSpec + EthereumHardforks + 'static, -{ - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - // this drives block production and - loop { - if let Poll::Ready(transactions) = this.miner.poll(&this.pool, cx) { - // miner returned a set of transaction that we feed to the producer - this.queued.push_back(transactions); - } - - if this.insert_task.is_none() { - if this.queued.is_empty() { - // nothing to insert - break - } - - // ready to queue in new insert task - let storage = this.storage.clone(); - let transactions = this.queued.pop_front().expect("not empty"); - - let to_engine = this.to_engine.clone(); - let client = this.client.clone(); - let chain_spec = Arc::clone(&this.chain_spec); - let events = this.pipe_line_events.take(); - let executor = this.block_executor.clone(); - - // Create the mining future that creates a block, notifies the engine that drives - // the pipeline - this.insert_task = Some(Box::pin(async move { - let mut storage = storage.write().await; - - let transactions: Vec<_> = transactions - .into_iter() - .map(|tx| { - let recovered = tx.to_recovered_transaction(); - recovered.into_signed() - }) - .collect(); - let ommers = vec![]; - - match storage.build_and_execute( - transactions.clone(), - ommers.clone(), - &client, - chain_spec, - &executor, - ) { - Ok((new_header, _bundle_state)) => { - let state = ForkchoiceState { - head_block_hash: new_header.hash(), - finalized_block_hash: new_header.hash(), - safe_block_hash: new_header.hash(), - }; - drop(storage); - - // TODO: make this a future - // await the fcu call rx for SYNCING, then 
wait for a VALID response - loop { - // send the new update to the engine, this will trigger the engine - // to download and execute the block we just inserted - let (tx, rx) = oneshot::channel(); - let _ = to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs: None, - tx, - version: EngineApiMessageVersion::default(), - }); - debug!(target: "consensus::auto", ?state, "Sent fork choice update"); - - match rx.await.unwrap() { - Ok(fcu_response) => { - match fcu_response.forkchoice_status() { - ForkchoiceStatus::Valid => break, - ForkchoiceStatus::Invalid => { - error!(target: "consensus::auto", ?fcu_response, "Forkchoice update returned invalid response"); - return None - } - ForkchoiceStatus::Syncing => { - debug!(target: "consensus::auto", ?fcu_response, "Forkchoice update returned SYNCING, waiting for VALID"); - // wait for the next fork choice update - continue - } - } - } - Err(err) => { - error!(target: "consensus::auto", %err, "Autoseal fork choice update failed"); - return None - } - } - } - - // update canon chain for rpc - client.set_canonical_head(new_header.clone()); - client.set_safe(new_header.clone()); - client.set_finalized(new_header.clone()); - } - Err(err) => { - warn!(target: "consensus::auto", %err, "failed to execute block") - } - } - - events - })); - } - - if let Some(mut fut) = this.insert_task.take() { - match fut.poll_unpin(cx) { - Poll::Ready(events) => { - this.pipe_line_events = events; - } - Poll::Pending => { - this.insert_task = Some(fut); - break - } - } - } - } - - Poll::Pending - } -} - -impl - std::fmt::Debug for MiningTask -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("MiningTask").finish_non_exhaustive() - } -} diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 49663ffc2cc..69bbeeb5b43 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -24,7 +24,6 @@ reth-network.workspace = true 
reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-consensus.workspace = true -reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-rpc.workspace = true reth-node-api.workspace = true diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 17a952a58d3..95542411d21 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -2,7 +2,6 @@ use std::sync::Arc; -use reth_auto_seal_consensus::AutoSealConsensus; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; @@ -335,11 +334,7 @@ where type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { - if ctx.is_dev() { - Ok(Arc::new(AutoSealConsensus::new(ctx.chain_spec()))) - } else { - Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) - } + Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) } } diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index f0fcaf64524..b6d0ffcfaaa 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,12 +1,10 @@ use std::sync::Arc; -use crate::utils::eth_payload_attributes; use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; use reth::{args::DevArgs, rpc::api::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; -use reth_e2e_test_utils::setup; use reth_node_api::FullNodeComponents; use reth_node_builder::{ rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, @@ -17,16 +15,6 @@ use reth_tasks::TaskManager; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, _) = - setup::(1, custom_chain(), true, eth_payload_attributes).await?; - - assert_chain_advances(nodes.pop().unwrap().inner).await; - 
Ok(()) -} - -#[tokio::test] -async fn can_run_dev_node_new_engine() -> eyre::Result<()> { reth_tracing::init_test_tracing(); let tasks = TaskManager::current(); let exec = tasks.executor(); diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 86f755cb920..4ef2b0728e0 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] ## reth -reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true reth-chain-state.workspace = true diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index ac2339fa6cf..856f86c6fe0 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -5,7 +5,6 @@ use std::{sync::Arc, thread::available_parallelism}; use alloy_primitives::{BlockNumber, B256}; use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; -use reth_auto_seal_consensus::MiningMode; use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, @@ -16,6 +15,7 @@ use reth_consensus::Consensus; use reth_db_api::database::Database; use reth_db_common::init::{init_genesis, InitDatabaseError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; +use reth_engine_local::MiningMode; use reth_engine_tree::tree::{InvalidBlockHook, InvalidBlockHooks, NoopInvalidBlockHook}; use reth_evm::noop::NoopBlockExecutorProvider; use reth_fs_util as fs; @@ -52,8 +52,9 @@ use reth_stages::{sets::DefaultStages, MetricEvent, PipelineBuilder, PipelineTar use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, error, info, warn}; +use reth_transaction_pool::TransactionPool; use tokio::sync::{ - mpsc::{unbounded_channel, Receiver, UnboundedSender}, + 
mpsc::{unbounded_channel, UnboundedSender}, oneshot, watch, }; @@ -386,13 +387,11 @@ impl LaunchContextWith) -> MiningMode { + pub fn dev_mining_mode(&self, pool: impl TransactionPool) -> MiningMode { if let Some(interval) = self.node_config().dev.block_time { MiningMode::interval(interval) - } else if let Some(max_transactions) = self.node_config().dev.block_max_transactions { - MiningMode::instant(max_transactions, pending_transactions_listener) } else { - MiningMode::instant(1, pending_transactions_listener) + MiningMode::instant(pool) } } } diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 85401b8b958..65433176ba9 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -8,7 +8,7 @@ use reth_beacon_consensus::{ use reth_blockchain_tree::BlockchainTreeConfig; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider}; -use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder, MiningMode}; +use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, @@ -208,11 +208,6 @@ where info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); let mut engine_service = if ctx.is_dev() { - let mining_mode = if let Some(block_time) = ctx.node_config().dev.block_time { - MiningMode::interval(block_time) - } else { - MiningMode::instant(ctx.components().pool().clone()) - }; let eth_service = LocalEngineService::new( ctx.consensus(), ctx.components().block_executor().clone(), @@ -225,7 +220,7 @@ where ctx.sync_metrics_tx(), consensus_engine_tx.clone(), Box::pin(consensus_engine_stream), - mining_mode, + ctx.dev_mining_mode(ctx.components().pool()), LocalPayloadAttributesBuilder::new(ctx.chain_spec()), ); diff --git 
a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index a623a7a9f23..4f9e850c97f 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -11,7 +11,6 @@ pub use exex::ExExLauncher; use std::{future::Future, sync::Arc}; -use alloy_primitives::utils::format_ether; use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, @@ -33,7 +32,6 @@ use reth_provider::providers::BlockchainProvider; use reth_rpc::eth::RpcNodeCore; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; -use reth_transaction_pool::TransactionPool; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -210,47 +208,7 @@ where let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); let (pipeline, client) = if ctx.is_dev() { - info!(target: "reth::cli", "Starting Reth in dev mode"); - - for (idx, (address, alloc)) in ctx.chain_spec().genesis().alloc.iter().enumerate() { - info!(target: "reth::cli", "Allocated Genesis Account: {:02}. 
{} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); - } - - // install auto-seal - let mining_mode = - ctx.dev_mining_mode(ctx.components().pool().pending_transactions_listener()); - info!(target: "reth::cli", mode=%mining_mode, "configuring dev mining mode"); - - let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( - ctx.chain_spec(), - ctx.blockchain_db().clone(), - ctx.components().pool().clone(), - consensus_engine_tx.clone(), - mining_mode, - ctx.components().block_executor().clone(), - ) - .build(); - - let pipeline = crate::setup::build_networked_pipeline( - &ctx.toml_config().stages, - client.clone(), - ctx.consensus(), - ctx.provider_factory().clone(), - ctx.task_executor(), - ctx.sync_metrics_tx(), - ctx.prune_config(), - max_block, - static_file_producer, - ctx.components().block_executor().clone(), - pipeline_exex_handle, - )?; - - let pipeline_events = pipeline.events(); - task.set_pipeline_events(pipeline_events); - debug!(target: "reth::cli", "Spawning auto mine task"); - ctx.task_executor().spawn(Box::pin(task)); - - (pipeline, Either::Left(client)) + eyre::bail!("Dev mode is not supported for legacy engine") } else { let pipeline = crate::setup::build_networked_pipeline( &ctx.toml_config().stages, @@ -266,7 +224,7 @@ where pipeline_exex_handle, )?; - (pipeline, Either::Right(network_client.clone())) + (pipeline, network_client.clone()) }; let pipeline_events = pipeline.events(); diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index deabbac5249..7674cbd37c0 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -16,7 +16,6 @@ reth-chainspec.workspace = true reth-engine-local.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true -reth-auto-seal-consensus.workspace = true reth-basic-payload-builder.workspace = true reth-consensus.workspace = true reth-node-api.workspace = true @@ -76,7 +75,6 @@ optimism = [ 
"reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", "revm/optimism", - "reth-auto-seal-consensus/optimism", "reth-optimism-rpc/optimism", "reth-engine-local/optimism", "reth-optimism-consensus/optimism", diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index e39fdfc27a8..a09dfbaa562 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -467,11 +467,7 @@ where type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { - if ctx.is_dev() { - Ok(Arc::new(reth_auto_seal_consensus::AutoSealConsensus::new(ctx.chain_spec()))) - } else { - Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) - } + Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) } } diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 6ed91e79656..f78abe96122 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -82,7 +82,6 @@ The networking component mainly lives in [`net/network`](../../crates/net/networ Different consensus mechanisms. - [`consensus/common`](../../crates/consensus/common): Common consensus functions and traits (e.g. 
fee calculation) -- [`consensus/auto-seal`](../../crates/consensus/auto-seal): A consensus mechanism that auto-seals blocks for local development (also commonly known as "auto-mine") - [`consensus/beacon`](../../crates/consensus/beacon): Consensus mechanism that handles messages from a beacon node ("eth2") ### Execution From dbe8c83b482c4477effe2f71b20cde436c4b23bd Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:32:48 -0600 Subject: [PATCH 368/970] renamed OptimismInvalidTransactionError to OpInvalidTransactionError (#12384) --- crates/optimism/rpc/src/error.rs | 14 +++++++------- crates/optimism/rpc/src/lib.rs | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index c4027340039..078da8fe4d1 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -25,7 +25,7 @@ pub enum OpEthApiError { L1BlockGasError, /// Wrapper for [`revm_primitives::InvalidTransaction`](InvalidTransaction). #[error(transparent)] - InvalidTransaction(#[from] OptimismInvalidTransactionError), + InvalidTransaction(#[from] OpInvalidTransactionError), /// Sequencer client error. #[error(transparent)] Sequencer(#[from] SequencerClientError), @@ -55,7 +55,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { /// Optimism specific invalid transaction errors #[derive(thiserror::Error, Debug)] -pub enum OptimismInvalidTransactionError { +pub enum OpInvalidTransactionError { /// A deposit transaction was submitted as a system transaction post-regolith. 
#[error("no system transactions allowed after regolith")] DepositSystemTxPostRegolith, @@ -64,18 +64,18 @@ pub enum OptimismInvalidTransactionError { HaltedDepositPostRegolith, } -impl From for jsonrpsee_types::error::ErrorObject<'static> { - fn from(err: OptimismInvalidTransactionError) -> Self { +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: OpInvalidTransactionError) -> Self { match err { - OptimismInvalidTransactionError::DepositSystemTxPostRegolith | - OptimismInvalidTransactionError::HaltedDepositPostRegolith => { + OpInvalidTransactionError::DepositSystemTxPostRegolith | + OpInvalidTransactionError::HaltedDepositPostRegolith => { rpc_err(EthRpcErrorCode::TransactionRejected.code(), err.to_string(), None) } } } } -impl TryFrom for OptimismInvalidTransactionError { +impl TryFrom for OpInvalidTransactionError { type Error = InvalidTransaction; fn try_from(err: InvalidTransaction) -> Result { diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index 0ff1451d05b..44d0fa35389 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -14,6 +14,6 @@ pub mod error; pub mod eth; pub mod sequencer; -pub use error::{OpEthApiError, OptimismInvalidTransactionError, SequencerClientError}; +pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; pub use eth::{OpEthApi, OpReceiptBuilder}; pub use sequencer::SequencerClient; From eb7bb08b51555fd42c112dbbc0bd0efa8d8d816e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Nov 2024 00:29:17 +0100 Subject: [PATCH 369/970] fix: remove independent tx from all (#12387) --- crates/transaction-pool/src/pool/best.rs | 11 +++++- crates/transaction-pool/src/pool/pending.rs | 43 +++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 36a14edaa23..068ef989953 100644 --- a/crates/transaction-pool/src/pool/best.rs 
+++ b/crates/transaction-pool/src/pool/best.rs @@ -128,6 +128,15 @@ impl BestTransactions { } } + /// Removes the currently best independent transaction from the independent set and the total + /// set. + fn pop_best(&mut self) -> Option> { + self.independent.pop_last().inspect(|best| { + let removed = self.all.remove(best.transaction.id()); + debug_assert!(removed.is_some(), "must be present in both sets"); + }) + } + /// Checks for new transactions that have come into the `PendingPool` after this iterator was /// created and inserts them fn add_new_transactions(&mut self) { @@ -167,7 +176,7 @@ impl Iterator for BestTransactions { loop { self.add_new_transactions(); // Remove the next independent tx with the highest priority - let best = self.independent.pop_last()?; + let best = self.pop_best()?; let sender_id = best.transaction.sender_id(); // skip transactions for which sender was marked as invalid diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index ff5269014c4..c9cfd85e288 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -815,4 +815,47 @@ mod tests { pending.into_iter().map(|tx| (tx.sender(), tx.nonce())).collect::>(); assert_eq!(pending, expected_pending); } + + // + #[test] + fn test_eligible_updates_promoted() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_senders = 10; + + let first_txs: Vec<_> = (0..num_senders) // + .map(|_| MockTransaction::eip1559()) + .collect(); + let second_txs: Vec<_> = + first_txs.iter().map(|tx| tx.clone().rng_hash().inc_nonce()).collect(); + + for tx in first_txs { + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best(); + + for _ in 0..num_senders { + if let Some(tx) = best.next() { + assert_eq!(tx.nonce(), 0); + } else { + panic!("cannot read one of first_txs"); + } + } + + for 
tx in second_txs { + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + for _ in 0..num_senders { + if let Some(tx) = best.next() { + assert_eq!(tx.nonce(), 1); + } else { + panic!("cannot read one of second_txs"); + } + } + } } From c7b6a351133266423195349ff8b28274aceec4fc Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 8 Nov 2024 04:46:24 +0400 Subject: [PATCH 370/970] feat: bump alloy (#12391) --- Cargo.lock | 242 +++++++++++---------- Cargo.toml | 64 +++--- crates/optimism/rpc/src/eth/transaction.rs | 13 +- crates/rpc/rpc/src/eth/helpers/types.rs | 23 +- 4 files changed, 189 insertions(+), 153 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 60bf29c2a07..4fb7b26f2b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,9 +97,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.46" +version = "0.1.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836cf02383d9ebb35502d379bcd1ae803155094077eaab9c29131d888cd5fa3e" +checksum = "18c5c520273946ecf715c0010b4e3503d7eba9893cd9ce6b7fff5654c4a3c470" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7109d565c7157ee2c10beea7911a71130aa6c3cb6dfeaf66905a98f69b96a754" +checksum = "b19fd285b55dd39ae0dbc37481ad9f5f48898726f76335a2d6167a85a5fa41da" dependencies = [ "alloy-eips", "alloy-primitives", @@ -131,9 +131,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f16c29a39afa238e35ee4ba06ca2e1c3a4764c2096e94c66730688a0471be7" +checksum = "4f42b1cb3fa8cba51b45795097a0d58a34569ca5db9eda48f63230e22fbc5cb5" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -198,9 
+198,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711de3f04cf728259ff149f725df12a8595b6b10baefafb0a0447201c72d76de" +checksum = "21aff0f2c921246398cad88e32a1d8ec14359b183afbc3dcb816873714cafc1a" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -219,9 +219,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76b8fa6253466bd6f4b5ba3d725d350f7a05de494dd1b8d01537eafe934667e9" +checksum = "a76d899cfbfa13c5ed044383b7ae0e6a4d6ffcad3fd25e4acf71ff1c255ddae0" dependencies = [ "alloy-primitives", "alloy-serde", @@ -242,9 +242,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9278d6d554510136d9e0e4e51de4f5a9a4baffc8975f29e9acd01e12b2e045c" +checksum = "e244937365749c09c403d3054de39cc7dd46e3c3a12e5b164106af4903011ab1" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -256,9 +256,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85d356983dea86089b05674d5ef88a7168a5c34a523ef62e2e3c8a9847ce0822" +checksum = "0a28811461dc37e28db92b6d3a8c03a5883f2100b270a6294af00710bf4a0be4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -279,9 +279,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fbc9778e7989877465888383a7533c7318a9200d7229336bcc2b0277df36ba" +checksum = "3e517c44a97e753f10dc0736215ba4677da5e2fbc1451e3e76902e02cd6cff12" dependencies = [ "alloy-consensus", "alloy-eips", @@ -292,9 +292,9 @@ dependencies = [ [[package]] name = 
"alloy-node-bindings" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe5fd811738d37c56318378802b7bc3cc44e4d12b532641374309a10a04c515" +checksum = "15bf1a4b35b071c2d6f21fd3d32b8c5466cb7ed31fd4a4473a4e2ce180729121" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -322,7 +322,7 @@ dependencies = [ "derive_more 1.0.0", "foldhash", "getrandom 0.2.15", - "hashbrown 0.15.0", + "hashbrown 0.15.1", "hex-literal", "indexmap 2.6.0", "itoa", @@ -341,9 +341,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe3189c8cf3c3e9185862ac0d0b2a9d6bf00e4395746c7ec36307a4db0d5d486" +checksum = "56befb85784c7eb4f163b9aed7cdcaba09d5b07f8e59d6c12ad0ce1acf67c0fd" dependencies = [ "alloy-chains", "alloy-consensus", @@ -382,9 +382,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51358f866bcb93b8440c08086557e415c455cdc8d63754fa339611a3e215b038" +checksum = "a6480f9596064db2ca8e1a4b710ea9a4ef420534e68640296a461b71f6bfadc1" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -423,9 +423,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d847913cea3fcd64fb1fe1247fafe15aab4060a2d24e535bbffcaa4670de9a79" +checksum = "cb49d38b3279a07e864d973323534a2c4a845e16f2c0153a509a3abcc01da7b1" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -448,9 +448,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ef8fd215cf81ddb0565815e1592a87377fa1e259db8ca4e683e6659fdf5c08" +checksum = 
"90be9542c6c9bb0d21ac08104ca0a3d1fb83e56f1c704f5cdcf6fb9e01fcbd75" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -461,9 +461,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "647f703e27edad1f9c97455a6434378ea70b4ca9ae95f5e1559acf354c69bc14" +checksum = "410e7b9d67489d19ad52439b940fbf482e0823190d8245242bfff1eec44290d5" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -473,9 +473,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6805b1626b084c231b2ec70c05090d45ce914d22e47f6cd4e8426f43098bbdf1" +checksum = "951f9106bb02ad00a2dc2eb7b400041a2c073d7fb8f33e2f1f29b2f71564f3f7" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -485,9 +485,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ed50d4f427bcb5bc561b3e6f45238158db6592deabcfbecb03c7ca9dadafe98" +checksum = "dab9821d5a73f56512ddd8e3db89a5bbb285353129b271c4ad6803a37c4e00ce" dependencies = [ "alloy-eips", "alloy-primitives", @@ -499,9 +499,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e00a212581221f03d18c4239a1b985d695205a9518468a0b11ef64a143dd0724" +checksum = "ebe68f35cafc465442862421ae2d123bb58c8df25f837d8866bf5fc278b74a52" dependencies = [ "alloy-primitives", "serde", @@ -509,9 +509,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7312fb85ef76428f8e20f50d1505494be9d081ffdb5cbf6a25c153c7b530994c" +checksum = 
"5ed9e7b3233cb3e0aaeaedc4e21e1ea9d99e947a7206241a9f9521c138193978" dependencies = [ "alloy-consensus", "alloy-eips", @@ -530,9 +530,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba7afa617e7942ba5df88ca063a99e9f51e67df2de816fd52513e64926145a3" +checksum = "be10f130b8be7c2351a3ea64b4bf07020fde5be8d1ac18db9a9a3496aa22bb19" dependencies = [ "alloy-consensus", "alloy-eips", @@ -551,9 +551,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b479e525a57388821d05c99732b3f6195128d8b74c9372329287f5e0d47d0aa" +checksum = "110f7dbee6f047915eb8915751d96402f6d02cb6e5f64286f10949eaa5bed841" dependencies = [ "alloy-eips", "alloy-primitives", @@ -564,9 +564,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "563105a7fb420d44bd30bfe043f5bba8b6fe78432d8da99f4148aa7226d90d69" +checksum = "5d4f7f183d06db1457b58c6d618ff7ab92c97810138c148e09edb14ed2001069" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -578,9 +578,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0da9410a730ced6e30cd349e6d9f39bc9e37ca1bb58a39691e276d7a4061631" +checksum = "f85580d4e78ffd765086ebf640004a773e3c335ebbfaa5666e13a0640c4957fe" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -590,9 +590,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c9f13d8c9180dcced875f91f1876e428941cec151fc501637f68ad30d088d89" +checksum = 
"1493df14770a23b1e32d22c66fa22508d09e0a99d6923a45f179ff7887ca0cef" dependencies = [ "alloy-primitives", "arbitrary", @@ -602,9 +602,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "796f951bcd6a00f9fb53265676eed9fab6feb37d1eb912b70fc2654be5e5a560" +checksum = "ebff64a3b4062eba217404700d1517b9bf3ff9a7a5b2dd03f1cf8aeec3e9a6b8" dependencies = [ "alloy-primitives", "async-trait", @@ -616,9 +616,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e2a3fb629bbe89cfba73699a4be64d6dc3bd73691f2e43f2a35448294ffbf9" +checksum = "bc1f6602be452e3bb5b6c2fe0fa0f966465f9e9bfd6ad7691bfe1bd8b74bf432" dependencies = [ "alloy-consensus", "alloy-network", @@ -704,9 +704,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d315ab988e06f6b12038a3d7811957da28a8b378ff0d084b0819ebae1746ead" +checksum = "64534da7f71ecca86b3449adec19b7942fb0905b9f392f60054a02a5f686f71f" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -724,9 +724,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "166449a8d8867be0978d6fa85aa56b89a27988b09e57bfd1f3b9962a9c8d5bae" +checksum = "617b5ab96f4fb64ef697a84c68ec8534c062baafbdb0529c34aaee43324f0d5a" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -739,9 +739,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "716b21dce8e7ea29a5d459ed4b6d29a13a71d2b766fd84ac15e68623662b5d87" +checksum = 
"10043df9ea36e3a38056cdfc3a70138343caef4eec6df66d6cbfdd348d245828" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -758,9 +758,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b577974182a4e6d9b1a1ecd5a7fd48da9d239a1e214e2368d37b3179e86cd8c3" +checksum = "b6a43ecdbc8f79cb5d7f54e2118626f873ded93c8c040fb714ce6be47dc5b526" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -776,9 +776,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd7f8b3a7c65ca09b3c7bdd7c7d72d7423d026f5247eda96af53d24e58315c1" +checksum = "40d8e28db02c006f7abb20f345ffb3cc99c465e36f676ba262534e654ae76042" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -817,9 +817,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -866,9 +866,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "aquamarine" @@ -886,9 +886,9 @@ dependencies = [ [[package]] name = "arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +checksum = 
"dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] @@ -1651,9 +1651,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.34" +version = "1.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b9470d453346108f93a59222a9a1a5724db32d0a4727b7ab7ace4b4d822dc9" +checksum = "baee610e9452a8f6f0a1b6194ec09ff9e2d85dea54432acdae41aa0761c95d70" dependencies = [ "jobserver", "libc", @@ -2389,9 +2389,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", @@ -3551,9 +3551,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" dependencies = [ "allocator-api2", "equivalent", @@ -4012,12 +4012,23 @@ dependencies = [ [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", ] [[package]] @@ -4094,7 +4105,7 @@ checksum = 
"707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.1", "serde", ] @@ -4567,9 +4578,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.161" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" @@ -4740,7 +4751,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.0", + "hashbrown 0.15.1", ] [[package]] @@ -4866,7 +4877,7 @@ checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.15.0", + "hashbrown 0.15.1", "indexmap 2.6.0", "metrics", "ordered-float", @@ -5274,9 +5285,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dabf6e7d7d63b2c6ed746b24d334e16388c6c3921bc50172440c72aa923c6b4a" +checksum = "e33097177de330b1a83e0a882ae752ad55f23962b1e310176d1623655c18421e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5292,9 +5303,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f901aa077832e22820c644d63d2e5e48601eed0f06e40f2a26d1b2a89bd17dec" +checksum = "2232ff799352932fc5484e1c63ee7bb1e74a79ac7b94a4f7318560fba21167de" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5306,9 +5317,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.6.2" +version = "0.6.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b2e7db7997b12c1f364a3bd54b35338357f44c8e2e533a81ebf625104d80110" +checksum = "7f1021b644a8f0bf8d7f878aa5328da67c7d697e476c8e097d09e05585067713" dependencies = [ "alloy-consensus", "alloy-network", @@ -5321,9 +5332,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9226c7618f45f1d1e1f1112230818d5cfa719da9f5ca05fa28eaeb44d024181" +checksum = "a566c421638a3b655a2aaf59fbbdee017a7dce6acfbacead219861e14654b98d" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5341,9 +5352,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d60079165fe9a4be99b04865d8746c6c9c7b505be2fdce8982f677ca18c3cc10" +checksum = "72298f3f9084773dc3feaf88b08db82ceb3e3e13f98280459d869accb3f14234" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5360,9 +5371,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a5b505325e343b299b1c574b2b8542f6ac3101e0d92a1c909b2d7dd74665f1" +checksum = "f2a270e6370a0fa8a673e29bcd436cbb67b5dc88cefc1d00fbf2382673894f71" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5416,9 +5427,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "4.3.0" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d501f1a72f71d3c063a6bbc8f7271fa73aa09fe5d6283b6571e2ed176a2537" +checksum = "c65ee1f9701bf938026630b455d5315f490640234259037edb259798b3bcf85e" dependencies = [ "num-traits", ] @@ -6020,9 +6031,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.6" +version = "0.5.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e346e016eacfff12233c243718197ca12f148c84e1e84268a896699b41c71780" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ "cfg_aliases", "libc", @@ -9594,9 +9605,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.38" +version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" +checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ "bitflags 2.6.0", "errno", @@ -10318,9 +10329,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.12.0" +version = "12.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" +checksum = "3d4d73159efebfb389d819fd479afb2dbd57dcb3e3f4b7fcfa0e675f5a46c1cb" dependencies = [ "debugid", "memmap2", @@ -10330,9 +10341,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.0" +version = "12.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" +checksum = "a767859f6549c665011970874c3f541838b4835d5aaaa493d3ee383918be9f10" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -10497,18 +10508,18 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.67" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3c6efbfc763e64eb85c11c25320f0737cb7364c4b6336db90aa9ebe27a0bbd" +checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.67" +version = "1.0.68" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b607164372e89797d78b8e23a6d67d5d1038c1c65efd52e1389ef8b77caba2a6" +checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" dependencies = [ "proc-macro2", "quote", @@ -10665,9 +10676,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -10992,11 +11003,12 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68613466112302fdbeabc5fa55f7d57462a0b247d5a6b7d7e09401fb471a144d" +checksum = "3637e734239e12ab152cd269302500bd063f37624ee210cd04b4936ed671f3b1" dependencies = [ "cc", + "windows-targets 0.52.6", ] [[package]] @@ -11204,12 +11216,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna 1.0.3", "percent-encoding", "serde", ] diff --git a/Cargo.toml b/Cargo.toml index dc783f071a7..4471ce32baa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -430,46 +430,46 @@ alloy-rlp = "0.3.4" alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.6.0", default-features = false } -alloy-contract = { version = "0.6.0", default-features = false } -alloy-eips = { version = "0.6.0", 
default-features = false } -alloy-genesis = { version = "0.6.0", default-features = false } -alloy-json-rpc = { version = "0.6.0", default-features = false } -alloy-network = { version = "0.6.0", default-features = false } -alloy-network-primitives = { version = "0.6.0", default-features = false } -alloy-node-bindings = { version = "0.6.0", default-features = false } -alloy-provider = { version = "0.6.0", features = [ +alloy-consensus = { version = "0.6.2", default-features = false } +alloy-contract = { version = "0.6.2", default-features = false } +alloy-eips = { version = "0.6.2", default-features = false } +alloy-genesis = { version = "0.6.2", default-features = false } +alloy-json-rpc = { version = "0.6.2", default-features = false } +alloy-network = { version = "0.6.2", default-features = false } +alloy-network-primitives = { version = "0.6.2", default-features = false } +alloy-node-bindings = { version = "0.6.2", default-features = false } +alloy-provider = { version = "0.6.2", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.6.0", default-features = false } -alloy-rpc-client = { version = "0.6.0", default-features = false } -alloy-rpc-types = { version = "0.6.0", features = [ +alloy-pubsub = { version = "0.6.2", default-features = false } +alloy-rpc-client = { version = "0.6.2", default-features = false } +alloy-rpc-types = { version = "0.6.2", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.6.0", default-features = false } -alloy-rpc-types-anvil = { version = "0.6.0", default-features = false } -alloy-rpc-types-beacon = { version = "0.6.0", default-features = false } -alloy-rpc-types-debug = { version = "0.6.0", default-features = false } -alloy-rpc-types-engine = { version = "0.6.0", default-features = false } -alloy-rpc-types-eth = { version = "0.6.0", default-features = false } -alloy-rpc-types-mev = { version = "0.6.0", default-features = false } -alloy-rpc-types-trace = { 
version = "0.6.0", default-features = false } -alloy-rpc-types-txpool = { version = "0.6.0", default-features = false } -alloy-serde = { version = "0.6.0", default-features = false } -alloy-signer = { version = "0.6.0", default-features = false } -alloy-signer-local = { version = "0.6.0", default-features = false } -alloy-transport = { version = "0.6.0" } -alloy-transport-http = { version = "0.6.0", features = [ +alloy-rpc-types-admin = { version = "0.6.2", default-features = false } +alloy-rpc-types-anvil = { version = "0.6.2", default-features = false } +alloy-rpc-types-beacon = { version = "0.6.2", default-features = false } +alloy-rpc-types-debug = { version = "0.6.2", default-features = false } +alloy-rpc-types-engine = { version = "0.6.2", default-features = false } +alloy-rpc-types-eth = { version = "0.6.2", default-features = false } +alloy-rpc-types-mev = { version = "0.6.2", default-features = false } +alloy-rpc-types-trace = { version = "0.6.2", default-features = false } +alloy-rpc-types-txpool = { version = "0.6.2", default-features = false } +alloy-serde = { version = "0.6.2", default-features = false } +alloy-signer = { version = "0.6.2", default-features = false } +alloy-signer-local = { version = "0.6.2", default-features = false } +alloy-transport = { version = "0.6.2" } +alloy-transport-http = { version = "0.6.2", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.6.0", default-features = false } -alloy-transport-ws = { version = "0.6.0", default-features = false } +alloy-transport-ipc = { version = "0.6.2", default-features = false } +alloy-transport-ws = { version = "0.6.2", default-features = false } # op -op-alloy-rpc-types = "0.6.2" -op-alloy-rpc-types-engine = "0.6.2" -op-alloy-network = "0.6.2" -op-alloy-consensus = "0.6.2" +op-alloy-rpc-types = "0.6.3" +op-alloy-rpc-types-engine = "0.6.3" +op-alloy-network = "0.6.3" +op-alloy-consensus = "0.6.3" # misc aquamarine = "0.6" diff --git 
a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index b019ce0e97f..f62dbbf98cc 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,6 +1,6 @@ //! Loads and formats OP transaction RPC response. -use alloy_consensus::Signed; +use alloy_consensus::{Signed, Transaction as _}; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types::TransactionInfo; use op_alloy_consensus::OpTxEnvelope; @@ -110,7 +110,15 @@ where .flatten() .and_then(|receipt| receipt.deposit_receipt_version); - let TransactionInfo { block_hash, block_number, index: transaction_index, .. } = tx_info; + let TransactionInfo { + block_hash, block_number, index: transaction_index, base_fee, .. + } = tx_info; + + let effective_gas_price = base_fee + .map(|base_fee| { + inner.effective_tip_per_gas(base_fee as u64).unwrap_or_default() + base_fee + }) + .unwrap_or_else(|| inner.max_fee_per_gas()); Transaction { inner: alloy_rpc_types::Transaction { @@ -119,6 +127,7 @@ where block_number, transaction_index, from, + effective_gas_price: Some(effective_gas_price), }, deposit_receipt_version, } diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index b86e32f046f..af0a3cbef8f 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -1,6 +1,6 @@ //! L1 `eth` API types. 
-use alloy_consensus::{Signed, TxEip4844Variant, TxEnvelope}; +use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; use alloy_network::{Ethereum, Network}; use alloy_rpc_types::{Transaction, TransactionInfo}; use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; @@ -24,7 +24,7 @@ where let from = tx.signer(); let TransactionSigned { transaction, signature, hash } = tx.into_signed(); - let inner = match transaction { + let inner: TxEnvelope = match transaction { reth_primitives::Transaction::Legacy(tx) => { Signed::new_unchecked(tx, signature, hash).into() } @@ -44,9 +44,24 @@ where _ => unreachable!(), }; - let TransactionInfo { block_hash, block_number, index: transaction_index, .. } = tx_info; + let TransactionInfo { + block_hash, block_number, index: transaction_index, base_fee, .. + } = tx_info; - Transaction { inner, block_hash, block_number, transaction_index, from } + let effective_gas_price = base_fee + .map(|base_fee| { + inner.effective_tip_per_gas(base_fee as u64).unwrap_or_default() + base_fee + }) + .unwrap_or_else(|| inner.max_fee_per_gas()); + + Transaction { + inner, + block_hash, + block_number, + transaction_index, + from, + effective_gas_price: Some(effective_gas_price), + } } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { From 87f328f957d3d9be478286b8e74314a5d5fabc13 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 7 Nov 2024 23:33:30 -0600 Subject: [PATCH 371/970] replace BuilderBlockValidationRequestV3 with alloy type (#12396) --- Cargo.lock | 2 -- crates/ethereum/node/tests/e2e/rpc.rs | 6 +++-- crates/rpc/rpc-api/Cargo.toml | 2 -- crates/rpc/rpc-api/src/lib.rs | 5 +--- crates/rpc/rpc-api/src/validation.rs | 39 ++------------------------- crates/rpc/rpc/src/validation.rs | 6 ++--- 6 files changed, 9 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4fb7b26f2b6..809d4be3135 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -8701,8 +8701,6 @@ dependencies = [ "reth-engine-primitives", "reth-network-peers", "reth-rpc-eth-api", - "serde", - "serde_with", ] [[package]] diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index 94ae997eed6..b1a11b1b5eb 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -2,12 +2,14 @@ use crate::utils::eth_payload_attributes; use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718}; use alloy_primitives::{Address, B256, U256}; use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx}; -use alloy_rpc_types_beacon::relay::{BidTrace, SignedBidSubmissionV3, SignedBidSubmissionV4}; +use alloy_rpc_types_beacon::relay::{ + BidTrace, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, + SignedBidSubmissionV3, SignedBidSubmissionV4, +}; use rand::{rngs::StdRng, Rng, SeedableRng}; use reth::{ payload::BuiltPayload, rpc::{ - api::{BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4}, compat::engine::payload::block_to_payload_v3, types::{engine::BlobsBundleV1, TransactionRequest}, }, diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 75c06a2554c..abcdf98b544 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -35,8 +35,6 @@ alloy-rpc-types-engine.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } -serde.workspace = true -serde_with.workspace = true [features] client = [ diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 0a4fa9f660e..73775112dcf 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -46,10 +46,7 @@ pub mod servers { rpc::RpcApiServer, trace::TraceApiServer, txpool::TxPoolApiServer, - validation::{ - BlockSubmissionValidationApiServer, BuilderBlockValidationRequestV3, - BuilderBlockValidationRequestV4, - }, + 
validation::BlockSubmissionValidationApiServer, web3::Web3ApiServer, }; pub use reth_rpc_eth_api::{ diff --git a/crates/rpc/rpc-api/src/validation.rs b/crates/rpc/rpc-api/src/validation.rs index 797eee7ae52..5e4f2e26143 100644 --- a/crates/rpc/rpc-api/src/validation.rs +++ b/crates/rpc/rpc-api/src/validation.rs @@ -1,45 +1,10 @@ //! API for block submission validation. -use alloy_primitives::B256; use alloy_rpc_types_beacon::relay::{ - BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, SignedBidSubmissionV3, - SignedBidSubmissionV4, + BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, + BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, }; use jsonrpsee::proc_macros::rpc; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -/// A Request to validate a [`SignedBidSubmissionV3`] -/// -/// -#[serde_as] -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BuilderBlockValidationRequestV3 { - /// The request to be validated. - #[serde(flatten)] - pub request: SignedBidSubmissionV3, - /// The registered gas limit for the validation request. - #[serde_as(as = "DisplayFromStr")] - pub registered_gas_limit: u64, - /// The parent beacon block root for the validation request. - pub parent_beacon_block_root: B256, -} - -/// A Request to validate a [`SignedBidSubmissionV4`] -/// -/// -#[serde_as] -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BuilderBlockValidationRequestV4 { - /// The request to be validated. - #[serde(flatten)] - pub request: SignedBidSubmissionV4, - /// The registered gas limit for the validation request. - #[serde_as(as = "DisplayFromStr")] - pub registered_gas_limit: u64, - /// The parent beacon block root for the validation request. - pub parent_beacon_block_root: B256, -} /// Block validation rpc interface. 
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "flashbots"))] diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 1476180d431..5d7d00f354b 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -5,6 +5,7 @@ use alloy_rpc_types::engine::{ }; use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, + BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; @@ -20,10 +21,7 @@ use reth_provider::{ StateProviderFactory, WithdrawalsProvider, }; use reth_revm::database::StateProviderDatabase; -use reth_rpc_api::{ - BlockSubmissionValidationApiServer, BuilderBlockValidationRequestV3, - BuilderBlockValidationRequestV4, -}; +use reth_rpc_api::BlockSubmissionValidationApiServer; use reth_rpc_eth_types::EthApiError; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_trie::HashedPostState; From c5ab4243e74f66d06a14710d194d580faab938e2 Mon Sep 17 00:00:00 2001 From: wangjingcun Date: Fri, 8 Nov 2024 18:23:02 +0800 Subject: [PATCH 372/970] chore: remove redundant words in comment (#12394) Signed-off-by: wangjingcun --- crates/net/discv4/src/lib.rs | 2 +- crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ | 2 +- deny.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 788e93048f1..9ffe8451f0e 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -794,7 +794,7 @@ impl Discv4Service { } /// Sends a new `FindNode` packet to the node with `target` as the lookup target but checks - /// whether we should should send a new ping first to renew the endpoint proof by checking the + /// whether we should send a new ping first to renew the endpoint proof by checking the /// previously failed findNode requests. 
It could be that the node is no longer reachable or /// lost our entry. fn find_node_checked(&mut self, node: &NodeRecord, ctx: LookupContext) { diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ index dbe94755087..767f3791280 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ @@ -851,7 +851,7 @@ struct LIBMDBX_API_TYPE slice : public ::MDBX_val { /// \brief Checks whether the content of the slice is printable. /// \param [in] disable_utf8 By default if `disable_utf8` is `false` function /// checks that content bytes are printable ASCII-7 characters or a valid UTF8 - /// sequences. Otherwise, if if `disable_utf8` is `true` function checks that + /// sequences. Otherwise, if `disable_utf8` is `true` function checks that /// content bytes are printable extended 8-bit ASCII codes. MDBX_NOTHROW_PURE_FUNCTION bool is_printable(bool disable_utf8 = false) const noexcept; diff --git a/deny.toml b/deny.toml index e5823460250..e8f60461c85 100644 --- a/deny.toml +++ b/deny.toml @@ -58,7 +58,7 @@ allow = [ # aren't accepted for every possible crate as with the normal allow list exceptions = [ # TODO: decide on MPL-2.0 handling - # These dependencies are grandfathered in in https://github.com/paradigmxyz/reth/pull/6980 + # These dependencies are grandfathered in https://github.com/paradigmxyz/reth/pull/6980 { allow = ["MPL-2.0"], name = "option-ext" }, { allow = ["MPL-2.0"], name = "webpki-roots" }, ] From f373efe01d1e79751a7619d22be2221537d33f28 Mon Sep 17 00:00:00 2001 From: Hoa Nguyen Date: Fri, 8 Nov 2024 17:36:17 +0700 Subject: [PATCH 373/970] refactor: phase out alloy-rpc-types usage (#12395) --- Cargo.lock | 29 +++++++++---------- crates/consensus/debug-client/Cargo.toml | 2 +- crates/consensus/debug-client/src/client.rs | 2 +- .../debug-client/src/providers/etherscan.rs | 2 +- .../debug-client/src/providers/rpc.rs | 2 +- 
crates/e2e-test-utils/Cargo.toml | 3 +- crates/e2e-test-utils/src/node.rs | 2 +- crates/e2e-test-utils/src/traits.rs | 2 +- crates/e2e-test-utils/src/transaction.rs | 2 +- crates/optimism/rpc/Cargo.toml | 1 - crates/optimism/rpc/src/error.rs | 2 +- crates/optimism/rpc/src/eth/block.rs | 2 +- crates/optimism/rpc/src/eth/receipt.rs | 2 +- crates/optimism/rpc/src/eth/transaction.rs | 4 +-- crates/rpc/rpc-api/src/anvil.rs | 2 +- crates/rpc/rpc-api/src/debug.rs | 3 +- crates/rpc/rpc-api/src/engine.rs | 8 ++--- crates/rpc/rpc-api/src/otterscan.rs | 2 +- crates/rpc/rpc-api/src/trace.rs | 5 ++-- crates/rpc/rpc-eth-api/Cargo.toml | 2 +- crates/rpc/rpc-eth-api/src/core.rs | 10 +++---- crates/rpc/rpc-eth-api/src/filter.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/block.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 4 +-- crates/rpc/rpc-eth-api/src/helpers/fee.rs | 2 +- .../rpc-eth-api/src/helpers/pending_block.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/spec.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 3 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 2 +- .../rpc-eth-api/src/helpers/transaction.rs | 3 +- crates/rpc/rpc-eth-api/src/pubsub.rs | 2 +- crates/rpc/rpc-eth-api/src/types.rs | 4 +-- crates/rpc/rpc-eth-types/Cargo.toml | 1 - crates/rpc/rpc-eth-types/src/cache/db.rs | 2 +- crates/rpc/rpc-eth-types/src/error.rs | 2 +- crates/rpc/rpc-eth-types/src/fee_history.rs | 2 +- crates/rpc/rpc-eth-types/src/gas_oracle.rs | 2 +- crates/rpc/rpc-eth-types/src/logs_utils.rs | 8 ++--- crates/rpc/rpc-eth-types/src/receipt.rs | 4 +-- crates/rpc/rpc-eth-types/src/revm_utils.rs | 2 +- crates/rpc/rpc-eth-types/src/simulate.rs | 6 ++-- crates/rpc/rpc-eth-types/src/transaction.rs | 2 +- crates/rpc/rpc-testing-util/Cargo.toml | 1 - crates/rpc/rpc-testing-util/src/debug.rs | 3 +- crates/rpc/rpc-testing-util/src/trace.rs | 3 +- crates/rpc/rpc-testing-util/tests/it/trace.rs | 2 +- crates/rpc/rpc-types-compat/Cargo.toml | 2 +- crates/rpc/rpc-types-compat/src/block.rs | 2 +- 
crates/rpc/rpc-types-compat/src/proof.rs | 2 +- .../rpc-types-compat/src/transaction/mod.rs | 2 +- crates/rpc/rpc/Cargo.toml | 5 ++-- crates/rpc/rpc/src/debug.rs | 8 ++--- crates/rpc/rpc/src/engine.rs | 6 ++-- crates/rpc/rpc/src/eth/bundle.rs | 2 +- crates/rpc/rpc/src/eth/filter.rs | 2 +- crates/rpc/rpc/src/eth/helpers/block.rs | 2 +- crates/rpc/rpc/src/eth/helpers/types.rs | 2 +- crates/rpc/rpc/src/eth/pubsub.rs | 2 +- crates/rpc/rpc/src/eth/sim_bundle.rs | 2 +- crates/rpc/rpc/src/otterscan.rs | 2 +- crates/rpc/rpc/src/trace.rs | 4 +-- crates/rpc/rpc/src/validation.rs | 6 ++-- examples/custom-inspector/Cargo.toml | 2 +- examples/custom-inspector/src/main.rs | 2 +- examples/db-access/Cargo.toml | 2 +- examples/db-access/src/main.rs | 2 +- 66 files changed, 106 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 809d4be3135..d6169e25f5f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1168,7 +1168,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4fa97bb310c33c811334143cf64c5bb2b7b3c06e453db6b095d7061eff8f113" dependencies = [ - "fastrand 2.1.1", + "fastrand 2.2.0", "tokio", ] @@ -2896,7 +2896,7 @@ version = "0.0.0" dependencies = [ "alloy-eips", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types-eth", "clap", "futures-util", "reth", @@ -2956,7 +2956,7 @@ name = "example-db-access" version = "0.0.0" dependencies = [ "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types-eth", "eyre", "reth-chainspec", "reth-db", @@ -3115,9 +3115,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "fastrlp" @@ -6800,8 +6800,8 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-provider", - "alloy-rpc-types", 
"alloy-rpc-types-engine", + "alloy-rpc-types-eth", "auto_impl", "eyre", "futures", @@ -7051,7 +7051,8 @@ dependencies = [ "alloy-eips", "alloy-network", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "alloy-signer", "alloy-signer-local", "derive_more 1.0.0", @@ -8324,7 +8325,6 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-eth", "derive_more 1.0.0", "jsonrpsee-types", @@ -8623,6 +8623,7 @@ dependencies = [ "alloy-rpc-types-admin", "alloy-rpc-types-beacon", "alloy-rpc-types-debug", + "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-rpc-types-mev", "alloy-rpc-types-trace", @@ -8709,7 +8710,6 @@ version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-eth", "alloy-rpc-types-trace", "futures", @@ -8820,9 +8820,9 @@ dependencies = [ "alloy-json-rpc", "alloy-network", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-eth", "alloy-rpc-types-mev", + "alloy-serde", "async-trait", "auto_impl", "dyn-clone", @@ -8859,7 +8859,6 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-eth", "alloy-sol-types", "derive_more 1.0.0", @@ -8933,9 +8932,9 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types", "alloy-rpc-types-engine", "alloy-rpc-types-eth", + "alloy-serde", "reth-primitives", "reth-trie-common", "serde", @@ -10429,12 +10428,12 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand 2.1.1", + "fastrand 2.2.0", "once_cell", "rustix", "windows-sys 0.59.0", diff 
--git a/crates/consensus/debug-client/Cargo.toml b/crates/consensus/debug-client/Cargo.toml index e73125a80bd..18e7aead306 100644 --- a/crates/consensus/debug-client/Cargo.toml +++ b/crates/consensus/debug-client/Cargo.toml @@ -21,7 +21,7 @@ reth-tracing.workspace = true alloy-consensus = { workspace = true, features = ["serde"] } alloy-eips.workspace = true alloy-provider = { workspace = true, features = ["ws"] } -alloy-rpc-types.workspace = true +alloy-rpc-types-eth.workspace = true alloy-rpc-types-engine.workspace = true alloy-primitives.workspace = true diff --git a/crates/consensus/debug-client/src/client.rs b/crates/consensus/debug-client/src/client.rs index b6993a41b90..0e2a50370b8 100644 --- a/crates/consensus/debug-client/src/client.rs +++ b/crates/consensus/debug-client/src/client.rs @@ -1,8 +1,8 @@ use alloy_consensus::Transaction; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; -use alloy_rpc_types::{Block, BlockTransactions}; use alloy_rpc_types_engine::{ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3}; +use alloy_rpc_types_eth::{Block, BlockTransactions}; use reth_node_api::EngineTypes; use reth_rpc_builder::auth::AuthServerHandle; use reth_tracing::tracing::warn; diff --git a/crates/consensus/debug-client/src/providers/etherscan.rs b/crates/consensus/debug-client/src/providers/etherscan.rs index 59b402f3e78..d3167b6cfab 100644 --- a/crates/consensus/debug-client/src/providers/etherscan.rs +++ b/crates/consensus/debug-client/src/providers/etherscan.rs @@ -1,6 +1,6 @@ use crate::BlockProvider; use alloy_eips::BlockNumberOrTag; -use alloy_rpc_types::Block; +use alloy_rpc_types_eth::Block; use reqwest::Client; use reth_tracing::tracing::warn; use serde::Deserialize; diff --git a/crates/consensus/debug-client/src/providers/rpc.rs b/crates/consensus/debug-client/src/providers/rpc.rs index 5312bd55b3f..787515f1a60 100644 --- a/crates/consensus/debug-client/src/providers/rpc.rs +++ 
b/crates/consensus/debug-client/src/providers/rpc.rs @@ -1,7 +1,7 @@ use crate::BlockProvider; use alloy_eips::BlockNumberOrTag; use alloy_provider::{Provider, ProviderBuilder}; -use alloy_rpc_types::{Block, BlockTransactionsKind}; +use alloy_rpc_types_eth::{Block, BlockTransactionsKind}; use futures::StreamExt; use tokio::sync::mpsc::Sender; diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 67bb7455536..e56449551bb 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -41,7 +41,8 @@ tokio-stream.workspace = true serde_json.workspace = true alloy-signer.workspace = true alloy-signer-local = { workspace = true, features = ["mnemonic"] } -alloy-rpc-types.workspace = true +alloy-rpc-types-eth.workspace = true +alloy-rpc-types-engine.workspace = true alloy-network.workspace = true alloy-consensus = { workspace = true, features = ["kzg"] } tracing.workspace = true diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 8b385115b3e..c3dff527eb2 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,7 +1,7 @@ use std::{marker::PhantomData, pin::Pin}; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; -use alloy_rpc_types::BlockNumberOrTag; +use alloy_rpc_types_eth::BlockNumberOrTag; use eyre::Ok; use futures_util::Future; use reth::{ diff --git a/crates/e2e-test-utils/src/traits.rs b/crates/e2e-test-utils/src/traits.rs index a70bbf7afb7..d14445370d4 100644 --- a/crates/e2e-test-utils/src/traits.rs +++ b/crates/e2e-test-utils/src/traits.rs @@ -1,4 +1,4 @@ -use alloy_rpc_types::engine::ExecutionPayloadEnvelopeV4; +use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV4; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; use reth::rpc::types::engine::{ExecutionPayloadEnvelopeV3, ExecutionPayloadV3}; diff --git a/crates/e2e-test-utils/src/transaction.rs 
b/crates/e2e-test-utils/src/transaction.rs index 58a25dc1257..d24c5579313 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -4,7 +4,7 @@ use alloy_network::{ eip2718::Encodable2718, Ethereum, EthereumWallet, TransactionBuilder, TransactionBuilder4844, }; use alloy_primitives::{hex, Address, Bytes, TxKind, B256, U256}; -use alloy_rpc_types::{Authorization, TransactionInput, TransactionRequest}; +use alloy_rpc_types_eth::{Authorization, TransactionInput, TransactionRequest}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use eyre::Ok; diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 17ebec7ff74..37b64b774a1 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -37,7 +37,6 @@ reth-optimism-forks.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true -alloy-rpc-types.workspace = true alloy-consensus.workspace = true op-alloy-network.workspace = true op-alloy-rpc-types.workspace = true diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 078da8fe4d1..ffc698b6e98 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -1,6 +1,6 @@ //! RPC errors specific to OP. -use alloy_rpc_types::error::EthRpcErrorCode; +use alloy_rpc_types_eth::error::EthRpcErrorCode; use jsonrpsee_types::error::INTERNAL_ERROR_CODE; use reth_optimism_evm::OpBlockExecutionError; use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 85f36570f2e..6678fbe5df4 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -1,6 +1,6 @@ //! Loads and formats OP block RPC response. 
-use alloy_rpc_types::BlockId; +use alloy_rpc_types_eth::BlockId; use op_alloy_network::Network; use op_alloy_rpc_types::OpTransactionReceipt; use reth_chainspec::ChainSpecProvider; diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 3563d4ae45d..40ee5d9fd86 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,7 +1,7 @@ //! Loads and formats OP receipt RPC response. use alloy_eips::eip2718::Encodable2718; -use alloy_rpc_types::{Log, TransactionReceipt}; +use alloy_rpc_types_eth::{Log, TransactionReceipt}; use op_alloy_consensus::{ DepositTransaction, OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope, }; diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index f62dbbf98cc..90e5e33feb7 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -2,7 +2,7 @@ use alloy_consensus::{Signed, Transaction as _}; use alloy_primitives::{Bytes, B256}; -use alloy_rpc_types::TransactionInfo; +use alloy_rpc_types_eth::TransactionInfo; use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; use reth_node_api::FullNodeComponents; @@ -121,7 +121,7 @@ where .unwrap_or_else(|| inner.max_fee_per_gas()); Transaction { - inner: alloy_rpc_types::Transaction { + inner: alloy_rpc_types_eth::Transaction { inner, block_hash, block_number, diff --git a/crates/rpc/rpc-api/src/anvil.rs b/crates/rpc/rpc-api/src/anvil.rs index baa09166b83..0930264a63b 100644 --- a/crates/rpc/rpc-api/src/anvil.rs +++ b/crates/rpc/rpc-api/src/anvil.rs @@ -1,8 +1,8 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use alloy_primitives::{Address, Bytes, B256, U256}; -use alloy_rpc_types::Block; use alloy_rpc_types_anvil::{Forking, Metadata, MineOptions, NodeInfo}; +use alloy_rpc_types_eth::Block; /// Anvil rpc interface. 
/// https://book.getfoundry.sh/reference/anvil/#custom-methods diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index d1837787d54..1b857d4a11f 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,8 +1,7 @@ use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256}; -use alloy_rpc_types::{Block, Bundle, StateContext}; use alloy_rpc_types_debug::ExecutionWitness; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Bundle, StateContext}; use alloy_rpc_types_trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index d92173112eb..f78b8349be8 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -6,15 +6,15 @@ use alloy_eips::{eip4844::BlobAndProofV1, eip7685::Requests, BlockId, BlockNumberOrTag}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, BlockHash, Bytes, B256, U256, U64}; -use alloy_rpc_types::{ - state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, -}; use alloy_rpc_types_engine::{ ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{ + state::StateOverride, transaction::TransactionRequest, BlockOverrides, + EIP1186AccountProofResponse, Filter, Log, SyncStatus, +}; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_engine_primitives::EngineTypes; diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs index d3e61c03104..b4679ae5cec 100644 --- 
a/crates/rpc/rpc-api/src/otterscan.rs +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -1,7 +1,7 @@ use alloy_eips::BlockId; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, TxHash, B256}; -use alloy_rpc_types::Header; +use alloy_rpc_types_eth::Header; use alloy_rpc_types_trace::otterscan::{ BlockDetails, ContractCreator, InternalOperation, OtsBlockTransactions, TraceEntry, TransactionsWithReceipts, diff --git a/crates/rpc/rpc-api/src/trace.rs b/crates/rpc/rpc-api/src/trace.rs index 45059284a2d..41e2b4c1c3e 100644 --- a/crates/rpc/rpc-api/src/trace.rs +++ b/crates/rpc/rpc-api/src/trace.rs @@ -1,7 +1,8 @@ use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256}; -use alloy_rpc_types::{state::StateOverride, BlockOverrides, Index}; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{ + state::StateOverride, transaction::TransactionRequest, BlockOverrides, Index, +}; use alloy_rpc_types_trace::{ filter::TraceFilter, opcode::{BlockOpcodeGas, TransactionOpcodeGas}, diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index edfd57b201d..e4b1b28074f 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -33,13 +33,13 @@ reth-trie.workspace = true reth-node-api.workspace = true # ethereum +alloy-serde.workspace = true alloy-eips.workspace = true alloy-dyn-abi = { workspace = true, features = ["eip712"] } alloy-json-rpc.workspace = true alloy-network.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true -alloy-rpc-types.workspace = true alloy-rpc-types-mev.workspace = true alloy-consensus.workspace = true diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index a89364a15db..421c10f8b41 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -4,14 +4,14 @@ use alloy_dyn_abi::TypedData; use alloy_eips::{eip2930::AccessListResult, BlockId, 
BlockNumberOrTag}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, B256, B64, U256, U64}; -use alloy_rpc_types::{ - serde_helpers::JsonStorageKey, +use alloy_rpc_types_eth::{ simulate::{SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, + transaction::TransactionRequest, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, StateContext, SyncStatus, Work, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; @@ -276,7 +276,7 @@ pub trait EthApi { &self, address: Address, block: BlockId, - ) -> RpcResult>; + ) -> RpcResult>; /// Introduced in EIP-1559, returns suggestion for the priority for dynamic fee transactions. #[method(name = "maxPriorityFeePerGas")] @@ -694,7 +694,7 @@ where &self, address: Address, block: BlockId, - ) -> RpcResult> { + ) -> RpcResult> { trace!(target: "rpc::eth", "Serving eth_getAccount"); Ok(EthState::get_account(self, address, block).await?) } diff --git a/crates/rpc/rpc-eth-api/src/filter.rs b/crates/rpc/rpc-eth-api/src/filter.rs index c73d9672843..1acba351af7 100644 --- a/crates/rpc/rpc-eth-api/src/filter.rs +++ b/crates/rpc/rpc-eth-api/src/filter.rs @@ -1,7 +1,7 @@ //! `eth_` RPC API for filtering. use alloy_json_rpc::RpcObject; -use alloy_rpc_types::{Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind}; +use alloy_rpc_types_eth::{Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; /// Rpc Interface for poll-based ethereum filter API. 
diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index a9794af004a..e25ea84d699 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use alloy_eips::BlockId; -use alloy_rpc_types::{Block, Header, Index}; +use alloy_rpc_types_eth::{Block, Header, Index}; use futures::Future; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 10148fbe78b..ef29f807026 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -8,12 +8,12 @@ use crate::{ use alloy_consensus::BlockHeader; use alloy_eips::{eip1559::calc_next_block_base_fee, eip2930::AccessListResult}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ simulate::{SimBlock, SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, + transaction::TransactionRequest, BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use futures::Future; use reth_chainspec::{EthChainSpec, MIN_TRANSACTION_GAS}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 18d2d631148..8ed45d2ac08 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -1,7 +1,7 @@ //! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods. 
use alloy_primitives::U256; -use alloy_rpc_types::{BlockNumberOrTag, FeeHistory}; +use alloy_rpc_types_eth::{BlockNumberOrTag, FeeHistory}; use futures::Future; use reth_chainspec::EthChainSpec; use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index a0065d79342..0173485aef5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -10,7 +10,7 @@ use alloy_eips::{ eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, }; use alloy_primitives::{BlockNumber, B256, U256}; -use alloy_rpc_types::BlockNumberOrTag; +use alloy_rpc_types_eth::BlockNumberOrTag; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::{ diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs index a6213017af8..9957a00a41d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/spec.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs @@ -1,7 +1,7 @@ //! Loads chain metadata. 
use alloy_primitives::{Address, U256, U64}; -use alloy_rpc_types::{Stage, SyncInfo, SyncStatus}; +use alloy_rpc_types_eth::{Stage, SyncInfo, SyncStatus}; use futures::Future; use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_errors::{RethError, RethResult}; diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 6a34967058b..7bc365d91c4 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -4,7 +4,8 @@ use alloy_consensus::constants::KECCAK_EMPTY; use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256, U256}; -use alloy_rpc_types::{serde_helpers::JsonStorageKey, Account, EIP1186AccountProofResponse}; +use alloy_rpc_types_eth::{Account, EIP1186AccountProofResponse}; +use alloy_serde::JsonStorageKey; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_errors::RethError; diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 29bde519960..36d901fda5f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -4,7 +4,7 @@ use std::{fmt::Display, sync::Arc}; use crate::{FromEvmError, RpcNodeCore}; use alloy_primitives::B256; -use alloy_rpc_types::{BlockId, TransactionInfo}; +use alloy_rpc_types_eth::{BlockId, TransactionInfo}; use futures::Future; use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index ab94e3dd107..234008f21fe 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -6,8 +6,7 @@ use alloy_dyn_abi::TypedData; use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, 
B256}; -use alloy_rpc_types::{BlockNumberOrTag, TransactionInfo}; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned}; use reth_provider::{BlockNumReader, BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; diff --git a/crates/rpc/rpc-eth-api/src/pubsub.rs b/crates/rpc/rpc-eth-api/src/pubsub.rs index b70dacb26fa..ecbb1fe9a83 100644 --- a/crates/rpc/rpc-eth-api/src/pubsub.rs +++ b/crates/rpc/rpc-eth-api/src/pubsub.rs @@ -1,7 +1,7 @@ //! `eth_` RPC API for pubsub subscription. use alloy_json_rpc::RpcObject; -use alloy_rpc_types::pubsub::{Params, SubscriptionKind}; +use alloy_rpc_types_eth::pubsub::{Params, SubscriptionKind}; use jsonrpsee::proc_macros::rpc; /// Ethereum pub-sub rpc interface. diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 620f4523d21..b75bce026fb 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -6,7 +6,7 @@ use std::{ }; use alloy_network::Network; -use alloy_rpc_types::Block; +use alloy_rpc_types_eth::Block; use reth_rpc_types_compat::TransactionCompat; use crate::{AsEthApiError, FromEthApiError, FromEvmError}; @@ -22,7 +22,7 @@ pub trait EthApiTypes: Send + Sync + Clone { + Send + Sync; /// Blockchain primitive types, specific to network, e.g. block and transaction. - type NetworkTypes: Network; + type NetworkTypes: Network; /// Conversion methods for transaction RPC type. 
type TransactionCompat: Send + Sync + Clone + fmt::Debug; diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 80901bcf812..9b38ed89724 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -35,7 +35,6 @@ alloy-rpc-types-eth.workspace = true revm.workspace = true revm-inspectors.workspace = true revm-primitives = { workspace = true, features = ["dev"] } -alloy-rpc-types.workspace = true alloy-eips.workspace = true # rpc diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 627fd2b2df7..50fd4b04625 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -116,7 +116,7 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { fn convert_block_hash( &self, - hash_or_number: alloy_rpc_types::BlockHashOrNumber, + hash_or_number: alloy_rpc_types_eth::BlockHashOrNumber, ) -> reth_errors::ProviderResult> { self.0.convert_block_hash(hash_or_number) } diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error.rs index 9241e9e0b6b..641cbc88291 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error.rs @@ -4,7 +4,7 @@ use std::time::Duration; use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, U256}; -use alloy_rpc_types::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; +use alloy_rpc_types_eth::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; use alloy_sol_types::decode_revert_reason; use reth_errors::RethError; use reth_primitives::revm_primitives::InvalidHeader; diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 7692d47de99..6c8b66246f3 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -8,7 +8,7 @@ use std::{ use 
alloy_eips::eip1559::calc_next_block_base_fee; use alloy_primitives::B256; -use alloy_rpc_types::TxGasAndReward; +use alloy_rpc_types_eth::TxGasAndReward; use futures::{ future::{Fuse, FusedFuture}, FutureExt, Stream, StreamExt, diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 9da373376bd..d73cd72b650 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -4,7 +4,7 @@ use alloy_consensus::constants::GWEI_TO_WEI; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{B256, U256}; -use alloy_rpc_types::BlockId; +use alloy_rpc_types_eth::BlockId; use derive_more::{Deref, DerefMut, From, Into}; use itertools::Itertools; use reth_rpc_server_types::constants; diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index aa132675c93..3e7c9db6d68 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -4,7 +4,7 @@ use alloy_eips::BlockNumHash; use alloy_primitives::TxHash; -use alloy_rpc_types::{FilteredParams, Log}; +use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; use reth_primitives::{Receipt, SealedBlockWithSenders}; @@ -179,7 +179,7 @@ pub fn get_filter_block_range( #[cfg(test)] mod tests { - use alloy_rpc_types::Filter; + use alloy_rpc_types_eth::Filter; use super::*; @@ -242,8 +242,8 @@ mod tests { let start_block = info.best_number; let (from_block_number, to_block_number) = get_filter_block_range( - from_block.and_then(alloy_rpc_types::BlockNumberOrTag::as_number), - to_block.and_then(alloy_rpc_types::BlockNumberOrTag::as_number), + from_block.and_then(alloy_rpc_types_eth::BlockNumberOrTag::as_number), + to_block.and_then(alloy_rpc_types_eth::BlockNumberOrTag::as_number), start_block, info, ); diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 
0734b547ec8..247b4449ef5 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -2,7 +2,7 @@ use alloy_consensus::{ReceiptEnvelope, Transaction}; use alloy_primitives::{Address, TxKind}; -use alloy_rpc_types::{Log, ReceiptWithBloom, TransactionReceipt}; +use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt}; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use revm_primitives::calc_blob_gasprice; @@ -59,7 +59,7 @@ pub fn build_receipt( }) .collect(); - let rpc_receipt = alloy_rpc_types::Receipt { + let rpc_receipt = alloy_rpc_types_eth::Receipt { status: receipt.success.into(), cumulative_gas_used: receipt.cumulative_gas_used as u128, logs, diff --git a/crates/rpc/rpc-eth-types/src/revm_utils.rs b/crates/rpc/rpc-eth-types/src/revm_utils.rs index ee3c6e7d9a7..782ef569796 100644 --- a/crates/rpc/rpc-eth-types/src/revm_utils.rs +++ b/crates/rpc/rpc-eth-types/src/revm_utils.rs @@ -1,7 +1,7 @@ //! utilities for working with revm use alloy_primitives::{Address, B256, U256}; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ state::{AccountOverride, StateOverride}, BlockOverrides, }; diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index d881b854a79..b2a9a5e62ed 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -2,11 +2,11 @@ use alloy_consensus::{Transaction as _, TxEip4844Variant, TxType, TypedTransaction}; use alloy_primitives::PrimitiveSignature as Signature; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, + transaction::TransactionRequest, Block, BlockTransactionsKind, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use jsonrpsee_types::ErrorObject; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, @@ -225,7 +225,7 @@ pub fn build_block( .into_iter() .map(|log| { 
log_index += 1; - alloy_rpc_types::Log { + alloy_rpc_types_eth::Log { inner: log, log_index: Some(log_index - 1), transaction_index: Some(transaction_index as u64), diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index 7d2237a1b7f..bfff1cafead 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -3,7 +3,7 @@ //! Transaction wrapper that labels transaction with its origin. use alloy_primitives::B256; -use alloy_rpc_types::TransactionInfo; +use alloy_rpc_types_eth::TransactionInfo; use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_types_compat::{ transaction::{from_recovered, from_recovered_with_block_context}, diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index e5c57502e2b..149073b1c68 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -19,7 +19,6 @@ reth-rpc-api = { workspace = true, features = ["client"] } # ethereum alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true -alloy-rpc-types.workspace = true alloy-rpc-types-trace.workspace = true alloy-eips.workspace = true diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index d4c7dce860b..a18771af3b0 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -8,8 +8,7 @@ use std::{ use alloy_eips::BlockId; use alloy_primitives::{TxHash, B256}; -use alloy_rpc_types::{Block, Transaction}; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Transaction}; use alloy_rpc_types_trace::{ common::TraceResult, geth::{GethDebugTracerType, GethDebugTracingOptions, GethTrace}, diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index 097d582df45..b963fa69d8b 100644 --- 
a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -2,8 +2,7 @@ use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, TxHash, B256}; -use alloy_rpc_types::Index; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, Index}; use alloy_rpc_types_trace::{ filter::TraceFilter, parity::{LocalizedTransactionTrace, TraceResults, TraceType}, diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index b0fccefbb46..4c5d2ccb2a6 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -1,7 +1,7 @@ //! Integration tests for the trace API. use alloy_primitives::map::HashSet; -use alloy_rpc_types::{Block, Transaction}; +use alloy_rpc_types_eth::{Block, Transaction}; use alloy_rpc_types_trace::{ filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest, }; diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index b9a9e5f0361..2e45d210d17 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -17,10 +17,10 @@ reth-primitives.workspace = true reth-trie-common.workspace = true # ethereum +alloy-serde.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true -alloy-rpc-types.workspace = true alloy-rpc-types-eth = { workspace = true, default-features = false, features = ["serde"] } alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 3b297ba0bc3..cfa1561c634 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -3,7 +3,7 @@ use alloy_consensus::Sealed; use alloy_primitives::{B256, U256}; use alloy_rlp::Encodable; -use alloy_rpc_types::{ +use 
alloy_rpc_types_eth::{ Block, BlockError, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders, Withdrawals}; diff --git a/crates/rpc/rpc-types-compat/src/proof.rs b/crates/rpc/rpc-types-compat/src/proof.rs index 34128801f8d..b860bc3491d 100644 --- a/crates/rpc/rpc-types-compat/src/proof.rs +++ b/crates/rpc/rpc-types-compat/src/proof.rs @@ -1,7 +1,7 @@ //! Compatibility functions for rpc proof related types. -use alloy_rpc_types::serde_helpers::JsonStorageKey; use alloy_rpc_types_eth::{EIP1186AccountProofResponse, EIP1186StorageProof}; +use alloy_serde::JsonStorageKey; use reth_trie_common::{AccountProof, StorageProof}; /// Creates a new rpc storage proof from a primitive storage proof type. diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 27f0b0288d5..cfbaaa622fb 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -3,7 +3,7 @@ use std::fmt; use alloy_consensus::Transaction as _; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ request::{TransactionInput, TransactionRequest}, TransactionInfo, }; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 876467d1f4b..ac3a548f9b5 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -47,14 +47,15 @@ alloy-genesis.workspace = true alloy-network.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true -alloy-rpc-types.workspace = true alloy-rpc-types-beacon.workspace = true -alloy-rpc-types-eth = { workspace = true, features = ["jsonrpsee-types"] } +alloy-rpc-types.workspace = true +alloy-rpc-types-eth = { workspace = true, features = ["jsonrpsee-types", "serde"] } alloy-rpc-types-debug.workspace = true alloy-rpc-types-trace.workspace = true alloy-rpc-types-mev.workspace = true alloy-rpc-types-txpool.workspace = true 
alloy-rpc-types-admin.workspace = true +alloy-rpc-types-engine.workspace = true alloy-serde.workspace = true revm = { workspace = true, features = [ "optional_block_gas_limit", diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 5b7e161691c..4404515e03c 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,11 +1,11 @@ use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; -use alloy_rpc_types::{ - state::EvmOverrides, Block as RpcBlock, BlockError, Bundle, StateContext, TransactionInfo, -}; use alloy_rpc_types_debug::ExecutionWitness; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{ + state::EvmOverrides, transaction::TransactionRequest, Block as RpcBlock, BlockError, Bundle, + StateContext, TransactionInfo, +}; use alloy_rpc_types_trace::geth::{ call::FlatCallFrame, BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index ac4de7c74e1..fca78d62d63 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -1,9 +1,9 @@ use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256, U64}; -use alloy_rpc_types::{ - state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, +use alloy_rpc_types_eth::{ + state::StateOverride, transaction::TransactionRequest, BlockOverrides, + EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_serde::JsonStorageKey; use jsonrpsee::core::RpcResult as Result; use reth_rpc_api::{EngineEthApiServer, EthApiServer, EthFilterApiServer}; diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 
4d72efd1f8c..db8c01c5c3b 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -106,7 +106,7 @@ where .into()) } - let block_id: alloy_rpc_types::BlockId = state_block_number.into(); + let block_id: alloy_rpc_types_eth::BlockId = state_block_number.into(); // Note: the block number is considered the `parent` block: let (cfg, mut block_env, at) = self.eth_api().evm_env_at(block_id).await?; diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 3d05cdc727f..589cb801e2c 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -10,7 +10,7 @@ use std::{ }; use alloy_primitives::TxHash; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, FilteredParams, Log, PendingTransactionFilterKind, }; diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 1e2d1802e0d..fd3b9db9da2 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,6 +1,6 @@ //! Contains RPC handler implementations specific to blocks. 
-use alloy_rpc_types::{BlockId, TransactionReceipt}; +use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; use reth_primitives::TransactionMeta; use reth_provider::{BlockReaderIdExt, HeaderProvider}; use reth_rpc_eth_api::{ diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index af0a3cbef8f..19ffc55b398 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -2,7 +2,7 @@ use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; use alloy_network::{Ethereum, Network}; -use alloy_rpc_types::{Transaction, TransactionInfo}; +use alloy_rpc_types_eth::{Transaction, TransactionInfo}; use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; use reth_rpc_types_compat::TransactionCompat; diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 922694cdba6..0702e3147ce 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use alloy_primitives::TxHash; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ pubsub::{ Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult, SyncStatusMetadata, diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 67fd5181759..f49d7984f8b 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -2,7 +2,7 @@ use alloy_eips::BlockNumberOrTag; use alloy_primitives::U256; -use alloy_rpc_types::BlockId; +use alloy_rpc_types_eth::BlockId; use alloy_rpc_types_mev::{ BundleItem, Inclusion, Privacy, RefundConfig, SendBundleRequest, SimBundleLogs, SimBundleOverrides, SimBundleResponse, Validity, diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 0585ef76459..d19dcf4d609 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -2,7 +2,7 @@ use 
alloy_consensus::Transaction; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_network::{ReceiptResponse, TransactionResponse}; use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; -use alloy_rpc_types::{BlockTransactions, Header, TransactionReceipt}; +use alloy_rpc_types_eth::{BlockTransactions, Header, TransactionReceipt}; use alloy_rpc_types_trace::{ otterscan::{ BlockDetails, ContractCreator, InternalOperation, OperationType, OtsBlockTransactions, diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 38c73b0f516..41bc0ad2098 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,10 +1,10 @@ use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256, U256}; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ state::{EvmOverrides, StateOverride}, + transaction::TransactionRequest, BlockOverrides, Index, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_rpc_types_trace::{ filter::TraceFilter, opcode::{BlockOpcodeGas, TransactionOpcodeGas}, diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 5d7d00f354b..919fe2d8591 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -1,12 +1,12 @@ use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction}; use alloy_eips::eip4844::kzg_to_versioned_hash; -use alloy_rpc_types::engine::{ - BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, -}; use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, }; +use alloy_rpc_types_engine::{ + BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, +}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; diff --git a/examples/custom-inspector/Cargo.toml 
b/examples/custom-inspector/Cargo.toml index e92e90fb9d1..ee6f887e64c 100644 --- a/examples/custom-inspector/Cargo.toml +++ b/examples/custom-inspector/Cargo.toml @@ -8,7 +8,7 @@ license.workspace = true [dependencies] reth.workspace = true reth-node-ethereum.workspace = true -alloy-rpc-types.workspace = true +alloy-rpc-types-eth.workspace = true clap = { workspace = true, features = ["derive"] } futures-util.workspace = true alloy-primitives.workspace = true diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index 272da63a9b9..67863d00e1e 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -12,7 +12,7 @@ use alloy_eips::BlockNumberOrTag; use alloy_primitives::Address; -use alloy_rpc_types::state::EvmOverrides; +use alloy_rpc_types_eth::state::EvmOverrides; use clap::Parser; use futures_util::StreamExt; use reth::{ diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml index 0a7ef9bb6b2..3310d1cbd67 100644 --- a/examples/db-access/Cargo.toml +++ b/examples/db-access/Cargo.toml @@ -14,7 +14,7 @@ reth-provider.workspace = true reth-node-ethereum.workspace = true reth-node-types.workspace = true -alloy-rpc-types.workspace = true +alloy-rpc-types-eth.workspace = true alloy-primitives.workspace = true diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 5772461bd7a..c3e30fa1cee 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,5 +1,5 @@ use alloy_primitives::{Address, Sealable, B256}; -use alloy_rpc_types::{Filter, FilteredParams}; +use alloy_rpc_types_eth::{Filter, FilteredParams}; use reth_chainspec::ChainSpecBuilder; use reth_db::{open_db_read_only, DatabaseEnv}; use reth_node_ethereum::EthereumNode; From 462540fa30681bff08fea1dda37aa231e4da521f Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 8 Nov 2024 14:24:02 +0400 Subject: [PATCH 374/970] fix: pending transaction ordering (#12382) 
Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/optimism/node/Cargo.toml | 1 + crates/optimism/node/tests/e2e/p2p.rs | 15 ++++ crates/transaction-pool/src/pool/pending.rs | 79 +++++++++++------ crates/transaction-pool/src/pool/txpool.rs | 97 +++++++++++++++++---- 5 files changed, 146 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d6169e25f5f..84b660cd737 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8241,6 +8241,7 @@ dependencies = [ "alloy-rpc-types-engine", "clap", "eyre", + "futures", "op-alloy-consensus", "op-alloy-rpc-types-engine", "parking_lot", diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 7674cbd37c0..07315a07f4e 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -66,6 +66,7 @@ tokio.workspace = true alloy-primitives.workspace = true alloy-genesis.workspace = true op-alloy-consensus.workspace = true +futures.workspace = true [features] optimism = [ diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 30affa9bafb..6240b781472 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,5 +1,6 @@ use crate::utils::{advance_chain, setup}; use alloy_rpc_types_engine::PayloadStatusEnum; +use futures::StreamExt; use reth::blockchain_tree::error::BlockchainTreeError; use std::sync::Arc; use tokio::sync::Mutex; @@ -25,6 +26,19 @@ async fn can_sync() -> eyre::Result<()> { canonical_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); // On second node, sync optimistically up to block number 88a + second_node + .engine_api + .update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth - 1]) + .await?; + second_node + .wait_block( + (tip - reorg_depth - 1) as u64, + canonical_chain[tip_index - reorg_depth - 1], + true, + ) + .await?; + // We send FCU twice to ensure that pool receives canonical chain update on the second FCU + // This is required because 
notifications are not sent during backfill sync second_node .engine_api .update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth]) @@ -32,6 +46,7 @@ async fn can_sync() -> eyre::Result<()> { second_node .wait_block((tip - reorg_depth) as u64, canonical_chain[tip_index - reorg_depth], true) .await?; + second_node.engine_api.canonical_stream.next().await.unwrap(); // On third node, sync optimistically up to block number 90a third_node.engine_api.update_optimistic_forkchoice(canonical_chain[tip_index]).await?; diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index c9cfd85e288..357cea48974 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -8,7 +8,7 @@ use crate::{ }; use std::{ cmp::Ordering, - collections::{BTreeMap, BTreeSet}, + collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap}, ops::Bound::Unbounded, sync::Arc, }; @@ -38,14 +38,10 @@ pub struct PendingPool { all: BTreeSet>, /// The highest nonce transactions for each sender - like the `independent` set, but the /// highest instead of lowest nonce. - /// - /// Sorted by their scoring value. - highest_nonces: BTreeSet>, + highest_nonces: HashMap>, /// Independent transactions that can be included directly and don't require other /// transactions. - /// - /// Sorted by their scoring value. - independent_transactions: BTreeSet>, + independent_transactions: HashMap>, /// Keeps track of the size of this pool. /// /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size). 
@@ -108,7 +104,7 @@ impl PendingPool { pub(crate) fn best(&self) -> BestTransactions { BestTransactions { all: self.by_id.clone(), - independent: self.independent_transactions.clone(), + independent: self.independent_transactions.values().cloned().collect(), invalid: Default::default(), new_transaction_receiver: Some(self.new_transaction_notifier.subscribe()), skip_blobs: false, @@ -255,17 +251,26 @@ impl PendingPool { /// Updates the independent transaction and highest nonces set, assuming the given transaction /// is being _added_ to the pool. fn update_independents_and_highest_nonces(&mut self, tx: &PendingTransaction) { - let ancestor_id = tx.transaction.id().unchecked_ancestor(); - if let Some(ancestor) = ancestor_id.and_then(|id| self.by_id.get(&id)) { - // the transaction already has an ancestor, so we only need to ensure that the - // highest nonces set actually contains the highest nonce for that sender - self.highest_nonces.remove(ancestor); - } else { - // If there's __no__ ancestor in the pool, then this transaction is independent, this is - // guaranteed because this pool is gapless. - self.independent_transactions.insert(tx.clone()); + match self.highest_nonces.entry(tx.transaction.sender_id()) { + Entry::Occupied(mut entry) => { + if entry.get().transaction.nonce() < tx.transaction.nonce() { + *entry.get_mut() = tx.clone(); + } + } + Entry::Vacant(entry) => { + entry.insert(tx.clone()); + } + } + match self.independent_transactions.entry(tx.transaction.sender_id()) { + Entry::Occupied(mut entry) => { + if entry.get().transaction.nonce() > tx.transaction.nonce() { + *entry.get_mut() = tx.clone(); + } + } + Entry::Vacant(entry) => { + entry.insert(tx.clone()); + } } - self.highest_nonces.insert(tx.clone()); } /// Returns the ancestor the given transaction, the transaction with `nonce - 1`. 
@@ -306,6 +311,7 @@ impl PendingPool { // send the new transaction to any existing pendingpool static file iterators if self.new_transaction_notifier.receiver_count() > 0 { + dbg!("notify"); let _ = self.new_transaction_notifier.send(tx.clone()); } @@ -320,19 +326,26 @@ impl PendingPool { &mut self, id: &TransactionId, ) -> Option>> { - // mark the next as independent if it exists - if let Some(unlocked) = self.get(&id.descendant()) { - self.independent_transactions.insert(unlocked.clone()); + if let Some(lowest) = self.independent_transactions.get(&id.sender) { + if lowest.transaction.nonce() == id.nonce { + self.independent_transactions.remove(&id.sender); + // mark the next as independent if it exists + if let Some(unlocked) = self.get(&id.descendant()) { + self.independent_transactions.insert(id.sender, unlocked.clone()); + } + } } + let tx = self.by_id.remove(id)?; self.size_of -= tx.transaction.size(); self.all.remove(&tx); - self.independent_transactions.remove(&tx); - // switch out for the next ancestor if there is one - if self.highest_nonces.remove(&tx) { + if let Some(highest) = self.highest_nonces.get(&id.sender) { + if highest.transaction.nonce() == id.nonce { + self.highest_nonces.remove(&id.sender); + } if let Some(ancestor) = self.ancestor(id) { - self.highest_nonces.insert(ancestor.clone()); + self.highest_nonces.insert(id.sender, ancestor.clone()); } } Some(tx.transaction) @@ -398,8 +411,12 @@ impl PendingPool { // we can reuse the temp array removed.clear(); + // we prefer removing transactions with lower ordering + let mut worst_transactions = self.highest_nonces.values().collect::>(); + worst_transactions.sort(); + // loop through the highest nonces set, removing transactions until we reach the limit - for tx in &self.highest_nonces { + for tx in worst_transactions { // return early if the pool is under limits if !limit.is_exceeded(original_length - total_removed, original_size - total_size) || non_local_senders == 0 @@ -513,6 +530,12 @@ impl 
PendingPool { self.by_id.get(id) } + /// Returns a reference to the independent transactions in the pool + #[cfg(test)] + pub(crate) const fn independent(&self) -> &HashMap> { + &self.independent_transactions + } + /// Asserts that the bijection between `by_id` and `all` is valid. #[cfg(any(test, feature = "test-utils"))] pub(crate) fn assert_invariants(&self) { @@ -668,7 +691,7 @@ mod tests { // First transaction should be evicted. assert_eq!( - pool.highest_nonces.iter().next().map(|tx| *tx.transaction.hash()), + pool.highest_nonces.values().min().map(|tx| *tx.transaction.hash()), Some(*t.hash()) ); @@ -723,7 +746,7 @@ mod tests { .collect::>(); let actual_highest_nonces = pool .highest_nonces - .iter() + .values() .map(|tx| (tx.transaction.sender(), tx.transaction.nonce())) .collect::>(); assert_eq!(expected_highest_nonces, actual_highest_nonces); diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 1258f4a270a..1d35f742ab6 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1676,29 +1676,17 @@ impl AllTransactions { // The next transaction of this sender let on_chain_id = TransactionId::new(transaction.sender_id(), on_chain_nonce); { - // get all transactions of the sender's account - let mut descendants = self.descendant_txs_mut(&on_chain_id).peekable(); - // Tracks the next nonce we expect if the transactions are gapless let mut next_nonce = on_chain_id.nonce; // We need to find out if the next transaction of the sender is considered pending - let mut has_parked_ancestor = if ancestor.is_none() { - // the new transaction is the next one - false - } else { - // The transaction was added above so the _inclusive_ descendants iterator - // returns at least 1 tx. 
- let (id, tx) = descendants.peek().expect("includes >= 1"); - if id.nonce < inserted_tx_id.nonce { - !tx.state.is_pending() - } else { - true - } - }; + // The direct descendant has _no_ parked ancestors because the `on_chain_nonce` is + // pending, so we can set this to `false` + let mut has_parked_ancestor = false; - // Traverse all transactions of the sender and update existing transactions - for (id, tx) in descendants { + // Traverse all future transactions of the sender starting with the on chain nonce, and + // update existing transactions: `[on_chain_nonce,..]` + for (id, tx) in self.descendant_txs_mut(&on_chain_id) { let current_pool = tx.subpool; // If there's a nonce gap, we can shortcircuit @@ -2902,7 +2890,7 @@ mod tests { pool.update_basefee(pool_base_fee); // 2 txs, that should put the pool over the size limit but not max txs - let a_txs = MockTransactionSet::dependent(a_sender, 0, 2, TxType::Eip1559) + let a_txs = MockTransactionSet::dependent(a_sender, 0, 3, TxType::Eip1559) .into_iter() .map(|mut tx| { tx.set_size(default_limits.max_size / 2 + 1); @@ -3257,4 +3245,75 @@ mod tests { vec![1, 2, 3] ); } + + #[test] + fn test_pending_ordering() { + let mut f = MockTransactionFactory::default(); + let mut pool = TxPool::new(MockOrdering::default(), Default::default()); + + let tx_0 = MockTransaction::eip1559().with_nonce(1).set_gas_price(100).inc_limit(); + let tx_1 = tx_0.next(); + + let v0 = f.validated(tx_0); + let v1 = f.validated(tx_1); + + // nonce gap, tx should be queued + pool.add_transaction(v0.clone(), U256::MAX, 0).unwrap(); + assert_eq!(1, pool.queued_transactions().len()); + + // nonce gap is closed on-chain, both transactions should be moved to pending + pool.add_transaction(v1, U256::MAX, 1).unwrap(); + + assert_eq!(2, pool.pending_transactions().len()); + assert_eq!(0, pool.queued_transactions().len()); + + assert_eq!( + pool.pending_pool.independent().get(&v0.sender_id()).unwrap().transaction.nonce(), + v0.nonce() + ); + } + + // + 
#[test] + fn one_sender_one_independent_transaction() { + let mut on_chain_balance = U256::from(4_999); // only enough for 4 txs + let mut on_chain_nonce = 40; + let mut f = MockTransactionFactory::default(); + let mut pool = TxPool::mock(); + let mut submitted_txs = Vec::new(); + + // We use a "template" because we want all txs to have the same sender. + let template = + MockTransaction::eip1559().inc_price().inc_limit().with_value(U256::from(1_001)); + + // Add 8 txs. Because the balance is only sufficient for 4, so the last 4 will be + // Queued. + for tx_nonce in 40..48 { + let tx = f.validated(template.clone().with_nonce(tx_nonce).rng_hash()); + submitted_txs.push(*tx.id()); + pool.add_transaction(tx, on_chain_balance, on_chain_nonce).unwrap(); + } + + // A block is mined with two txs (so nonce is changed from 40 to 42). + // Now the balance gets so high that it's enough to execute alltxs. + on_chain_balance = U256::from(999_999); + on_chain_nonce = 42; + pool.remove_transaction(&submitted_txs[0]); + pool.remove_transaction(&submitted_txs[1]); + + // Add 4 txs. 
+ for tx_nonce in 48..52 { + pool.add_transaction( + f.validated(template.clone().with_nonce(tx_nonce).rng_hash()), + on_chain_balance, + on_chain_nonce, + ) + .unwrap(); + } + + let best_txs: Vec<_> = pool.pending().best().map(|tx| *tx.id()).collect(); + assert_eq!(best_txs.len(), 10); // 8 - 2 + 4 = 10 + + assert_eq!(pool.pending_pool.independent().len(), 1); + } } From 9f6f63d45a1a5b9edf3087a9210e6a9170e4c1ec Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Fri, 8 Nov 2024 04:50:46 -0600 Subject: [PATCH 375/970] CLI parameter to specify the broadcast channel capacity of PendingPool (#12388) Co-authored-by: Matthias Seitz --- book/cli/reth/node.md | 5 +++++ crates/node/core/src/args/txpool.rs | 13 ++++++++++--- crates/transaction-pool/src/config.rs | 6 ++++++ crates/transaction-pool/src/lib.rs | 6 +++--- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 5f0090ef896..d1bca0a43b2 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -502,6 +502,11 @@ TxPool: [default: 1024] + --txpool.max-new-pending-txs-notifications + How many new pending transactions to buffer and send to in progress pending transaction iterators + + [default: 200] + Builder: --builder.extradata Block extra data set by the payload builder diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 2e691aaa963..a8ea1d9cdba 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -9,9 +9,9 @@ use reth_transaction_pool::{ pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, validate::DEFAULT_MAX_TX_INPUT_BYTES, LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, - DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, REPLACE_BLOB_PRICE_BUMP, - TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, - TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + 
DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, MAX_NEW_PENDING_TXS_NOTIFICATIONS, + REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }; /// Parameters for debugging purposes #[derive(Debug, Clone, Args, PartialEq, Eq)] @@ -86,6 +86,11 @@ pub struct TxPoolArgs { /// Maximum number of new transactions to buffer #[arg(long = "txpool.max-new-txns", alias = "txpool.max_new_txns", default_value_t = NEW_TX_LISTENER_BUFFER_SIZE)] pub new_tx_listener_buffer_size: usize, + + /// How many new pending transactions to buffer and send to in progress pending transaction + /// iterators. + #[arg(long = "txpool.max-new-pending-txs-notifications", alias = "txpool.max-new-pending-txs-notifications", default_value_t = MAX_NEW_PENDING_TXS_NOTIFICATIONS)] + pub max_new_pending_txs_notifications: usize, } impl Default for TxPoolArgs { @@ -110,6 +115,7 @@ impl Default for TxPoolArgs { additional_validation_tasks: DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE, + max_new_pending_txs_notifications: MAX_NEW_PENDING_TXS_NOTIFICATIONS, } } } @@ -148,6 +154,7 @@ impl RethTransactionPoolConfig for TxPoolArgs { gas_limit: self.gas_limit, pending_tx_listener_buffer_size: self.pending_tx_listener_buffer_size, new_tx_listener_buffer_size: self.new_tx_listener_buffer_size, + max_new_pending_txs_notifications: self.max_new_pending_txs_notifications, } } } diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index d4518846258..212df34bd37 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -27,6 +27,9 @@ pub const DEFAULT_PRICE_BUMP: u128 = 10; /// This enforces that a blob transaction requires a 100% price bump to be replaced pub const REPLACE_BLOB_PRICE_BUMP: u128 = 100; +/// Default maximum new transactions for 
broadcasting. +pub const MAX_NEW_PENDING_TXS_NOTIFICATIONS: usize = 200; + /// Configuration options for the Transaction pool. #[derive(Debug, Clone)] pub struct PoolConfig { @@ -53,6 +56,8 @@ pub struct PoolConfig { pub pending_tx_listener_buffer_size: usize, /// Bound on number of new transactions from `reth_network::TransactionsManager` to buffer. pub new_tx_listener_buffer_size: usize, + /// How many new pending transactions to buffer and send iterators in progress. + pub max_new_pending_txs_notifications: usize, } impl PoolConfig { @@ -80,6 +85,7 @@ impl Default for PoolConfig { local_transactions_config: Default::default(), pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE, + max_new_pending_txs_notifications: MAX_NEW_PENDING_TXS_NOTIFICATIONS, } } } diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 669cb69b0e8..8d11d7595b1 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -166,9 +166,9 @@ pub use crate::{ blobstore::{BlobStore, BlobStoreError}, config::{ LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, - DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, REPLACE_BLOB_PRICE_BUMP, - TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, - TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, MAX_NEW_PENDING_TXS_NOTIFICATIONS, + REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }, error::PoolResult, ordering::{CoinbaseTipOrdering, Priority, TransactionOrdering}, From 02d2593b2eb9481f85dd3b3edcca3c12ddd68769 Mon Sep 17 00:00:00 2001 From: Seva Zhidkov Date: Fri, 8 Nov 2024 11:45:27 +0000 Subject: [PATCH 376/970] feat(transaction-pool): chaining & static txs for best transactions trait (#12320) Co-authored-by: Matthias Seitz --- Cargo.lock | 4 + 
crates/optimism/node/Cargo.toml | 27 +- crates/optimism/node/src/lib.rs | 4 + .../optimism/node/{tests/e2e => src}/utils.rs | 14 +- crates/optimism/node/tests/e2e/main.rs | 3 - crates/optimism/node/tests/e2e/p2p.rs | 2 +- crates/optimism/node/tests/it/main.rs | 3 + crates/optimism/node/tests/it/priority.rs | 190 +++++++++++ crates/optimism/payload/src/builder.rs | 33 +- crates/transaction-pool/src/pool/best.rs | 294 +++++++++++++++++- crates/transaction-pool/src/pool/mod.rs | 5 +- crates/transaction-pool/src/traits.rs | 18 ++ 12 files changed, 555 insertions(+), 42 deletions(-) rename crates/optimism/node/{tests/e2e => src}/utils.rs (79%) create mode 100644 crates/optimism/node/tests/it/priority.rs diff --git a/Cargo.lock b/Cargo.lock index 84b660cd737..95f8166ce2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8235,10 +8235,13 @@ dependencies = [ name = "reth-optimism-node" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-genesis", + "alloy-network", "alloy-primitives", "alloy-rpc-types-engine", + "alloy-signer-local", "clap", "eyre", "futures", @@ -8261,6 +8264,7 @@ dependencies = [ "reth-optimism-consensus", "reth-optimism-evm", "reth-optimism-forks", + "reth-optimism-node", "reth-optimism-payload-builder", "reth-optimism-rpc", "reth-payload-builder", diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 07315a07f4e..c1e23e3d571 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -55,17 +55,23 @@ parking_lot.workspace = true # rpc serde_json.workspace = true +# test-utils dependencies +reth = { workspace = true, optional = true } +reth-e2e-test-utils = { workspace = true, optional = true } +alloy-genesis = { workspace = true, optional = true } +tokio = { workspace = true, optional = true } + [dev-dependencies] -reth.workspace = true +reth-optimism-node = { workspace = true, features = ["test-utils"] } reth-db.workspace = true -reth-e2e-test-utils.workspace = true 
reth-node-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } -tokio.workspace = true alloy-primitives.workspace = true -alloy-genesis.workspace = true op-alloy-consensus.workspace = true +alloy-signer-local.workspace = true +alloy-network.workspace = true +alloy-consensus.workspace = true futures.workspace = true [features] @@ -79,15 +85,21 @@ optimism = [ "reth-optimism-rpc/optimism", "reth-engine-local/optimism", "reth-optimism-consensus/optimism", - "reth-db/optimism" + "reth-db/optimism", + "reth-optimism-node/optimism" ] asm-keccak = [ "reth-primitives/asm-keccak", "reth/asm-keccak", "alloy-primitives/asm-keccak", - "revm/asm-keccak" + "revm/asm-keccak", + "reth-optimism-node/asm-keccak" ] test-utils = [ + "reth", + "reth-e2e-test-utils", + "alloy-genesis", + "tokio", "reth-node-builder/test-utils", "reth-chainspec/test-utils", "reth-consensus/test-utils", @@ -100,5 +112,6 @@ test-utils = [ "reth-provider/test-utils", "reth-transaction-pool/test-utils", "reth-trie-db/test-utils", - "revm/test-utils" + "revm/test-utils", + "reth-optimism-node/test-utils" ] diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 6419611067e..7af0f3b8a72 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -22,6 +22,10 @@ pub use node::OpNode; pub mod txpool; +/// Helpers for running test node instances. 
+#[cfg(feature = "test-utils")] +pub mod utils; + pub use reth_optimism_payload_builder::{ OpBuiltPayload, OpPayloadBuilder, OpPayloadBuilderAttributes, }; diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/src/utils.rs similarity index 79% rename from crates/optimism/node/tests/e2e/utils.rs rename to crates/optimism/node/src/utils.rs index c3b6acddc5a..b54015fef0c 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -1,3 +1,4 @@ +use crate::{node::OpAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes}; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; @@ -5,9 +6,6 @@ use reth_e2e_test_utils::{ transaction::TransactionTestContext, wallet::Wallet, Adapter, NodeHelperType, }; use reth_optimism_chainspec::OpChainSpecBuilder; -use reth_optimism_node::{ - node::OpAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes, -}; use reth_payload_builder::EthPayloadBuilderAttributes; use std::sync::Arc; use tokio::sync::Mutex; @@ -15,8 +13,10 @@ use tokio::sync::Mutex; /// Optimism Node Helper type pub(crate) type OpNode = NodeHelperType>>; -pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); +/// Creates the initial setup with `num_nodes` of the node config, started and connected. 
+pub async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { + let genesis: Genesis = + serde_json::from_str(include_str!("../tests/assets/genesis.json")).unwrap(); reth_e2e_test_utils::setup( num_nodes, Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()), @@ -27,7 +27,7 @@ pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskMa } /// Advance the chain with sequential payloads returning them in the end. -pub(crate) async fn advance_chain( +pub async fn advance_chain( length: usize, node: &mut OpNode, wallet: Arc>, @@ -49,7 +49,7 @@ pub(crate) async fn advance_chain( } /// Helper function to create a new eth payload attributes -pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OpPayloadBuilderAttributes { +pub fn optimism_payload_attributes(timestamp: u64) -> OpPayloadBuilderAttributes { let attributes = PayloadAttributes { timestamp, prev_randao: B256::ZERO, diff --git a/crates/optimism/node/tests/e2e/main.rs b/crates/optimism/node/tests/e2e/main.rs index 3438c766048..7f4b22ba7e0 100644 --- a/crates/optimism/node/tests/e2e/main.rs +++ b/crates/optimism/node/tests/e2e/main.rs @@ -3,7 +3,4 @@ #[cfg(feature = "optimism")] mod p2p; -#[cfg(feature = "optimism")] -mod utils; - const fn main() {} diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 6240b781472..3db4cfab869 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,7 +1,7 @@ -use crate::utils::{advance_chain, setup}; use alloy_rpc_types_engine::PayloadStatusEnum; use futures::StreamExt; use reth::blockchain_tree::error::BlockchainTreeError; +use reth_optimism_node::utils::{advance_chain, setup}; use std::sync::Arc; use tokio::sync::Mutex; diff --git a/crates/optimism/node/tests/it/main.rs b/crates/optimism/node/tests/it/main.rs index b84dd7426c2..d0533fc4541 100644 --- a/crates/optimism/node/tests/it/main.rs +++ 
b/crates/optimism/node/tests/it/main.rs @@ -3,4 +3,7 @@ #[cfg(feature = "optimism")] mod builder; +#[cfg(feature = "optimism")] +mod priority; + const fn main() {} diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs new file mode 100644 index 00000000000..52e3bef3d91 --- /dev/null +++ b/crates/optimism/node/tests/it/priority.rs @@ -0,0 +1,190 @@ +//! Node builder test that customizes priority of transactions in the block. + +use alloy_consensus::TxEip1559; +use alloy_genesis::Genesis; +use alloy_network::TxSignerSync; +use alloy_primitives::{Address, ChainId, TxKind}; +use reth::{args::DatadirArgs, tasks::TaskManager}; +use reth_chainspec::EthChainSpec; +use reth_db::test_utils::create_test_rw_db_with_path; +use reth_e2e_test_utils::{ + node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, +}; +use reth_node_api::{FullNodeTypes, NodeTypesWithEngine}; +use reth_node_builder::{ + components::ComponentsBuilder, EngineNodeLauncher, NodeBuilder, NodeConfig, +}; +use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder}; +use reth_optimism_node::{ + args::RollupArgs, + node::{ + OpAddOns, OpConsensusBuilder, OpExecutorBuilder, OpNetworkBuilder, OpPayloadBuilder, + OpPoolBuilder, + }, + utils::optimism_payload_attributes, + OpEngineTypes, OpNode, +}; +use reth_optimism_payload_builder::builder::OpPayloadTransactions; +use reth_primitives::{SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered}; +use reth_provider::providers::BlockchainProvider2; +use reth_transaction_pool::{ + pool::{BestPayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}, + PayloadTransactions, +}; +use std::sync::Arc; +use tokio::sync::Mutex; + +#[derive(Clone, Debug)] +struct CustomTxPriority { + chain_id: ChainId, +} + +impl OpPayloadTransactions for CustomTxPriority { + fn best_transactions( + &self, + pool: Pool, + attr: reth_transaction_pool::BestTransactionsAttributes, + ) -> impl 
PayloadTransactions + where + Pool: reth_transaction_pool::TransactionPool, + { + // Block composition: + // 1. Best transactions from the pool (up to 250k gas) + // 2. End-of-block transaction created by the node (up to 100k gas) + + // End of block transaction should send a 0-value transfer to a random address. + let sender = Wallet::default().inner; + let mut end_of_block_tx = TxEip1559 { + chain_id: self.chain_id, + nonce: 1, // it will be 2nd tx after L1 block info tx that uses the same sender + gas_limit: 21000, + max_fee_per_gas: 20e9 as u128, + to: TxKind::Call(Address::random()), + value: 0.try_into().unwrap(), + ..Default::default() + }; + let signature = sender.sign_transaction_sync(&mut end_of_block_tx).unwrap(); + let end_of_block_tx = TransactionSignedEcRecovered::from_signed_transaction( + TransactionSigned::from_transaction_and_signature( + Transaction::Eip1559(end_of_block_tx), + signature, + ), + sender.address(), + ); + + PayloadTransactionsChain::new( + BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)), + // Allow 250k gas for the transactions from the pool + Some(250_000), + PayloadTransactionsFixed::single(end_of_block_tx), + // Allow 100k gas for the end-of-block transaction + Some(100_000), + ) + } +} + +/// Builds the node with custom transaction priority service within default payload builder. +fn build_components( + chain_id: ChainId, +) -> ComponentsBuilder< + Node, + OpPoolBuilder, + OpPayloadBuilder, + OpNetworkBuilder, + OpExecutorBuilder, + OpConsensusBuilder, +> +where + Node: + FullNodeTypes>, +{ + let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. 
} = + RollupArgs::default(); + ComponentsBuilder::default() + .node_types::() + .pool(OpPoolBuilder::default()) + .payload( + OpPayloadBuilder::new(compute_pending_block) + .with_transactions(CustomTxPriority { chain_id }), + ) + .network(OpNetworkBuilder { disable_txpool_gossip, disable_discovery_v4: !discovery_v4 }) + .executor(OpExecutorBuilder::default()) + .consensus(OpConsensusBuilder::default()) +} + +#[tokio::test] +async fn test_custom_block_priority_config() { + reth_tracing::init_test_tracing(); + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = + Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()); + + // This wallet is going to send: + // 1. L1 block info tx + // 2. End-of-block custom tx + let wallet = Arc::new(Mutex::new(Wallet::default().with_chain_id(chain_spec.chain().into()))); + + // Configure and launch the node. + let config = NodeConfig::new(chain_spec).with_datadir_args(DatadirArgs { + datadir: reth_db::test_utils::tempdir_path().into(), + ..Default::default() + }); + let db = create_test_rw_db_with_path( + config + .datadir + .datadir + .unwrap_or_chain_default(config.chain.chain(), config.datadir.clone()) + .db(), + ); + let tasks = TaskManager::current(); + let node_handle = NodeBuilder::new(config.clone()) + .with_database(db) + .with_types_and_provider::>() + .with_components(build_components(config.chain.chain_id())) + .with_add_ons(OpAddOns::default()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + tasks.executor(), + builder.config.datadir(), + Default::default(), + ); + builder.launch_with(launcher) + }) + .await + .expect("Failed to launch node"); + + // Advance the chain with a single block. 
+ let block_payloads = NodeTestContext::new(node_handle.node, optimism_payload_attributes) + .await + .unwrap() + .advance(1, |_| { + let wallet = wallet.clone(); + Box::pin(async move { + let mut wallet = wallet.lock().await; + let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( + wallet.chain_id, + wallet.inner.clone(), + // This doesn't matter in the current test (because it's only one block), + // but make sure you're not reusing the nonce from end-of-block tx + // if they have the same signer. + wallet.inner_nonce * 2, + ); + wallet.inner_nonce += 1; + tx_fut.await + }) + }) + .await + .unwrap(); + assert_eq!(block_payloads.len(), 1); + let (block_payload, _) = block_payloads.first().unwrap(); + let block_payload: SealedBlock = block_payload.block().clone(); + assert_eq!(block_payload.body.transactions.len(), 2); // L1 block info tx + end-of-block custom tx + + // Check that last transaction in the block looks like a transfer to a random address. + let end_of_block_tx = block_payload.body.transactions.last().unwrap(); + let end_of_block_tx = end_of_block_tx.transaction.as_eip1559().unwrap(); + assert_eq!(end_of_block_tx.nonce, 1); + assert_eq!(end_of_block_tx.gas_limit, 21_000); + assert!(end_of_block_tx.input.is_empty()); +} diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index dc6084f4881..42326de6ea4 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -2,7 +2,7 @@ use std::{fmt::Display, sync::Arc}; -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Transaction, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::merge::BEACON_NONCE; use alloy_primitives::{Address, Bytes, U256}; use alloy_rpc_types_engine::PayloadId; @@ -23,8 +23,7 @@ use reth_primitives::{ use reth_provider::{ProviderError, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, 
BestTransactions, BestTransactionsAttributes, BestTransactionsFor, - TransactionPool, + noop::NoopTransactionPool, BestTransactionsAttributes, PayloadTransactions, TransactionPool, }; use reth_trie::HashedPostState; use revm::{ @@ -39,6 +38,7 @@ use crate::{ payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, }; use op_alloy_consensus::DepositTransaction; +use reth_transaction_pool::pool::BestPayloadTransactions; /// Optimism's payload builder #[derive(Debug, Clone, PartialEq, Eq)] @@ -390,7 +390,7 @@ where } } -/// A type that returns a the [`BestTransactions`] that should be included in the pool. +/// A type that returns a the [`PayloadTransactions`] that should be included in the pool. pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { /// Returns an iterator that yields the transaction in the order they should get included in the /// new payload. @@ -398,7 +398,7 @@ pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { &self, pool: Pool, attr: BestTransactionsAttributes, - ) -> BestTransactionsFor; + ) -> impl PayloadTransactions; } impl OpPayloadTransactions for () { @@ -406,8 +406,8 @@ impl OpPayloadTransactions for () { &self, pool: Pool, attr: BestTransactionsAttributes, - ) -> BestTransactionsFor { - pool.best_transactions_with_attributes(attr) + ) -> impl PayloadTransactions { + BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)) } } @@ -730,7 +730,7 @@ where &self, info: &mut ExecutionInfo, db: &mut State, - mut best_txs: BestTransactionsFor, + mut best_txs: impl PayloadTransactions, ) -> Result>, PayloadBuilderError> where DB: Database, @@ -746,19 +746,19 @@ where ); let mut evm = self.evm_config.evm_with_env(&mut *db, env); - while let Some(pool_tx) = best_txs.next() { + while let Some(tx) = best_txs.next(()) { // ensure we still have capacity for this transaction - if info.cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { + if info.cumulative_gas_used + tx.gas_limit() > 
block_gas_limit { // we can't fit this transaction into the block, so we need to mark it as // invalid which also removes all dependent transaction from // the iterator before we can continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid(tx.signer(), tx.nonce()); continue } // A sequencer's block should never contain blob or deposit transactions from the pool. - if pool_tx.is_eip4844() || pool_tx.tx_type() == TxType::Deposit as u8 { - best_txs.mark_invalid(&pool_tx); + if tx.is_eip4844() || tx.tx_type() == TxType::Deposit as u8 { + best_txs.mark_invalid(tx.signer(), tx.nonce()); continue } @@ -767,9 +767,6 @@ where return Ok(Some(BuildOutcomeKind::Cancelled)) } - // convert tx to a signed transaction - let tx = pool_tx.to_recovered_transaction(); - // Configure the environment for the tx. *evm.tx_mut() = self.evm_config.tx_env(tx.as_signed(), tx.signer()); @@ -785,7 +782,7 @@ where // if the transaction is invalid, we can skip it and all of its // descendants trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid(tx.signer(), tx.nonce()); } continue @@ -819,7 +816,7 @@ where // update add to total fees let miner_fee = tx - .effective_tip_per_gas(Some(base_fee)) + .effective_tip_per_gas(base_fee) .expect("fee is always valid; execution succeeded"); info.total_fees += U256::from(miner_fee) * U256::from(gas_used); diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 068ef989953..17165611794 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,10 +1,12 @@ use crate::{ identifier::{SenderId, TransactionId}, pool::pending::PendingTransaction, - PoolTransaction, TransactionOrdering, ValidPoolTransaction, + PayloadTransactions, PoolTransaction, TransactionOrdering, ValidPoolTransaction, }; +use alloy_consensus::Transaction; use alloy_primitives::Address; use 
core::fmt; +use reth_primitives::TransactionSignedEcRecovered; use std::{ collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, sync::Arc, @@ -48,7 +50,7 @@ impl Iterator for BestTransactionsWithFees { fn next(&mut self) -> Option { // find the next transaction that satisfies the base fee loop { - let best = self.best.next()?; + let best = Iterator::next(&mut self.best)?; // If both the base fee and blob fee (if applicable for EIP-4844) are satisfied, return // the transaction if best.transaction.max_fee_per_gas() >= self.base_fee as u128 && @@ -205,6 +207,49 @@ impl Iterator for BestTransactions { } } +/// Wrapper struct that allows to convert `BestTransactions` (used in tx pool) to +/// `PayloadTransactions` (used in block composition). +#[derive(Debug)] +pub struct BestPayloadTransactions +where + T: PoolTransaction>, + I: Iterator>>, +{ + invalid: HashSet
, + best: I, +} + +impl BestPayloadTransactions +where + T: PoolTransaction>, + I: Iterator>>, +{ + /// Create a new `BestPayloadTransactions` with the given iterator. + pub fn new(best: I) -> Self { + Self { invalid: Default::default(), best } + } +} + +impl PayloadTransactions for BestPayloadTransactions +where + T: PoolTransaction>, + I: Iterator>>, +{ + fn next(&mut self, _ctx: ()) -> Option { + loop { + let tx = self.best.next()?; + if self.invalid.contains(&tx.sender()) { + continue + } + return Some(tx.to_recovered_transaction()) + } + } + + fn mark_invalid(&mut self, sender: Address, _nonce: u64) { + self.invalid.insert(sender); + } +} + /// A [`BestTransactions`](crate::traits::BestTransactions) implementation that filters the /// transactions of iter with predicate. /// @@ -350,6 +395,130 @@ where } } +/// An implementation of [`crate::traits::PayloadTransactions`] that yields +/// a pre-defined set of transactions. +/// +/// This is useful to put a sequencer-specified set of transactions into the block +/// and compose it with the rest of the transactions. +#[derive(Debug)] +pub struct PayloadTransactionsFixed { + transactions: Vec, + index: usize, +} + +impl PayloadTransactionsFixed { + /// Constructs a new [`PayloadTransactionsFixed`]. + pub fn new(transactions: Vec) -> Self { + Self { transactions, index: Default::default() } + } + + /// Constructs a new [`PayloadTransactionsFixed`] with a single transaction. 
+ pub fn single(transaction: T) -> Self { + Self { transactions: vec![transaction], index: Default::default() } + } +} + +impl PayloadTransactions for PayloadTransactionsFixed { + fn next(&mut self, _ctx: ()) -> Option { + (self.index < self.transactions.len()).then(|| { + let tx = self.transactions[self.index].clone(); + self.index += 1; + tx + }) + } + + fn mark_invalid(&mut self, _sender: Address, _nonce: u64) {} +} + +/// Wrapper over [`crate::traits::PayloadTransactions`] that combines transactions from multiple +/// `PayloadTransactions` iterators and keeps track of the gas for both of iterators. +/// +/// We can't use [`Iterator::chain`], because: +/// (a) we need to propagate the `mark_invalid` and `no_updates` +/// (b) we need to keep track of the gas +/// +/// Notes that [`PayloadTransactionsChain`] fully drains the first iterator +/// before moving to the second one. +/// +/// If the `before` iterator has transactions that are not fitting into the block, +/// the after iterator will get propagated a `mark_invalid` call for each of them. +#[derive(Debug)] +pub struct PayloadTransactionsChain { + /// Iterator that will be used first + before: B, + /// Allowed gas for the transactions from `before` iterator. If `None`, no gas limit is + /// enforced. + before_max_gas: Option, + /// Gas used by the transactions from `before` iterator + before_gas: u64, + /// Iterator that will be used after `before` iterator + after: A, + /// Allowed gas for the transactions from `after` iterator. If `None`, no gas limit is + /// enforced. + after_max_gas: Option, + /// Gas used by the transactions from `after` iterator + after_gas: u64, +} + +impl PayloadTransactionsChain { + /// Constructs a new [`PayloadTransactionsChain`]. 
+ pub fn new( + before: B, + before_max_gas: Option, + after: A, + after_max_gas: Option, + ) -> Self { + Self { + before, + before_max_gas, + before_gas: Default::default(), + after, + after_max_gas, + after_gas: Default::default(), + } + } +} + +impl PayloadTransactions for PayloadTransactionsChain +where + B: PayloadTransactions, + A: PayloadTransactions, +{ + fn next(&mut self, ctx: ()) -> Option { + while let Some(tx) = self.before.next(ctx) { + if let Some(before_max_gas) = self.before_max_gas { + if self.before_gas + tx.transaction.gas_limit() <= before_max_gas { + self.before_gas += tx.transaction.gas_limit(); + return Some(tx); + } + self.before.mark_invalid(tx.signer(), tx.transaction.nonce()); + self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + } else { + return Some(tx); + } + } + + while let Some(tx) = self.after.next(ctx) { + if let Some(after_max_gas) = self.after_max_gas { + if self.after_gas + tx.transaction.gas_limit() <= after_max_gas { + self.after_gas += tx.transaction.gas_limit(); + return Some(tx); + } + self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + } else { + return Some(tx); + } + } + + None + } + + fn mark_invalid(&mut self, sender: Address, nonce: u64) { + self.before.mark_invalid(sender, nonce); + self.after.mark_invalid(sender, nonce); + } +} + #[cfg(test)] mod tests { use super::*; @@ -428,9 +597,9 @@ mod tests { dyn crate::traits::BestTransactions>>, > = Box::new(pool.best()); - let tx = best.next().unwrap(); - best.mark_invalid(&tx); - assert!(best.next().is_none()); + let tx = Iterator::next(&mut best).unwrap(); + crate::traits::BestTransactions::mark_invalid(&mut *best, &tx); + assert!(Iterator::next(&mut best).is_none()); } #[test] @@ -737,4 +906,119 @@ mod tests { assert_eq!(tx.nonce() % 2, 0); } } + + #[test] + fn test_best_transactions_prioritized_senders() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add 5 plain 
transactions from different senders with increasing gas price + for gas_price in 0..5 { + let tx = MockTransaction::eip1559().with_gas_price(gas_price); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + // Add another transaction with 0 gas price that's going to be prioritized by sender + let prioritized_tx = MockTransaction::eip1559().with_gas_price(0); + let valid_prioritized_tx = f.validated(prioritized_tx.clone()); + pool.add_transaction(Arc::new(valid_prioritized_tx), 0); + + let prioritized_senders = HashSet::from([prioritized_tx.sender()]); + let best = + BestTransactionsWithPrioritizedSenders::new(prioritized_senders, 200, pool.best()); + + // Verify that the prioritized transaction is returned first + // and the rest are returned in the reverse order of gas price + let mut iter = best.into_iter(); + let top_of_block_tx = iter.next().unwrap(); + assert_eq!(top_of_block_tx.max_fee_per_gas(), 0); + assert_eq!(top_of_block_tx.sender(), prioritized_tx.sender()); + for gas_price in (0..5).rev() { + assert_eq!(iter.next().unwrap().max_fee_per_gas(), gas_price); + } + + // TODO: Test that gas limits for prioritized transactions are respected + } + + #[test] + fn test_best_transactions_chained_iterators() { + let mut priority_pool = PendingPool::new(MockOrdering::default()); + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Block composition + // === + // (1) up to 100 gas: custom top-of-block transaction + // (2) up to 100 gas: transactions from the priority pool + // (3) up to 200 gas: only transactions from address A + // (4) up to 200 gas: only transactions from address B + // (5) until block gas limit: all transactions from the main pool + + // Notes: + // - If prioritized addresses overlap, a single transaction will be prioritized twice and + // therefore use the per-segment gas limit twice. 
+ // - Priority pool and main pool must synchronize between each other to make sure there are + // no conflicts for the same nonce. For example, in this scenario, pools can't reject + // transactions with seemingly incorrect nonces, because previous transactions might be in + // the other pool. + + let address_top_of_block = Address::random(); + let address_in_priority_pool = Address::random(); + let address_a = Address::random(); + let address_b = Address::random(); + let address_regular = Address::random(); + + // Add transactions to the main pool + { + let prioritized_tx_a = + MockTransaction::eip1559().with_gas_price(5).with_sender(address_a); + // without our custom logic, B would be prioritized over A due to gas price: + let prioritized_tx_b = + MockTransaction::eip1559().with_gas_price(10).with_sender(address_b); + let regular_tx = + MockTransaction::eip1559().with_gas_price(15).with_sender(address_regular); + pool.add_transaction(Arc::new(f.validated(prioritized_tx_a)), 0); + pool.add_transaction(Arc::new(f.validated(prioritized_tx_b)), 0); + pool.add_transaction(Arc::new(f.validated(regular_tx)), 0); + } + + // Add transactions to the priority pool + { + let prioritized_tx = + MockTransaction::eip1559().with_gas_price(0).with_sender(address_in_priority_pool); + let valid_prioritized_tx = f.validated(prioritized_tx); + priority_pool.add_transaction(Arc::new(valid_prioritized_tx), 0); + } + + let mut block = PayloadTransactionsChain::new( + PayloadTransactionsFixed::single( + MockTransaction::eip1559().with_sender(address_top_of_block).into(), + ), + Some(100), + PayloadTransactionsChain::new( + BestPayloadTransactions::new(priority_pool.best()), + Some(100), + BestPayloadTransactions::new(BestTransactionsWithPrioritizedSenders::new( + HashSet::from([address_a]), + 200, + BestTransactionsWithPrioritizedSenders::new( + HashSet::from([address_b]), + 200, + pool.best(), + ), + )), + None, + ), + None, + ); + + assert_eq!(block.next(()).unwrap().signer(), 
address_top_of_block); + assert_eq!(block.next(()).unwrap().signer(), address_in_priority_pool); + assert_eq!(block.next(()).unwrap().signer(), address_a); + assert_eq!(block.next(()).unwrap().signer(), address_b); + assert_eq!(block.next(()).unwrap().signer(), address_regular); + } + + // TODO: Same nonce test } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 0841c0d3d28..76b2490b12f 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -106,7 +106,10 @@ use crate::{ traits::{GetPooledTransactionLimit, NewBlobSidecar, TransactionListenerKind}, validate::ValidTransaction, }; -pub use best::{BestTransactionFilter, BestTransactionsWithPrioritizedSenders}; +pub use best::{ + BestPayloadTransactions, BestTransactionFilter, BestTransactionsWithPrioritizedSenders, + PayloadTransactionsChain, PayloadTransactionsFixed, +}; pub use blob::{blob_tx_priority, fee_delta}; pub use events::{FullTransactionEvent, TransactionEvent}; pub use listener::{AllTransactionsEvents, TransactionEvents}; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index aa99e7af615..185c08c109a 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1501,6 +1501,24 @@ impl Stream for NewSubpoolTransactionStream { } } +/// Iterator that returns transactions for the block building process in the order they should be +/// included in the block. +/// +/// Can include transactions from the pool and other sources (alternative pools, +/// sequencer-originated transactions, etc.). +pub trait PayloadTransactions { + /// Returns the next transaction to include in the block. + fn next( + &mut self, + // In the future, `ctx` can include access to state for block building purposes. 
+ ctx: (), + ) -> Option; + + /// Exclude descendants of the transaction with given sender and nonce from the iterator, + /// because this transaction won't be included in the block. + fn mark_invalid(&mut self, sender: Address, nonce: u64); +} + #[cfg(test)] mod tests { use super::*; From bce70311559e20db5b4b6ec37c0883d231dc2355 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 8 Nov 2024 15:57:01 +0400 Subject: [PATCH 377/970] feat: allow generic values in `tables!` macro (#12400) --- .../cli/commands/src/test_vectors/tables.rs | 21 +-- crates/storage/db/src/tables/mod.rs | 170 ++++++++++++++---- .../src/providers/static_file/manager.rs | 7 +- 3 files changed, 150 insertions(+), 48 deletions(-) diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 29ba50c8d83..22e54ea1336 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -10,6 +10,7 @@ use proptest_arbitrary_interop::arb; use reth_db::tables; use reth_db_api::table::{DupSort, Table, TableRow}; use reth_fs_util as fs; +use reth_primitives::{Header, TransactionSignedNoHash}; use std::collections::HashSet; use tracing::error; @@ -31,16 +32,16 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { fs::create_dir_all(VECTORS_FOLDER)?; macro_rules! generate_vector { - ($table_type:ident, $per_table:expr, TABLE) => { - generate_table_vector::(&mut runner, $per_table)?; + ($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, TABLE) => { + generate_table_vector::)?>(&mut runner, $per_table)?; }; - ($table_type:ident, $per_table:expr, DUPSORT) => { - generate_dupsort_vector::(&mut runner, $per_table)?; + ($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, DUPSORT) => { + generate_dupsort_vector::)?>(&mut runner, $per_table)?; }; } macro_rules! 
generate { - ([$(($table_type:ident, $per_table:expr, $table_or_dup:tt)),*]) => { + ([$(($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, $table_or_dup:tt)),*]) => { let all_tables = vec![$(stringify!($table_type).to_string(),)*]; if tables.is_empty() { @@ -50,10 +51,10 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { for table in tables { match table.as_str() { $( - stringify!($table_type) => { - println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type::NAME); + stringify!($table_type$(<$($generic),+>)?) => { + println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME); - generate_vector!($table_type, $per_table, $table_or_dup); + generate_vector!($table_type$(<$($generic),+>)?, $per_table, $table_or_dup); }, )* _ => { @@ -68,11 +69,11 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { (CanonicalHeaders, PER_TABLE, TABLE), (HeaderTerminalDifficulties, PER_TABLE, TABLE), (HeaderNumbers, PER_TABLE, TABLE), - (Headers, PER_TABLE, TABLE), + (Headers
, PER_TABLE, TABLE), (BlockBodyIndices, PER_TABLE, TABLE), (BlockOmmers, 100, TABLE), (TransactionHashNumbers, PER_TABLE, TABLE), - (Transactions, 100, TABLE), + (Transactions, 100, TABLE), (PlainStorageState, PER_TABLE, DUPSORT), (PlainAccountState, PER_TABLE, TABLE) ]); diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 27f58f8a1f3..c697c319909 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -106,28 +106,39 @@ macro_rules! tables { (@view $name:ident $v:ident) => { $v.view::<$name>() }; (@view $name:ident $v:ident $_subkey:ty) => { $v.view_dupsort::<$name>() }; - ($( $(#[$attr:meta])* table $name:ident; )*) => { + (@value_doc $key:ty, $value:ty) => { + concat!("[`", stringify!($value), "`]") + }; + // Don't generate links if we have generics + (@value_doc $key:ty, $value:ty, $($generic:ident),*) => { + concat!("`", stringify!($value), "`") + }; + + ($($(#[$attr:meta])* table $name:ident$(<$($generic:ident $(= $default:ty)?),*>)? { type Key = $key:ty; type Value = $value:ty; $(type SubKey = $subkey:ty;)? } )*) => { // Table marker types. $( $(#[$attr])* /// - #[doc = concat!("Marker type representing a database table mapping [`", stringify!($key), "`] to [`", stringify!($value), "`].")] + #[doc = concat!("Marker type representing a database table mapping [`", stringify!($key), "`] to ", tables!(@value_doc $key, $value, $($($generic),*)?), ".")] $( #[doc = concat!("\n\nThis table's `DUPSORT` subkey is [`", stringify!($subkey), "`].")] )? - pub struct $name { - _private: (), + pub struct $name$(<$($generic $( = $default)?),*>)? { + _private: std::marker::PhantomData<($($($generic,)*)?)>, } // Ideally this implementation wouldn't exist, but it is necessary to derive `Debug` // when a type is generic over `T: Table`. See: https://github.com/rust-lang/rust/issues/26925 - impl fmt::Debug for $name { + impl$(<$($generic),*>)? fmt::Debug for $name$(<$($generic),*>)? 
{ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { unreachable!("this type cannot be instantiated") } } - impl reth_db_api::table::Table for $name { + impl$(<$($generic),*>)? reth_db_api::table::Table for $name$(<$($generic),*>)? + where + $value: reth_db_api::table::Value + 'static + { const NAME: &'static str = table_names::$name; type Key = $key; @@ -248,7 +259,7 @@ macro_rules! tables { /// use reth_db_api::table::Table; /// /// let table = Tables::Headers; - /// let result = tables_to_generic!(table, |GenericTable| GenericTable::NAME); + /// let result = tables_to_generic!(table, |GenericTable| ::NAME); /// assert_eq!(result, table.name()); /// ``` #[macro_export] @@ -269,53 +280,96 @@ macro_rules! tables { tables! { /// Stores the header hashes belonging to the canonical chain. - table CanonicalHeaders; + table CanonicalHeaders { + type Key = BlockNumber; + type Value = HeaderHash; + } /// Stores the total difficulty from a block header. - table HeaderTerminalDifficulties; + table HeaderTerminalDifficulties { + type Key = BlockNumber; + type Value = CompactU256; + } /// Stores the block number corresponding to a header. - table HeaderNumbers; + table HeaderNumbers { + type Key = BlockHash; + type Value = BlockNumber; + } /// Stores header bodies. - table Headers; + table Headers { + type Key = BlockNumber; + type Value = H; + } /// Stores block indices that contains indexes of transaction and the count of them. /// /// More information about stored indices can be found in the [`StoredBlockBodyIndices`] struct. - table BlockBodyIndices; + table BlockBodyIndices { + type Key = BlockNumber; + type Value = StoredBlockBodyIndices; + } /// Stores the uncles/ommers of the block. - table BlockOmmers; + table BlockOmmers { + type Key = BlockNumber; + type Value = StoredBlockOmmers; + } /// Stores the block withdrawals. 
- table BlockWithdrawals; + table BlockWithdrawals { + type Key = BlockNumber; + type Value = StoredBlockWithdrawals; + } /// Canonical only Stores the transaction body for canonical transactions. - table Transactions; + table Transactions { + type Key = TxNumber; + type Value = T; + } /// Stores the mapping of the transaction hash to the transaction number. - table TransactionHashNumbers; + table TransactionHashNumbers { + type Key = TxHash; + type Value = TxNumber; + } /// Stores the mapping of transaction number to the blocks number. /// /// The key is the highest transaction ID in the block. - table TransactionBlocks; + table TransactionBlocks { + type Key = TxNumber; + type Value = BlockNumber; + } /// Canonical only Stores transaction receipts. - table Receipts; + table Receipts { + type Key = TxNumber; + type Value = Receipt; + } /// Stores all smart contract bytecodes. /// There will be multiple accounts that have same bytecode /// So we would need to introduce reference counter. /// This will be small optimization on state. - table Bytecodes; + table Bytecodes { + type Key = B256; + type Value = Bytecode; + } /// Stores the current state of an [`Account`]. - table PlainAccountState; + table PlainAccountState { + type Key = Address; + type Value = Account; + } /// Stores the current value of a storage key. - table PlainStorageState; + table PlainStorageState { + type Key = Address; + type Value = StorageEntry; + type SubKey = B256; + } /// Stores pointers to block changeset with changes for each account key. /// @@ -335,7 +389,10 @@ tables! { /// * If there were no shard we would get `None` entry or entry of different storage key. /// /// Code example can be found in `reth_provider::HistoricalStateProviderRef` - table AccountsHistory, Value = BlockNumberList>; + table AccountsHistory { + type Key = ShardedKey
; + type Value = BlockNumberList; + } /// Stores pointers to block number changeset with changes for each storage key. /// @@ -355,55 +412,98 @@ tables! { /// * If there were no shard we would get `None` entry or entry of different storage key. /// /// Code example can be found in `reth_provider::HistoricalStateProviderRef` - table StoragesHistory; + table StoragesHistory { + type Key = StorageShardedKey; + type Value = BlockNumberList; + } /// Stores the state of an account before a certain transaction changed it. /// Change on state can be: account is created, selfdestructed, touched while empty /// or changed balance,nonce. - table AccountChangeSets; + table AccountChangeSets { + type Key = BlockNumber; + type Value = AccountBeforeTx; + type SubKey = Address; + } /// Stores the state of a storage key before a certain transaction changed it. /// If [`StorageEntry::value`] is zero, this means storage was not existing /// and needs to be removed. - table StorageChangeSets; + table StorageChangeSets { + type Key = BlockNumberAddress; + type Value = StorageEntry; + type SubKey = B256; + } /// Stores the current state of an [`Account`] indexed with `keccak256Address` /// This table is in preparation for merklization and calculation of state root. /// We are saving whole account data as it is needed for partial update when /// part of storage is changed. Benefit for merklization is that hashed addresses are sorted. - table HashedAccounts; + table HashedAccounts { + type Key = B256; + type Value = Account; + } /// Stores the current storage values indexed with `keccak256Address` and /// hash of storage key `keccak256key`. /// This table is in preparation for merklization and calculation of state root. /// Benefit for merklization is that hashed addresses/keys are sorted. - table HashedStorages; + table HashedStorages { + type Key = B256; + type Value = StorageEntry; + type SubKey = B256; + } /// Stores the current state's Merkle Patricia Tree. 
- table AccountsTrie; + table AccountsTrie { + type Key = StoredNibbles; + type Value = BranchNodeCompact; + } /// From HashedAddress => NibblesSubKey => Intermediate value - table StoragesTrie; + table StoragesTrie { + type Key = B256; + type Value = StorageTrieEntry; + type SubKey = StoredNibblesSubKey; + } /// Stores the transaction sender for each canonical transaction. /// It is needed to speed up execution stage and allows fetching signer without doing /// transaction signed recovery - table TransactionSenders; + table TransactionSenders { + type Key = TxNumber; + type Value = Address; + } /// Stores the highest synced block number and stage-specific checkpoint of each stage. - table StageCheckpoints; + table StageCheckpoints { + type Key = StageId; + type Value = StageCheckpoint; + } /// Stores arbitrary data to keep track of a stage first-sync progress. - table StageCheckpointProgresses>; + table StageCheckpointProgresses { + type Key = StageId; + type Value = Vec; + } /// Stores the highest pruned block number and prune mode of each prune segment. - table PruneCheckpoints; + table PruneCheckpoints { + type Key = PruneSegment; + type Value = PruneCheckpoint; + } /// Stores the history of client versions that have accessed the database with write privileges by unix timestamp in seconds. - table VersionHistory; + table VersionHistory { + type Key = u64; + type Value = ClientVersion; + } /// Stores generic chain state info, like the last finalized block. - table ChainState; + table ChainState { + type Key = ChainStateKey; + type Value = BlockNumber; + } } /// Keys for the `ChainState` table. 
diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 9ccaf051463..66914b00abc 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1645,7 +1645,7 @@ impl StatsReader for StaticFileProvider { fn count_entries(&self) -> ProviderResult { match T::NAME { tables::CanonicalHeaders::NAME | - tables::Headers::NAME | + tables::Headers::
::NAME | tables::HeaderTerminalDifficulties::NAME => Ok(self .get_highest_static_file_block(StaticFileSegment::Headers) .map(|block| block + 1) @@ -1655,10 +1655,11 @@ impl StatsReader for StaticFileProvider { .get_highest_static_file_tx(StaticFileSegment::Receipts) .map(|receipts| receipts + 1) .unwrap_or_default() as usize), - tables::Transactions::NAME => Ok(self + tables::Transactions::::NAME => Ok(self .get_highest_static_file_tx(StaticFileSegment::Transactions) .map(|txs| txs + 1) - .unwrap_or_default() as usize), + .unwrap_or_default() + as usize), _ => Err(ProviderError::UnsupportedProvider), } } From 32ebb181caa838e72740b9a50154917258506c56 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Nov 2024 13:44:06 +0100 Subject: [PATCH 378/970] chore: rm duplicated cfg (#12406) --- crates/primitives/src/transaction/mod.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 208474fc6c4..ed1a7daf1e8 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1755,8 +1755,6 @@ pub mod serde_bincode_compat { TxEip4844, }; use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; - #[cfg(feature = "optimism")] - use op_alloy_consensus::serde_bincode_compat::TxDeposit; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -1784,8 +1782,7 @@ pub mod serde_bincode_compat { Eip4844(Cow<'a, TxEip4844>), Eip7702(TxEip7702<'a>), #[cfg(feature = "optimism")] - #[cfg(feature = "optimism")] - Deposit(TxDeposit<'a>), + Deposit(op_alloy_consensus::serde_bincode_compat::TxDeposit<'a>), } impl<'a> From<&'a super::Transaction> for Transaction<'a> { @@ -1797,7 +1794,9 @@ pub mod serde_bincode_compat { super::Transaction::Eip4844(tx) => Self::Eip4844(Cow::Borrowed(tx)), super::Transaction::Eip7702(tx) => Self::Eip7702(TxEip7702::from(tx)), #[cfg(feature = 
"optimism")] - super::Transaction::Deposit(tx) => Self::Deposit(TxDeposit::from(tx)), + super::Transaction::Deposit(tx) => { + Self::Deposit(op_alloy_consensus::serde_bincode_compat::TxDeposit::from(tx)) + } } } } @@ -1900,7 +1899,6 @@ pub mod serde_bincode_compat { #[cfg(test)] mod tests { use super::super::{serde_bincode_compat, Transaction, TransactionSigned}; - use arbitrary::Arbitrary; use rand::Rng; use reth_testing_utils::generators; From 74d7fe3075aff34e63ce64ca24b7188fc7c7c854 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Fri, 8 Nov 2024 21:50:48 +0700 Subject: [PATCH 379/970] feat(rpc): optimize block validation with state caching (#12299) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/validation.rs | 50 +++++++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 919fe2d8591..c3f2aab70bb 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -20,7 +20,7 @@ use reth_provider::{ AccountReader, BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, HeaderProvider, StateProviderFactory, WithdrawalsProvider, }; -use reth_revm::database::StateProviderDatabase; +use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; use reth_rpc_api::BlockSubmissionValidationApiServer; use reth_rpc_eth_types::EthApiError; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; @@ -28,6 +28,7 @@ use reth_trie::HashedPostState; use revm_primitives::{Address, B256, U256}; use serde::{Deserialize, Serialize}; use std::{collections::HashSet, sync::Arc}; +use tokio::sync::RwLock; /// Configuration for validation API. 
#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] @@ -64,7 +65,7 @@ pub enum ValidationApiError { Execution(#[from] BlockExecutionError), } -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, @@ -76,10 +77,15 @@ pub struct ValidationApiInner { executor_provider: E, /// Set of disallowed addresses disallow: HashSet
, + /// Cached state reads to avoid redundant disk I/O across multiple validation attempts + /// targeting the same state. Stores a tuple of (`block_hash`, `cached_reads`) for the + /// latest head block state. Uses async `RwLock` to safely handle concurrent validation + /// requests. + cached_state: RwLock<(B256, CachedReads)>, } /// The type that implements the `validation` rpc namespace trait -#[derive(Clone, Debug, derive_more::Deref)] +#[derive(Debug, derive_more::Deref)] pub struct ValidationApi { #[deref] inner: Arc>, @@ -105,10 +111,31 @@ where payload_validator, executor_provider, disallow, + cached_state: Default::default(), }); Self { inner } } + + /// Returns the cached reads for the given head hash. + async fn cached_reads(&self, head: B256) -> CachedReads { + let cache = self.inner.cached_state.read().await; + if cache.0 == head { + cache.1.clone() + } else { + Default::default() + } + } + + /// Updates the cached state for the given head hash. + async fn update_cached_reads(&self, head: B256, cached_state: CachedReads) { + let mut cache = self.inner.cached_state.write().await; + if cache.0 == head { + cache.1.extend(cached_state); + } else { + *cache = (head, cached_state) + } + } } impl ValidationApi @@ -119,12 +146,11 @@ where + HeaderProvider + AccountReader + WithdrawalsProvider - + Clone + 'static, E: BlockExecutorProvider, { /// Validates the given block and a [`BidTrace`] against it. 
- pub fn validate_message_against_block( + pub async fn validate_message_against_block( &self, block: SealedBlockWithSenders, message: BidTrace, @@ -168,8 +194,13 @@ where self.consensus.validate_header_against_parent(&block.header, &latest_header)?; self.validate_gas_limit(registered_gas_limit, &latest_header, &block.header)?; - let state_provider = self.provider.state_by_block_hash(latest_header.hash())?; - let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); + let latest_header_hash = latest_header.hash(); + let state_provider = self.provider.state_by_block_hash(latest_header_hash)?; + + let mut request_cache = self.cached_reads(latest_header_hash).await; + + let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); + let executor = self.executor_provider.executor(cached_db); let block = block.unseal(); let mut accessed_blacklisted = None; @@ -186,6 +217,9 @@ where }, )?; + // update the cached reads + self.update_cached_reads(latest_header_hash, request_cache).await; + if let Some(account) = accessed_blacklisted { return Err(ValidationApiError::Blacklist(account)) } @@ -413,6 +447,7 @@ where request.request.message, request.registered_gas_limit, ) + .await .map_err(|e| RethError::Other(e.into())) .to_rpc_result() } @@ -446,6 +481,7 @@ where request.request.message, request.registered_gas_limit, ) + .await .map_err(|e| RethError::Other(e.into())) .to_rpc_result() } From 86230d99624fc8c9ef81c6e6b340b9a8c39beead Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Nov 2024 15:50:57 +0100 Subject: [PATCH 380/970] chore: include path in panic (#12407) --- crates/storage/db/benches/utils.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index d4ae96e0006..62c4dfe6ecb 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -26,13 +26,11 @@ where T::Key: 
Default + Clone + for<'de> serde::Deserialize<'de>, T::Value: Default + Clone + for<'de> serde::Deserialize<'de>, { + let path = + format!("{}/../../../testdata/micro/db/{}.json", env!("CARGO_MANIFEST_DIR"), T::NAME); let list: Vec> = serde_json::from_reader(std::io::BufReader::new( - std::fs::File::open(format!( - "{}/../../../testdata/micro/db/{}.json", - env!("CARGO_MANIFEST_DIR"), - T::NAME - )) - .expect("Test vectors not found. They can be generated from the workspace by calling `cargo run --bin reth --features dev -- test-vectors tables`."), + std::fs::File::open(&path) + .unwrap_or_else(|_| panic!("Test vectors not found. They can be generated from the workspace by calling `cargo run --bin reth --features dev -- test-vectors tables`: {:?}", path)) )) .unwrap(); From 0da914eaebe5fc296098e3b2f406c7250ef2abd4 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Fri, 8 Nov 2024 22:03:29 +0700 Subject: [PATCH 381/970] chore: enable `dbg_macro` lint (#12409) --- Cargo.toml | 1 + clippy.toml | 1 + crates/storage/db-common/src/init.rs | 5 ++++- crates/transaction-pool/src/pool/pending.rs | 1 - 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4471ce32baa..31d2ebb8d4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -175,6 +175,7 @@ branches_sharing_code = "warn" clear_with_drain = "warn" cloned_instead_of_copied = "warn" collection_is_never_read = "warn" +dbg_macro = "warn" derive_partial_eq_without_eq = "warn" doc_markdown = "warn" empty_line_after_doc_comments = "warn" diff --git a/clippy.toml b/clippy.toml index 862c568634e..ab08b1132c1 100644 --- a/clippy.toml +++ b/clippy.toml @@ -15,3 +15,4 @@ doc-valid-idents = [ "WAL", "MessagePack", ] +allow-dbg-in-tests = true diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 014751733e6..8c930b22ef8 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -98,7 
+98,10 @@ where database_hash: block_hash, }) } - Err(e) => return Err(dbg!(e).into()), + Err(e) => { + debug!(?e); + return Err(e.into()); + } } debug!("Writing genesis block."); diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 357cea48974..f4bce8c85a6 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -311,7 +311,6 @@ impl PendingPool { // send the new transaction to any existing pendingpool static file iterators if self.new_transaction_notifier.receiver_count() > 0 { - dbg!("notify"); let _ = self.new_transaction_notifier.send(tx.clone()); } From f03b762036cfcb2cfe71a1de7ebc3203e6761ed4 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Fri, 8 Nov 2024 16:20:26 +0100 Subject: [PATCH 382/970] feat: introduce ParallelProof (#12403) --- Cargo.lock | 1 + crates/trie/parallel/Cargo.toml | 1 + crates/trie/parallel/src/lib.rs | 3 + crates/trie/parallel/src/parallel_proof.rs | 214 +++++++++++++++++++++ 4 files changed, 219 insertions(+) create mode 100644 crates/trie/parallel/src/parallel_proof.rs diff --git a/Cargo.lock b/Cargo.lock index 95f8166ce2f..22c5cb8ae50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9304,6 +9304,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-trie", + "reth-trie-common", "reth-trie-db", "thiserror", "tokio", diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index cc35fe9f914..1b3e2d59be1 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-primitives.workspace = true reth-db.workspace = true reth-trie.workspace = true +reth-trie-common.workspace = true reth-trie-db.workspace = true reth-execution-errors.workspace = true reth-provider.workspace = true diff --git a/crates/trie/parallel/src/lib.rs b/crates/trie/parallel/src/lib.rs index 40a6af34758..25fcb4bac3a 100644 --- a/crates/trie/parallel/src/lib.rs +++ 
b/crates/trie/parallel/src/lib.rs @@ -16,6 +16,9 @@ pub mod stats; /// Implementation of parallel state root computation. pub mod parallel_root; +/// Implementation of parallel proof computation. +pub mod parallel_proof; + /// Parallel state root metrics. #[cfg(feature = "metrics")] pub mod metrics; diff --git a/crates/trie/parallel/src/parallel_proof.rs b/crates/trie/parallel/src/parallel_proof.rs new file mode 100644 index 00000000000..9c7d6b6b8b3 --- /dev/null +++ b/crates/trie/parallel/src/parallel_proof.rs @@ -0,0 +1,214 @@ +use crate::{ + parallel_root::ParallelStateRootError, stats::ParallelTrieTracker, StorageRootTargets, +}; +use alloy_primitives::{map::HashSet, B256}; +use alloy_rlp::{BufMut, Encodable}; +use itertools::Itertools; +use reth_db::DatabaseError; +use reth_execution_errors::StorageRootError; +use reth_provider::{ + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, +}; +use reth_trie::{ + hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, + node_iter::{TrieElement, TrieNodeIter}, + prefix_set::{PrefixSetMut, TriePrefixSetsMut}, + proof::StorageProof, + trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, + walker::TrieWalker, + HashBuilder, MultiProof, Nibbles, TrieAccount, TrieInput, +}; +use reth_trie_common::proof::ProofRetainer; +use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; +use std::{collections::HashMap, sync::Arc}; +use tracing::debug; + +#[cfg(feature = "metrics")] +use crate::metrics::ParallelStateRootMetrics; + +/// TODO: +#[derive(Debug)] +pub struct ParallelProof { + /// Consistent view of the database. + view: ConsistentDbView, + /// Trie input. + input: TrieInput, + /// Parallel state root metrics. + #[cfg(feature = "metrics")] + metrics: ParallelStateRootMetrics, +} + +impl ParallelProof { + /// Create new state proof generator. 
+ pub fn new(view: ConsistentDbView, input: TrieInput) -> Self { + Self { + view, + input, + #[cfg(feature = "metrics")] + metrics: ParallelStateRootMetrics::default(), + } + } +} + +impl ParallelProof +where + Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, +{ + /// Generate a state multiproof according to specified targets. + pub fn multiproof( + self, + targets: HashMap>, + ) -> Result { + let mut tracker = ParallelTrieTracker::default(); + + let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted()); + let hashed_state_sorted = Arc::new(self.input.state.into_sorted()); + + // Extend prefix sets with targets + let mut prefix_sets = self.input.prefix_sets.clone(); + prefix_sets.extend(TriePrefixSetsMut { + account_prefix_set: PrefixSetMut::from(targets.keys().copied().map(Nibbles::unpack)), + storage_prefix_sets: targets + .iter() + .filter(|&(_hashed_address, slots)| (!slots.is_empty())) + .map(|(hashed_address, slots)| { + (*hashed_address, PrefixSetMut::from(slots.iter().map(Nibbles::unpack))) + }) + .collect(), + destroyed_accounts: Default::default(), + }); + let prefix_sets = prefix_sets.freeze(); + + let storage_root_targets = StorageRootTargets::new( + prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), + prefix_sets.storage_prefix_sets.clone(), + ); + + // Pre-calculate storage roots for accounts which were changed. 
+ tracker.set_precomputed_storage_roots(storage_root_targets.len() as u64); + debug!(target: "trie::parallel_state_root", len = storage_root_targets.len(), "pre-generating storage proofs"); + let mut storage_proofs = HashMap::with_capacity(storage_root_targets.len()); + for (hashed_address, prefix_set) in + storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) + { + let view = self.view.clone(); + let target_slots: HashSet = + targets.get(&hashed_address).cloned().unwrap_or_default(); + + let trie_nodes_sorted = trie_nodes_sorted.clone(); + let hashed_state_sorted = hashed_state_sorted.clone(); + + let (tx, rx) = std::sync::mpsc::sync_channel(1); + + rayon::spawn_fifo(move || { + let result = (|| -> Result<_, ParallelStateRootError> { + let provider_ro = view.provider_ro()?; + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); + let hashed_cursor_factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + &hashed_state_sorted, + ); + + StorageProof::new_hashed( + trie_cursor_factory, + hashed_cursor_factory, + hashed_address, + ) + .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().cloned())) + .storage_multiproof(target_slots) + .map_err(|e| { + ParallelStateRootError::StorageRoot(StorageRootError::Database( + DatabaseError::Other(e.to_string()), + )) + }) + })(); + let _ = tx.send(result); + }); + storage_proofs.insert(hashed_address, rx); + } + + let provider_ro = self.view.provider_ro()?; + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); + let hashed_cursor_factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + &hashed_state_sorted, + ); + + // Create the walker. 
+ let walker = TrieWalker::new( + trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, + prefix_sets.account_prefix_set, + ) + .with_deletions_retained(true); + + // Create a hash builder to rebuild the root node since it is not available in the database. + let retainer: ProofRetainer = targets.keys().map(Nibbles::unpack).collect(); + let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + + let mut storages = HashMap::default(); + let mut account_rlp = Vec::with_capacity(128); + let mut account_node_iter = TrieNodeIter::new( + walker, + hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + ); + while let Some(account_node) = + account_node_iter.try_next().map_err(ProviderError::Database)? + { + match account_node { + TrieElement::Branch(node) => { + hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); + } + TrieElement::Leaf(hashed_address, account) => { + let storage_multiproof = match storage_proofs.remove(&hashed_address) { + Some(rx) => rx.recv().map_err(|_| { + ParallelStateRootError::StorageRoot(StorageRootError::Database( + DatabaseError::Other(format!( + "channel closed for {hashed_address}" + )), + )) + })??, + // Since we do not store all intermediate nodes in the database, there might + // be a possibility of re-adding a non-modified leaf to the hash builder. + None => { + tracker.inc_missed_leaves(); + StorageProof::new_hashed( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + hashed_address, + ) + .with_prefix_set_mut(Default::default()) + .storage_multiproof( + targets.get(&hashed_address).cloned().unwrap_or_default(), + ) + .map_err(|e| { + ParallelStateRootError::StorageRoot(StorageRootError::Database( + DatabaseError::Other(e.to_string()), + )) + })? 
+ } + }; + + // Encode account + account_rlp.clear(); + let account = TrieAccount::from((account, storage_multiproof.root)); + account.encode(&mut account_rlp as &mut dyn BufMut); + + hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + storages.insert(hashed_address, storage_multiproof); + } + } + } + let _ = hash_builder.root(); + + #[cfg(feature = "metrics")] + self.metrics.record_state_trie(tracker.finish()); + + Ok(MultiProof { account_subtree: hash_builder.take_proof_nodes(), storages }) + } +} From ba4f169f8783d78fb7e89aed76e34aa179546fc3 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 8 Nov 2024 16:23:56 +0100 Subject: [PATCH 383/970] chore(db): add log for read transaciton monitor sleep time (#12408) --- crates/storage/libmdbx-rs/src/txn_manager.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/txn_manager.rs b/crates/storage/libmdbx-rs/src/txn_manager.rs index 716e8ee62bd..6afd4205a60 100644 --- a/crates/storage/libmdbx-rs/src/txn_manager.rs +++ b/crates/storage/libmdbx-rs/src/txn_manager.rs @@ -289,11 +289,11 @@ mod read_transactions { // Sleep not more than `READ_TRANSACTIONS_CHECK_INTERVAL`, but at least until // the closest deadline of an active read transaction - let duration_until_closest_deadline = - self.max_duration - max_active_transaction_duration.unwrap_or_default(); - std::thread::sleep( - READ_TRANSACTIONS_CHECK_INTERVAL.min(duration_until_closest_deadline), + let sleep_duration = READ_TRANSACTIONS_CHECK_INTERVAL.min( + self.max_duration - max_active_transaction_duration.unwrap_or_default(), ); + trace!(target: "libmdbx", ?sleep_duration, elapsed = ?now.elapsed(), "Putting transaction monitor to sleep"); + std::thread::sleep(sleep_duration); } }; std::thread::Builder::new() From fc484b793f0f94ffe16f7fc521736cab1ab93c7e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Nov 2024 19:28:22 +0100 Subject: [PATCH 384/970] test: fix test vectors (#12411) 
--- crates/cli/commands/src/test_vectors/tables.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 22e54ea1336..6b523c6edd1 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -51,7 +51,7 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { for table in tables { match table.as_str() { $( - stringify!($table_type$(<$($generic),+>)?) => { + stringify!($table_type) => { println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME); generate_vector!($table_type$(<$($generic),+>)?, $per_table, $table_or_dup); From 5cfe9a9879aa6d8fb6fcafc2b38bbd25fe358eaf Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Fri, 8 Nov 2024 12:29:34 -0600 Subject: [PATCH 385/970] renamed OptimismHardfork to OpHardfork (#12410) --- crates/optimism/chainspec/src/base.rs | 6 +- crates/optimism/chainspec/src/base_sepolia.rs | 6 +- crates/optimism/chainspec/src/lib.rs | 132 +++++++++--------- crates/optimism/chainspec/src/op.rs | 6 +- crates/optimism/chainspec/src/op_sepolia.rs | 6 +- crates/optimism/consensus/src/proof.rs | 10 +- crates/optimism/evm/src/config.rs | 28 ++-- crates/optimism/evm/src/execute.rs | 6 +- crates/optimism/evm/src/l1.rs | 23 ++- crates/optimism/hardforks/src/dev.rs | 12 +- crates/optimism/hardforks/src/hardfork.rs | 33 ++--- crates/optimism/hardforks/src/lib.rs | 30 ++-- crates/optimism/node/src/engine.rs | 9 +- 13 files changed, 148 insertions(+), 159 deletions(-) diff --git a/crates/optimism/chainspec/src/base.rs b/crates/optimism/chainspec/src/base.rs index 7aa26bf9a64..f43457ead43 100644 --- a/crates/optimism/chainspec/src/base.rs +++ b/crates/optimism/chainspec/src/base.rs @@ -6,7 +6,7 @@ use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use 
reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -21,11 +21,11 @@ pub static BASE_MAINNET: LazyLock> = LazyLock::new(|| { "f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::base_mainnet(), + hardforks: OpHardfork::base_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::optimism()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), ] .into(), ), diff --git a/crates/optimism/chainspec/src/base_sepolia.rs b/crates/optimism/chainspec/src/base_sepolia.rs index b992dcabaf6..adcb9e2bc1f 100644 --- a/crates/optimism/chainspec/src/base_sepolia.rs +++ b/crates/optimism/chainspec/src/base_sepolia.rs @@ -6,7 +6,7 @@ use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -21,11 +21,11 @@ pub static BASE_SEPOLIA: LazyLock> = LazyLock::new(|| { "0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::base_sepolia(), + hardforks: OpHardfork::base_sepolia(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::base_sepolia()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::base_sepolia_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::base_sepolia_canyon()), ] .into(), ), diff --git a/crates/optimism/chainspec/src/lib.rs 
b/crates/optimism/chainspec/src/lib.rs index aa59e9ab3f8..7bd0e433a2d 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -96,7 +96,7 @@ impl OpChainSpecBuilder { } /// Remove the given fork from the spec. - pub fn without_fork(mut self, fork: reth_optimism_forks::OptimismHardfork) -> Self { + pub fn without_fork(mut self, fork: reth_optimism_forks::OpHardfork) -> Self { self.inner = self.inner.without_fork(fork); self } @@ -104,19 +104,17 @@ impl OpChainSpecBuilder { /// Enable Bedrock at genesis pub fn bedrock_activated(mut self) -> Self { self.inner = self.inner.paris_activated(); - self.inner = self - .inner - .with_fork(reth_optimism_forks::OptimismHardfork::Bedrock, ForkCondition::Block(0)); + self.inner = + self.inner.with_fork(reth_optimism_forks::OpHardfork::Bedrock, ForkCondition::Block(0)); self } /// Enable Regolith at genesis pub fn regolith_activated(mut self) -> Self { self = self.bedrock_activated(); - self.inner = self.inner.with_fork( - reth_optimism_forks::OptimismHardfork::Regolith, - ForkCondition::Timestamp(0), - ); + self.inner = self + .inner + .with_fork(reth_optimism_forks::OpHardfork::Regolith, ForkCondition::Timestamp(0)); self } @@ -127,7 +125,7 @@ impl OpChainSpecBuilder { self.inner = self.inner.with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); self.inner = self .inner - .with_fork(reth_optimism_forks::OptimismHardfork::Canyon, ForkCondition::Timestamp(0)); + .with_fork(reth_optimism_forks::OpHardfork::Canyon, ForkCondition::Timestamp(0)); self } @@ -137,7 +135,7 @@ impl OpChainSpecBuilder { self.inner = self.inner.with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)); self.inner = self .inner - .with_fork(reth_optimism_forks::OptimismHardfork::Ecotone, ForkCondition::Timestamp(0)); + .with_fork(reth_optimism_forks::OpHardfork::Ecotone, ForkCondition::Timestamp(0)); self } @@ -146,7 +144,7 @@ impl OpChainSpecBuilder { self = self.ecotone_activated(); self.inner = 
self .inner - .with_fork(reth_optimism_forks::OptimismHardfork::Fjord, ForkCondition::Timestamp(0)); + .with_fork(reth_optimism_forks::OpHardfork::Fjord, ForkCondition::Timestamp(0)); self } @@ -155,17 +153,16 @@ impl OpChainSpecBuilder { self = self.fjord_activated(); self.inner = self .inner - .with_fork(reth_optimism_forks::OptimismHardfork::Granite, ForkCondition::Timestamp(0)); + .with_fork(reth_optimism_forks::OpHardfork::Granite, ForkCondition::Timestamp(0)); self } /// Enable Holocene at genesis pub fn holocene_activated(mut self) -> Self { self = self.granite_activated(); - self.inner = self.inner.with_fork( - reth_optimism_forks::OptimismHardfork::Holocene, - ForkCondition::Timestamp(0), - ); + self.inner = self + .inner + .with_fork(reth_optimism_forks::OpHardfork::Holocene, ForkCondition::Timestamp(0)); self } @@ -194,10 +191,9 @@ impl OpChainSpec { parent: &Header, timestamp: u64, ) -> Result { - let is_holocene_activated = self.inner.is_fork_active_at_timestamp( - reth_optimism_forks::OptimismHardfork::Holocene, - timestamp, - ); + let is_holocene_activated = self + .inner + .is_fork_active_at_timestamp(reth_optimism_forks::OpHardfork::Holocene, timestamp); // If we are in the Holocene, we need to use the base fee params // from the parent block's extra data. 
// Else, use the base fee params (default values) from chainspec @@ -344,7 +340,7 @@ impl OptimismHardforks for OpChainSpec {} impl From for OpChainSpec { fn from(genesis: Genesis) -> Self { - use reth_optimism_forks::OptimismHardfork; + use reth_optimism_forks::OpHardfork; let optimism_genesis_info = OpGenesisInfo::extract_from(&genesis); let genesis_info = optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default(); @@ -363,7 +359,7 @@ impl From for OpChainSpec { (EthereumHardfork::London.boxed(), genesis.config.london_block), (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), - (OptimismHardfork::Bedrock.boxed(), genesis_info.bedrock_block), + (OpHardfork::Bedrock.boxed(), genesis_info.bedrock_block), ]; let mut block_hardforks = hardfork_opts .into_iter() @@ -391,11 +387,11 @@ impl From for OpChainSpec { (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), - (OptimismHardfork::Regolith.boxed(), genesis_info.regolith_time), - (OptimismHardfork::Canyon.boxed(), genesis_info.canyon_time), - (OptimismHardfork::Ecotone.boxed(), genesis_info.ecotone_time), - (OptimismHardfork::Fjord.boxed(), genesis_info.fjord_time), - (OptimismHardfork::Granite.boxed(), genesis_info.granite_time), + (OpHardfork::Regolith.boxed(), genesis_info.regolith_time), + (OpHardfork::Canyon.boxed(), genesis_info.canyon_time), + (OpHardfork::Ecotone.boxed(), genesis_info.ecotone_time), + (OpHardfork::Fjord.boxed(), genesis_info.fjord_time), + (OpHardfork::Granite.boxed(), genesis_info.granite_time), ]; let mut time_hardforks = time_hardfork_opts @@ -408,7 +404,7 @@ impl From for OpChainSpec { block_hardforks.append(&mut time_hardforks); // Ordered Hardforks - let mainnet_hardforks = OptimismHardfork::op_mainnet(); + let mainnet_hardforks 
= OpHardfork::op_mainnet(); let mainnet_order = mainnet_hardforks.forks_iter(); let mut ordered_hardforks = Vec::with_capacity(block_hardforks.len()); @@ -464,7 +460,7 @@ impl OpGenesisInfo { BaseFeeParams::new(denominator as u128, elasticity as u128), ), ( - reth_optimism_forks::OptimismHardfork::Canyon.boxed(), + reth_optimism_forks::OpHardfork::Canyon.boxed(), BaseFeeParams::new(canyon_denominator as u128, elasticity as u128), ), ] @@ -490,7 +486,7 @@ mod tests { use alloy_primitives::b256; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head}; - use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; + use reth_optimism_forks::{OpHardfork, OptimismHardforks}; use crate::*; @@ -763,19 +759,19 @@ mod tests { BaseFeeParamsKind::Constant(BaseFeeParams::new(70, 60)) ); - assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 0)); - - assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 51)); + assert!(!chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 0)); + 
assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 0)); + + assert!(chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 40)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 50)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 51)); } #[test] @@ -829,25 +825,25 @@ mod tests { BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::new(70, 60)), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::new(80, 60)), + (OpHardfork::Canyon.boxed(), BaseFeeParams::new(80, 60)), ] .into() ) ); - assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 0)); - - assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 
50)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 51)); + assert!(!chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 0)); + + assert!(chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 40)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 50)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 51)); } #[test] @@ -921,14 +917,14 @@ mod tests { }) ); - assert!(chainspec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); + assert!(chainspec.is_fork_active_at_block(OpHardfork::Bedrock, 0)); - assert!(chainspec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); + assert!(chainspec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); } #[test] fn test_fork_order_optimism_mainnet() { - use reth_optimism_forks::OptimismHardfork; + use reth_optimism_forks::OpHardfork; let genesis = Genesis { config: ChainConfig { @@ -984,14 +980,14 @@ mod tests { EthereumHardfork::ArrowGlacier.boxed(), EthereumHardfork::GrayGlacier.boxed(), EthereumHardfork::Paris.boxed(), - OptimismHardfork::Bedrock.boxed(), - OptimismHardfork::Regolith.boxed(), + OpHardfork::Bedrock.boxed(), + OpHardfork::Regolith.boxed(), EthereumHardfork::Shanghai.boxed(), - OptimismHardfork::Canyon.boxed(), + OpHardfork::Canyon.boxed(), EthereumHardfork::Cancun.boxed(), - OptimismHardfork::Ecotone.boxed(), - 
OptimismHardfork::Fjord.boxed(), - OptimismHardfork::Granite.boxed(), + OpHardfork::Ecotone.boxed(), + OpHardfork::Fjord.boxed(), + OpHardfork::Granite.boxed(), ]; assert!(expected_hardforks @@ -1022,8 +1018,8 @@ mod tests { } fn holocene_chainspec() -> Arc { - let mut hardforks = OptimismHardfork::base_sepolia(); - hardforks.insert(OptimismHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + let mut hardforks = OpHardfork::base_sepolia(); + hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); Arc::new(OpChainSpec { inner: ChainSpec { chain: BASE_SEPOLIA.inner.chain, diff --git a/crates/optimism/chainspec/src/op.rs b/crates/optimism/chainspec/src/op.rs index 5afb236cd33..fcbe7dee7dd 100644 --- a/crates/optimism/chainspec/src/op.rs +++ b/crates/optimism/chainspec/src/op.rs @@ -7,7 +7,7 @@ use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -24,11 +24,11 @@ pub static OP_MAINNET: LazyLock> = LazyLock::new(|| { "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::op_mainnet(), + hardforks: OpHardfork::op_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::optimism()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), ] .into(), ), diff --git a/crates/optimism/chainspec/src/op_sepolia.rs b/crates/optimism/chainspec/src/op_sepolia.rs index 31c9eda6bdd..35466cb2154 100644 --- a/crates/optimism/chainspec/src/op_sepolia.rs +++ b/crates/optimism/chainspec/src/op_sepolia.rs @@ -7,7 +7,7 @@ use 
alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -22,11 +22,11 @@ pub static OP_SEPOLIA: LazyLock> = LazyLock::new(|| { "102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::op_sepolia(), + hardforks: OpHardfork::op_sepolia(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::optimism_sepolia()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::optimism_sepolia_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_sepolia_canyon()), ] .into(), ), diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs index b283356016c..813e451da25 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -2,7 +2,7 @@ use alloy_primitives::B256; use reth_chainspec::ChainSpec; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use reth_primitives::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; use reth_trie_common::root::ordered_trie_root_with_encoder; @@ -17,8 +17,8 @@ pub(crate) fn calculate_receipt_root_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. 
- if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) + if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) && + !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) { let receipts = receipts .iter() @@ -50,8 +50,8 @@ pub fn calculate_receipt_root_no_memo_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. - if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) + if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) && + !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) { let receipts = receipts .iter() diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index b00341ff677..f2d35ba56c4 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -1,6 +1,6 @@ use reth_ethereum_forks::{EthereumHardfork, Head}; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; /// Returns the revm [`SpecId`](revm_primitives::SpecId) at the given timestamp. 
/// @@ -12,17 +12,17 @@ pub fn revm_spec_by_timestamp_after_bedrock( chain_spec: &OpChainSpec, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.fork(OptimismHardfork::Holocene).active_at_timestamp(timestamp) { + if chain_spec.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) { revm_primitives::HOLOCENE - } else if chain_spec.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Granite).active_at_timestamp(timestamp) { revm_primitives::GRANITE - } else if chain_spec.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Fjord).active_at_timestamp(timestamp) { revm_primitives::FJORD - } else if chain_spec.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Ecotone).active_at_timestamp(timestamp) { revm_primitives::ECOTONE - } else if chain_spec.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) { revm_primitives::CANYON - } else if chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) { revm_primitives::REGOLITH } else { revm_primitives::BEDROCK @@ -31,19 +31,19 @@ pub fn revm_spec_by_timestamp_after_bedrock( /// Map the latest active hardfork at the given block to a revm [`SpecId`](revm_primitives::SpecId). 
pub fn revm_spec(chain_spec: &OpChainSpec, block: &Head) -> revm_primitives::SpecId { - if chain_spec.fork(OptimismHardfork::Holocene).active_at_head(block) { + if chain_spec.fork(OpHardfork::Holocene).active_at_head(block) { revm_primitives::HOLOCENE - } else if chain_spec.fork(OptimismHardfork::Granite).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Granite).active_at_head(block) { revm_primitives::GRANITE - } else if chain_spec.fork(OptimismHardfork::Fjord).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Fjord).active_at_head(block) { revm_primitives::FJORD - } else if chain_spec.fork(OptimismHardfork::Ecotone).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Ecotone).active_at_head(block) { revm_primitives::ECOTONE - } else if chain_spec.fork(OptimismHardfork::Canyon).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Canyon).active_at_head(block) { revm_primitives::CANYON - } else if chain_spec.fork(OptimismHardfork::Regolith).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Regolith).active_at_head(block) { revm_primitives::REGOLITH - } else if chain_spec.fork(OptimismHardfork::Bedrock).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Bedrock).active_at_head(block) { revm_primitives::BEDROCK } else if chain_spec.fork(EthereumHardfork::Prague).active_at_head(block) { revm_primitives::PRAGUE diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 8d701cda423..2b004e6eb9d 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -19,7 +19,7 @@ use reth_evm::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; use reth_revm::{Database, State}; use revm_primitives::{ @@ -158,7 +158,7 @@ where let mut evm = 
self.evm_config.evm_with_env(&mut self.state, env); let is_regolith = - self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); + self.chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(block.timestamp); let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.transactions.len()); @@ -233,7 +233,7 @@ where // this is only set for post-Canyon deposit transactions. deposit_receipt_version: (transaction.is_deposit() && self.chain_spec - .is_fork_active_at_timestamp(OptimismHardfork::Canyon, block.timestamp)) + .is_fork_active_at_timestamp(OpHardfork::Canyon, block.timestamp)) .then_some(1), }); } diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index cdd33510c92..6ff841b9ddc 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -6,7 +6,7 @@ use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256}; use reth_chainspec::ChainSpec; use reth_execution_errors::BlockExecutionError; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use reth_primitives::BlockBody; use revm::{ primitives::{Bytecode, HashMap, SpecId}, @@ -215,14 +215,13 @@ impl RethL1BlockInfo for L1BlockInfo { return Ok(U256::ZERO) } - let spec_id = if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, timestamp) - { + let spec_id = if chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, timestamp) { SpecId::FJORD - } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, timestamp) { SpecId::ECOTONE - } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) { SpecId::REGOLITH - } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Bedrock, timestamp) { + } else 
if chain_spec.is_fork_active_at_timestamp(OpHardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { return Err(OpBlockExecutionError::L1BlockInfoError { @@ -239,12 +238,11 @@ impl RethL1BlockInfo for L1BlockInfo { timestamp: u64, input: &[u8], ) -> Result { - let spec_id = if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, timestamp) - { + let spec_id = if chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, timestamp) { SpecId::FJORD - } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) { SpecId::REGOLITH - } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Bedrock, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { return Err(OpBlockExecutionError::L1BlockInfoError { @@ -270,9 +268,8 @@ where // If the canyon hardfork is active at the current timestamp, and it was not active at the // previous block timestamp (heuristically, block time is not perfectly constant at 2s), and the // chain is an optimism chain, then we need to force-deploy the create2 deployer contract. 
- if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) && - !chain_spec - .is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp.saturating_sub(2)) + if chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) && + !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp.saturating_sub(2)) { trace!(target: "evm", "Forcing create2 deployer contract deployment on Canyon transition"); diff --git a/crates/optimism/hardforks/src/dev.rs b/crates/optimism/hardforks/src/dev.rs index 328ef501c46..5fe77a31402 100644 --- a/crates/optimism/hardforks/src/dev.rs +++ b/crates/optimism/hardforks/src/dev.rs @@ -24,13 +24,13 @@ pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { EthereumHardfork::Paris.boxed(), ForkCondition::TTD { fork_block: None, total_difficulty: U256::ZERO }, ), - (crate::OptimismHardfork::Bedrock.boxed(), ForkCondition::Block(0)), - (crate::OptimismHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)), + (crate::OpHardfork::Bedrock.boxed(), ForkCondition::Block(0)), + (crate::OpHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)), (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)), - (crate::OptimismHardfork::Canyon.boxed(), ForkCondition::Timestamp(0)), + (crate::OpHardfork::Canyon.boxed(), ForkCondition::Timestamp(0)), (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(0)), - (crate::OptimismHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)), - (crate::OptimismHardfork::Fjord.boxed(), ForkCondition::Timestamp(0)), - (crate::OptimismHardfork::Granite.boxed(), ForkCondition::Timestamp(0)), + (crate::OpHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)), + (crate::OpHardfork::Fjord.boxed(), ForkCondition::Timestamp(0)), + (crate::OpHardfork::Granite.boxed(), ForkCondition::Timestamp(0)), ]) }); diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 440314e3711..9a9786a8fe0 100644 --- 
a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -18,7 +18,7 @@ hardfork!( /// /// When building a list of hardforks for a chain, it's still expected to mix with /// [`EthereumHardfork`]. - OptimismHardfork { + OpHardfork { /// Bedrock: . Bedrock, /// Regolith: . @@ -36,7 +36,7 @@ hardfork!( } ); -impl OptimismHardfork { +impl OpHardfork { /// Retrieves the activation block for the specified hardfork on the given chain. pub fn activation_block(self, fork: H, chain: Chain) -> Option { if chain == Chain::base_sepolia() { @@ -328,13 +328,13 @@ fn match_hardfork(fork: H, hardfork_fn: HF, optimism_hardfork_fn: OH where H: Hardfork, HF: Fn(&EthereumHardfork) -> Option, - OHF: Fn(&OptimismHardfork) -> Option, + OHF: Fn(&OpHardfork) -> Option, { let fork: &dyn Any = ⋔ if let Some(fork) = fork.downcast_ref::() { return hardfork_fn(fork) } - fork.downcast_ref::().and_then(optimism_hardfork_fn) + fork.downcast_ref::().and_then(optimism_hardfork_fn) } #[cfg(test)] @@ -346,35 +346,32 @@ mod tests { #[test] fn test_match_hardfork() { assert_eq!( - OptimismHardfork::base_mainnet_activation_block(EthereumHardfork::Cancun), + OpHardfork::base_mainnet_activation_block(EthereumHardfork::Cancun), Some(11188936) ); - assert_eq!( - OptimismHardfork::base_mainnet_activation_block(OptimismHardfork::Canyon), - Some(9101527) - ); + assert_eq!(OpHardfork::base_mainnet_activation_block(OpHardfork::Canyon), Some(9101527)); } #[test] fn check_op_hardfork_from_str() { let hardfork_str = ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe"]; let expected_hardforks = [ - OptimismHardfork::Bedrock, - OptimismHardfork::Regolith, - OptimismHardfork::Canyon, - OptimismHardfork::Ecotone, - OptimismHardfork::Fjord, - OptimismHardfork::Granite, + OpHardfork::Bedrock, + OpHardfork::Regolith, + OpHardfork::Canyon, + OpHardfork::Ecotone, + OpHardfork::Fjord, + OpHardfork::Granite, ]; - let hardforks: Vec = - hardfork_str.iter().map(|h| 
OptimismHardfork::from_str(h).unwrap()).collect(); + let hardforks: Vec = + hardfork_str.iter().map(|h| OpHardfork::from_str(h).unwrap()).collect(); assert_eq!(hardforks, expected_hardforks); } #[test] fn check_nonexistent_hardfork_from_str() { - assert!(OptimismHardfork::from_str("not a hardfork").is_err()); + assert!(OpHardfork::from_str("not a hardfork").is_err()); } } diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index bac0d0e04ed..df159161e0e 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -14,47 +14,47 @@ pub mod hardfork; mod dev; pub use dev::DEV_HARDFORKS; -pub use hardfork::OptimismHardfork; +pub use hardfork::OpHardfork; use reth_ethereum_forks::EthereumHardforks; /// Extends [`EthereumHardforks`] with optimism helper methods. pub trait OptimismHardforks: EthereumHardforks { - /// Convenience method to check if [`OptimismHardfork::Bedrock`] is active at a given block + /// Convenience method to check if [`OpHardfork::Bedrock`] is active at a given block /// number. fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { - self.fork(OptimismHardfork::Bedrock).active_at_block(block_number) + self.fork(OpHardfork::Bedrock).active_at_block(block_number) } - /// Returns `true` if [`Canyon`](OptimismHardfork::Canyon) is active at given block timestamp. + /// Returns `true` if [`Canyon`](OpHardfork::Canyon) is active at given block timestamp. fn is_canyon_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp) + self.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) } - /// Returns `true` if [`Ecotone`](OptimismHardfork::Ecotone) is active at given block timestamp. + /// Returns `true` if [`Ecotone`](OpHardfork::Ecotone) is active at given block timestamp. 
fn is_ecotone_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) + self.fork(OpHardfork::Ecotone).active_at_timestamp(timestamp) } - /// Returns `true` if [`Fjord`](OptimismHardfork::Fjord) is active at given block timestamp. + /// Returns `true` if [`Fjord`](OpHardfork::Fjord) is active at given block timestamp. fn is_fjord_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) + self.fork(OpHardfork::Fjord).active_at_timestamp(timestamp) } - /// Returns `true` if [`Granite`](OptimismHardfork::Granite) is active at given block timestamp. + /// Returns `true` if [`Granite`](OpHardfork::Granite) is active at given block timestamp. fn is_granite_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) + self.fork(OpHardfork::Granite).active_at_timestamp(timestamp) } - /// Returns `true` if [`Holocene`](OptimismHardfork::Holocene) is active at given block + /// Returns `true` if [`Holocene`](OpHardfork::Holocene) is active at given block /// timestamp. fn is_holocene_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Holocene).active_at_timestamp(timestamp) + self.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) } - /// Returns `true` if [`Regolith`](OptimismHardfork::Regolith) is active at given block + /// Returns `true` if [`Regolith`](OpHardfork::Regolith) is active at given block /// timestamp. 
fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Regolith).active_at_timestamp(timestamp) + self.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) } } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index e337a23551e..7400f149a96 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -14,7 +14,7 @@ use reth_node_api::{ validate_version_specific_fields, EngineTypes, EngineValidator, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; +use reth_optimism_forks::{OpHardfork, OptimismHardforks}; use reth_optimism_payload_builder::{OpBuiltPayload, OpPayloadBuilderAttributes}; /// The types used in the optimism beacon consensus engine. @@ -81,7 +81,7 @@ pub fn validate_withdrawals_presence( timestamp: u64, has_withdrawals: bool, ) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp); + let is_shanghai = chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp); match version { EngineApiMessageVersion::V1 => { @@ -178,10 +178,9 @@ mod test { use super::*; fn get_chainspec(is_holocene: bool) -> Arc { - let mut hardforks = OptimismHardfork::base_sepolia(); + let mut hardforks = OpHardfork::base_sepolia(); if is_holocene { - hardforks - .insert(OptimismHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); } Arc::new(OpChainSpec { inner: ChainSpec { From f86efcc800b4e8f0884b76cd9960dc9f584aae31 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 9 Nov 2024 08:29:27 +0100 Subject: [PATCH 386/970] chore: clippy happy (#12419) --- crates/node/core/src/args/payload_builder.rs | 2 +- crates/node/core/src/version.rs | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git 
a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index 524a93195de..cd7ba7dccfb 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -86,7 +86,7 @@ impl TypedValueParser for ExtradataValueParser { ) -> Result { let val = value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?; - if val.as_bytes().len() > MAXIMUM_EXTRA_DATA_SIZE { + if val.len() > MAXIMUM_EXTRA_DATA_SIZE { return Err(clap::Error::raw( clap::error::ErrorKind::InvalidValue, format!( diff --git a/crates/node/core/src/version.rs b/crates/node/core/src/version.rs index 84fcf3f0f11..4bf2dc56f39 100644 --- a/crates/node/core/src/version.rs +++ b/crates/node/core/src/version.rs @@ -144,9 +144,6 @@ mod tests { #[test] fn assert_extradata_less_32bytes() { let extradata = default_extradata(); - assert!( - extradata.as_bytes().len() <= 32, - "extradata must be less than 32 bytes: {extradata}" - ) + assert!(extradata.len() <= 32, "extradata must be less than 32 bytes: {extradata}") } } From 7a65cce1e1dd456d1af5202f6604bc8cf8dcff18 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 9 Nov 2024 08:29:41 +0100 Subject: [PATCH 387/970] chore: rm fqs for NodeCore (#12418) --- crates/rpc/rpc/src/debug.rs | 8 ++++---- crates/rpc/rpc/src/eth/bundle.rs | 4 ++-- crates/rpc/rpc/src/eth/sim_bundle.rs | 8 ++------ crates/rpc/rpc/src/trace.rs | 4 ++-- 4 files changed, 10 insertions(+), 14 deletions(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 4404515e03c..a74d1b5a155 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -27,7 +27,7 @@ use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ helpers::{EthApiSpec, EthTransactions, TraceExt}, - EthApiTypes, FromEthApiError, RpcNodeCore, + EthApiTypes, FromEthApiError, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use 
reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; @@ -115,7 +115,7 @@ where env: Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - RpcNodeCore::evm_config(this.eth_api()).tx_env(tx, *signer), + this.eth_api().evm_config().tx_env(tx, *signer), ), handler_cfg: cfg.handler_cfg, }; @@ -264,7 +264,7 @@ where env: Env::boxed( cfg.cfg_env.clone(), block_env, - RpcNodeCore::evm_config(this.eth_api()).tx_env(tx.as_signed(), tx.signer()), + this.eth_api().evm_config().tx_env(tx.as_signed(), tx.signer()), ), handler_cfg: cfg.handler_cfg, }; @@ -533,7 +533,7 @@ where env: Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - RpcNodeCore::evm_config(this.eth_api()).tx_env(tx, *signer), + this.eth_api().evm_config().tx_env(tx, *signer), ), handler_cfg: cfg.handler_cfg, }; diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index db8c01c5c3b..a2e0be30437 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -167,7 +167,7 @@ where let mut total_gas_fess = U256::ZERO; let mut hasher = Keccak256::new(); - let mut evm = RpcNodeCore::evm_config(ð_api).evm_with_env(db, env); + let mut evm = eth_api.evm_config().evm_with_env(db, env); let mut results = Vec::with_capacity(transactions.len()); let mut transactions = transactions.into_iter().peekable(); @@ -188,7 +188,7 @@ where .effective_tip_per_gas(basefee) .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) .map_err(Eth::Error::from_eth_err)?; - RpcNodeCore::evm_config(ð_api).fill_tx_env(evm.tx_mut(), &tx, signer); + eth_api.evm_config().fill_tx_env(evm.tx_mut(), &tx, signer); let ResultAndState { result, state } = evm.transact().map_err(Eth::Error::from_evm_err)?; diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index f49d7984f8b..40d951f755f 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -307,7 +307,7 @@ where let mut refundable_value = U256::ZERO; let mut 
body_logs: Vec = Vec::new(); - let mut evm = RpcNodeCore::evm_config(ð_api).evm_with_env(db, env); + let mut evm = eth_api.evm_config().evm_with_env(db, env); for item in &flattened_bundle { // Check inclusion constraints @@ -323,11 +323,7 @@ where ) .into()); } - RpcNodeCore::evm_config(ð_api).fill_tx_env( - evm.tx_mut(), - &item.tx, - item.signer, - ); + eth_api.evm_config().fill_tx_env(evm.tx_mut(), &item.tx, item.signer); let ResultAndState { result, state } = evm.transact().map_err(EthApiError::from_eth_err)?; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 41bc0ad2098..45c5f1a3bc3 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -22,7 +22,7 @@ use reth_primitives::Header; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; -use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError, RpcNodeCore}; +use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError}; use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ @@ -122,7 +122,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( cfg, block, - RpcNodeCore::evm_config(self.eth_api()).tx_env(tx.as_signed(), tx.signer()), + self.eth_api().evm_config().tx_env(tx.as_signed(), tx.signer()), ); let config = TracingInspectorConfig::from_parity_config(&trace_types); From b5fce61738d28a8d5c76a925025d78f833c11963 Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Sat, 9 Nov 2024 15:42:37 +0800 Subject: [PATCH 388/970] ecies: use align_num value (#12139) Co-authored-by: DaniPopes <57450786+DaniPopes@users.noreply.github.com> --- crates/net/ecies/src/algorithm.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index e4266d9a06f..f799b6c7f6c 100644 --- 
a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -688,7 +688,7 @@ impl ECIES { pub fn body_len(&self) -> usize { let len = self.body_size.unwrap(); - (if len % 16 == 0 { len } else { (len / 16 + 1) * 16 }) + 16 + Self::align_16(len) + 16 } #[cfg(test)] @@ -699,7 +699,7 @@ impl ECIES { } pub fn write_body(&mut self, out: &mut BytesMut, data: &[u8]) { - let len = if data.len() % 16 == 0 { data.len() } else { (data.len() / 16 + 1) * 16 }; + let len = Self::align_16(data.len()); let old_len = out.len(); out.resize(old_len + len, 0); @@ -732,6 +732,14 @@ impl ECIES { self.ingress_aes.as_mut().unwrap().apply_keystream(ret); Ok(split_at_mut(ret, size)?.0) } + + /// Returns `num` aligned to 16. + /// + /// `` + #[inline] + const fn align_16(num: usize) -> usize { + (num + (16 - 1)) & !(16 - 1) + } } #[cfg(test)] From d2f494bd88b17af65244473bfe81e676dc8c422f Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 9 Nov 2024 08:55:06 +0100 Subject: [PATCH 389/970] primitives: replace primitive `Withdrawals` with alloy (#12119) Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> Co-authored-by: Matthias Seitz --- .../cli/commands/src/test_vectors/compact.rs | 5 +- crates/evm/src/state_change.rs | 4 +- crates/optimism/storage/src/lib.rs | 4 +- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives-traits/src/withdrawal.rs | 85 ++----------------- crates/storage/codecs/src/alloy/withdrawal.rs | 29 ++++++- crates/storage/db-api/src/models/mod.rs | 4 +- 7 files changed, 41 insertions(+), 92 deletions(-) diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index c2995170057..c498718e9fc 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -1,3 +1,4 @@ +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{hex, private::getrandom::getrandom, 
PrimitiveSignature, TxKind}; use arbitrary::Arbitrary; use eyre::{Context, Result}; @@ -22,7 +23,7 @@ use reth_db::{ use reth_fs_util as fs; use reth_primitives::{ Account, Log, LogData, Receipt, ReceiptWithBloom, StorageEntry, Transaction, - TransactionSignedNoHash, TxType, Withdrawals, + TransactionSignedNoHash, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneMode}; use reth_stages_types::{ @@ -75,7 +76,6 @@ compact_types!( // reth-primitives Account, Receipt, - Withdrawals, ReceiptWithBloom, // reth_codecs::alloy Authorization, @@ -83,6 +83,7 @@ compact_types!( Header, HeaderExt, Withdrawal, + Withdrawals, TxEip2930, TxEip1559, TxEip4844, diff --git a/crates/evm/src/state_change.rs b/crates/evm/src/state_change.rs index 2d91ac30eeb..0e207fc2dbe 100644 --- a/crates/evm/src/state_change.rs +++ b/crates/evm/src/state_change.rs @@ -4,7 +4,7 @@ use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{map::HashMap, Address, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc; -use reth_primitives::{Block, Withdrawals}; +use reth_primitives::Block; /// Collect all balance changes at the end of the block. 
/// @@ -37,7 +37,7 @@ pub fn post_block_balance_increments( insert_post_block_withdrawals_balance_increments( chain_spec, block.timestamp, - block.body.withdrawals.as_ref().map(Withdrawals::as_ref), + block.body.withdrawals.as_ref().map(|w| w.as_slice()), &mut balance_increments, ); diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index 347b690c5c7..c3b8a71feea 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -16,7 +16,7 @@ mod tests { CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }; - use reth_primitives::{Account, Receipt, ReceiptWithBloom, Withdrawals}; + use reth_primitives::{Account, Receipt, ReceiptWithBloom}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, @@ -47,7 +47,6 @@ mod tests { assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); // In case of failure, refer to the documentation of the // [`validate_bitflag_backwards_compat`] macro for detailed instructions on handling @@ -73,6 +72,5 @@ mod tests { validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); } } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 3316e713541..9f27726aeca 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -37,7 +37,7 @@ pub mod block; pub use block::{body::BlockBody, Block, FullBlock}; 
mod withdrawal; -pub use withdrawal::Withdrawals; +pub use withdrawal::{Withdrawal, Withdrawals}; mod error; pub use error::{GotExpected, GotExpectedBoxed}; diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 8f072afa578..9c6d8b69797 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -1,86 +1,11 @@ //! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. -use alloc::vec::Vec; -use alloy_eips::eip4895::Withdrawal; -use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; -use derive_more::{AsRef, Deref, DerefMut, From, IntoIterator}; -use reth_codecs::{add_arbitrary_tests, Compact}; -use serde::{Deserialize, Serialize}; +/// Re-export from `alloy_eips`. +#[doc(inline)] +pub use alloy_eips::eip4895::Withdrawal; /// Represents a collection of Withdrawals. -#[derive( - Debug, - Clone, - PartialEq, - Eq, - Default, - Hash, - From, - AsRef, - Deref, - DerefMut, - IntoIterator, - RlpEncodableWrapper, - RlpDecodableWrapper, - Serialize, - Deserialize, - Compact, -)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] -#[as_ref(forward)] -pub struct Withdrawals(Vec); - -impl Withdrawals { - /// Create a new Withdrawals instance. - pub const fn new(withdrawals: Vec) -> Self { - Self(withdrawals) - } - - /// Calculate the total size, including capacity, of the Withdrawals. - #[inline] - pub fn total_size(&self) -> usize { - self.capacity() * core::mem::size_of::() - } - - /// Calculate a heuristic for the in-memory size of the [Withdrawals]. - #[inline] - pub fn size(&self) -> usize { - self.len() * core::mem::size_of::() - } - - /// Get an iterator over the Withdrawals. - pub fn iter(&self) -> core::slice::Iter<'_, Withdrawal> { - self.0.iter() - } - - /// Get a mutable iterator over the Withdrawals. 
- pub fn iter_mut(&mut self) -> core::slice::IterMut<'_, Withdrawal> { - self.0.iter_mut() - } - - /// Convert [Self] into raw vec of withdrawals. - pub fn into_inner(self) -> Vec { - self.0 - } -} - -impl<'a> IntoIterator for &'a Withdrawals { - type Item = &'a Withdrawal; - type IntoIter = core::slice::Iter<'a, Withdrawal>; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a> IntoIterator for &'a mut Withdrawals { - type Item = &'a mut Withdrawal; - type IntoIter = core::slice::IterMut<'a, Withdrawal>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} +pub type Withdrawals = alloy_eips::eip4895::Withdrawals; #[cfg(test)] mod tests { @@ -89,6 +14,8 @@ mod tests { use alloy_rlp::{RlpDecodable, RlpEncodable}; use proptest::proptest; use proptest_arbitrary_interop::arb; + use reth_codecs::{add_arbitrary_tests, Compact}; + use serde::{Deserialize, Serialize}; /// This type is kept for compatibility tests after the codec support was added to alloy-eips /// Withdrawal type natively diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs index 8aa5671798d..09e80d1faa7 100644 --- a/crates/storage/codecs/src/alloy/withdrawal.rs +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -1,7 +1,8 @@ //! 
Compact implementation for [`AlloyWithdrawal`] use crate::Compact; -use alloy_eips::eip4895::Withdrawal as AlloyWithdrawal; +use alloc::vec::Vec; +use alloy_eips::eip4895::{Withdrawal as AlloyWithdrawal, Withdrawals}; use alloy_primitives::Address; use reth_codecs_derive::add_arbitrary_tests; @@ -53,6 +54,22 @@ impl Compact for AlloyWithdrawal { } } +impl Compact for Withdrawals { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.as_ref().to_compact(buf) + } + + fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { + let (withdrawals, new_buf) = Vec::from_compact(buf, buf.len()); + buf = new_buf; + let alloy_withdrawals = Self::new(withdrawals); + (alloy_withdrawals, buf) + } +} + #[cfg(test)] mod tests { use super::*; @@ -61,12 +78,20 @@ mod tests { proptest! { #[test] - fn roundtrip(withdrawal in arb::()) { + fn roundtrip_withdrawal(withdrawal in arb::()) { let mut compacted_withdrawal = Vec::::new(); let len = withdrawal.to_compact(&mut compacted_withdrawal); let (decoded, _) = AlloyWithdrawal::from_compact(&compacted_withdrawal, len); assert_eq!(withdrawal, decoded) } + + #[test] + fn roundtrip_withdrawals(withdrawals in arb::()) { + let mut compacted_withdrawals = Vec::::new(); + let len = withdrawals.to_compact(&mut compacted_withdrawals); + let (decoded, _) = Withdrawals::from_compact(&compacted_withdrawals, len); + assert_eq!(withdrawals, decoded); + } } // each value in the database has an extra field named flags that encodes metadata about other diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index b077027f297..fc3351b73b6 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -313,7 +313,7 @@ mod tests { fn test_ensure_backwards_compatibility() { use super::*; use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; - use reth_primitives::{Account, Receipt, ReceiptWithBloom, 
Withdrawals}; + use reth_primitives::{Account, Receipt, ReceiptWithBloom}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, @@ -341,7 +341,6 @@ mod tests { assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); validate_bitflag_backwards_compat!(Account, UnusedBits::NotZero); validate_bitflag_backwards_compat!(AccountHashingCheckpoint, UnusedBits::NotZero); @@ -364,6 +363,5 @@ mod tests { validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); } } From 08451ef278dfaa8818738d7d67e653833c52f6b7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 9 Nov 2024 10:05:15 +0100 Subject: [PATCH 390/970] chore: rm unused file (#12420) --- .../{transaction/mod.rs => transaction.rs} | 0 .../src/transaction/signature.rs | 52 ------------------- 2 files changed, 52 deletions(-) rename crates/rpc/rpc-types-compat/src/{transaction/mod.rs => transaction.rs} (100%) delete mode 100644 crates/rpc/rpc-types-compat/src/transaction/signature.rs diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction.rs similarity index 100% rename from crates/rpc/rpc-types-compat/src/transaction/mod.rs rename to crates/rpc/rpc-types-compat/src/transaction.rs diff --git a/crates/rpc/rpc-types-compat/src/transaction/signature.rs b/crates/rpc/rpc-types-compat/src/transaction/signature.rs deleted file mode 100644 index 77ae365b2da..00000000000 --- a/crates/rpc/rpc-types-compat/src/transaction/signature.rs +++ 
/dev/null @@ -1,52 +0,0 @@ -use alloy_primitives::{Signature as PrimitiveSignature, U256}; -use alloy_rpc_types::{Parity, Signature}; -use reth_primitives::{transaction::legacy_parity, TxType}; - -/// Creates a new rpc signature from a legacy [primitive -/// signature](alloy_primitives::Signature), using the give chain id to compute the signature's -/// recovery id. -/// -/// If the chain id is `Some`, the recovery id is computed according to [EIP-155](https://eips.ethereum.org/EIPS/eip-155). -pub fn from_legacy_primitive_signature( - signature: PrimitiveSignature, - chain_id: Option, -) -> Signature { - Signature { - r: signature.r(), - s: signature.s(), - v: U256::from(legacy_parity(&signature, chain_id).to_u64()), - y_parity: None, - } -} - -/// Creates a new rpc signature from a non-legacy [primitive -/// signature](alloy_primitives::Signature). This sets the `v` value to `0` or `1` depending on -/// the signature's `odd_y_parity`. -pub fn from_typed_primitive_signature(signature: PrimitiveSignature) -> Signature { - Signature { - r: signature.r(), - s: signature.s(), - v: U256::from(signature.v().y_parity_byte()), - y_parity: Some(Parity(signature.v().y_parity())), - } -} - -/// Creates a new rpc signature from a legacy [primitive -/// signature](alloy_primitives::Signature). -/// -/// The tx type is used to determine whether or not to use the `chain_id` to compute the -/// signature's recovery id. -/// -/// If the transaction is a legacy transaction, it will use the `chain_id` to compute the -/// signature's recovery id. If the transaction is a typed transaction, it will set the `v` -/// value to `0` or `1` depending on the signature's `odd_y_parity`. 
-pub fn from_primitive_signature( - signature: PrimitiveSignature, - tx_type: TxType, - chain_id: Option, -) -> Signature { - match tx_type { - TxType::Legacy => from_legacy_primitive_signature(signature, chain_id), - _ => from_typed_primitive_signature(signature), - } -} From 430fe0de18547f38786a212f365364c1d4902b77 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 9 Nov 2024 10:20:25 +0100 Subject: [PATCH 391/970] chore(sdk): Add `NodePrimitives::Transaction` and `NodePrimitives::SignedTx` (#12330) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 - crates/ethereum/node/src/node.rs | 3 ++- crates/node/types/Cargo.toml | 1 - crates/node/types/src/lib.rs | 11 ++++++----- crates/optimism/node/src/node.rs | 3 ++- 5 files changed, 10 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 22c5cb8ae50..3433f063ff1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8106,7 +8106,6 @@ dependencies = [ "reth-chainspec", "reth-db-api", "reth-engine-primitives", - "reth-primitives", "reth-primitives-traits", "reth-trie-db", ] diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 95542411d21..e545a3c73c4 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -25,7 +25,7 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Header, Receipt}; +use reth_primitives::{Block, Header, Receipt, TransactionSigned}; use reth_provider::CanonStateSubscriptions; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; @@ -43,6 +43,7 @@ pub struct EthPrimitives; impl NodePrimitives for EthPrimitives { type Block = Block; + type SignedTx = TransactionSigned; type Receipt = Receipt; } diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml index 21facae5460..cc33aac30ff 100644 --- a/crates/node/types/Cargo.toml +++ 
b/crates/node/types/Cargo.toml @@ -15,6 +15,5 @@ workspace = true reth-chainspec.workspace = true reth-db-api.workspace = true reth-engine-primitives.workspace = true -reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-trie-db.workspace = true diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 6c3ed9ca46e..afb650ada2b 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -8,28 +8,29 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use reth_primitives_traits::{Block, BlockBody}; - -use std::marker::PhantomData; - use reth_chainspec::EthChainSpec; use reth_db_api::{ database_metrics::{DatabaseMetadata, DatabaseMetrics}, Database, }; use reth_engine_primitives::EngineTypes; +pub use reth_primitives_traits::{Block, BlockBody}; use reth_trie_db::StateCommitment; +use std::marker::PhantomData; /// Configures all the primitive types of the node. pub trait NodePrimitives { /// Block primitive. type Block; + /// Signed version of the transaction type. + type SignedTx; /// A receipt. 
type Receipt; } impl NodePrimitives for () { - type Block = reth_primitives::Block; + type Block = (); + type SignedTx = (); type Receipt = (); } diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index a09dfbaa562..c375c36a87d 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -24,7 +24,7 @@ use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Header, Receipt}; +use reth_primitives::{Block, Header, Receipt, TransactionSigned}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -46,6 +46,7 @@ pub struct OpPrimitives; impl NodePrimitives for OpPrimitives { type Block = Block; + type SignedTx = TransactionSigned; type Receipt = Receipt; } From a299f501ce3ca8a7dfc13a9845a6d701e2a05e62 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 9 Nov 2024 10:53:33 +0100 Subject: [PATCH 392/970] chore(sdk): payload builder AT on `NodeComponents` and `FullNodeComponents` (#11529) Co-authored-by: Matthias Seitz --- Cargo.lock | 2 -- crates/exex/exex/Cargo.toml | 1 - crates/exex/exex/src/context.rs | 13 +++------- crates/node/api/Cargo.toml | 1 - crates/node/api/src/node.rs | 15 ++++++----- crates/node/builder/src/builder/states.rs | 25 ++++++++++--------- crates/node/builder/src/components/builder.rs | 19 ++++++++------ crates/node/builder/src/components/mod.rs | 13 +++++----- crates/node/builder/src/rpc.rs | 11 ++++---- crates/optimism/node/src/node.rs | 12 ++++++--- 10 files changed, 57 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3433f063ff1..98fe61e94c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7540,7 +7540,6 @@ dependencies = [ "reth-metrics", "reth-node-api", "reth-node-core", - 
"reth-payload-builder", "reth-primitives", "reth-primitives-traits", "reth-provider", @@ -7885,7 +7884,6 @@ dependencies = [ "reth-network-api", "reth-node-core", "reth-node-types", - "reth-payload-builder", "reth-payload-primitives", "reth-primitives", "reth-provider", diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 903e11e784e..f7ab4fce5df 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -25,7 +25,6 @@ reth-fs-util.workspace = true reth-metrics.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-payload-builder.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } reth-primitives-traits.workspace = true reth-provider.workspace = true diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 23d772b738a..4e0d9f5956c 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -1,14 +1,12 @@ -use std::fmt::Debug; - +use crate::{ExExContextDyn, ExExEvent, ExExNotifications, ExExNotificationsStream}; use reth_exex_types::ExExHead; -use reth_node_api::{FullNodeComponents, NodeTypes, NodeTypesWithEngine}; +use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_node_core::node_config::NodeConfig; use reth_primitives::Head; use reth_tasks::TaskExecutor; +use std::fmt::Debug; use tokio::sync::mpsc::UnboundedSender; -use crate::{ExExContextDyn, ExExEvent, ExExNotifications, ExExNotificationsStream}; - /// Captures the context that an `ExEx` has access to. pub struct ExExContext { /// The current head of the blockchain at launch. @@ -97,10 +95,7 @@ where } /// Returns the handle to the payload builder service. 
- pub fn payload_builder( - &self, - ) -> &reth_payload_builder::PayloadBuilderHandle<::Engine> - { + pub fn payload_builder(&self) -> &Node::PayloadBuilder { self.components.payload_builder() } diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index 6b263d6c532..b2bf001862e 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -18,7 +18,6 @@ reth-evm.workspace = true reth-provider.workspace = true reth-engine-primitives.workspace = true reth-transaction-pool.workspace = true -reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-network-api.workspace = true diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index b016e01c295..253145ea9eb 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,7 +1,6 @@ //! Traits for configuring a node. -use std::{future::Future, marker::PhantomData}; - +use crate::ConfigureEvm; use alloy_rpc_types_engine::JwtSecret; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_consensus::Consensus; @@ -9,13 +8,12 @@ use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; -use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_primitives::PayloadBuilder; use reth_primitives::Header; use reth_provider::FullProvider; use reth_tasks::TaskExecutor; use reth_transaction_pool::TransactionPool; - -use crate::ConfigureEvm; +use std::{future::Future, marker::PhantomData}; /// A helper trait that is downstream of the [`NodeTypesWithEngine`] trait and adds stateful /// components to the node. @@ -63,6 +61,9 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// Network API. type Network: FullNetwork; + /// Builds new blocks. + type PayloadBuilder: PayloadBuilder + Clone; + /// Returns the transaction pool of the node. 
fn pool(&self) -> &Self::Pool; @@ -79,9 +80,7 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { fn network(&self) -> &Self::Network; /// Returns the handle to the payload builder service. - fn payload_builder( - &self, - ) -> &PayloadBuilderHandle<::Engine>; + fn payload_builder(&self) -> &Self::PayloadBuilder; /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index ca5a57d0db6..16b7d668ca3 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -5,16 +5,6 @@ //! The node builder process is essentially a state machine that transitions through various states //! before the node can be launched. -use std::{fmt, future::Future}; - -use reth_exex::ExExContext; -use reth_node_api::{ - FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, -}; -use reth_node_core::node_config::NodeConfig; -use reth_payload_builder::PayloadBuilderHandle; -use reth_tasks::TaskExecutor; - use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, @@ -22,6 +12,13 @@ use crate::{ rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, AddOns, FullNode, }; +use reth_exex::ExExContext; +use reth_node_api::{ + FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB, PayloadBuilder, +}; +use reth_node_core::node_config::NodeConfig; +use reth_tasks::TaskExecutor; +use std::{fmt, future::Future}; /// A node builder that also has the configured types. 
pub struct NodeBuilderWithTypes { @@ -91,12 +88,16 @@ impl> FullNodeTypes for NodeAdapter type Provider = T::Provider; } -impl> FullNodeComponents for NodeAdapter { +impl> FullNodeComponents for NodeAdapter +where + C::PayloadBuilder: PayloadBuilder, +{ type Pool = C::Pool; type Evm = C::Evm; type Executor = C::Executor; type Consensus = C::Consensus; type Network = C::Network; + type PayloadBuilder = C::PayloadBuilder; fn pool(&self) -> &Self::Pool { self.components.pool() @@ -118,7 +119,7 @@ impl> FullNodeComponents for NodeAdapter< self.components.network() } - fn payload_builder(&self) -> &PayloadBuilderHandle<::Engine> { + fn payload_builder(&self) -> &Self::PayloadBuilder { self.components.payload_builder() } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 48a0ba9b5fd..41ce36858d8 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -1,12 +1,5 @@ //! A generic [`NodeComponentsBuilder`] -use std::{future::Future, marker::PhantomData}; - -use reth_consensus::Consensus; -use reth_evm::execute::BlockExecutorProvider; -use reth_primitives::Header; -use reth_transaction_pool::TransactionPool; - use crate::{ components::{ Components, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, NodeComponents, @@ -14,6 +7,13 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; +use reth_consensus::Consensus; +use reth_evm::execute::BlockExecutorProvider; +use reth_node_api::NodeTypesWithEngine; +use reth_payload_builder::PayloadBuilderHandle; +use reth_primitives::Header; +use reth_transaction_pool::TransactionPool; +use std::{future::Future, marker::PhantomData}; /// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. /// @@ -358,7 +358,10 @@ impl Default for ComponentsBuilder<(), (), (), (), (), ()> { /// A type that's responsible for building the components of the node. 
pub trait NodeComponentsBuilder: Send { /// The components for the node with the given types - type Components: NodeComponents; + type Components: NodeComponents< + Node, + PayloadBuilder = PayloadBuilderHandle<::Engine>, + >; /// Consumes the type and returns the created components. fn build_components( diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 42001fc1005..29b667d5409 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -21,6 +21,7 @@ pub use network::*; pub use payload::*; pub use pool::*; +use crate::{ConfigureEvm, FullNodeTypes}; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; @@ -30,8 +31,6 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::Header; use reth_transaction_pool::TransactionPool; -use crate::{ConfigureEvm, FullNodeTypes}; - /// An abstraction over the components of a node, consisting of: /// - evm and executor /// - transaction pool @@ -53,6 +52,9 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati /// Network API. type Network: FullNetwork; + /// Builds new blocks. + type PayloadBuilder: Clone; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -69,7 +71,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati fn network(&self) -> &Self::Network; /// Returns the handle to the payload builder service. - fn payload_builder(&self) -> &PayloadBuilderHandle<::Engine>; + fn payload_builder(&self) -> &Self::PayloadBuilder; } /// All the components of the node. 
@@ -105,6 +107,7 @@ where type Executor = Executor; type Consensus = Cons; type Network = NetworkHandle; + type PayloadBuilder = PayloadBuilderHandle<::Engine>; fn pool(&self) -> &Self::Pool { &self.transaction_pool @@ -126,9 +129,7 @@ where &self.network } - fn payload_builder( - &self, - ) -> &PayloadBuilderHandle<::Engine> { + fn payload_builder(&self) -> &Self::PayloadBuilder { &self.payload_builder } } diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 8af1527cbcd..4530bbe7014 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -16,7 +16,7 @@ use reth_node_core::{ node_config::NodeConfig, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; -use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_builder::PayloadStore; use reth_provider::providers::ProviderNodeTypes; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, @@ -294,9 +294,7 @@ where } /// Returns the handle to the payload builder service - pub fn payload_builder( - &self, - ) -> &PayloadBuilderHandle<::Engine> { + pub fn payload_builder(&self) -> &Node::PayloadBuilder { self.node.payload_builder() } } @@ -402,7 +400,10 @@ where impl NodeAddOns for RpcAddOns where - N: FullNodeComponents, + N: FullNodeComponents< + Types: ProviderNodeTypes, + PayloadBuilder: Into::Engine>>, + >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, { diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index c375c36a87d..6edab570e29 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -23,7 +23,7 @@ use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_rpc::OpEthApi; -use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; +use 
reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService, PayloadStore}; use reth_primitives::{Block, Header, Receipt, TransactionSigned}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; @@ -149,7 +149,10 @@ impl OpAddOns { impl NodeAddOns for OpAddOns where - N: FullNodeComponents>, + N: FullNodeComponents< + Types: NodeTypes, + PayloadBuilder: Into::Engine>>, + >, OpEngineValidator: EngineValidator<::Engine>, { type Handle = RpcHandle>; @@ -164,7 +167,10 @@ where impl RethRpcAddOns for OpAddOns where - N: FullNodeComponents>, + N: FullNodeComponents< + Types: NodeTypes, + PayloadBuilder: Into::Engine>>, + >, OpEngineValidator: EngineValidator<::Engine>, { type EthApi = OpEthApi; From ae257f5685b07451c836aed10f2ceeb0e69619a4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 9 Nov 2024 13:51:45 +0100 Subject: [PATCH 393/970] chore: restrict payload builder error type (#12423) --- crates/payload/primitives/src/traits.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 7ae558b9945..a77f516da8d 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,4 +1,4 @@ -use crate::{PayloadEvents, PayloadKind, PayloadTypes}; +use crate::{PayloadBuilderError, PayloadEvents, PayloadKind, PayloadTypes}; use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; @@ -12,7 +12,7 @@ pub trait PayloadBuilder: Send + Unpin { /// The Payload type for the builder. type PayloadType: PayloadTypes; /// The error type returned by the builder. - type Error; + type Error: Into; /// Sends a message to the service to start building a new payload for the given payload. 
/// From 59ebebaa6332982b970c7544803bb3bf5390787e Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 9 Nov 2024 14:09:46 +0100 Subject: [PATCH 394/970] primitives: rm alloy `Withdrawals` reexport (#12421) Co-authored-by: Matthias Seitz --- Cargo.lock | 3 +++ crates/blockchain-tree/src/blockchain_tree.rs | 3 +-- crates/consensus/common/src/validation.rs | 9 +++++---- .../ethereum/engine-primitives/src/payload.rs | 4 ++-- crates/optimism/payload/src/payload.rs | 4 ++-- crates/payload/basic/src/lib.rs | 4 ++-- crates/payload/basic/src/stack.rs | 3 ++- crates/payload/primitives/src/traits.rs | 7 +++++-- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives-traits/src/withdrawal.rs | 3 --- crates/primitives/src/block.rs | 9 ++++----- crates/primitives/src/lib.rs | 2 +- crates/rpc/rpc-engine-api/tests/it/payload.rs | 3 ++- crates/rpc/rpc-types-compat/src/block.rs | 6 +++--- .../rpc/rpc-types-compat/src/engine/payload.rs | 3 ++- crates/storage/db-models/Cargo.toml | 18 ++++++++++-------- crates/storage/db-models/src/blocks.rs | 2 +- .../src/providers/blockchain_provider.rs | 12 ++++++------ .../provider/src/providers/consistent.rs | 4 ++-- .../provider/src/providers/database/mod.rs | 7 +++++-- .../src/providers/database/provider.rs | 6 ++++-- crates/storage/provider/src/providers/mod.rs | 7 +++++-- .../src/providers/static_file/manager.rs | 7 +++++-- .../storage/provider/src/test_utils/blocks.rs | 4 ++-- crates/storage/provider/src/test_utils/mock.rs | 7 +++++-- crates/storage/provider/src/test_utils/noop.rs | 7 +++++-- crates/storage/storage-api/src/withdrawals.rs | 6 ++++-- examples/custom-engine-types/Cargo.toml | 1 + examples/custom-engine-types/src/main.rs | 2 +- testing/ef-tests/Cargo.toml | 13 +++++++++---- testing/ef-tests/src/models.rs | 3 ++- testing/testing-utils/src/generators.rs | 4 ++-- 32 files changed, 104 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
98fe61e94c7..f81a026bd5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2611,6 +2611,7 @@ dependencies = [ name = "ef-tests" version = "1.1.1" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rlp", "rayon", @@ -2852,6 +2853,7 @@ dependencies = [ name = "example-custom-engine-types" version = "0.0.0" dependencies = [ + "alloy-eips", "alloy-genesis", "alloy-primitives", "alloy-rpc-types", @@ -6915,6 +6917,7 @@ dependencies = [ name = "reth-db-models" version = "1.1.1" dependencies = [ + "alloy-eips", "alloy-primitives", "arbitrary", "bytes", diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 64705e5ccb5..20d1cfe9f1d 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1375,7 +1375,7 @@ where mod tests { use super::*; use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; - use alloy_eips::eip1559::INITIAL_BASE_FEE; + use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip4895::Withdrawals}; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, Sealable, B256}; use assert_matches::assert_matches; @@ -1390,7 +1390,6 @@ mod tests { proofs::{calculate_receipt_root, calculate_transaction_root}, revm_primitives::AccountInfo, Account, BlockBody, Header, Transaction, TransactionSigned, TransactionSignedEcRecovered, - Withdrawals, }; use reth_provider::{ test_utils::{ diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 46a9e4d1572..5a74433e58b 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -275,7 +275,10 @@ pub fn validate_against_parent_4844( mod tests { use super::*; use alloy_consensus::{TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; - use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; + use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, + 
}; use alloy_primitives::{ hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, PrimitiveSignature as Signature, Sealable, U256, @@ -283,9 +286,7 @@ mod tests { use mockall::mock; use rand::Rng; use reth_chainspec::ChainSpecBuilder; - use reth_primitives::{ - proofs, Account, BlockBody, Transaction, TransactionSigned, Withdrawals, - }; + use reth_primitives::{proofs, Account, BlockBody, Transaction, TransactionSigned}; use reth_storage_api::{ errors::provider::ProviderResult, AccountReader, HeaderProvider, WithdrawalsProvider, }; diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 3fe4e76e63f..094a1df2657 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -1,6 +1,6 @@ //! Contains types required for building a payload. -use alloy_eips::{eip4844::BlobTransactionSidecar, eip7685::Requests}; +use alloy_eips::{eip4844::BlobTransactionSidecar, eip4895::Withdrawals, eip7685::Requests}; use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ @@ -9,7 +9,7 @@ use alloy_rpc_types_engine::{ }; use reth_chain_state::ExecutedBlock; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives::{SealedBlock, Withdrawals}; +use reth_primitives::SealedBlock; use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 37224716c75..36f11ee628b 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -2,7 +2,7 @@ use alloy_eips::{ eip1559::BaseFeeParams, eip2718::Decodable2718, eip4844::BlobTransactionSidecar, - eip7685::Requests, + eip4895::Withdrawals, eip7685::Requests, }; use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256}; use alloy_rlp::Encodable; 
@@ -16,7 +16,7 @@ use reth_chainspec::EthereumHardforks; use reth_optimism_chainspec::OpChainSpec; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives::{transaction::WithEncoded, SealedBlock, TransactionSigned, Withdrawals}; +use reth_primitives::{transaction::WithEncoded, SealedBlock, TransactionSigned}; use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 0fc63a5a149..6f2038ba4b4 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -10,7 +10,7 @@ use crate::metrics::PayloadBuilderMetrics; use alloy_consensus::constants::EMPTY_WITHDRAWALS; -use alloy_eips::merge::SLOT_DURATION; +use alloy_eips::{eip4895::Withdrawals, merge::SLOT_DURATION}; use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; @@ -20,7 +20,7 @@ use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJo use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; -use reth_primitives::{constants::RETH_CLIENT_VERSION, proofs, SealedHeader, Withdrawals}; +use reth_primitives::{constants::RETH_CLIENT_VERSION, proofs, SealedHeader}; use reth_provider::{BlockReaderIdExt, CanonStateNotification, StateProviderFactory}; use reth_revm::cached::CachedReads; use reth_tasks::TaskSpawner; diff --git a/crates/payload/basic/src/stack.rs b/crates/payload/basic/src/stack.rs index 722399ab278..45a3f3b4244 100644 --- a/crates/payload/basic/src/stack.rs +++ b/crates/payload/basic/src/stack.rs @@ -3,10 +3,11 @@ use crate::{ PayloadConfig, }; +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{Address, B256, U256}; use reth_payload_builder::PayloadId; use reth_payload_primitives::BuiltPayload; -use 
reth_primitives::{SealedBlock, Withdrawals}; +use reth_primitives::SealedBlock; use alloy_eips::eip7685::Requests; use std::{error::Error, fmt}; diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index a77f516da8d..160808854a9 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,9 +1,12 @@ use crate::{PayloadBuilderError, PayloadEvents, PayloadKind, PayloadTypes}; -use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + eip7685::Requests, +}; use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use reth_chain_state::ExecutedBlock; -use reth_primitives::{SealedBlock, Withdrawals}; +use reth_primitives::SealedBlock; use tokio::sync::oneshot; /// A type that can request, subscribe to and resolve payloads. diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 9f27726aeca..ec93f2a2163 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -37,7 +37,7 @@ pub mod block; pub use block::{body::BlockBody, Block, FullBlock}; mod withdrawal; -pub use withdrawal::{Withdrawal, Withdrawals}; +pub use withdrawal::Withdrawal; mod error; pub use error::{GotExpected, GotExpectedBoxed}; diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 9c6d8b69797..699229684ec 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -4,9 +4,6 @@ #[doc(inline)] pub use alloy_eips::eip4895::Withdrawal; -/// Represents a collection of Withdrawals. 
-pub type Withdrawals = alloy_eips::eip4895::Withdrawals; - #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 5f32728489c..54bcb27293c 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,8 +1,6 @@ -use crate::{ - GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, -}; +use crate::{GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered}; use alloc::vec::Vec; -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; use alloy_primitives::{Address, Bytes, Sealable, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; @@ -655,8 +653,9 @@ impl<'a> arbitrary::Arbitrary<'a> for BlockBody { pub(super) mod serde_bincode_compat { use alloc::{borrow::Cow, vec::Vec}; use alloy_consensus::serde_bincode_compat::Header; + use alloy_eips::eip4895::Withdrawals; use alloy_primitives::Address; - use reth_primitives_traits::{serde_bincode_compat::SealedHeader, Withdrawals}; + use reth_primitives_traits::serde_bincode_compat::SealedHeader; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index c16c4d3f42f..f44e1ee6a09 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -42,7 +42,7 @@ pub use receipt::{ }; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, - LogData, SealedHeader, StorageEntry, Withdrawals, + LogData, SealedHeader, StorageEntry, }; pub use static_file::StaticFileSegment; diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index febbc291e35..f341fd0474c 100644 --- 
a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -1,5 +1,6 @@ //! Some payload tests +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{Bytes, Sealable, U256}; use alloy_rlp::{Decodable, Error as RlpError}; use alloy_rpc_types_engine::{ @@ -7,7 +8,7 @@ use alloy_rpc_types_engine::{ PayloadError, }; use assert_matches::assert_matches; -use reth_primitives::{proofs, Block, SealedBlock, SealedHeader, TransactionSigned, Withdrawals}; +use reth_primitives::{proofs, Block, SealedBlock, SealedHeader, TransactionSigned}; use reth_rpc_types_compat::engine::payload::{ block_to_payload, block_to_payload_v1, convert_to_payload_body_v1, try_into_sealed_block, try_payload_v1_to_block, diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index cfa1561c634..41bd057dfd6 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -1,14 +1,14 @@ //! Compatibility functions for rpc `Block` type. 
+use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; use alloy_consensus::Sealed; +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_eth::{ Block, BlockError, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; -use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders, Withdrawals}; - -use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; +use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders}; /// Converts the given primitive block into a [`Block`] response with the given /// [`BlockTransactionsKind`] diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index b4c45a61781..9050b0cced1 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -4,6 +4,7 @@ use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ eip2718::{Decodable2718, Encodable2718}, + eip4895::Withdrawals, eip7685::Requests, }; use alloy_primitives::{B256, U256}; @@ -14,7 +15,7 @@ use alloy_rpc_types_engine::{ }; use reth_primitives::{ proofs::{self}, - Block, BlockBody, Header, SealedBlock, TransactionSigned, Withdrawals, + Block, BlockBody, Header, SealedBlock, TransactionSigned, }; /// Converts [`ExecutionPayloadV1`] to [`Block`] diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 44b291959ba..59d95c2263d 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -18,6 +18,7 @@ reth-primitives-traits.workspace = true # ethereum alloy-primitives.workspace = true +alloy-eips.workspace = true # codecs modular-bitfield.workspace = true @@ -42,14 +43,15 @@ test-fuzz.workspace = true [features] test-utils = [ - "reth-primitives-traits/test-utils", - "arbitrary", - "reth-codecs/test-utils" + 
"reth-primitives-traits/test-utils", + "arbitrary", + "reth-codecs/test-utils", ] arbitrary = [ - "reth-primitives-traits/arbitrary", - "dep:arbitrary", - "dep:proptest", - "alloy-primitives/arbitrary", - "reth-codecs/arbitrary" + "reth-primitives-traits/arbitrary", + "dep:arbitrary", + "dep:proptest", + "alloy-primitives/arbitrary", + "alloy-eips/arbitrary", + "reth-codecs/arbitrary", ] diff --git a/crates/storage/db-models/src/blocks.rs b/crates/storage/db-models/src/blocks.rs index b4399dc1e27..ed1d7fb6772 100644 --- a/crates/storage/db-models/src/blocks.rs +++ b/crates/storage/db-models/src/blocks.rs @@ -1,8 +1,8 @@ use std::ops::Range; +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::TxNumber; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives_traits::Withdrawals; use serde::{Deserialize, Serialize}; /// Total number of transactions. diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 669a3555931..dbfb4f7b872 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -9,7 +9,10 @@ use crate::{ StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, +}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ @@ -25,7 +28,6 @@ use reth_node_types::NodeTypesWithDB; use reth_primitives::{ Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, 
TransactionSigned, TransactionSignedNoHash, - Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -768,7 +770,7 @@ mod tests { BlockWriter, CanonChainTracker, ProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; - use alloy_eips::{BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; + use alloy_eips::{eip4895::Withdrawals, BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use itertools::Itertools; use rand::Rng; @@ -786,9 +788,7 @@ mod tests { use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives::{ - Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, Withdrawals, - }; + use reth_primitives::{Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, ReceiptProvider, diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 786a2f2b108..98f7820e34a 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -7,7 +7,8 @@ use crate::{ TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::{ - eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, }; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; @@ -19,7 +20,6 @@ use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; use 
reth_primitives::{ Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, - Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 38918f52c23..bb532329ee3 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -7,7 +7,10 @@ use crate::{ PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, +}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use core::fmt; use reth_chainspec::{ChainInfo, EthereumHardforks}; @@ -18,7 +21,7 @@ use reth_evm::ConfigureEvmEnv; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawals, + StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index c76f77572ab..9ba20306f37 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -15,7 +15,10 @@ use crate::{ StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, 
TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; -use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, +}; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use itertools::{izip, Itertools}; use rayon::slice::ParallelSliceMut; @@ -42,7 +45,6 @@ use reth_primitives::{ Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, - Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 5e95c6ce0db..c859ddba8a5 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -7,7 +7,10 @@ use crate::{ StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, }; -use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, +}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, @@ -21,7 +24,7 @@ use reth_evm::ConfigureEvmEnv; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawals, + SealedHeader, 
TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 66914b00abc..cb270a6da46 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -7,7 +7,10 @@ use crate::{ ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; -use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, +}; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use dashmap::DashMap; use notify::{RecommendedWatcher, RecursiveMode, Watcher}; @@ -31,7 +34,7 @@ use reth_primitives::{ DEFAULT_BLOCKS_PER_STATIC_FILE, }, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawals, + StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_stages_types::{PipelineTarget, StageId}; use reth_storage_api::DBProvider; diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 2c9c108139c..19a6cbf6a5c 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -6,14 +6,14 @@ use alloy_primitives::{ U256, }; -use alloy_eips::eip4895::Withdrawal; +use alloy_eips::eip4895::{Withdrawal, Withdrawals}; use alloy_primitives::PrimitiveSignature as Signature; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_node_types::NodeTypes; use 
reth_primitives::{ Account, BlockBody, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - Transaction, TransactionSigned, TxType, Withdrawals, + Transaction, TransactionSigned, TxType, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; use revm::{db::BundleState, primitives::AccountInfo}; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index b5593b9040d..6e4331566db 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -6,7 +6,10 @@ use crate::{ StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_consensus::constants::EMPTY_ROOT_HASH; -use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumberOrTag}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumberOrTag, +}; use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, @@ -23,7 +26,7 @@ use reth_node_types::NodeTypes; use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawals, + TransactionSignedNoHash, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 65c08306239..7c3848b4a53 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -4,7 +4,10 @@ use std::{ sync::Arc, }; -use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber, BlockId, BlockNumberOrTag}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumberOrTag, +}; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, 
TxNumber, B256, U256, @@ -20,7 +23,7 @@ use reth_evm::ConfigureEvmEnv; use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawals, + TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/storage-api/src/withdrawals.rs b/crates/storage/storage-api/src/withdrawals.rs index ba422a3b33b..47aa4944410 100644 --- a/crates/storage/storage-api/src/withdrawals.rs +++ b/crates/storage/storage-api/src/withdrawals.rs @@ -1,5 +1,7 @@ -use alloy_eips::{eip4895::Withdrawal, BlockHashOrNumber}; -use reth_primitives::Withdrawals; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, +}; use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching [Withdrawal] related data. diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index 1fbb3c4947a..9afd16bea16 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -20,6 +20,7 @@ reth-trie-db.workspace = true alloy-genesis.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } alloy-primitives.workspace = true +alloy-eips.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 896a4b55f6b..704ecb7e3c4 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -22,6 +22,7 @@ use std::{convert::Infallible, sync::Arc}; use serde::{Deserialize, Serialize}; use thiserror::Error; +use alloy_eips::eip4895::Withdrawals; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; use alloy_rpc_types::{ @@ -68,7 +69,6 @@ use reth_payload_builder::{ EthBuiltPayload, 
EthPayloadBuilderAttributes, PayloadBuilderError, PayloadBuilderHandle, PayloadBuilderService, }; -use reth_primitives::Withdrawals; use reth_tracing::{RethTracer, Tracer}; use reth_trie_db::MerklePatriciaTrie; diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index a56c44ec3db..de46f62675c 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -14,15 +14,19 @@ workspace = true [features] ef-tests = [] asm-keccak = [ - "reth-primitives/asm-keccak", - "alloy-primitives/asm-keccak", - "revm/asm-keccak" + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak", ] [dependencies] reth-chainspec.workspace = true reth-primitives.workspace = true -reth-db = { workspace = true, features = ["mdbx", "test-utils", "disable-lock"] } +reth-db = { workspace = true, features = [ + "mdbx", + "test-utils", + "disable-lock", +] } reth-db-api.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-stages.workspace = true @@ -33,6 +37,7 @@ revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } alloy-rlp.workspace = true alloy-primitives.workspace = true +alloy-eips.workspace = true walkdir = "2.3.3" serde.workspace = true diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index b5dc073c1da..2b6b3baa81e 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -1,6 +1,7 @@ //! 
Shared models for use crate::{assert::assert_equal, Error}; +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{keccak256, Address, Bloom, Bytes, B256, B64, U256}; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_db::tables; @@ -9,7 +10,7 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, }; use reth_primitives::{ - Account as RethAccount, Bytecode, Header as RethHeader, SealedHeader, StorageEntry, Withdrawals, + Account as RethAccount, Bytecode, Header as RethHeader, SealedHeader, StorageEntry, }; use serde::Deserialize; use std::{collections::BTreeMap, ops::Deref}; diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 57c9acedfea..c24840a2633 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,7 +1,7 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. use alloy_consensus::{Transaction as _, TxLegacy}; -use alloy_eips::eip4895::Withdrawal; +use alloy_eips::eip4895::{Withdrawal, Withdrawals}; use alloy_primitives::{Address, BlockNumber, Bytes, Sealable, TxKind, B256, U256}; pub use rand::Rng; use rand::{ @@ -9,7 +9,7 @@ use rand::{ }; use reth_primitives::{ proofs, sign_message, Account, BlockBody, Header, Log, Receipt, SealedBlock, SealedHeader, - StorageEntry, Transaction, TransactionSigned, Withdrawals, + StorageEntry, Transaction, TransactionSigned, }; use secp256k1::{Keypair, Secp256k1}; use std::{ From 1b1f0f3ef863b8f9ee19e35ba462c409711f62ed Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 9 Nov 2024 17:58:31 +0100 Subject: [PATCH 395/970] chore: add Sync to PayloadBuilder (#12425) --- crates/payload/primitives/src/traits.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 160808854a9..d66b0add459 100644 --- a/crates/payload/primitives/src/traits.rs +++ 
b/crates/payload/primitives/src/traits.rs @@ -11,7 +11,7 @@ use tokio::sync::oneshot; /// A type that can request, subscribe to and resolve payloads. #[async_trait::async_trait] -pub trait PayloadBuilder: Send + Unpin { +pub trait PayloadBuilder: Send + Sync + Unpin { /// The Payload type for the builder. type PayloadType: PayloadTypes; /// The error type returned by the builder. From 7eaa0a8f9f63424eb095ffca11060948ad273c79 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 9 Nov 2024 18:27:18 +0100 Subject: [PATCH 396/970] chore: add PayloadBuilder to RpcNodeCore (#12428) --- crates/optimism/rpc/src/eth/mod.rs | 6 ++++++ crates/rpc/rpc-eth-api/src/node.rs | 12 ++++++++++++ crates/rpc/rpc/src/eth/core.rs | 5 +++++ 3 files changed, 23 insertions(+) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index dc6e8e59fa6..624602bba38 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -121,6 +121,7 @@ where type Pool = N::Pool; type Evm = ::Evm; type Network = ::Network; + type PayloadBuilder = (); #[inline] fn pool(&self) -> &Self::Pool { @@ -137,6 +138,11 @@ where self.inner.network() } + #[inline] + fn payload_builder(&self) -> &Self::PayloadBuilder { + &() + } + #[inline] fn provider(&self) -> &Self::Provider { self.inner.provider() diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs index 4ae79c08341..12dbe8f6664 100644 --- a/crates/rpc/rpc-eth-api/src/node.rs +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -19,6 +19,9 @@ pub trait RpcNodeCore: Clone + Send + Sync { /// Network API. type Network: Send + Sync + Clone; + /// Builds new blocks. + type PayloadBuilder: Send + Sync + Clone; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -28,6 +31,9 @@ pub trait RpcNodeCore: Clone + Send + Sync { /// Returns the handle to the network fn network(&self) -> &Self::Network; + /// Returns the handle to the payload builder service. 
+ fn payload_builder(&self) -> &Self::PayloadBuilder; + /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; } @@ -40,6 +46,7 @@ where type Pool = T::Pool; type Evm = ::Evm; type Network = ::Network; + type PayloadBuilder = ::PayloadBuilder; #[inline] fn pool(&self) -> &Self::Pool { @@ -56,6 +63,11 @@ where FullNodeComponents::network(self) } + #[inline] + fn payload_builder(&self) -> &Self::PayloadBuilder { + FullNodeComponents::payload_builder(self) + } + #[inline] fn provider(&self) -> &Self::Provider { FullNodeComponents::provider(self) diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index c491ca21dfb..f945aa446b5 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -151,6 +151,7 @@ where type Pool = Pool; type Evm = EvmConfig; type Network = Network; + type PayloadBuilder = (); fn pool(&self) -> &Self::Pool { self.inner.pool() @@ -164,6 +165,10 @@ where self.inner.network() } + fn payload_builder(&self) -> &Self::PayloadBuilder { + &() + } + fn provider(&self) -> &Self::Provider { self.inner.provider() } From c1b4fd84c5e7a2ae39037760c953af9ba4eeb90a Mon Sep 17 00:00:00 2001 From: ftupas <35031356+ftupas@users.noreply.github.com> Date: Sun, 10 Nov 2024 13:25:55 +0100 Subject: [PATCH 397/970] refactor: remove trait bound on `TxType` (#12379) --- Cargo.lock | 37 +++++ Cargo.toml | 1 + crates/primitives-traits/src/tx_type.rs | 1 - crates/primitives/Cargo.toml | 1 + crates/primitives/src/transaction/tx_type.rs | 166 +++++++------------ 5 files changed, 95 insertions(+), 111 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f81a026bd5c..0434d038411 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6271,6 +6271,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "relative-path" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" + [[package]] name = "reqwest" version = 
"0.12.9" @@ -8462,6 +8468,7 @@ dependencies = [ "reth-testing-utils", "reth-trie-common", "revm-primitives", + "rstest", "secp256k1", "serde", "serde_json", @@ -9530,6 +9537,36 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" +[[package]] +name = "rstest" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a2c585be59b6b5dd66a9d2084aa1d8bd52fbdb806eafdeffb52791147862035" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version 0.4.1", +] + +[[package]] +name = "rstest_macros" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "825ea780781b15345a146be27eaefb05085e337e869bff01b4306a4fd4a9ad5a" +dependencies = [ + "cfg-if", + "glob", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version 0.4.1", + "syn 2.0.87", + "unicode-ident", +] + [[package]] name = "ruint" version = "1.12.3" diff --git a/Cargo.toml b/Cargo.toml index 31d2ebb8d4c..7608756b12a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -596,6 +596,7 @@ serial_test = { default-features = false, version = "3" } similar-asserts = { version = "1.5.0", features = ["serde"] } tempfile = "3.8" test-fuzz = "6" +rstest = "0.23.0" tikv-jemalloc-ctl = "0.6" tikv-jemallocator = "0.6" diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs index aebf7584fe9..058f02a7ee6 100644 --- a/crates/primitives-traits/src/tx_type.rs +++ b/crates/primitives-traits/src/tx_type.rs @@ -13,7 +13,6 @@ pub trait TxType: + TryFrom + TryFrom + TryFrom - + From + Debug + Display + Clone diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 04d96aa369a..5bef33e15ef 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -79,6 +79,7 @@ proptest.workspace = true rand.workspace = true 
serde_json.workspace = true test-fuzz.workspace = true +rstest.workspace = true criterion.workspace = true pprof = { workspace = true, features = [ diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index eff1c17a71a..0cfb2ff9d67 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -217,128 +217,74 @@ impl Decodable for TxType { } } -impl From for TxType { - fn from(value: alloy_consensus::TxType) -> Self { - match value { - alloy_consensus::TxType::Legacy => Self::Legacy, - alloy_consensus::TxType::Eip2930 => Self::Eip2930, - alloy_consensus::TxType::Eip1559 => Self::Eip1559, - alloy_consensus::TxType::Eip4844 => Self::Eip4844, - alloy_consensus::TxType::Eip7702 => Self::Eip7702, - } - } -} - #[cfg(test)] mod tests { use alloy_primitives::hex; - use rand::Rng; use reth_codecs::Compact; + use rstest::rstest; use super::*; - #[test] - fn test_u64_to_tx_type() { - // Test for Legacy transaction - assert_eq!(TxType::try_from(U64::from(LEGACY_TX_TYPE_ID)).unwrap(), TxType::Legacy); - - // Test for EIP2930 transaction - assert_eq!(TxType::try_from(U64::from(EIP2930_TX_TYPE_ID)).unwrap(), TxType::Eip2930); - - // Test for EIP1559 transaction - assert_eq!(TxType::try_from(U64::from(EIP1559_TX_TYPE_ID)).unwrap(), TxType::Eip1559); - - // Test for EIP4844 transaction - assert_eq!(TxType::try_from(U64::from(EIP4844_TX_TYPE_ID)).unwrap(), TxType::Eip4844); - - // Test for EIP7702 transaction - assert_eq!(TxType::try_from(U64::from(EIP7702_TX_TYPE_ID)).unwrap(), TxType::Eip7702); - - // Test for Deposit transaction - #[cfg(feature = "optimism")] - assert_eq!(TxType::try_from(U64::from(DEPOSIT_TX_TYPE_ID)).unwrap(), TxType::Deposit); - - // For transactions with unsupported values - assert!(TxType::try_from(U64::from(EIP7702_TX_TYPE_ID + 1)).is_err()); + #[rstest] + #[case(U64::from(LEGACY_TX_TYPE_ID), Ok(TxType::Legacy))] + #[case(U64::from(EIP2930_TX_TYPE_ID), 
Ok(TxType::Eip2930))] + #[case(U64::from(EIP1559_TX_TYPE_ID), Ok(TxType::Eip1559))] + #[case(U64::from(EIP4844_TX_TYPE_ID), Ok(TxType::Eip4844))] + #[case(U64::from(EIP7702_TX_TYPE_ID), Ok(TxType::Eip7702))] + #[cfg_attr(feature = "optimism", case(U64::from(DEPOSIT_TX_TYPE_ID), Ok(TxType::Deposit)))] + #[case(U64::MAX, Err("invalid tx type"))] + fn test_u64_to_tx_type(#[case] input: U64, #[case] expected: Result) { + let tx_type_result = TxType::try_from(input); + assert_eq!(tx_type_result, expected); } - #[test] - fn test_txtype_to_compat() { - let cases = vec![ - (TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![]), - (TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![]), - (TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![]), - (TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID]), - (TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID]), - #[cfg(feature = "optimism")] - (TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]), - ]; - - for (tx_type, expected_identifier, expected_buf) in cases { - let mut buf = vec![]; - let identifier = tx_type.to_compact(&mut buf); - assert_eq!( - identifier, expected_identifier, - "Unexpected identifier for TxType {tx_type:?}", - ); - assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}"); - } + #[rstest] + #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] + #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]))] + fn test_txtype_to_compact( + #[case] tx_type: TxType, + #[case] expected_identifier: usize, + #[case] expected_buf: Vec, + ) { + let mut buf = vec![]; + let 
identifier = tx_type.to_compact(&mut buf); + + assert_eq!(identifier, expected_identifier, "Unexpected identifier for TxType {tx_type:?}",); + assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}",); } - #[test] - fn test_txtype_from_compact() { - let cases = vec![ - (TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![]), - (TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![]), - (TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![]), - (TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID]), - (TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID]), - #[cfg(feature = "optimism")] - (TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]), - ]; - - for (expected_type, identifier, buf) in cases { - let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier); - assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}",); - assert!( - remaining_buf.is_empty(), - "Buffer not fully consumed for identifier {identifier}", - ); - } + #[rstest] + #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] + #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]))] + fn test_txtype_from_compact( + #[case] expected_type: TxType, + #[case] identifier: usize, + #[case] buf: Vec, + ) { + let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier); + + assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}"); + assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}"); } - #[test] - fn decode_tx_type() { - // Test for Legacy 
transaction - let tx_type = TxType::decode(&mut &hex!("80")[..]).unwrap(); - assert_eq!(tx_type, TxType::Legacy); - - // Test for EIP2930 transaction - let tx_type = TxType::decode(&mut &[EIP2930_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip2930); - - // Test for EIP1559 transaction - let tx_type = TxType::decode(&mut &[EIP1559_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip1559); - - // Test for EIP4844 transaction - let tx_type = TxType::decode(&mut &[EIP4844_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip4844); - - // Test for EIP7702 transaction - let tx_type = TxType::decode(&mut &[EIP7702_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip7702); - - // Test random byte not in range - let buf = [rand::thread_rng().gen_range(EIP7702_TX_TYPE_ID + 1..=u8::MAX)]; - assert!(TxType::decode(&mut &buf[..]).is_err()); - - // Test for Deposit transaction - #[cfg(feature = "optimism")] - { - let buf = [DEPOSIT_TX_TYPE_ID]; - let tx_type = TxType::decode(&mut &buf[..]).unwrap(); - assert_eq!(tx_type, TxType::Deposit); - } + #[rstest] + #[case(&hex!("80"), Ok(TxType::Legacy))] + #[case(&[EIP2930_TX_TYPE_ID], Ok(TxType::Eip2930))] + #[case(&[EIP1559_TX_TYPE_ID], Ok(TxType::Eip1559))] + #[case(&[EIP4844_TX_TYPE_ID], Ok(TxType::Eip4844))] + #[case(&[EIP7702_TX_TYPE_ID], Ok(TxType::Eip7702))] + #[case(&[u8::MAX], Err(alloy_rlp::Error::InputTooShort))] + #[cfg_attr(feature = "optimism", case(&[DEPOSIT_TX_TYPE_ID], Ok(TxType::Deposit)))] + fn decode_tx_type(#[case] input: &[u8], #[case] expected: Result) { + let tx_type_result = TxType::decode(&mut &input[..]); + assert_eq!(tx_type_result, expected) } } From 23ec0af51de8c7d6e1ca4c91c52cffa5dd17a05d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 10 Nov 2024 13:56:36 +0100 Subject: [PATCH 398/970] chore(sdk): auto trait bounds `NodePrimitives` (#12398) --- crates/ethereum/node/src/node.rs | 2 +- crates/node/types/src/lib.rs | 15 +++++++++------ 
crates/optimism/node/src/node.rs | 2 +- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index e545a3c73c4..68ed879d223 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -38,7 +38,7 @@ use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; /// Ethereum primitive types. -#[derive(Debug)] +#[derive(Debug, Default, Clone)] pub struct EthPrimitives; impl NodePrimitives for EthPrimitives { diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index afb650ada2b..7f4bd4f5722 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -8,24 +8,27 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +pub use reth_primitives_traits::{Block, BlockBody}; + +use core::fmt; +use std::marker::PhantomData; + use reth_chainspec::EthChainSpec; use reth_db_api::{ database_metrics::{DatabaseMetadata, DatabaseMetrics}, Database, }; use reth_engine_primitives::EngineTypes; -pub use reth_primitives_traits::{Block, BlockBody}; use reth_trie_db::StateCommitment; -use std::marker::PhantomData; /// Configures all the primitive types of the node. -pub trait NodePrimitives { +pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { /// Block primitive. - type Block; + type Block: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; /// Signed version of the transaction type. - type SignedTx; + type SignedTx: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; /// A receipt. 
- type Receipt; + type Receipt: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; } impl NodePrimitives for () { diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 6edab570e29..323148e276b 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -41,7 +41,7 @@ use crate::{ }; /// Optimism primitive types. -#[derive(Debug)] +#[derive(Debug, Default, Clone)] pub struct OpPrimitives; impl NodePrimitives for OpPrimitives { From 7110397f8959950dee3a6c8e0057113d90ccc3e1 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 10 Nov 2024 12:58:26 +0000 Subject: [PATCH 399/970] chore(deps): weekly `cargo update` (#12433) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0434d038411..f532ccc5093 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,9 +91,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +checksum = "611cc2ae7d2e242c457e4be7f97036b8ad9ca152b499f53faf99b1ed8fc2553f" [[package]] name = "alloy-chains" @@ -1651,9 +1651,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.36" +version = "1.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baee610e9452a8f6f0a1b6194ec09ff9e2d85dea54432acdae41aa0761c95d70" +checksum = "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" dependencies = [ "jobserver", "libc", @@ -2186,9 +2186,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" dependencies = [ "csv-core", "itoa", @@ -9886,9 +9886,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -10041,9 +10041,9 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" dependencies = [ "once_cell", "parking_lot", @@ -10053,9 +10053,9 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", From 3774100a0513e3d33246b3936aac74ef7251f4be Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 10 Nov 2024 14:11:58 +0100 Subject: [PATCH 400/970] chore(sdk): impl `SignedTransaction` for `TransactionSigned` (#12187) --- crates/primitives/src/transaction/mod.rs | 145 ++++++++++++----------- 1 file changed, 73 insertions(+), 72 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index ed1a7daf1e8..685fd29a3c0 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1297,96 +1297,35 @@ impl TransactionSigned { } } -impl 
alloy_consensus::Transaction for TransactionSigned { - fn chain_id(&self) -> Option { - self.deref().chain_id() - } - - fn nonce(&self) -> u64 { - self.deref().nonce() - } - - fn gas_limit(&self) -> u64 { - self.deref().gas_limit() - } - - fn gas_price(&self) -> Option { - self.deref().gas_price() - } - - fn max_fee_per_gas(&self) -> u128 { - self.deref().max_fee_per_gas() - } - - fn max_priority_fee_per_gas(&self) -> Option { - self.deref().max_priority_fee_per_gas() - } - - fn max_fee_per_blob_gas(&self) -> Option { - self.deref().max_fee_per_blob_gas() - } - - fn priority_fee_or_price(&self) -> u128 { - self.deref().priority_fee_or_price() - } - - fn value(&self) -> U256 { - self.deref().value() - } - - fn input(&self) -> &Bytes { - self.deref().input() - } - - fn ty(&self) -> u8 { - self.deref().ty() - } - - fn access_list(&self) -> Option<&AccessList> { - self.deref().access_list() - } - - fn blob_versioned_hashes(&self) -> Option<&[B256]> { - alloy_consensus::Transaction::blob_versioned_hashes(self.deref()) - } - - fn authorization_list(&self) -> Option<&[SignedAuthorization]> { - self.deref().authorization_list() - } - - fn kind(&self) -> TxKind { - self.deref().kind() - } -} - impl SignedTransaction for TransactionSigned { type Transaction = Transaction; fn tx_hash(&self) -> &TxHash { - Self::hash_ref(self) + &self.hash } fn transaction(&self) -> &Self::Transaction { - Self::transaction(self) + &self.transaction } fn signature(&self) -> &Signature { - Self::signature(self) + &self.signature } fn recover_signer(&self) -> Option
{ - Self::recover_signer(self) + let signature_hash = self.signature_hash(); + recover_signer(&self.signature, signature_hash) } fn recover_signer_unchecked(&self) -> Option
{ - Self::recover_signer_unchecked(self) + let signature_hash = self.signature_hash(); + recover_signer_unchecked(&self.signature, signature_hash) } - fn from_transaction_and_signature( - transaction: Self::Transaction, - signature: Signature, - ) -> Self { - Self::from_transaction_and_signature(transaction, signature) + fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { + let mut initial_tx = Self { transaction, hash: Default::default(), signature }; + initial_tx.hash = initial_tx.recalculate_hash(); + initial_tx } fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { @@ -1469,6 +1408,68 @@ impl SignedTransaction for TransactionSigned { } } +impl alloy_consensus::Transaction for TransactionSigned { + fn chain_id(&self) -> Option { + self.deref().chain_id() + } + + fn nonce(&self) -> u64 { + self.deref().nonce() + } + + fn gas_limit(&self) -> u64 { + self.deref().gas_limit() + } + + fn gas_price(&self) -> Option { + self.deref().gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.deref().max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.deref().max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.deref().max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.deref().priority_fee_or_price() + } + + fn value(&self) -> U256 { + self.deref().value() + } + + fn input(&self) -> &Bytes { + self.deref().input() + } + + fn ty(&self) -> u8 { + self.deref().ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.deref().access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + alloy_consensus::Transaction::blob_versioned_hashes(self.deref()) + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.deref().authorization_list() + } + + fn kind(&self) -> TxKind { + self.deref().kind() + } +} + impl From for TransactionSigned { fn from(recovered: TransactionSignedEcRecovered) -> Self 
{ recovered.signed_transaction From b893a8879d666f8d512b38a133209268a5a88dc1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 10 Nov 2024 21:03:19 +0100 Subject: [PATCH 401/970] chore: fix deny (#12439) --- deny.toml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/deny.toml b/deny.toml index e8f60461c85..8d0807f9de5 100644 --- a/deny.toml +++ b/deny.toml @@ -4,8 +4,12 @@ [advisories] yanked = "warn" ignore = [ - # proc-macro-error 1.0.4 unmaintained https://rustsec.org/advisories/RUSTSEC-2024-0370 - "RUSTSEC-2024-0370" + # https://rustsec.org/advisories/RUSTSEC-2024-0379 used by boa (js-tracer) + "RUSTSEC-2024-0379", + # https://rustsec.org/advisories/RUSTSEC-2024-0384 used by sse example + "RUSTSEC-2024-0384", + # https://rustsec.org/advisories/RUSTSEC-2024-0388 used by ssz, will be removed https://github.com/sigp/ethereum_ssz/pull/34 + "RUSTSEC-2024-0388" ] # This section is considered when running `cargo deny check bans`. From 365f6a1f69bad747c54077d3ff6cb31a1891b334 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 11 Nov 2024 14:59:41 +0400 Subject: [PATCH 402/970] feat: `NetworkPrimitives` (#12435) --- crates/net/eth-wire-types/src/blocks.rs | 58 ++++------ crates/net/eth-wire-types/src/broadcast.rs | 15 +-- crates/net/eth-wire-types/src/lib.rs | 3 + crates/net/eth-wire-types/src/message.rs | 101 +++++++++++------- crates/net/eth-wire-types/src/primitives.rs | 83 ++++++++++++++ crates/net/eth-wire-types/src/transactions.rs | 35 +++--- crates/net/eth-wire/src/ethstream.rs | 27 +++-- crates/net/eth-wire/tests/new_block.rs | 6 +- .../net/eth-wire/tests/pooled_transactions.rs | 7 +- crates/primitives/src/transaction/pooled.rs | 4 + 10 files changed, 226 insertions(+), 113 deletions(-) create mode 100644 crates/net/eth-wire-types/src/primitives.rs diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index c24fc45022f..a7835ae8641 100644 --- a/crates/net/eth-wire-types/src/blocks.rs 
+++ b/crates/net/eth-wire-types/src/blocks.rs @@ -5,8 +5,7 @@ use crate::HeadersDirection; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; -use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{BlockBody, Header}; +use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; /// A request for a peer to return block headers starting at the requested block. /// The peer must return at most [`limit`](#structfield.limit) headers. @@ -41,34 +40,16 @@ pub struct GetBlockHeaders { /// The response to [`GetBlockHeaders`], containing headers if any headers were found. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(rlp, 10)] -pub struct BlockHeaders( +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +pub struct BlockHeaders( /// The requested headers. - pub Vec
, + pub Vec, ); -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for BlockHeaders { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let headers_count: usize = u.int_in_range(0..=10)?; - let mut headers = Vec::with_capacity(headers_count); - - for _ in 0..headers_count { - headers.push(reth_primitives::generate_valid_header( - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - )) - } - - Ok(Self(headers)) - } -} +generate_tests!(#[rlp, 10] BlockHeaders, EthBlockHeadersTests); -impl From> for BlockHeaders { - fn from(headers: Vec
) -> Self { +impl From> for BlockHeaders { + fn from(headers: Vec) -> Self { Self(headers) } } @@ -94,14 +75,15 @@ impl From> for GetBlockBodies { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(rlp, 16)] -pub struct BlockBodies( +pub struct BlockBodies( /// The requested block bodies, each of which should correspond to a hash in the request. - pub Vec, + pub Vec, ); -impl From> for BlockBodies { - fn from(bodies: Vec) -> Self { +generate_tests!(#[rlp, 16] BlockBodies, EthBlockBodiesTests); + +impl From> for BlockBodies { + fn from(bodies: Vec) -> Self { Self(bodies) } } @@ -116,11 +98,9 @@ mod tests { use alloy_eips::BlockHashOrNumber; use alloy_primitives::{hex, PrimitiveSignature as Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::{Header, Transaction, TransactionSigned}; + use reth_primitives::{BlockBody, Header, Transaction, TransactionSigned}; use std::str::FromStr; - use super::BlockBody; - #[test] fn decode_hash() { // this is a valid 32 byte rlp string @@ -254,7 +234,7 @@ mod tests { // [ (f90202) 0x0457 = 1111, [ (f901fc) [ (f901f9) header ] ] ] let expected = 
hex!("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); let mut data = vec![]; - RequestPair:: { + RequestPair::> { request_id: 1111, message: BlockHeaders(vec![ Header { @@ -289,7 +269,7 @@ mod tests { #[test] fn decode_block_header() { let data = 
hex!("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); - let expected = RequestPair:: { + let expected = RequestPair::> { request_id: 1111, message: BlockHeaders(vec![ Header { @@ -357,7 +337,7 @@ mod tests { fn encode_block_bodies() { let expected = 
hex!("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair::> { request_id: 1111, message: BlockBodies(vec![ BlockBody { @@ -428,7 +408,7 @@ mod tests { #[test] fn decode_block_bodies() { let data = 
hex!("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); - let expected = RequestPair:: { + let expected = RequestPair::> { request_id: 1111, message: BlockBodies(vec![ BlockBody { @@ -504,7 +484,7 @@ mod tests { let body = BlockBodies::default(); let mut buf = Vec::new(); body.encode(&mut buf); - let decoded = BlockBodies::decode(&mut buf.as_slice()).unwrap(); + let decoded = BlockBodies::::decode(&mut buf.as_slice()).unwrap(); assert_eq!(body, decoded); } } diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 
2ef6083a500..03222706992 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -1,14 +1,14 @@ //! Types for broadcasting new data. -use crate::{EthMessage, EthVersion}; +use crate::{EthMessage, EthVersion, NetworkPrimitives}; use alloy_rlp::{ Decodable, Encodable, RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper, }; use alloy_primitives::{Bytes, TxHash, B256, U128}; use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; -use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{Block, PooledTransactionsElement, TransactionSigned}; +use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; +use reth_primitives::{PooledTransactionsElement, TransactionSigned}; use std::{ collections::{HashMap, HashSet}, @@ -75,14 +75,15 @@ impl From for Vec { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(rlp, 25)] -pub struct NewBlock { +pub struct NewBlock { /// A new block. - pub block: Block, + pub block: B, /// The current total difficulty. pub td: U128, } +generate_tests!(#[rlp, 25] NewBlock, EthNewBlockTests); + /// This informs peers of transactions that have appeared on the network and are not yet included /// in a block. 
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -269,7 +270,7 @@ impl NewPooledTransactionHashes { } } -impl From for EthMessage { +impl From for EthMessage { fn from(value: NewPooledTransactionHashes) -> Self { match value { NewPooledTransactionHashes::Eth66(msg) => Self::NewPooledTransactionHashes66(msg), diff --git a/crates/net/eth-wire-types/src/lib.rs b/crates/net/eth-wire-types/src/lib.rs index 0e8fd5df98a..ac7ea55d0b9 100644 --- a/crates/net/eth-wire-types/src/lib.rs +++ b/crates/net/eth-wire-types/src/lib.rs @@ -40,3 +40,6 @@ pub use disconnect_reason::*; pub mod capability; pub use capability::*; + +pub mod primitives; +pub use primitives::*; diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 8546bfe14c8..cca6600d11e 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -11,7 +11,7 @@ use super::{ GetNodeData, GetPooledTransactions, GetReceipts, NewBlock, NewPooledTransactionHashes66, NewPooledTransactionHashes68, NodeData, PooledTransactions, Receipts, Status, Transactions, }; -use crate::{EthVersion, SharedTransactions}; +use crate::{EthNetworkPrimitives, EthVersion, NetworkPrimitives, SharedTransactions}; use alloy_primitives::bytes::{Buf, BufMut}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; @@ -35,14 +35,18 @@ pub enum MessageError { /// An `eth` protocol message, containing a message ID and payload. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct ProtocolMessage { +pub struct ProtocolMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, /// The content of the message, including specific data based on the message type. 
- pub message: EthMessage, + #[cfg_attr( + feature = "serde", + serde(bound = "EthMessage: serde::Serialize + serde::de::DeserializeOwned") + )] + pub message: EthMessage, } -impl ProtocolMessage { +impl ProtocolMessage { /// Create a new `ProtocolMessage` from a message type and message rlp bytes. pub fn decode_message(version: EthVersion, buf: &mut &[u8]) -> Result { let message_type = EthMessageID::decode(buf)?; @@ -78,7 +82,7 @@ impl ProtocolMessage { EthMessage::GetBlockHeaders(request_pair) } EthMessageID::BlockHeaders => { - let request_pair = RequestPair::::decode(buf)?; + let request_pair = RequestPair::>::decode(buf)?; EthMessage::BlockHeaders(request_pair) } EthMessageID::GetBlockBodies => { @@ -86,7 +90,7 @@ impl ProtocolMessage { EthMessage::GetBlockBodies(request_pair) } EthMessageID::BlockBodies => { - let request_pair = RequestPair::::decode(buf)?; + let request_pair = RequestPair::>::decode(buf)?; EthMessage::BlockBodies(request_pair) } EthMessageID::GetPooledTransactions => { @@ -124,7 +128,7 @@ impl ProtocolMessage { } } -impl Encodable for ProtocolMessage { +impl Encodable for ProtocolMessage { /// Encodes the protocol message into bytes. The message type is encoded as a single byte and /// prepended to the message. fn encode(&self, out: &mut dyn BufMut) { @@ -136,23 +140,23 @@ impl Encodable for ProtocolMessage { } } -impl From for ProtocolMessage { - fn from(message: EthMessage) -> Self { +impl From> for ProtocolMessage { + fn from(message: EthMessage) -> Self { Self { message_type: message.message_id(), message } } } /// Represents messages that can be sent to multiple peers. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ProtocolBroadcastMessage { +#[derive(Clone, Debug)] +pub struct ProtocolBroadcastMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, /// The content of the message to be broadcasted, including specific data based on the message /// type. 
- pub message: EthBroadcastMessage, + pub message: EthBroadcastMessage, } -impl Encodable for ProtocolBroadcastMessage { +impl Encodable for ProtocolBroadcastMessage { /// Encodes the protocol message into bytes. The message type is encoded as a single byte and /// prepended to the message. fn encode(&self, out: &mut dyn BufMut) { @@ -164,8 +168,8 @@ impl Encodable for ProtocolBroadcastMessage { } } -impl From for ProtocolBroadcastMessage { - fn from(message: EthBroadcastMessage) -> Self { +impl From> for ProtocolBroadcastMessage { + fn from(message: EthBroadcastMessage) -> Self { Self { message_type: message.message_id(), message } } } @@ -189,13 +193,17 @@ impl From for ProtocolBroadcastMessage { /// [`NewPooledTransactionHashes68`] is defined. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum EthMessage { +pub enum EthMessage { /// Represents a Status message required for the protocol handshake. Status(Status), /// Represents a `NewBlockHashes` message broadcast to the network. NewBlockHashes(NewBlockHashes), /// Represents a `NewBlock` message broadcast to the network. - NewBlock(Box), + #[cfg_attr( + feature = "serde", + serde(bound = "N::Block: serde::Serialize + serde::de::DeserializeOwned") + )] + NewBlock(Box>), /// Represents a Transactions message broadcast to the network. Transactions(Transactions), /// Represents a `NewPooledTransactionHashes` message for eth/66 version. @@ -206,11 +214,19 @@ pub enum EthMessage { /// Represents a `GetBlockHeaders` request-response pair. GetBlockHeaders(RequestPair), /// Represents a `BlockHeaders` request-response pair. - BlockHeaders(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BlockHeader: serde::Serialize + serde::de::DeserializeOwned") + )] + BlockHeaders(RequestPair>), /// Represents a `GetBlockBodies` request-response pair. GetBlockBodies(RequestPair), /// Represents a `BlockBodies` request-response pair. 
- BlockBodies(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BlockBody: serde::Serialize + serde::de::DeserializeOwned") + )] + BlockBodies(RequestPair>), /// Represents a `GetPooledTransactions` request-response pair. GetPooledTransactions(RequestPair), /// Represents a `PooledTransactions` request-response pair. @@ -225,7 +241,7 @@ pub enum EthMessage { Receipts(RequestPair), } -impl EthMessage { +impl EthMessage { /// Returns the message's ID. pub const fn message_id(&self) -> EthMessageID { match self { @@ -250,7 +266,7 @@ impl EthMessage { } } -impl Encodable for EthMessage { +impl Encodable for EthMessage { fn encode(&self, out: &mut dyn BufMut) { match self { Self::Status(status) => status.encode(out), @@ -301,16 +317,16 @@ impl Encodable for EthMessage { /// /// Note: This is only useful for outgoing messages. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum EthBroadcastMessage { +pub enum EthBroadcastMessage { /// Represents a new block broadcast message. - NewBlock(Arc), + NewBlock(Arc>), /// Represents a transactions broadcast message. Transactions(SharedTransactions), } // === impl EthBroadcastMessage === -impl EthBroadcastMessage { +impl EthBroadcastMessage { /// Returns the message's ID. 
pub const fn message_id(&self) -> EthMessageID { match self { @@ -320,7 +336,7 @@ impl EthBroadcastMessage { } } -impl Encodable for EthBroadcastMessage { +impl Encodable for EthBroadcastMessage { fn encode(&self, out: &mut dyn BufMut) { match self { Self::NewBlock(new_block) => new_block.encode(out), @@ -502,8 +518,8 @@ where mod tests { use super::MessageError; use crate::{ - message::RequestPair, EthMessage, EthMessageID, EthVersion, GetNodeData, NodeData, - ProtocolMessage, + message::RequestPair, EthMessage, EthMessageID, EthNetworkPrimitives, EthVersion, + GetNodeData, NodeData, ProtocolMessage, }; use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable, Error}; @@ -516,20 +532,30 @@ mod tests { #[test] fn test_removed_message_at_eth67() { - let get_node_data = - EthMessage::GetNodeData(RequestPair { request_id: 1337, message: GetNodeData(vec![]) }); + let get_node_data = EthMessage::::GetNodeData(RequestPair { + request_id: 1337, + message: GetNodeData(vec![]), + }); let buf = encode(ProtocolMessage { message_type: EthMessageID::GetNodeData, message: get_node_data, }); - let msg = ProtocolMessage::decode_message(crate::EthVersion::Eth67, &mut &buf[..]); + let msg = ProtocolMessage::::decode_message( + crate::EthVersion::Eth67, + &mut &buf[..], + ); assert!(matches!(msg, Err(MessageError::Invalid(..)))); - let node_data = - EthMessage::NodeData(RequestPair { request_id: 1337, message: NodeData(vec![]) }); + let node_data = EthMessage::::NodeData(RequestPair { + request_id: 1337, + message: NodeData(vec![]), + }); let buf = encode(ProtocolMessage { message_type: EthMessageID::NodeData, message: node_data }); - let msg = ProtocolMessage::decode_message(crate::EthVersion::Eth67, &mut &buf[..]); + let msg = ProtocolMessage::::decode_message( + crate::EthVersion::Eth67, + &mut &buf[..], + ); assert!(matches!(msg, Err(MessageError::Invalid(..)))); } @@ -578,10 +604,11 @@ mod tests { #[test] fn empty_block_bodies_protocol() { - let empty_block_bodies = 
ProtocolMessage::from(EthMessage::BlockBodies(RequestPair { - request_id: 0, - message: Default::default(), - })); + let empty_block_bodies = + ProtocolMessage::from(EthMessage::::BlockBodies(RequestPair { + request_id: 0, + message: Default::default(), + })); let mut buf = Vec::new(); empty_block_bodies.encode(&mut buf); let decoded = diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs new file mode 100644 index 00000000000..ca85fa69ad6 --- /dev/null +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -0,0 +1,83 @@ +//! Abstraction over primitive types in network messages. + +use std::fmt::Debug; + +use alloy_rlp::{Decodable, Encodable}; + +/// Abstraction over primitive types which might appear in network messages. See +/// [`crate::EthMessage`] for more context. +pub trait NetworkPrimitives: + Send + Sync + Unpin + Clone + Debug + PartialEq + Eq + 'static +{ + /// The block header type. + type BlockHeader: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + /// The block body type. + type BlockBody: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + /// Full block type. + type Block: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + + /// The transaction type which peers announce in `Transactions` messages. It is different from + /// `PooledTransactions` to account for Ethereum case where EIP-4844 transactions are not being + /// announced and can only be explicitly requested from peers. + type BroadcastedTransaction: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + /// The transaction type which peers return in `PooledTransactions` messages. 
+ type PooledTransaction: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; +} + +/// Primitive types used by Ethereum network. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] +#[non_exhaustive] +pub struct EthNetworkPrimitives; + +impl NetworkPrimitives for EthNetworkPrimitives { + type BlockHeader = reth_primitives::Header; + type BlockBody = reth_primitives::BlockBody; + type Block = reth_primitives::Block; + type BroadcastedTransaction = reth_primitives::TransactionSigned; + type PooledTransaction = reth_primitives::PooledTransactionsElement; +} diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index 97d18001f13..dfedcb6f83e 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -1,12 +1,11 @@ //! Implements the `GetPooledTransactions` and `PooledTransactions` message types. +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use derive_more::{Constructor, Deref, IntoIterator}; use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{ - transaction::TransactionConversionError, PooledTransactionsElement, TransactionSigned, -}; +use reth_primitives::{transaction::TransactionConversionError, PooledTransactionsElement}; /// A list of transaction hashes that the peer would like transaction bodies for. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -42,38 +41,46 @@ where Eq, RlpEncodableWrapper, RlpDecodableWrapper, - Default, IntoIterator, Deref, Constructor, )] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct PooledTransactions( +pub struct PooledTransactions( /// The transaction bodies, each of which should correspond to a requested hash. 
- pub Vec, + pub Vec, ); -impl PooledTransactions { +impl PooledTransactions { /// Returns an iterator over the transaction hashes in this response. - pub fn hashes(&self) -> impl Iterator + '_ { - self.0.iter().map(|tx| tx.hash()) + pub fn hashes(&self) -> impl Iterator + '_ { + self.0.iter().map(|tx| tx.trie_hash()) } } -impl TryFrom> for PooledTransactions { +impl TryFrom> for PooledTransactions +where + T: TryFrom, +{ type Error = TransactionConversionError; - fn try_from(txs: Vec) -> Result { - txs.into_iter().map(PooledTransactionsElement::try_from).collect() + fn try_from(txs: Vec) -> Result { + txs.into_iter().map(T::try_from).collect() } } -impl FromIterator for PooledTransactions { - fn from_iter>(iter: I) -> Self { +impl FromIterator for PooledTransactions { + fn from_iter>(iter: I) -> Self { Self(iter.into_iter().collect()) } } +impl Default for PooledTransactions { + fn default() -> Self { + Self(Default::default()) + } +} + #[cfg(test)] mod tests { use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 8ae599b6792..795dd630780 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -2,7 +2,8 @@ use crate::{ errors::{EthHandshakeError, EthStreamError}, message::{EthBroadcastMessage, ProtocolBroadcastMessage}, p2pstream::HANDSHAKE_TIMEOUT, - CanDisconnect, DisconnectReason, EthMessage, EthVersion, ProtocolMessage, Status, + CanDisconnect, DisconnectReason, EthMessage, EthNetworkPrimitives, EthVersion, ProtocolMessage, + Status, }; use alloy_primitives::bytes::{Bytes, BytesMut}; use futures::{ready, Sink, SinkExt, StreamExt}; @@ -87,7 +88,12 @@ where // we need to encode and decode here on our own because we don't have an `EthStream` yet // The max length for a status with TTD is: + self.inner - .send(alloy_rlp::encode(ProtocolMessage::from(EthMessage::Status(status))).into()) + .send( + 
alloy_rlp::encode(ProtocolMessage::from( + EthMessage::::Status(status), + )) + .into(), + ) .await?; let their_msg_res = self.inner.next().await; @@ -106,14 +112,15 @@ where } let version = status.version; - let msg = match ProtocolMessage::decode_message(version, &mut their_msg.as_ref()) { - Ok(m) => m, - Err(err) => { - debug!("decode error in eth handshake: msg={their_msg:x}"); - self.inner.disconnect(DisconnectReason::DisconnectRequested).await?; - return Err(EthStreamError::InvalidMessage(err)) - } - }; + let msg: ProtocolMessage = + match ProtocolMessage::decode_message(version, &mut their_msg.as_ref()) { + Ok(m) => m, + Err(err) => { + debug!("decode error in eth handshake: msg={their_msg:x}"); + self.inner.disconnect(DisconnectReason::DisconnectRequested).await?; + return Err(EthStreamError::InvalidMessage(err)) + } + }; // The following checks should match the checks in go-ethereum: // https://github.com/ethereum/go-ethereum/blob/9244d5cd61f3ea5a7645fdf2a1a96d53421e412f/eth/protocols/eth/handshake.go#L87-L89 diff --git a/crates/net/eth-wire/tests/new_block.rs b/crates/net/eth-wire/tests/new_block.rs index 266752b74ab..366bf26a3a2 100644 --- a/crates/net/eth-wire/tests/new_block.rs +++ b/crates/net/eth-wire/tests/new_block.rs @@ -11,7 +11,7 @@ fn decode_new_block_network() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/new_block_network_rlp"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } #[test] @@ -20,7 +20,7 @@ fn decode_new_block_network_bsc_one() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/bsc_new_block_network_one"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let 
_txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } #[test] @@ -29,5 +29,5 @@ fn decode_new_block_network_bsc_two() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/bsc_new_block_network_two"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } diff --git a/crates/net/eth-wire/tests/pooled_transactions.rs b/crates/net/eth-wire/tests/pooled_transactions.rs index 6690f42631a..22c5fcc3329 100644 --- a/crates/net/eth-wire/tests/pooled_transactions.rs +++ b/crates/net/eth-wire/tests/pooled_transactions.rs @@ -12,7 +12,7 @@ use test_fuzz::test_fuzz; #[test_fuzz] fn roundtrip_pooled_transactions(hex_data: Vec) -> Result<(), alloy_rlp::Error> { let input_rlp = &mut &hex_data[..]; - let txs = match PooledTransactions::decode(input_rlp) { + let txs: PooledTransactions = match PooledTransactions::decode(input_rlp) { Ok(txs) => txs, Err(e) => return Err(e), }; @@ -28,7 +28,7 @@ fn roundtrip_pooled_transactions(hex_data: Vec) -> Result<(), alloy_rlp::Err assert_eq!(expected_encoding, buf); // now do another decoding, on what we encoded - this should succeed - let txs2 = PooledTransactions::decode(&mut &buf[..]).unwrap(); + let txs2: PooledTransactions = PooledTransactions::decode(&mut &buf[..]).unwrap(); // ensure that the payload length is the same assert_eq!(txs.length(), txs2.length()); @@ -54,7 +54,8 @@ fn decode_request_pair_pooled_blob_transactions() { .join("testdata/request_pair_pooled_blob_transactions"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = ProtocolMessage::decode_message(EthVersion::Eth68, &mut &hex_data[..]).unwrap(); + let _txs: ProtocolMessage = + ProtocolMessage::decode_message(EthVersion::Eth68, &mut &hex_data[..]).unwrap(); } 
#[test] diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 0d48dd5a443..86cd40a8fe6 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -434,6 +434,10 @@ impl Encodable2718 for PooledTransactionsElement { } } } + + fn trie_hash(&self) -> B256 { + *self.hash() + } } impl Decodable2718 for PooledTransactionsElement { From de6813093d1a657888b864a64d1d87d68e5f9bd4 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 11 Nov 2024 12:23:07 +0100 Subject: [PATCH 403/970] chore(trie): rename reth-trie-parallel modules (#12444) --- crates/blockchain-tree/src/chain.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 2 +- crates/engine/tree/src/tree/root.rs | 2 +- crates/trie/parallel/benches/root.rs | 2 +- crates/trie/parallel/src/lib.rs | 4 ++-- crates/trie/parallel/src/{parallel_proof.rs => proof.rs} | 4 +--- crates/trie/parallel/src/{parallel_root.rs => root.rs} | 0 7 files changed, 7 insertions(+), 9 deletions(-) rename crates/trie/parallel/src/{parallel_proof.rs => proof.rs} (98%) rename crates/trie/parallel/src/{parallel_root.rs => root.rs} (100%) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 09ba5c3f851..6ac39c31670 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -23,7 +23,7 @@ use reth_provider::{ }; use reth_revm::database::StateProviderDatabase; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; -use reth_trie_parallel::parallel_root::ParallelStateRoot; +use reth_trie_parallel::root::ParallelStateRoot; use std::{ collections::BTreeMap, ops::{Deref, DerefMut}, diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 11dd95e5583..e89960d9870 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -43,7 +43,7 @@ use reth_provider::{ use 
reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; -use reth_trie_parallel::parallel_root::{ParallelStateRoot, ParallelStateRootError}; +use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use revm_primitives::ResultAndState; use std::{ cmp::Ordering, diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index dc039d418eb..fbf6c348138 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -4,7 +4,7 @@ use futures::Stream; use pin_project::pin_project; use reth_provider::providers::ConsistentDbView; use reth_trie::{updates::TrieUpdates, TrieInput}; -use reth_trie_parallel::parallel_root::ParallelStateRootError; +use reth_trie_parallel::root::ParallelStateRootError; use revm_primitives::{EvmState, B256}; use std::{ future::Future, diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index d1ffe49dd0a..eb5b6575b9f 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -13,7 +13,7 @@ use reth_trie::{ TrieInput, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot}; -use reth_trie_parallel::parallel_root::ParallelStateRoot; +use reth_trie_parallel::root::ParallelStateRoot; use std::collections::HashMap; pub fn calculate_state_root(c: &mut Criterion) { diff --git a/crates/trie/parallel/src/lib.rs b/crates/trie/parallel/src/lib.rs index 25fcb4bac3a..5be2a658387 100644 --- a/crates/trie/parallel/src/lib.rs +++ b/crates/trie/parallel/src/lib.rs @@ -14,10 +14,10 @@ pub use storage_root_targets::StorageRootTargets; pub mod stats; /// Implementation of parallel state root computation. -pub mod parallel_root; +pub mod root; /// Implementation of parallel proof computation. -pub mod parallel_proof; +pub mod proof; /// Parallel state root metrics. 
#[cfg(feature = "metrics")] diff --git a/crates/trie/parallel/src/parallel_proof.rs b/crates/trie/parallel/src/proof.rs similarity index 98% rename from crates/trie/parallel/src/parallel_proof.rs rename to crates/trie/parallel/src/proof.rs index 9c7d6b6b8b3..4cb99b50d0c 100644 --- a/crates/trie/parallel/src/parallel_proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -1,6 +1,4 @@ -use crate::{ - parallel_root::ParallelStateRootError, stats::ParallelTrieTracker, StorageRootTargets, -}; +use crate::{root::ParallelStateRootError, stats::ParallelTrieTracker, StorageRootTargets}; use alloy_primitives::{map::HashSet, B256}; use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/root.rs similarity index 100% rename from crates/trie/parallel/src/parallel_root.rs rename to crates/trie/parallel/src/root.rs From 9ff80977b528e8e30a994aa45ab3406d620417f0 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Mon, 11 Nov 2024 05:24:59 -0600 Subject: [PATCH 404/970] renamed OptimismHardforks to OpHardforks (#12441) --- crates/optimism/chainspec/src/lib.rs | 6 +++--- crates/optimism/consensus/src/lib.rs | 2 +- crates/optimism/evm/src/l1.rs | 2 +- crates/optimism/hardforks/src/lib.rs | 2 +- crates/optimism/node/src/engine.rs | 2 +- crates/optimism/payload/src/builder.rs | 2 +- crates/optimism/rpc/src/eth/receipt.rs | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 7bd0e433a2d..018e75a924c 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -35,7 +35,7 @@ use reth_chainspec::{ }; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; use reth_network_peers::NodeRecord; -use reth_optimism_forks::OptimismHardforks; +use reth_optimism_forks::OpHardforks; use 
reth_primitives_traits::Header; #[cfg(feature = "std")] pub(crate) use std::sync::LazyLock; @@ -336,7 +336,7 @@ impl EthereumHardforks for OpChainSpec { } } -impl OptimismHardforks for OpChainSpec {} +impl OpHardforks for OpChainSpec {} impl From for OpChainSpec { fn from(genesis: Genesis) -> Self { @@ -486,7 +486,7 @@ mod tests { use alloy_primitives::b256; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head}; - use reth_optimism_forks::{OpHardfork, OptimismHardforks}; + use reth_optimism_forks::{OpHardfork, OpHardforks}; use crate::*; diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index bf1428815d0..565294358b8 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -20,7 +20,7 @@ use reth_consensus_common::validation::{ validate_shanghai_withdrawals, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardforks; +use reth_optimism_forks::OpHardforks; use reth_primitives::{BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader}; use std::{sync::Arc, time::SystemTime}; diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 6ff841b9ddc..143399a5ab7 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -297,7 +297,7 @@ where mod tests { use alloy_eips::eip2718::Decodable2718; use reth_optimism_chainspec::OP_MAINNET; - use reth_optimism_forks::OptimismHardforks; + use reth_optimism_forks::OpHardforks; use reth_primitives::{Block, BlockBody, TransactionSigned}; use super::*; diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index df159161e0e..3915bcf6cbd 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -19,7 +19,7 @@ pub use hardfork::OpHardfork; use reth_ethereum_forks::EthereumHardforks; /// Extends 
[`EthereumHardforks`] with optimism helper methods. -pub trait OptimismHardforks: EthereumHardforks { +pub trait OpHardforks: EthereumHardforks { /// Convenience method to check if [`OpHardfork::Bedrock`] is active at a given block /// number. fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 7400f149a96..b79f4f81388 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -14,7 +14,7 @@ use reth_node_api::{ validate_version_specific_fields, EngineTypes, EngineValidator, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::{OpHardfork, OptimismHardforks}; +use reth_optimism_forks::{OpHardfork, OpHardforks}; use reth_optimism_payload_builder::{OpBuiltPayload, OpPayloadBuilderAttributes}; /// The types used in the optimism beacon consensus engine. diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 42326de6ea4..47ef376b705 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -13,7 +13,7 @@ use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes} use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_optimism_forks::OptimismHardforks; +use reth_optimism_forks::OpHardforks; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ proofs, diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 40ee5d9fd86..f3d16b4adb5 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -9,7 +9,7 @@ use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceipt use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_chainspec::OpChainSpec; 
use reth_optimism_evm::RethL1BlockInfo; -use reth_optimism_forks::OptimismHardforks; +use reth_optimism_forks::OpHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use reth_provider::ChainSpecProvider; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; From 56b5937691d656c0de1e21d25406da8a77caba32 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Mon, 11 Nov 2024 05:56:06 -0600 Subject: [PATCH 405/970] Move payload_attributes function to trait (#12427) --- crates/payload/builder/src/service.rs | 24 ++++++++++++------------ crates/payload/primitives/src/traits.rs | 6 ++++++ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 43beaf82c38..2c9975cb4c3 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -156,6 +156,18 @@ where let _ = self.to_service.send(PayloadServiceCommand::Subscribe(tx)); Ok(PayloadEvents { receiver: rx.await? }) } + + /// Returns the payload attributes associated with the given identifier. + /// + /// Note: this returns the attributes of the payload and does not resolve the job. + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option> { + let (tx, rx) = oneshot::channel(); + self.to_service.send(PayloadServiceCommand::PayloadAttributes(id, tx)).ok()?; + rx.await.ok()? + } } impl PayloadBuilderHandle @@ -169,18 +181,6 @@ where pub const fn new(to_service: mpsc::UnboundedSender>) -> Self { Self { to_service } } - - /// Returns the payload attributes associated with the given identifier. - /// - /// Note: this returns the attributes of the payload and does not resolve the job. - async fn payload_attributes( - &self, - id: PayloadId, - ) -> Option> { - let (tx, rx) = oneshot::channel(); - self.to_service.send(PayloadServiceCommand::PayloadAttributes(id, tx)).ok()?; - rx.await.ok()? 
- } } impl Clone for PayloadBuilderHandle diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index d66b0add459..86f04a5b550 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -50,6 +50,12 @@ pub trait PayloadBuilder: Send + Sync + Unpin { /// Sends a message to the service to subscribe to payload events. /// Returns a receiver that will receive them. async fn subscribe(&self) -> Result, Self::Error>; + + /// Returns the payload attributes associated with the given identifier. + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option::PayloadBuilderAttributes, Self::Error>>; } /// Represents a built payload type that contains a built [`SealedBlock`] and can be converted into From 5d5f44202406fba424f9472bf70636a0f8f3c2ae Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 11 Nov 2024 12:58:34 +0100 Subject: [PATCH 406/970] chore(sdk): define helper trait `FullNodePrimitives` (#12331) --- Cargo.lock | 16 ++++++------ crates/node/types/Cargo.toml | 7 ++++++ crates/node/types/src/lib.rs | 25 ++++++++++++++++--- crates/primitives-traits/src/block/body.rs | 7 ++++-- crates/primitives-traits/src/block/mod.rs | 7 ++++-- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives-traits/src/receipt.rs | 9 ++++++- .../primitives-traits/src/transaction/mod.rs | 9 ++++--- .../src/transaction/signed.rs | 8 +++--- crates/primitives-traits/src/tx_type.rs | 23 +++++++++-------- 10 files changed, 79 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f532ccc5093..f8587ab2798 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,9 +91,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611cc2ae7d2e242c457e4be7f97036b8ad9ca152b499f53faf99b1ed8fc2553f" +checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" 
[[package]] name = "alloy-chains" @@ -9645,9 +9645,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.39" +version = "0.38.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" +checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" dependencies = [ "bitflags 2.6.0", "errno", @@ -10548,18 +10548,18 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml index cc33aac30ff..588fe7c4062 100644 --- a/crates/node/types/Cargo.toml +++ b/crates/node/types/Cargo.toml @@ -17,3 +17,10 @@ reth-db-api.workspace = true reth-engine-primitives.workspace = true reth-primitives-traits.workspace = true reth-trie-db.workspace = true + +[features] +default = ["std"] +std = [ + "reth-primitives-traits/std", + "reth-chainspec/std", +] \ No newline at end of file diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 7f4bd4f5722..f2bd16280f8 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -7,11 +7,11 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), 
no_std)] -pub use reth_primitives_traits::{Block, BlockBody}; +pub use reth_primitives_traits::{Block, BlockBody, FullBlock, FullReceipt, FullSignedTx}; -use core::fmt; -use std::marker::PhantomData; +use core::{fmt, marker::PhantomData}; use reth_chainspec::EthChainSpec; use reth_db_api::{ @@ -37,6 +37,25 @@ impl NodePrimitives for () { type Receipt = (); } +/// Helper trait that sets trait bounds on [`NodePrimitives`]. +pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { + /// Block primitive. + type Block: FullBlock>; + /// Signed version of the transaction type. + type SignedTx: FullSignedTx; + /// A receipt. + type Receipt: FullReceipt; +} + +impl NodePrimitives for T +where + T: FullNodePrimitives, +{ + type Block = T::Block; + type SignedTx = T::SignedTx; + type Receipt = T::Receipt; +} + /// The type that configures the essential types of an Ethereum-like node. /// /// This includes the primitive types of a node and chain specification. diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index c9b673ec724..9b703c0d2f1 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -10,11 +10,14 @@ use crate::Block; /// Abstraction for block's body. 
pub trait BlockBody: - Clone + Send + + Sync + + Unpin + + Clone + + Default + fmt::Debug + PartialEq + Eq - + Default + serde::Serialize + for<'de> serde::Deserialize<'de> + alloy_rlp::Encodable diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 519987606ee..125d587e42a 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -20,11 +20,14 @@ impl FullBlock for T where T: Block + Compact {} // todo: make with senders extension trait, so block can be impl by block type already containing // senders pub trait Block: - fmt::Debug + Send + + Sync + + Unpin + Clone + + Default + + fmt::Debug + PartialEq + Eq - + Default + serde::Serialize + for<'a> serde::Deserialize<'a> + From<(Self::Header, Self::Body)> diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index ec93f2a2163..6208f913c9c 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -22,7 +22,7 @@ pub mod account; pub use account::{Account, Bytecode}; pub mod receipt; -pub use receipt::Receipt; +pub use receipt::{FullReceipt, Receipt}; pub mod transaction; pub use transaction::{ diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 5c317dc49a2..6ab006b89b5 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,5 +1,7 @@ //! Receipt abstraction +use core::fmt; + use alloy_consensus::TxReceipt; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; @@ -11,8 +13,13 @@ impl FullReceipt for T where T: Receipt + Compact {} /// Abstraction of a receipt. 
pub trait Receipt: - TxReceipt + Send + + Sync + + Unpin + + Clone + Default + + fmt::Debug + + TxReceipt + alloy_rlp::Encodable + alloy_rlp::Decodable + Serialize diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index a1ad81ab327..7fd0ec88b31 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -1,6 +1,6 @@ //! Transaction abstraction -use core::{fmt::Debug, hash::Hash}; +use core::{fmt, hash::Hash}; use alloy_primitives::{TxKind, B256}; @@ -12,9 +12,12 @@ pub mod signed; #[allow(dead_code)] /// Abstraction of a transaction. pub trait Transaction: - Debug - + Default + Send + + Sync + + Unpin + Clone + + Default + + fmt::Debug + Eq + PartialEq + Hash diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index c40403865df..02e908aec6c 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -16,13 +16,15 @@ impl FullSignedTx for T where T: SignedTransaction + Co /// A signed transaction. pub trait SignedTransaction: - fmt::Debug + Send + + Sync + + Unpin + Clone + + Default + + fmt::Debug + PartialEq + Eq + Hash - + Send - + Sync + serde::Serialize + for<'a> serde::Deserialize<'a> + alloy_rlp::Encodable diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs index 058f02a7ee6..a25a7d659bd 100644 --- a/crates/primitives-traits/src/tx_type.rs +++ b/crates/primitives-traits/src/tx_type.rs @@ -1,27 +1,28 @@ +use core::fmt; + use alloy_eips::eip2718::Eip2718Error; use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; -use core::fmt::{Debug, Display}; /// Trait representing the behavior of a transaction type. 
pub trait TxType: - Into - + Into + Send + + Sync + + Unpin + + Clone + + Copy + + Default + + fmt::Debug + + fmt::Display + PartialEq + Eq + PartialEq + + Into + + Into + TryFrom + TryFrom + TryFrom - + Debug - + Display - + Clone - + Copy - + Default + Encodable + Decodable - + Send - + Sync - + 'static { } From 78e70d622996b9787f4432313df508e0f1bf27cc Mon Sep 17 00:00:00 2001 From: Tuan Tran Date: Mon, 11 Nov 2024 19:00:57 +0700 Subject: [PATCH 407/970] chore: move `new_payload_v2` into type EngineAPI (#12431) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 22 ++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index a017c50678f..afe50d915fa 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -174,6 +174,20 @@ where .inspect(|_| self.inner.on_new_payload_response())?) } + /// Metered version of `new_payload_v2`. 
+ pub async fn new_payload_v2_metered( + &self, + payload: ExecutionPayloadInputV2, + ) -> EngineApiResult { + let start = Instant::now(); + let gas_used = payload.execution_payload.gas_used; + let res = Self::new_payload_v2(self, payload).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v2.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + res + } + /// See also pub async fn new_payload_v3( &self, @@ -690,13 +704,7 @@ where /// See also async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); - let start = Instant::now(); - let gas_used = payload.execution_payload.gas_used; - let res = Self::new_payload_v2(self, payload).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v2.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v2_metered(payload).await?) } /// Handler for `engine_newPayloadV3` From cdce7ee345b11068047738c2e1cc7762b073698e Mon Sep 17 00:00:00 2001 From: Tuan Tran Date: Mon, 11 Nov 2024 19:16:43 +0700 Subject: [PATCH 408/970] chore: move `new_payload_v1` into type EngineAPI (#12430) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 22 ++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index afe50d915fa..1a5e415bb2d 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -153,6 +153,20 @@ where .inspect(|_| self.inner.on_new_payload_response())?) } + /// Metered version of `new_payload_v1`. 
+ async fn new_payload_v1_metered( + &self, + payload: ExecutionPayloadV1, + ) -> EngineApiResult { + let start = Instant::now(); + let gas_used = payload.gas_used; + let res = Self::new_payload_v1(self, payload).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v1.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + res + } + /// See also pub async fn new_payload_v2( &self, @@ -691,13 +705,7 @@ where /// Caution: This should not accept the `withdrawals` field async fn new_payload_v1(&self, payload: ExecutionPayloadV1) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV1"); - let start = Instant::now(); - let gas_used = payload.gas_used; - let res = Self::new_payload_v1(self, payload).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v1.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v1_metered(payload).await?) } /// Handler for `engine_newPayloadV2` From 29b9238394741da9480c8327d8c2c7d0204b78ba Mon Sep 17 00:00:00 2001 From: Kien Trinh <51135161+kien6034@users.noreply.github.com> Date: Mon, 11 Nov 2024 19:23:52 +0700 Subject: [PATCH 409/970] [refactor] move `new_payload_v3` into EngineAPI (#12442) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-engine-api/src/engine_api.rs | 26 ++++++++++++++------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 1a5e415bb2d..eeb5fcbf187 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -233,6 +233,23 @@ where .inspect(|_| self.inner.on_new_payload_response())?) 
} + // Metrics version of `new_payload_v3` + async fn new_payload_v3_metered( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> RpcResult { + let start = Instant::now(); + let gas_used = payload.payload_inner.payload_inner.gas_used; + let res = + Self::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v3.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + Ok(res?) + } + /// See also pub async fn new_payload_v4( &self, @@ -724,14 +741,7 @@ where parent_beacon_block_root: B256, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV3"); - let start = Instant::now(); - let gas_used = payload.payload_inner.payload_inner.gas_used; - let res = - Self::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v3.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v3_metered(payload, versioned_hashes, parent_beacon_block_root).await?) 
} /// Handler for `engine_newPayloadV4` From b0329ee4d72cd63aad20539a222d72815521c674 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 11 Nov 2024 13:52:21 +0100 Subject: [PATCH 410/970] chore(sdk): impl `Receipt` for `reth_primitives::Receipt` (#12446) --- Cargo.lock | 8 ++-- crates/primitives-traits/src/receipt.rs | 4 ++ crates/primitives/src/receipt.rs | 51 ++++++++++++++++++++++--- 3 files changed, 53 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f8587ab2798..0c64ed9a910 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,9 +91,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.20" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" +checksum = "611cc2ae7d2e242c457e4be7f97036b8ad9ca152b499f53faf99b1ed8fc2553f" [[package]] name = "alloy-chains" @@ -9645,9 +9645,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.40" +version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" +checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ "bitflags 2.6.0", "errno", diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 6ab006b89b5..bfcd99b08ec 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -3,6 +3,7 @@ use core::fmt; use alloy_consensus::TxReceipt; +use alloy_primitives::B256; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; @@ -27,4 +28,7 @@ pub trait Receipt: { /// Returns transaction type. fn tx_type(&self) -> u8; + + /// Calculates the receipts root of all receipts in a block. 
+ fn receipts_root(receipts: &[&Self]) -> B256; } diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index e60bddb9d79..f3750b70a65 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,18 +1,21 @@ -#[cfg(feature = "reth-codec")] -use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; -use crate::TxType; use alloc::{vec, vec::Vec}; -use alloy_consensus::constants::{ - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, +use core::{cmp::Ordering, ops::Deref}; + +use alloy_consensus::{ + constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, + Eip658Value, TxReceipt, }; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Bloom, Log, B256}; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; -use core::{cmp::Ordering, ops::Deref}; use derive_more::{DerefMut, From, IntoIterator}; use serde::{Deserialize, Serialize}; +#[cfg(feature = "reth-codec")] +use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; +use crate::TxType; + /// Receipt containing result of transaction execution. 
#[derive( Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable, Serialize, Deserialize, @@ -64,6 +67,42 @@ impl Receipt { } } +// todo: replace with alloy receipt +impl TxReceipt for Receipt { + fn status_or_post_state(&self) -> Eip658Value { + self.success.into() + } + + fn status(&self) -> bool { + self.success + } + + fn bloom(&self) -> Bloom { + alloy_primitives::logs_bloom(self.logs.iter()) + } + + fn cumulative_gas_used(&self) -> u128 { + self.cumulative_gas_used as u128 + } + + fn logs(&self) -> &[Log] { + &self.logs + } +} + +impl reth_primitives_traits::Receipt for Receipt { + fn tx_type(&self) -> u8 { + self.tx_type as u8 + } + + fn receipts_root(_receipts: &[&Self]) -> B256 { + #[cfg(feature = "optimism")] + panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); + #[cfg(not(feature = "optimism"))] + crate::proofs::calculate_receipt_root_no_memo(_receipts) + } +} + /// A collection of receipts organized as a two-dimensional vector. #[derive( Clone, From 0014248cd008037c5dc4570be23d5047b019bb76 Mon Sep 17 00:00:00 2001 From: Kien Trinh <51135161+kien6034@users.noreply.github.com> Date: Mon, 11 Nov 2024 19:57:02 +0700 Subject: [PATCH 411/970] [refactor] move `new_payload_v4` into EngineAPI (#12445) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-engine-api/src/engine_api.rs | 46 ++++++++++++++------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index eeb5fcbf187..666154f3b22 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -282,6 +282,30 @@ where .inspect(|_| self.inner.on_new_payload_response())?) 
} + /// Metrics version of `new_payload_v4` + async fn new_payload_v4_metered( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + execution_requests: Requests, + ) -> RpcResult { + let start = Instant::now(); + let gas_used = payload.payload_inner.payload_inner.gas_used; + let res = Self::new_payload_v4( + self, + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ) + .await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v4.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + Ok(res?) + } + /// Sends a message to the beacon consensus engine to update the fork choice _without_ /// withdrawals. /// @@ -754,20 +778,14 @@ where execution_requests: Requests, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV4"); - let start = Instant::now(); - let gas_used = payload.payload_inner.payload_inner.gas_used; - let res = Self::new_payload_v4( - self, - payload, - versioned_hashes, - parent_beacon_block_root, - execution_requests, - ) - .await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v4.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self + .new_payload_v4_metered( + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ) + .await?) 
} /// Handler for `engine_forkchoiceUpdatedV1` From 97736353447e4e5a9f9323ecf3b5839fe7ef8190 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 11 Nov 2024 14:10:40 +0100 Subject: [PATCH 412/970] chore(sdk): make `Receipts` generic over receipt (#12447) --- crates/primitives/src/receipt.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index f3750b70a65..3258d4be6eb 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -117,12 +117,12 @@ impl reth_primitives_traits::Receipt for Receipt { DerefMut, IntoIterator, )] -pub struct Receipts { +pub struct Receipts { /// A two-dimensional vector of optional `Receipt` instances. - pub receipt_vec: Vec>>, + pub receipt_vec: Vec>>, } -impl Receipts { +impl Receipts { /// Returns the length of the `Receipts` vector. pub fn len(&self) -> usize { self.receipt_vec.len() @@ -134,26 +134,26 @@ impl Receipts { } /// Push a new vector of receipts into the `Receipts` collection. - pub fn push(&mut self, receipts: Vec>) { + pub fn push(&mut self, receipts: Vec>) { self.receipt_vec.push(receipts); } /// Retrieves all recorded receipts from index and calculates the root using the given closure. 
- pub fn root_slow(&self, index: usize, f: impl FnOnce(&[&Receipt]) -> B256) -> Option { + pub fn root_slow(&self, index: usize, f: impl FnOnce(&[&T]) -> B256) -> Option { let receipts = self.receipt_vec[index].iter().map(Option::as_ref).collect::>>()?; Some(f(receipts.as_slice())) } } -impl From> for Receipts { - fn from(block_receipts: Vec) -> Self { +impl From> for Receipts { + fn from(block_receipts: Vec) -> Self { Self { receipt_vec: vec![block_receipts.into_iter().map(Option::Some).collect()] } } } -impl FromIterator>> for Receipts { - fn from_iter>>>(iter: I) -> Self { +impl FromIterator>> for Receipts { + fn from_iter>>>(iter: I) -> Self { iter.into_iter().collect::>().into() } } From c4f10bd11b57cef4ed189867483de447ed457f68 Mon Sep 17 00:00:00 2001 From: clabby Date: Mon, 11 Nov 2024 10:19:50 -0500 Subject: [PATCH 413/970] feat(spec): Holocene activation time for {OP/Base} Sepolia (#12453) --- crates/optimism/chainspec/src/lib.rs | 29 +++++++++++++++++++---- crates/optimism/hardforks/src/hardfork.rs | 8 +++++-- crates/optimism/node/src/engine.rs | 28 ++++++++++------------ 3 files changed, 43 insertions(+), 22 deletions(-) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 018e75a924c..a835b02bd1d 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -392,6 +392,7 @@ impl From for OpChainSpec { (OpHardfork::Ecotone.boxed(), genesis_info.ecotone_time), (OpHardfork::Fjord.boxed(), genesis_info.fjord_time), (OpHardfork::Granite.boxed(), genesis_info.granite_time), + (OpHardfork::Holocene.boxed(), genesis_info.holocene_time), ]; let mut time_hardforks = time_hardfork_opts @@ -572,7 +573,11 @@ mod tests { ), ( Head { number: 0, timestamp: 1723478400, ..Default::default() }, - ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 0 }, + ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 1732201200 }, + ), + ( + Head { number: 0, timestamp: 1732201200, 
..Default::default() }, + ForkId { hash: ForkHash([0x98, 0x1c, 0x21, 0x69]), next: 0 }, ), ], ); @@ -639,7 +644,11 @@ mod tests { ), ( Head { number: 0, timestamp: 1723478400, ..Default::default() }, - ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 0 }, + ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 1732201200 }, + ), + ( + Head { number: 0, timestamp: 1732201200, ..Default::default() }, + ForkId { hash: ForkHash([0x59, 0x5e, 0x2e, 0x6e]), next: 0 }, ), ], ); @@ -721,6 +730,7 @@ mod tests { "ecotoneTime": 40, "fjordTime": 50, "graniteTime": 51, + "holoceneTime": 52, "optimism": { "eip1559Elasticity": 60, "eip1559Denominator": 70 @@ -742,6 +752,8 @@ mod tests { assert_eq!(actual_fjord_timestamp, Some(serde_json::Value::from(50)).as_ref()); let actual_granite_timestamp = genesis.config.extra_fields.get("graniteTime"); assert_eq!(actual_granite_timestamp, Some(serde_json::Value::from(51)).as_ref()); + let actual_holocene_timestamp = genesis.config.extra_fields.get("holoceneTime"); + assert_eq!(actual_holocene_timestamp, Some(serde_json::Value::from(52)).as_ref()); let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); assert_eq!( @@ -765,6 +777,7 @@ mod tests { assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 0)); assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 0)); assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 0)); assert!(chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 10)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); @@ -772,6 +785,7 @@ mod tests { assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 40)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 50)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 51)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 52)); } 
#[test] @@ -785,6 +799,7 @@ mod tests { "ecotoneTime": 40, "fjordTime": 50, "graniteTime": 51, + "holoceneTime": 52, "optimism": { "eip1559Elasticity": 60, "eip1559Denominator": 70, @@ -807,6 +822,8 @@ mod tests { assert_eq!(actual_fjord_timestamp, Some(serde_json::Value::from(50)).as_ref()); let actual_granite_timestamp = genesis.config.extra_fields.get("graniteTime"); assert_eq!(actual_granite_timestamp, Some(serde_json::Value::from(51)).as_ref()); + let actual_holocene_timestamp = genesis.config.extra_fields.get("holoceneTime"); + assert_eq!(actual_holocene_timestamp, Some(serde_json::Value::from(52)).as_ref()); let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); assert_eq!( @@ -837,6 +854,7 @@ mod tests { assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 0)); assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 0)); assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 0)); assert!(chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 10)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); @@ -844,6 +862,7 @@ mod tests { assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 40)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 50)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 51)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 52)); } #[test] @@ -955,6 +974,7 @@ mod tests { (String::from("ecotoneTime"), 0.into()), (String::from("fjordTime"), 0.into()), (String::from("graniteTime"), 0.into()), + (String::from("holoceneTime"), 0.into()), ] .into_iter() .collect(), @@ -988,6 +1008,7 @@ mod tests { OpHardfork::Ecotone.boxed(), OpHardfork::Fjord.boxed(), OpHardfork::Granite.boxed(), + OpHardfork::Holocene.boxed(), ]; assert!(expected_hardforks @@ -1036,7 +1057,7 @@ mod tests { } #[test] - fn 
test_get_base_fee_holocene_nonce_not_set() { + fn test_get_base_fee_holocene_extra_data_not_set() { let op_chain_spec = holocene_chainspec(); let parent = Header { base_fee_per_gas: Some(1), @@ -1058,7 +1079,7 @@ mod tests { } #[test] - fn test_get_base_fee_holocene_nonce_set() { + fn test_get_base_fee_holocene_extra_data_set() { let op_chain_spec = holocene_chainspec(); let parent = Header { base_fee_per_gas: Some(1), diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 9a9786a8fe0..91b2584e4f9 100644 --- a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -158,7 +158,7 @@ impl OpHardfork { Self::Ecotone => Some(1708534800), Self::Fjord => Some(1716998400), Self::Granite => Some(1723478400), - Self::Holocene => None, + Self::Holocene => Some(1732201200), }, ) } @@ -257,6 +257,7 @@ impl OpHardfork { (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), (Self::Granite.boxed(), ForkCondition::Timestamp(1723478400)), + (Self::Holocene.boxed(), ForkCondition::Timestamp(1732201200)), ]) } @@ -288,6 +289,7 @@ impl OpHardfork { (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), (Self::Granite.boxed(), ForkCondition::Timestamp(1723478400)), + (Self::Holocene.boxed(), ForkCondition::Timestamp(1732201200)), ]) } @@ -354,7 +356,8 @@ mod tests { #[test] fn check_op_hardfork_from_str() { - let hardfork_str = ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe"]; + let hardfork_str = + ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe", "hOlOcEnE"]; let expected_hardforks = [ OpHardfork::Bedrock, OpHardfork::Regolith, @@ -362,6 +365,7 @@ mod tests { OpHardfork::Ecotone, OpHardfork::Fjord, OpHardfork::Granite, + OpHardfork::Holocene, ]; let hardforks: Vec = diff --git a/crates/optimism/node/src/engine.rs 
b/crates/optimism/node/src/engine.rs index b79f4f81388..69755d10446 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -172,16 +172,12 @@ mod test { use crate::engine; use alloy_primitives::{b64, Address, B256, B64}; use alloy_rpc_types_engine::PayloadAttributes; - use reth_chainspec::ForkCondition; use reth_optimism_chainspec::BASE_SEPOLIA; use super::*; - fn get_chainspec(is_holocene: bool) -> Arc { - let mut hardforks = OpHardfork::base_sepolia(); - if is_holocene { - hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); - } + fn get_chainspec() -> Arc { + let hardforks = OpHardfork::base_sepolia(); Arc::new(OpChainSpec { inner: ChainSpec { chain: BASE_SEPOLIA.inner.chain, @@ -217,8 +213,8 @@ mod test { #[test] fn test_well_formed_attributes_pre_holocene() { - let validator = OpEngineValidator::new(get_chainspec(false)); - let attributes = get_attributes(None, 1799999999); + let validator = OpEngineValidator::new(get_chainspec()); + let attributes = get_attributes(None, 1732201199); let result = Date: Mon, 11 Nov 2024 17:21:39 +0100 Subject: [PATCH 414/970] chore(sdk): define new `BlockHeader` trait (#12452) --- crates/evm/Cargo.toml | 1 + crates/evm/src/lib.rs | 7 +-- crates/primitives-traits/src/block/header.rs | 49 ++++++++++++++++++++ crates/primitives-traits/src/block/mod.rs | 10 ++-- crates/primitives-traits/src/lib.rs | 8 +++- 5 files changed, 65 insertions(+), 10 deletions(-) create mode 100644 crates/primitives-traits/src/block/header.rs diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index c895110209b..9d6a616af98 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -30,6 +30,7 @@ revm-primitives.workspace = true # alloy alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-consensus.workspace = true auto_impl.workspace = true futures-util.workspace = true diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 
e30ff9b1a7a..d20dbe4594a 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -17,13 +17,15 @@ extern crate alloc; -use crate::builder::RethEvmBuilder; +use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; use reth_primitives::TransactionSigned; use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv}; +use crate::builder::RethEvmBuilder; + pub mod builder; pub mod either; pub mod execute; @@ -33,7 +35,6 @@ pub mod noop; pub mod provider; pub mod state_change; pub mod system_calls; - #[cfg(any(test, feature = "test-utils"))] /// test helpers for mocking executor pub mod test_utils; @@ -155,7 +156,7 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { block_env.coinbase = header.beneficiary(); block_env.timestamp = U256::from(header.timestamp()); if after_merge { - block_env.prevrandao = Some(header.mix_hash()); + block_env.prevrandao = header.mix_hash(); block_env.difficulty = U256::ZERO; } else { block_env.difficulty = header.difficulty(); diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs new file mode 100644 index 00000000000..8ad85a5961a --- /dev/null +++ b/crates/primitives-traits/src/block/header.rs @@ -0,0 +1,49 @@ +//! Block header data primitive. + +use core::fmt; + +use alloy_primitives::Sealable; +use reth_codecs::Compact; + +/// Helper trait that unifies all behaviour required by block header to support full node +/// operations. +pub trait FullBlockHeader: BlockHeader + Compact {} + +impl FullBlockHeader for T where T: BlockHeader + Compact {} + +/// Abstraction of a block header. 
+pub trait BlockHeader: + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + alloy_consensus::BlockHeader + + Sealable +{ +} + +impl BlockHeader for T where + T: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + alloy_consensus::BlockHeader + + Sealable +{ +} diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 125d587e42a..185b61e9782 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -1,19 +1,19 @@ //! Block abstraction. pub mod body; +pub mod header; use alloc::{fmt, vec::Vec}; -use alloy_consensus::BlockHeader; -use alloy_primitives::{Address, Sealable, B256}; +use alloy_primitives::{Address, B256}; use reth_codecs::Compact; -use crate::BlockBody; +use crate::{BlockBody, BlockHeader, FullBlockHeader}; /// Helper trait that unifies all behaviour required by block to support full node operations. pub trait FullBlock: Block + Compact {} -impl FullBlock for T where T: Block + Compact {} +impl FullBlock for T where T: Block + Compact {} /// Abstraction of block data type. // todo: make sealable super-trait, depends on @@ -34,7 +34,7 @@ pub trait Block: + Into<(Self::Header, Self::Body)> { /// Header part of the block. - type Header: BlockHeader + Sealable; + type Header: BlockHeader; /// The block's body contains the transactions in the block. 
type Body: BlockBody; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 6208f913c9c..3d8aea04e3d 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -34,7 +34,11 @@ mod integer_list; pub use integer_list::{IntegerList, IntegerListError}; pub mod block; -pub use block::{body::BlockBody, Block, FullBlock}; +pub use block::{ + body::BlockBody, + header::{BlockHeader, FullBlockHeader}, + Block, FullBlock, +}; mod withdrawal; pub use withdrawal::Withdrawal; @@ -56,7 +60,7 @@ pub use tx_type::TxType; pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{BlockHeader, Header, HeaderError, SealedHeader}; +pub use header::{Header, HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. /// From 24b3e63ab3a43674f77ada8d19372e9759ff7e11 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 11 Nov 2024 20:35:01 +0400 Subject: [PATCH 415/970] feat: make Consensus trait generic over block parts (#12451) --- crates/consensus/common/src/validation.rs | 47 ++++++++++++- crates/consensus/consensus/src/lib.rs | 34 +++++++--- crates/consensus/consensus/src/noop.rs | 25 +++++-- crates/consensus/consensus/src/test_utils.rs | 51 +++++++++++--- crates/ethereum/consensus/src/lib.rs | 14 +++- .../src/headers/reverse_headers.rs | 2 +- crates/net/network/src/fetch/mod.rs | 4 +- crates/net/p2p/src/full_block.rs | 66 ++++--------------- crates/net/p2p/src/test_utils/headers.rs | 6 +- crates/optimism/consensus/src/lib.rs | 18 +++-- crates/primitives-traits/src/header/sealed.rs | 14 ++-- crates/primitives/src/block.rs | 52 ++++++++++----- 12 files changed, 219 insertions(+), 114 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 5a74433e58b..c10116f2276 100644 --- a/crates/consensus/common/src/validation.rs +++ 
b/crates/consensus/common/src/validation.rs @@ -4,7 +4,9 @@ use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader}; +use reth_primitives::{ + BlockBody, EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader, +}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. @@ -73,6 +75,49 @@ pub fn validate_cancun_gas(block: &SealedBlock) -> Result<(), ConsensusError> { Ok(()) } +/// Ensures the block response data matches the header. +/// +/// This ensures the body response items match the header's hashes: +/// - ommer hash +/// - transaction root +/// - withdrawals root +pub fn validate_body_against_header( + body: &BlockBody, + header: &SealedHeader, +) -> Result<(), ConsensusError> { + let ommers_hash = body.calculate_ommers_root(); + if header.ommers_hash != ommers_hash { + return Err(ConsensusError::BodyOmmersHashDiff( + GotExpected { got: ommers_hash, expected: header.ommers_hash }.into(), + )) + } + + let tx_root = body.calculate_tx_root(); + if header.transactions_root != tx_root { + return Err(ConsensusError::BodyTransactionRootDiff( + GotExpected { got: tx_root, expected: header.transactions_root }.into(), + )) + } + + match (header.withdrawals_root, &body.withdrawals) { + (Some(header_withdrawals_root), Some(withdrawals)) => { + let withdrawals = withdrawals.as_slice(); + let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); + if withdrawals_root != header_withdrawals_root { + return Err(ConsensusError::BodyWithdrawalsRootDiff( + GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), + )) + } + } + (None, None) => { + // this is ok because we assume the fork is 
not active in this case + } + _ => return Err(ConsensusError::WithdrawalsRootUnexpected), + } + + Ok(()) +} + /// Validate a block without regard for state: /// /// - Compares the ommer hash in the block header to the block body diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 4bf5da3b152..91ec42608c1 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -15,8 +15,8 @@ use alloc::{fmt::Debug, vec::Vec}; use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockWithSenders, GotExpected, GotExpectedBoxed, Header, - InvalidTransactionError, Receipt, SealedBlock, SealedHeader, + constants::MINIMUM_GAS_LIMIT, BlockBody, BlockWithSenders, GotExpected, GotExpectedBoxed, + Header, InvalidTransactionError, Receipt, SealedBlock, SealedHeader, }; /// A consensus implementation that does nothing. @@ -44,11 +44,11 @@ impl<'a> PostExecutionInput<'a> { /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] -pub trait Consensus: Debug + Send + Sync { +pub trait Consensus: Debug + Send + Sync { /// Validate if header is correct and follows consensus specification. /// /// This is called on standalone header to check if all hashes are correct. - fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError>; + fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError>; /// Validate that the header information regarding parent are correct. /// This checks the block number, timestamp, basefee and gas limit increment. @@ -61,8 +61,8 @@ pub trait Consensus: Debug + Send + Sync { /// Note: Validating header against its parent does not include other Consensus validations. 
fn validate_header_against_parent( &self, - header: &SealedHeader, - parent: &SealedHeader, + header: &SealedHeader, + parent: &SealedHeader, ) -> Result<(), ConsensusError>; /// Validates the given headers @@ -71,7 +71,13 @@ pub trait Consensus: Debug + Send + Sync { /// on its own and valid against its parent. /// /// Note: this expects that the headers are in natural order (ascending block number) - fn validate_header_range(&self, headers: &[SealedHeader]) -> Result<(), HeaderConsensusError> { + fn validate_header_range( + &self, + headers: &[SealedHeader], + ) -> Result<(), HeaderConsensusError> + where + H: Clone, + { if let Some((initial_header, remaining_headers)) = headers.split_first() { self.validate_header(initial_header) .map_err(|e| HeaderConsensusError(e, initial_header.clone()))?; @@ -94,10 +100,17 @@ pub trait Consensus: Debug + Send + Sync { /// Note: validating headers with TD does not include other Consensus validation. fn validate_header_with_total_difficulty( &self, - header: &Header, + header: &H, total_difficulty: U256, ) -> Result<(), ConsensusError>; + /// Ensures that body field values match the header. + fn validate_body_against_header( + &self, + body: &B, + header: &SealedHeader, + ) -> Result<(), ConsensusError>; + /// Validate a block disregarding world state, i.e. things that can be checked before sender /// recovery and execution. /// @@ -107,7 +120,8 @@ pub trait Consensus: Debug + Send + Sync { /// **This should not be called for the genesis block**. /// /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError>; + fn validate_block_pre_execution(&self, block: &SealedBlock) + -> Result<(), ConsensusError>; /// Validate a block considering world state, i.e. things that can not be checked before /// execution. 
@@ -407,4 +421,4 @@ impl From for ConsensusError { /// `HeaderConsensusError` combines a `ConsensusError` with the `SealedHeader` it relates to. #[derive(derive_more::Display, derive_more::Error, Debug)] #[display("Consensus error: {_0}, Invalid header: {_1:?}")] -pub struct HeaderConsensusError(ConsensusError, SealedHeader); +pub struct HeaderConsensusError(ConsensusError, SealedHeader); diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 53bdb72afb2..9b72f89b176 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,34 +1,45 @@ use crate::{Consensus, ConsensusError, PostExecutionInput}; use alloy_primitives::U256; -use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; /// A Consensus implementation that does nothing. #[derive(Debug, Copy, Clone, Default)] #[non_exhaustive] pub struct NoopConsensus; -impl Consensus for NoopConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { +impl Consensus for NoopConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { Ok(()) } fn validate_header_against_parent( &self, - _header: &SealedHeader, - _parent: &SealedHeader, + _header: &SealedHeader, + _parent: &SealedHeader, ) -> Result<(), ConsensusError> { Ok(()) } fn validate_header_with_total_difficulty( &self, - _header: &Header, + _header: &H, _total_difficulty: U256, ) -> Result<(), ConsensusError> { Ok(()) } - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + fn validate_body_against_header( + &self, + _body: &B, + _header: &SealedHeader, + ) -> Result<(), ConsensusError> { + Ok(()) + } + + fn validate_block_pre_execution( + &self, + _block: &SealedBlock, + ) -> Result<(), ConsensusError> { Ok(()) } diff --git a/crates/consensus/consensus/src/test_utils.rs 
b/crates/consensus/consensus/src/test_utils.rs index 43694720917..52926ec323e 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,18 +1,25 @@ use crate::{Consensus, ConsensusError, PostExecutionInput}; use alloy_primitives::U256; use core::sync::atomic::{AtomicBool, Ordering}; -use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; /// Consensus engine implementation for testing #[derive(Debug)] pub struct TestConsensus { /// Flag whether the header validation should purposefully fail fail_validation: AtomicBool, + /// Separate flag for setting whether `validate_body_against_header` should fail. It is needed + /// for testing networking logic for which the body failing this check is getting completely + /// rejected while more high-level failures are handled by the sync logic. + fail_body_against_header: AtomicBool, } impl Default for TestConsensus { fn default() -> Self { - Self { fail_validation: AtomicBool::new(false) } + Self { + fail_validation: AtomicBool::new(false), + fail_body_against_header: AtomicBool::new(false), + } } } @@ -24,12 +31,23 @@ impl TestConsensus { /// Update the validation flag. pub fn set_fail_validation(&self, val: bool) { - self.fail_validation.store(val, Ordering::SeqCst) + self.fail_validation.store(val, Ordering::SeqCst); + self.fail_body_against_header.store(val, Ordering::SeqCst); + } + + /// Returns the body validation flag. + pub fn fail_body_against_header(&self) -> bool { + self.fail_body_against_header.load(Ordering::SeqCst) + } + + /// Update the body validation flag. 
+ pub fn set_fail_body_against_header(&self, val: bool) { + self.fail_body_against_header.store(val, Ordering::SeqCst); } } -impl Consensus for TestConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { +impl Consensus for TestConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { @@ -39,8 +57,8 @@ impl Consensus for TestConsensus { fn validate_header_against_parent( &self, - _header: &SealedHeader, - _parent: &SealedHeader, + _header: &SealedHeader, + _parent: &SealedHeader, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -51,7 +69,7 @@ impl Consensus for TestConsensus { fn validate_header_with_total_difficulty( &self, - _header: &Header, + _header: &H, _total_difficulty: U256, ) -> Result<(), ConsensusError> { if self.fail_validation() { @@ -61,7 +79,22 @@ impl Consensus for TestConsensus { } } - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + fn validate_body_against_header( + &self, + _body: &B, + _header: &SealedHeader, + ) -> Result<(), ConsensusError> { + if self.fail_body_against_header() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_block_pre_execution( + &self, + _block: &SealedBlock, + ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 07c2a71e8cf..d5cf692928f 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -15,11 +15,11 @@ use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_consensus_common::validation::{ validate_4844_header_standalone, validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, 
- validate_against_parent_timestamp, validate_block_pre_execution, validate_header_base_fee, - validate_header_extradata, validate_header_gas, + validate_against_parent_timestamp, validate_block_pre_execution, validate_body_against_header, + validate_header_base_fee, validate_header_extradata, validate_header_gas, }; use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockWithSenders, Header, SealedBlock, SealedHeader, + constants::MINIMUM_GAS_LIMIT, BlockBody, BlockWithSenders, Header, SealedBlock, SealedHeader, }; use std::{fmt::Debug, sync::Arc, time::SystemTime}; @@ -212,6 +212,14 @@ impl Consensu Ok(()) } + fn validate_body_against_header( + &self, + body: &BlockBody, + header: &SealedHeader, + ) -> Result<(), ConsensusError> { + validate_body_against_header(body, header) + } + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { validate_block_pre_execution(block, &self.chain_spec) } diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 941d140b39d..f0c28dc5d9f 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -1310,7 +1310,7 @@ mod tests { fn test_head_update() { let client = Arc::new(TestHeadersClient::default()); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let mut downloader = ReverseHeadersDownloaderBuilder::default() .build(Arc::clone(&client), Arc::new(TestConsensus::default())); diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index f5c0006bc3a..d37fa8b4f4a 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -473,7 +473,6 @@ mod tests { use super::*; use crate::{peers::PeersManager, PeersConfig}; use alloy_primitives::B512; - use reth_primitives::SealedHeader; use std::future::poll_fn; #[tokio::test(flavor = "multi_thread")] @@ 
-590,8 +589,7 @@ mod tests { }, response: tx, }; - let mut header = SealedHeader::default().unseal(); - header.number = 0u64; + let header = Header { number: 0, ..Default::default() }; (req, header) }; diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index e5129b68674..a61d4ea126d 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -6,10 +6,10 @@ use crate::{ BlockClient, }; use alloy_primitives::{Sealable, B256}; -use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus::Consensus; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::WithPeerId; -use reth_primitives::{BlockBody, GotExpected, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader}; use std::{ cmp::Reverse, collections::{HashMap, VecDeque}, @@ -55,6 +55,7 @@ where let client = self.client.clone(); FetchFullBlockFuture { hash, + consensus: self.consensus.clone(), request: FullBlockRequest { header: Some(client.get_header(hash.into())), body: Some(client.get_block_body(hash)), @@ -110,6 +111,7 @@ where Client: BlockClient, { client: Client, + consensus: Arc, hash: B256, request: FullBlockRequest, header: Option, @@ -142,7 +144,8 @@ where BodyResponse::Validated(body) => Some(SealedBlock::new(header, body)), BodyResponse::PendingValidation(resp) => { // ensure the block is valid, else retry - if let Err(err) = ensure_valid_body_response(&header, resp.data()) { + if let Err(err) = self.consensus.validate_body_against_header(resp.data(), &header) + { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body"); self.client.report_bad_message(resp.peer_id()); self.header = Some(header); @@ -156,7 +159,7 @@ where fn on_block_response(&mut self, resp: WithPeerId) { if let Some(ref header) = self.header { - if let Err(err) = ensure_valid_body_response(header, resp.data()) { + if let Err(err) = self.consensus.validate_body_against_header(resp.data(), 
header) { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body"); self.client.report_bad_message(resp.peer_id()); return @@ -306,50 +309,6 @@ enum BodyResponse { /// Still needs to be validated against header PendingValidation(WithPeerId), } - -/// Ensures the block response data matches the header. -/// -/// This ensures the body response items match the header's hashes: -/// - ommer hash -/// - transaction root -/// - withdrawals root -fn ensure_valid_body_response( - header: &SealedHeader, - block: &BlockBody, -) -> Result<(), ConsensusError> { - let ommers_hash = block.calculate_ommers_root(); - if header.ommers_hash != ommers_hash { - return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: header.ommers_hash }.into(), - )) - } - - let tx_root = block.calculate_tx_root(); - if header.transactions_root != tx_root { - return Err(ConsensusError::BodyTransactionRootDiff( - GotExpected { got: tx_root, expected: header.transactions_root }.into(), - )) - } - - match (header.withdrawals_root, &block.withdrawals) { - (Some(header_withdrawals_root), Some(withdrawals)) => { - let withdrawals = withdrawals.as_slice(); - let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); - if withdrawals_root != header_withdrawals_root { - return Err(ConsensusError::BodyWithdrawalsRootDiff( - GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), - )) - } - } - (None, None) => { - // this is ok because we assume the fork is not active in this case - } - _ => return Err(ConsensusError::WithdrawalsRootUnexpected), - } - - Ok(()) -} - /// A future that downloads a range of full blocks from the network. /// /// This first fetches the headers for the given range using the inner `Client`. 
Once the request @@ -446,7 +405,9 @@ where BodyResponse::Validated(body) => body, BodyResponse::PendingValidation(resp) => { // ensure the block is valid, else retry - if let Err(err) = ensure_valid_body_response(header, resp.data()) { + if let Err(err) = + self.consensus.validate_body_against_header(resp.data(), header) + { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body in range response"); self.client.report_bad_message(resp.peer_id()); @@ -695,7 +656,7 @@ mod tests { #[tokio::test] async fn download_single_full_block() { let client = TestFullBlockClient::default(); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); let client = FullBlockClient::test_client(client); @@ -707,7 +668,7 @@ mod tests { #[tokio::test] async fn download_single_full_block_range() { let client = TestFullBlockClient::default(); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); let client = FullBlockClient::test_client(client); @@ -722,7 +683,7 @@ mod tests { client: &TestFullBlockClient, range: Range, ) -> (SealedHeader, BlockBody) { - let mut sealed_header = SealedHeader::default(); + let mut sealed_header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); for _ in range { let (mut header, hash) = sealed_header.split(); @@ -785,6 +746,7 @@ mod tests { let test_consensus = reth_consensus::test_utils::TestConsensus::default(); test_consensus.set_fail_validation(true); + test_consensus.set_fail_body_against_header(false); let client = FullBlockClient::new(client, Arc::new(test_consensus)); let received = client.get_full_block_range(header.hash(), range_length as u64).await; diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index e61183d22e4..4f603f6339b 
100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -143,8 +143,10 @@ impl Stream for TestDownload { return Poll::Ready(None) } - let empty = SealedHeader::default(); - if let Err(error) = this.consensus.validate_header_against_parent(&empty, &empty) { + let empty: SealedHeader = SealedHeader::default(); + if let Err(error) = + Consensus::<_>::validate_header_against_parent(&this.consensus, &empty, &empty) + { this.done = true; return Poll::Ready(Some(Err(DownloadError::HeaderValidation { hash: empty.hash(), diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 565294358b8..476b259529e 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -15,13 +15,15 @@ use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_consensus_common::validation::{ validate_against_parent_4844, validate_against_parent_eip1559_base_fee, - validate_against_parent_hash_number, validate_against_parent_timestamp, validate_cancun_gas, - validate_header_base_fee, validate_header_extradata, validate_header_gas, - validate_shanghai_withdrawals, + validate_against_parent_hash_number, validate_against_parent_timestamp, + validate_body_against_header, validate_cancun_gas, validate_header_base_fee, + validate_header_extradata, validate_header_gas, validate_shanghai_withdrawals, }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardforks; -use reth_primitives::{BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader}; +use reth_primitives::{ + BlockBody, BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader, +}; use std::{sync::Arc, time::SystemTime}; mod proof; @@ -119,6 +121,14 @@ impl Consensus for OpBeaconConsensus { Ok(()) } + fn validate_body_against_header( + &self, + body: &BlockBody, + header: &SealedHeader, + ) -> Result<(), ConsensusError> { + 
validate_body_against_header(body, header) + } + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { // Check ommers hash let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.body.ommers); diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 7552ece31f1..799f698f1c5 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -30,12 +30,10 @@ impl SealedHeader { pub const fn new(header: H, hash: BlockHash) -> Self { Self { header, hash } } -} -impl SealedHeader { /// Returns the sealed Header fields. #[inline] - pub const fn header(&self) -> &Header { + pub const fn header(&self) -> &H { &self.header } @@ -46,15 +44,17 @@ impl SealedHeader { } /// Extract raw header that can be modified. - pub fn unseal(self) -> Header { + pub fn unseal(self) -> H { self.header } /// This is the inverse of [`Header::seal_slow`] which returns the raw header and hash. - pub fn split(self) -> (Header, BlockHash) { + pub fn split(self) -> (H, BlockHash) { (self.header, self.hash) } +} +impl SealedHeader { /// Return the number hash tuple. pub fn num_hash(&self) -> BlockNumHash { BlockNumHash::new(self.number, self.hash) @@ -67,9 +67,9 @@ impl SealedHeader { } } -impl Default for SealedHeader { +impl Default for SealedHeader { fn default() -> Self { - let sealed = Header::default().seal_slow(); + let sealed = H::default().seal_slow(); let (header, hash) = sealed.into_parts(); Self { header, hash } } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 54bcb27293c..b54a7bd0f78 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -256,22 +256,21 @@ impl BlockWithSenders { /// Sealed Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. 
-#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp, 32))] -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)] -pub struct SealedBlock { +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] +pub struct SealedBlock { /// Locked block header. #[deref] #[deref_mut] - pub header: SealedHeader, + pub header: SealedHeader, /// Block body. - pub body: BlockBody, + pub body: B, } -impl SealedBlock { +impl SealedBlock { /// Create a new sealed block instance using the sealed header and block body. #[inline] - pub const fn new(header: SealedHeader, body: BlockBody) -> Self { + pub const fn new(header: SealedHeader, body: B) -> Self { Self { header, body } } @@ -281,16 +280,18 @@ impl SealedBlock { self.header.hash() } - /// Splits the sealed block into underlying components + /// Splits the [`BlockBody`] and [`SealedHeader`] into separate components #[inline] - pub fn split(self) -> (SealedHeader, Vec, Vec
) { - (self.header, self.body.transactions, self.body.ommers) + pub fn split_header_body(self) -> (SealedHeader, B) { + (self.header, self.body) } +} - /// Splits the [`BlockBody`] and [`SealedHeader`] into separate components +impl SealedBlock { + /// Splits the sealed block into underlying components #[inline] - pub fn split_header_body(self) -> (SealedHeader, BlockBody) { - (self.header, self.body) + pub fn split(self) -> (SealedHeader, Vec, Vec
) { + (self.header, self.body.transactions, self.body.ommers) } /// Returns an iterator over all blob transactions of the block @@ -436,6 +437,27 @@ impl From for Block { } } +impl Default for SealedBlock +where + SealedHeader: Default, + B: Default, +{ + fn default() -> Self { + Self { header: Default::default(), body: Default::default() } + } +} + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a, H, B> arbitrary::Arbitrary<'a> for SealedBlock +where + SealedHeader: arbitrary::Arbitrary<'a>, + B: arbitrary::Arbitrary<'a>, +{ + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + Ok(Self { header: u.arbitrary()?, body: u.arbitrary()? }) + } +} + /// Sealed block with senders recovered from transactions. #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)] pub struct SealedBlockWithSenders { @@ -503,7 +525,7 @@ impl SealedBlockWithSenders { #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for SealedBlockWithSenders { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let block = SealedBlock::arbitrary(u)?; + let block: SealedBlock = SealedBlock::arbitrary(u)?; let senders = block .body @@ -1083,7 +1105,7 @@ mod tests { #[test] fn test_default_seal() { - let block = SealedBlock::default(); + let block: SealedBlock = SealedBlock::default(); let sealed = block.hash(); let block = block.unseal(); let block = block.seal_slow(); From eccff7d24b991746af8a51346cd54361b5992bca Mon Sep 17 00:00:00 2001 From: Tuan Tran Date: Mon, 11 Nov 2024 23:54:00 +0700 Subject: [PATCH 416/970] chore(reth_primitives): Use trait for `size` methods in primitive types (#12201) Co-authored-by: Matthias Seitz --- Cargo.lock | 4 +++ crates/net/downloaders/Cargo.toml | 4 ++- crates/net/downloaders/src/bodies/bodies.rs | 1 + crates/net/downloaders/src/bodies/request.rs | 1 + crates/net/p2p/Cargo.toml | 8 +++-- crates/net/p2p/src/bodies/response.rs | 21 +++++++------ 
crates/primitives-traits/src/header/sealed.rs | 6 +++- crates/primitives-traits/src/lib.rs | 4 +++ crates/primitives-traits/src/size.rs | 5 +++ crates/primitives/src/block.rs | 23 +++++++++----- crates/primitives/src/transaction/mod.rs | 31 ++++++++++--------- crates/prune/prune/Cargo.toml | 1 + .../src/segments/user/receipts_by_logs.rs | 2 +- crates/transaction-pool/Cargo.toml | 10 ++++-- .../transaction-pool/src/test_utils/mock.rs | 2 +- crates/transaction-pool/src/validate/eth.rs | 3 +- 16 files changed, 84 insertions(+), 42 deletions(-) create mode 100644 crates/primitives-traits/src/size.rs diff --git a/Cargo.lock b/Cargo.lock index 0c64ed9a910..0990a469950 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7039,6 +7039,7 @@ dependencies = [ "reth-network-p2p", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", @@ -7828,6 +7829,7 @@ dependencies = [ "reth-network-peers", "reth-network-types", "reth-primitives", + "reth-primitives-traits", "reth-storage-errors", "tokio", "tracing", @@ -8570,6 +8572,7 @@ dependencies = [ "reth-errors", "reth-exex-types", "reth-metrics", + "reth-primitives-traits", "reth-provider", "reth-prune-types", "reth-stages", @@ -9192,6 +9195,7 @@ dependencies = [ "reth-fs-util", "reth-metrics", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 272db6fc6d1..69a59f698de 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -18,6 +18,7 @@ reth-consensus.workspace = true reth-network-p2p.workspace = true reth-network-peers.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-tasks.workspace = true @@ -80,5 +81,6 @@ test-utils = [ "reth-chainspec/test-utils", "reth-primitives/test-utils", "reth-db-api?/test-utils", - 
"reth-provider/test-utils" + "reth-provider/test-utils", + "reth-primitives-traits/test-utils" ] diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 314f3a09084..af113fdb38b 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -14,6 +14,7 @@ use reth_network_p2p::{ error::{DownloadError, DownloadResult}, }; use reth_primitives::SealedHeader; +use reth_primitives_traits::size::InMemorySize; use reth_storage_api::HeaderProvider; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index c2b36732b51..5ab44ed0811 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -9,6 +9,7 @@ use reth_network_p2p::{ }; use reth_network_peers::{PeerId, WithPeerId}; use reth_primitives::{BlockBody, GotExpected, SealedBlock, SealedHeader}; +use reth_primitives_traits::InMemorySize; use std::{ collections::VecDeque, mem, diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index 3b6d74c9dbe..89855396925 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-eth-wire-types.workspace = true reth-consensus.workspace = true reth-network-peers.workspace = true @@ -32,7 +33,6 @@ tokio = { workspace = true, features = ["sync"] } auto_impl.workspace = true tracing.workspace = true derive_more.workspace = true - parking_lot = { workspace = true, optional = true } [dev-dependencies] @@ -47,11 +47,13 @@ test-utils = [ "reth-consensus/test-utils", "parking_lot", "reth-network-types/test-utils", - "reth-primitives/test-utils" + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils" ] std = [ "reth-consensus/std", "reth-primitives/std", 
"alloy-eips/std", - "alloy-primitives/std" + "alloy-primitives/std", + "reth-primitives-traits/std" ] diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 8ae840fbf66..0a45008acd8 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -1,5 +1,6 @@ use alloy_primitives::{BlockNumber, U256}; use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives_traits::InMemorySize; /// The block response #[derive(PartialEq, Eq, Debug, Clone)] @@ -19,15 +20,6 @@ impl BlockResponse { } } - /// Calculates a heuristic for the in-memory size of the [`BlockResponse`]. - #[inline] - pub fn size(&self) -> usize { - match self { - Self::Full(block) => SealedBlock::size(block), - Self::Empty(header) => SealedHeader::size(header), - } - } - /// Return the block number pub fn block_number(&self) -> BlockNumber { self.header().number @@ -41,3 +33,14 @@ impl BlockResponse { } } } + +impl InMemorySize for BlockResponse { + /// Calculates a heuristic for the in-memory size of the [`BlockResponse`]. + #[inline] + fn size(&self) -> usize { + match self { + Self::Full(block) => SealedBlock::size(block), + Self::Empty(header) => SealedHeader::size(header), + } + } +} diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 799f698f1c5..995c13748b3 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,3 +1,5 @@ +use crate::InMemorySize; + use super::Header; use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; @@ -59,10 +61,12 @@ impl SealedHeader { pub fn num_hash(&self) -> BlockNumHash { BlockNumHash::new(self.number, self.hash) } +} +impl InMemorySize for SealedHeader { /// Calculates a heuristic for the in-memory size of the [`SealedHeader`]. 
#[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.header.size() + mem::size_of::() } } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 3d8aea04e3d..6fcb725cfa4 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -73,3 +73,7 @@ pub use header::{Header, HeaderError, SealedHeader}; pub mod serde_bincode_compat { pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; } + +/// Heuristic size trait +pub mod size; +pub use size::InMemorySize; diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs new file mode 100644 index 00000000000..173f8cedc9e --- /dev/null +++ b/crates/primitives-traits/src/size.rs @@ -0,0 +1,5 @@ +/// Trait for calculating a heuristic for the in-memory size of a struct. +pub trait InMemorySize { + /// Returns a heuristic for the in-memory size of a struct. + fn size(&self) -> usize; +} diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index b54a7bd0f78..275f86c5b45 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -6,6 +6,7 @@ use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; +use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; /// Ethereum full block. @@ -84,10 +85,12 @@ impl Block { let senders = self.senders()?; Some(BlockWithSenders { block: self, senders }) } +} +impl InMemorySize for Block { /// Calculates a heuristic for the in-memory size of the [`Block`]. 
#[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.header.size() + self.body.size() } } @@ -376,12 +379,6 @@ impl SealedBlock { Block { header: self.header.unseal(), body: self.body } } - /// Calculates a heuristic for the in-memory size of the [`SealedBlock`]. - #[inline] - pub fn size(&self) -> usize { - self.header.size() + self.body.size() - } - /// Calculates the total gas used by blob transactions in the sealed block. pub fn blob_gas_used(&self) -> u64 { self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() @@ -431,6 +428,14 @@ impl SealedBlock { } } +impl InMemorySize for SealedBlock { + /// Calculates a heuristic for the in-memory size of the [`SealedBlock`]. + #[inline] + fn size(&self) -> usize { + self.header.size() + self.body.size() + } +} + impl From for Block { fn from(block: SealedBlock) -> Self { block.unseal() @@ -625,10 +630,12 @@ impl BlockBody { pub fn transactions(&self) -> impl Iterator + '_ { self.transactions.iter() } +} +impl InMemorySize for BlockBody { /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. 
#[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.transactions.iter().map(TransactionSigned::size).sum::() + self.transactions.capacity() * core::mem::size_of::() + self.ommers.iter().map(Header::size).sum::() + diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 685fd29a3c0..485de92ea89 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -24,6 +24,7 @@ use once_cell::sync::Lazy as LazyLock; #[cfg(feature = "optimism")] use op_alloy_consensus::DepositTransaction; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; +use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; use signature::decode_with_eip155_chain_id; #[cfg(feature = "std")] @@ -472,20 +473,6 @@ impl Transaction { } } - /// Calculates a heuristic for the in-memory size of the [Transaction]. - #[inline] - pub fn size(&self) -> usize { - match self { - Self::Legacy(tx) => tx.size(), - Self::Eip2930(tx) => tx.size(), - Self::Eip1559(tx) => tx.size(), - Self::Eip4844(tx) => tx.size(), - Self::Eip7702(tx) => tx.size(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.size(), - } - } - /// Returns true if the transaction is a legacy transaction. #[inline] pub const fn is_legacy(&self) -> bool { @@ -557,6 +544,22 @@ impl Transaction { } } +impl InMemorySize for Transaction { + /// Calculates a heuristic for the in-memory size of the [Transaction]. 
+ #[inline] + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip4844(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.size(), + } + } +} + #[cfg(any(test, feature = "reth-codec"))] impl reth_codecs::Compact for Transaction { // Serializes the TxType to the buffer if necessary, returning 2 bits of the type as an diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 2f2a37d5ba6..4df9ace8133 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -41,6 +41,7 @@ rustc-hash.workspace = true # reth reth-db = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true reth-tracing.workspace = true diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index ee2accee1b3..ee404b074c3 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -10,7 +10,6 @@ use reth_prune_types::{ SegmentOutput, MINIMUM_PRUNING_DISTANCE, }; use tracing::{instrument, trace}; - #[derive(Debug)] pub struct ReceiptsByLogs { config: ReceiptsLogPruneConfig, @@ -223,6 +222,7 @@ mod tests { use assert_matches::assert_matches; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; + use reth_primitives_traits::InMemorySize; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader, TransactionsProvider}; use reth_prune_types::{PruneLimiter, PruneMode, PruneSegment, ReceiptsLogPruneConfig}; use reth_stages::test_utils::{StorageKind, TestStageDB}; diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 
1bfb10d86d7..7c760c81c54 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -17,6 +17,7 @@ reth-chain-state.workspace = true reth-chainspec.workspace = true reth-eth-wire-types.workspace = true reth-primitives = { workspace = true, features = ["c-kzg", "secp256k1"] } +reth-primitives-traits.workspace = true reth-execution-types.workspace = true reth-fs-util.workspace = true reth-storage-api.workspace = true @@ -50,6 +51,7 @@ bitflags.workspace = true auto_impl.workspace = true smallvec.workspace = true + # testing rand = { workspace = true, optional = true } paste = { workspace = true, optional = true } @@ -84,7 +86,7 @@ serde = [ "parking_lot/serde", "rand?/serde", "revm/serde", - "smallvec/serde" + "smallvec/serde", ] test-utils = [ "rand", @@ -94,7 +96,8 @@ test-utils = [ "reth-chainspec/test-utils", "reth-primitives/test-utils", "reth-provider/test-utils", - "revm/test-utils" + "revm/test-utils", + "reth-primitives-traits/test-utils", ] arbitrary = [ "proptest", @@ -107,7 +110,8 @@ arbitrary = [ "alloy-primitives/arbitrary", "bitflags/arbitrary", "revm/arbitrary", - "smallvec/arbitrary" + "reth-primitives-traits/arbitrary", + "smallvec/arbitrary", ] [[bench]] diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 92f74665279..fc43349f3f1 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -28,7 +28,6 @@ use reth_primitives::{ transaction::TryFromRecoveredTransactionError, PooledTransactionsElementEcRecovered, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, }; - use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; /// A transaction pool implementation using [`MockOrdering`] for transaction ordering. 
@@ -1007,6 +1006,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { use proptest::prelude::Strategy; use proptest_arbitrary_interop::arb; + use reth_primitives_traits::size::InMemorySize; arb::<(Transaction, Address, B256)>() .prop_map(|(tx, sender, tx_hash)| match &tx { diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 62e9f3f2917..d5f7101eb55 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -17,7 +17,8 @@ use alloy_consensus::constants::{ }; use alloy_eips::eip4844::MAX_BLOBS_PER_BLOCK; use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_primitives::{GotExpected, InvalidTransactionError, SealedBlock}; +use reth_primitives::{InvalidTransactionError, SealedBlock}; +use reth_primitives_traits::GotExpected; use reth_storage_api::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; use revm::{ From 2f8a2f0fbb29019c11a0e495528146fdcc09229c Mon Sep 17 00:00:00 2001 From: malik Date: Mon, 11 Nov 2024 19:12:24 +0100 Subject: [PATCH 417/970] chore: remove unnecessary clone (#12455) --- crates/chain-state/src/in_memory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 6bef197bea9..bfae8113e3e 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -338,7 +338,7 @@ impl CanonicalInMemoryState { // re-insert the blocks in natural order and connect them to their parent blocks for block in old_blocks { let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = BlockState::with_parent(block.clone(), parent); + let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); From 5edca402b04cc27c5dad8b62c34d44210040f2e8 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?=C3=96zg=C3=BCn=20=C3=96zerk?= Date: Tue, 12 Nov 2024 12:07:05 +0300 Subject: [PATCH 418/970] feat(op): define OpTxType (#12443) --- Cargo.lock | 6 + crates/optimism/primitives/Cargo.toml | 6 + crates/optimism/primitives/src/lib.rs | 1 + crates/optimism/primitives/src/op_tx_type.rs | 189 +++++++++++++++++++ crates/primitives-traits/src/tx_type.rs | 22 +++ 5 files changed, 224 insertions(+) create mode 100644 crates/optimism/primitives/src/op_tx_type.rs diff --git a/Cargo.lock b/Cargo.lock index 0990a469950..f2a9310c485 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8327,8 +8327,14 @@ name = "reth-optimism-primitives" version = "1.1.1" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", + "alloy-rlp", + "bytes", + "derive_more 1.0.0", + "op-alloy-consensus", "reth-primitives", + "reth-primitives-traits", ] [[package]] diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index a2d4c20a8b7..bc11c358504 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -13,5 +13,11 @@ workspace = true [dependencies] reth-primitives.workspace = true +reth-primitives-traits.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +op-alloy-consensus.workspace = true +alloy-eips.workspace = true +alloy-rlp.workspace = true +derive_more.workspace = true +bytes.workspace = true diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 659900b9adb..f8d8e511498 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -8,3 +8,4 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod bedrock; +pub mod op_tx_type; diff --git a/crates/optimism/primitives/src/op_tx_type.rs b/crates/optimism/primitives/src/op_tx_type.rs new file mode 100644 index 00000000000..b317bb05c9c --- /dev/null +++ b/crates/optimism/primitives/src/op_tx_type.rs @@ -0,0 +1,189 @@ +//! 
newtype pattern on `op_alloy_consensus::OpTxType`. +//! `OpTxType` implements `reth_primitives_traits::TxType`. +//! This type is required because a `Compact` impl is needed on the deposit tx type. + +use alloy_primitives::{U64, U8}; +use alloy_rlp::{Decodable, Encodable, Error}; +use bytes::BufMut; +use core::fmt::Debug; +use derive_more::{ + derive::{From, Into}, + Display, +}; +use op_alloy_consensus::OpTxType as AlloyOpTxType; +use std::convert::TryFrom; + +/// Wrapper type for `AlloyOpTxType` to implement `TxType` trait. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] +#[into(u8)] +pub struct OpTxType(AlloyOpTxType); + +impl From for U8 { + fn from(tx_type: OpTxType) -> Self { + Self::from(u8::from(tx_type)) + } +} + +impl TryFrom for OpTxType { + type Error = Error; + + fn try_from(value: u8) -> Result { + AlloyOpTxType::try_from(value) + .map(OpTxType) + .map_err(|_| Error::Custom("Invalid transaction type")) + } +} + +impl Default for OpTxType { + fn default() -> Self { + Self(AlloyOpTxType::Legacy) + } +} + +impl PartialEq for OpTxType { + fn eq(&self, other: &u8) -> bool { + let self_as_u8: u8 = (*self).into(); + &self_as_u8 == other + } +} + +impl TryFrom for OpTxType { + type Error = Error; + + fn try_from(value: u64) -> Result { + if value > u8::MAX as u64 { + return Err(Error::Custom("value out of range")); + } + Self::try_from(value as u8) + } +} + +impl TryFrom for OpTxType { + type Error = Error; + + fn try_from(value: U64) -> Result { + let u64_value: u64 = value.try_into().map_err(|_| Error::Custom("value out of range"))?; + Self::try_from(u64_value) + } +} + +impl Encodable for OpTxType { + fn length(&self) -> usize { + let value: u8 = (*self).into(); + value.length() + } + + fn encode(&self, out: &mut dyn BufMut) { + let value: u8 = (*self).into(); + value.encode(out); + } +} + +impl Decodable for OpTxType { + fn decode(buf: &mut &[u8]) -> Result { + // Decode the u8 value from RLP + let value = if 
buf.is_empty() { + return Err(alloy_rlp::Error::InputTooShort); + } else if buf[0] == 0x80 { + 0 // Special case: RLP encoding for integer 0 is `b"\x80"` + } else { + u8::decode(buf)? + }; + + Self::try_from(value).map_err(|_| alloy_rlp::Error::Custom("Invalid transaction type")) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bytes::BytesMut; + + #[test] + fn test_from_alloy_op_tx_type() { + let alloy_tx = AlloyOpTxType::Legacy; + let op_tx: OpTxType = OpTxType::from(alloy_tx); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_from_op_tx_type_to_u8() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + let tx_type_u8: u8 = op_tx.into(); + assert_eq!(tx_type_u8, AlloyOpTxType::Legacy as u8); + } + + #[test] + fn test_from_op_tx_type_to_u8_u8() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + let tx_type_u8: U8 = op_tx.into(); + assert_eq!(tx_type_u8, U8::from(AlloyOpTxType::Legacy as u8)); + } + + #[test] + fn test_try_from_u8() { + let op_tx = OpTxType::try_from(AlloyOpTxType::Legacy as u8).unwrap(); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_try_from_invalid_u8() { + let invalid_value: u8 = 255; + let result = OpTxType::try_from(invalid_value); + assert_eq!(result, Err(Error::Custom("Invalid transaction type"))); + } + + #[test] + fn test_try_from_u64() { + let op_tx = OpTxType::try_from(AlloyOpTxType::Legacy as u64).unwrap(); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_try_from_u64_out_of_range() { + let result = OpTxType::try_from(u64::MAX); + assert_eq!(result, Err(Error::Custom("value out of range"))); + } + + #[test] + fn test_try_from_u64_within_range() { + let valid_value: U64 = U64::from(AlloyOpTxType::Legacy as u64); + let op_tx = OpTxType::try_from(valid_value).unwrap(); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_default() { + let default_tx = OpTxType::default(); + assert_eq!(default_tx, 
OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_partial_eq_u8() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + assert_eq!(op_tx, AlloyOpTxType::Legacy as u8); + } + + #[test] + fn test_encodable() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + let mut buf = BytesMut::new(); + op_tx.encode(&mut buf); + assert_eq!(buf, BytesMut::from(&[0x80][..])); + } + + #[test] + fn test_decodable_success() { + // Using the RLP-encoded form of 0, which is `b"\x80"` + let mut buf: &[u8] = &[0x80]; + let decoded_tx = OpTxType::decode(&mut buf).unwrap(); + assert_eq!(decoded_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_decodable_invalid() { + let mut buf: &[u8] = &[255]; + let result = OpTxType::decode(&mut buf); + assert!(result.is_err()); + } +} diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs index a25a7d659bd..6ca55879442 100644 --- a/crates/primitives-traits/src/tx_type.rs +++ b/crates/primitives-traits/src/tx_type.rs @@ -26,3 +26,25 @@ pub trait TxType: + Decodable { } + +impl TxType for T where + T: Send + + Sync + + Unpin + + Clone + + Copy + + Default + + fmt::Debug + + fmt::Display + + PartialEq + + Eq + + PartialEq + + Into + + Into + + TryFrom + + TryFrom + + TryFrom + + Encodable + + Decodable +{ +} From f38503c2bc8d5de6e71393a3b232c2b6f3369f29 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:58:36 +0700 Subject: [PATCH 419/970] chore: move `(Full)NodePrimitives` to `reth-primitive-traits` (#12461) Co-authored-by: Emilia Hane --- crates/node/types/src/lib.rs | 41 +++------------------------- crates/primitives-traits/src/lib.rs | 4 +++ crates/primitives-traits/src/node.rs | 38 ++++++++++++++++++++++++++ 3 files changed, 46 insertions(+), 37 deletions(-) create mode 100644 crates/primitives-traits/src/node.rs diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index f2bd16280f8..f8770a3c014 100644 --- 
a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -9,9 +9,11 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] -pub use reth_primitives_traits::{Block, BlockBody, FullBlock, FullReceipt, FullSignedTx}; +pub use reth_primitives_traits::{ + Block, BlockBody, FullBlock, FullNodePrimitives, FullReceipt, FullSignedTx, NodePrimitives, +}; -use core::{fmt, marker::PhantomData}; +use core::marker::PhantomData; use reth_chainspec::EthChainSpec; use reth_db_api::{ @@ -21,41 +23,6 @@ use reth_db_api::{ use reth_engine_primitives::EngineTypes; use reth_trie_db::StateCommitment; -/// Configures all the primitive types of the node. -pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { - /// Block primitive. - type Block: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; - /// Signed version of the transaction type. - type SignedTx: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; - /// A receipt. - type Receipt: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; -} - -impl NodePrimitives for () { - type Block = (); - type SignedTx = (); - type Receipt = (); -} - -/// Helper trait that sets trait bounds on [`NodePrimitives`]. -pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { - /// Block primitive. - type Block: FullBlock>; - /// Signed version of the transaction type. - type SignedTx: FullSignedTx; - /// A receipt. - type Receipt: FullReceipt; -} - -impl NodePrimitives for T -where - T: FullNodePrimitives, -{ - type Block = T::Block; - type SignedTx = T::SignedTx; - type Receipt = T::Receipt; -} - /// The type that configures the essential types of an Ethereum-like node. /// /// This includes the primitive types of a node and chain specification. 
diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 6fcb725cfa4..babc0f42e0b 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -77,3 +77,7 @@ pub mod serde_bincode_compat { /// Heuristic size trait pub mod size; pub use size::InMemorySize; + +/// Node traits +pub mod node; +pub use node::{FullNodePrimitives, NodePrimitives}; diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs new file mode 100644 index 00000000000..921942841d4 --- /dev/null +++ b/crates/primitives-traits/src/node.rs @@ -0,0 +1,38 @@ +use core::fmt; + +use crate::{BlockBody, FullBlock, FullReceipt, FullSignedTx}; + +/// Configures all the primitive types of the node. +pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { + /// Block primitive. + type Block: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + /// Signed version of the transaction type. + type SignedTx: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + /// A receipt. + type Receipt: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; +} + +impl NodePrimitives for () { + type Block = (); + type SignedTx = (); + type Receipt = (); +} + +/// Helper trait that sets trait bounds on [`NodePrimitives`]. +pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { + /// Block primitive. + type Block: FullBlock>; + /// Signed version of the transaction type. + type SignedTx: FullSignedTx; + /// A receipt. 
+ type Receipt: FullReceipt; +} + +impl NodePrimitives for T +where + T: FullNodePrimitives, +{ + type Block = T::Block; + type SignedTx = T::SignedTx; + type Receipt = T::Receipt; +} From c261532a27e99f05c6f5838e2b2acccf9e41ddf4 Mon Sep 17 00:00:00 2001 From: c0np4nn4 Date: Tue, 12 Nov 2024 17:15:52 +0700 Subject: [PATCH 420/970] chore: move trie functions to alloy (#12438) --- Cargo.lock | 3 +++ crates/optimism/consensus/Cargo.toml | 1 + crates/optimism/consensus/src/proof.rs | 2 +- crates/primitives/Cargo.toml | 9 +++++--- crates/primitives/src/proofs.rs | 2 +- crates/trie/common/src/proofs.rs | 3 ++- crates/trie/common/src/root.rs | 32 -------------------------- crates/trie/trie/Cargo.toml | 4 +++- 8 files changed, 17 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2a9310c485..1bf11ac9baa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8191,6 +8191,7 @@ version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-primitives", + "alloy-trie", "reth-chainspec", "reth-consensus", "reth-consensus-common", @@ -8451,6 +8452,7 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types", "alloy-serde", + "alloy-trie", "arbitrary", "assert_matches", "bincode", @@ -9226,6 +9228,7 @@ dependencies = [ "alloy-consensus", "alloy-primitives", "alloy-rlp", + "alloy-trie", "auto_impl", "bincode", "criterion", diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index e2520c89340..0dffceaddca 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -26,6 +26,7 @@ reth-optimism-chainspec.workspace = true # ethereum alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-trie.workspace = true tracing.workspace = true diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs index 813e451da25..18e64a467ff 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -1,10 +1,10 @@ //! 
Helper function for Receipt root calculation for Optimism hardforks. use alloy_primitives::B256; +use alloy_trie::root::ordered_trie_root_with_encoder; use reth_chainspec::ChainSpec; use reth_optimism_forks::OpHardfork; use reth_primitives::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; -use reth_trie_common::root::ordered_trie_root_with_encoder; /// Calculates the receipt root for a header. pub(crate) fn calculate_receipt_root_optimism( diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 5bef33e15ef..1a4c33c7180 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -16,7 +16,6 @@ workspace = true reth-primitives-traits.workspace = true reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true -reth-trie-common.workspace = true revm-primitives = { workspace = true, features = ["serde"] } reth-codecs = { workspace = true, optional = true } @@ -28,6 +27,7 @@ alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-rpc-types = { workspace = true, optional = true } alloy-serde = { workspace = true, optional = true } alloy-eips = { workspace = true, features = ["serde"] } +alloy-trie = { workspace = true, features = ["serde"] } # optimism op-alloy-rpc-types = { workspace = true, optional = true } @@ -66,6 +66,7 @@ reth-chainspec.workspace = true reth-codecs = { workspace = true, features = ["test-utils"] } reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true +reth-trie-common.workspace = true revm-primitives = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } @@ -102,6 +103,7 @@ std = [ "revm-primitives/std", "secp256k1?/std", "serde/std", + "alloy-trie/std" ] reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] asm-keccak = ["alloy-primitives/asm-keccak", "revm-primitives/asm-keccak"] @@ -115,14 +117,15 @@ arbitrary = [ "revm-primitives/arbitrary", 
"secp256k1", "reth-chainspec/arbitrary", - "reth-trie-common/arbitrary", "alloy-consensus/arbitrary", "alloy-primitives/arbitrary", "alloy-rpc-types?/arbitrary", "alloy-serde?/arbitrary", "op-alloy-consensus?/arbitrary", "op-alloy-rpc-types?/arbitrary", - "reth-codecs?/arbitrary" + "reth-codecs?/arbitrary", + "alloy-trie/arbitrary", + "reth-trie-common/arbitrary" ] secp256k1 = ["dep:secp256k1"] c-kzg = [ diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 000244d2c54..10b7bc2530b 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -5,7 +5,7 @@ use alloc::vec::Vec; use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawal}; use alloy_primitives::{keccak256, B256}; -use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; +use alloy_trie::root::{ordered_trie_root, ordered_trie_root_with_encoder}; /// Calculate a transaction root. /// diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index a94b2b96fbd..108910a1384 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -234,11 +234,12 @@ impl StorageProof { #[cfg(any(test, feature = "test-utils"))] pub mod triehash { use alloy_primitives::{keccak256, B256}; + use alloy_rlp::RlpEncodable; use hash_db::Hasher; use plain_hasher::PlainHasher; /// A [Hasher] that calculates a keccak256 hash of the given data. - #[derive(Default, Debug, Clone, PartialEq, Eq)] + #[derive(Default, Debug, Clone, PartialEq, Eq, RlpEncodable)] #[non_exhaustive] pub struct KeccakHasher; diff --git a/crates/trie/common/src/root.rs b/crates/trie/common/src/root.rs index 20f3ba1366d..dbcbf4200d7 100644 --- a/crates/trie/common/src/root.rs +++ b/crates/trie/common/src/root.rs @@ -18,38 +18,6 @@ pub const fn adjust_index_for_rlp(i: usize, len: usize) -> usize { } } -/// Compute a trie root of the collection of rlp encodable items. 
-pub fn ordered_trie_root(items: &[T]) -> B256 { - ordered_trie_root_with_encoder(items, |item, buf| item.encode(buf)) -} - -/// Compute a trie root of the collection of items with a custom encoder. -pub fn ordered_trie_root_with_encoder(items: &[T], mut encode: F) -> B256 -where - F: FnMut(&T, &mut Vec), -{ - if items.is_empty() { - return alloy_trie::EMPTY_ROOT_HASH; - } - - let mut value_buffer = Vec::new(); - - let mut hb = HashBuilder::default(); - let items_len = items.len(); - for i in 0..items_len { - let index = adjust_index_for_rlp(i, items_len); - - let index_buffer = alloy_rlp::encode_fixed_size(&index); - - value_buffer.clear(); - encode(&items[index], &mut value_buffer); - - hb.add_leaf(Nibbles::unpack(&index_buffer), &value_buffer); - } - - hb.root() -} - /// Hashes and sorts account keys, then proceeds to calculating the root hash of the state /// represented as MPT. /// See [`state_root_unsorted`] for more info. diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 134a3055c2b..6136fa8e56b 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -25,6 +25,7 @@ revm.workspace = true alloy-rlp.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-trie.workspace = true # tracing tracing.workspace = true @@ -68,7 +69,8 @@ serde = [ "dep:serde", "alloy-consensus/serde", "alloy-primitives/serde", - "revm/serde" + "revm/serde", + "alloy-trie/serde" ] serde-bincode-compat = [ "serde_with", From bad7a4f0c90aebdc76a45e8fca8d2719b50d1ae1 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Tue, 12 Nov 2024 05:31:32 -0500 Subject: [PATCH 421/970] use result for `TransactionCompact::fill`. 
(#12170) Co-authored-by: Emilia Hane Co-authored-by: dkathiriya --- Cargo.lock | 1 + crates/optimism/rpc/src/error.rs | 8 ++++- crates/optimism/rpc/src/eth/transaction.rs | 12 +++---- crates/rpc/rpc-eth-api/src/core.rs | 3 +- crates/rpc/rpc-eth-api/src/helpers/block.rs | 3 +- crates/rpc/rpc-eth-api/src/helpers/mod.rs | 1 - .../rpc-eth-api/src/helpers/transaction.rs | 6 ++-- crates/rpc/rpc-eth-api/src/lib.rs | 4 ++- crates/rpc/rpc-eth-api/src/types.rs | 15 ++++++-- .../src/error/api.rs} | 9 ++--- .../src/{error.rs => error/mod.rs} | 5 ++- crates/rpc/rpc-eth-types/src/simulate.rs | 11 +++--- crates/rpc/rpc-eth-types/src/transaction.rs | 5 ++- crates/rpc/rpc-types-compat/Cargo.toml | 1 + crates/rpc/rpc-types-compat/src/block.rs | 11 +++--- .../rpc/rpc-types-compat/src/transaction.rs | 14 ++++++-- crates/rpc/rpc/src/eth/filter.rs | 15 +++++--- crates/rpc/rpc/src/eth/helpers/types.rs | 9 +++-- crates/rpc/rpc/src/eth/pubsub.rs | 19 +++++++++-- crates/rpc/rpc/src/txpool.rs | 34 +++++++++++-------- 20 files changed, 124 insertions(+), 62 deletions(-) rename crates/rpc/{rpc-eth-api/src/helpers/error.rs => rpc-eth-types/src/error/api.rs} (87%) rename crates/rpc/rpc-eth-types/src/{error.rs => error/mod.rs} (99%) diff --git a/Cargo.lock b/Cargo.lock index 1bf11ac9baa..38a87ebf930 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8958,6 +8958,7 @@ dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-serde", + "jsonrpsee-types", "reth-primitives", "reth-trie-common", "serde", diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index ffc698b6e98..1dd7a639eac 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -1,6 +1,6 @@ //! RPC errors specific to OP. 
-use alloy_rpc_types_eth::error::EthRpcErrorCode; +use alloy_rpc_types_eth::{error::EthRpcErrorCode, BlockError}; use jsonrpsee_types::error::INTERNAL_ERROR_CODE; use reth_optimism_evm::OpBlockExecutionError; use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; @@ -113,3 +113,9 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> ) } } + +impl From for OpEthApiError { + fn from(error: BlockError) -> Self { + Self::Eth(error.into()) + } +} diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 90e5e33feb7..3ff7cb10df1 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -15,7 +15,7 @@ use reth_rpc_eth_api::{ use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -use crate::{OpEthApi, SequencerClient}; +use crate::{OpEthApi, OpEthApiError, SequencerClient}; impl EthTransactions for OpEthApi where @@ -76,12 +76,13 @@ where N: FullNodeComponents, { type Transaction = Transaction; + type Error = OpEthApiError; fn fill( &self, tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, - ) -> Self::Transaction { + ) -> Result { let from = tx.signer(); let TransactionSigned { transaction, signature, hash } = tx.into_signed(); @@ -106,8 +107,7 @@ where .inner .provider() .receipt_by_hash(hash) - .ok() // todo: change sig to return result - .flatten() + .map_err(Self::Error::from_eth_err)? 
.and_then(|receipt| receipt.deposit_receipt_version); let TransactionInfo { @@ -120,7 +120,7 @@ where }) .unwrap_or_else(|| inner.max_fee_per_gas()); - Transaction { + Ok(Transaction { inner: alloy_rpc_types_eth::Transaction { inner, block_hash, @@ -130,7 +130,7 @@ where effective_gas_price: Some(effective_gas_price), }, deposit_receipt_version, - } + }) } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 421c10f8b41..8072021d990 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -501,7 +501,8 @@ where trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); Ok(EthTransactions::transaction_by_hash(self, hash) .await? - .map(|tx| tx.into_transaction(self.tx_resp_builder()))) + .map(|tx| tx.into_transaction(self.tx_resp_builder())) + .transpose()?) } /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index e25ea84d699..7125857b898 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -64,8 +64,7 @@ pub trait EthBlocks: LoadBlock { full.into(), Some(block_hash), self.tx_resp_builder(), - ) - .map_err(Self::Error::from_eth_err)?; + )?; Ok(Some(block)) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index 8adb0e281e7..a881330b045 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -17,7 +17,6 @@ pub mod block; pub mod blocking_task; pub mod call; -pub mod error; pub mod fee; pub mod pending_block; pub mod receipt; diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 234008f21fe..ca4b0322e72 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ 
b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -208,7 +208,7 @@ pub trait EthTransactions: LoadTransaction { tx.clone().with_signer(*signer), tx_info, self.tx_resp_builder(), - ))) + )?)) } } @@ -233,7 +233,7 @@ pub trait EthTransactions: LoadTransaction { RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { let transaction = tx.transaction.clone().into_consensus(); - return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder()))); + return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder())?)); } } @@ -291,7 +291,7 @@ pub trait EthTransactions: LoadTransaction { ) }) }) - .ok_or(EthApiError::HeaderNotFound(block_id).into()) + .ok_or(EthApiError::HeaderNotFound(block_id))? .map(Some) } } diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index fa9737f84f0..cb97a03e8b8 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -20,12 +20,14 @@ pub mod node; pub mod pubsub; pub mod types; +pub use reth_rpc_eth_types::error::{ + AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError, +}; pub use reth_rpc_types_compat::TransactionCompat; pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; pub use core::{EthApiServer, FullEthApiServer}; pub use filter::EthFilterApiServer; -pub use helpers::error::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; pub use node::{RpcNodeCore, RpcNodeCoreExt}; pub use pubsub::EthPubSubApiServer; pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index b75bce026fb..12ff090d37c 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -39,15 +39,26 @@ pub type RpcBlock = Block, ::HeaderResponse>; /// Adapter for network specific receipt type. pub type RpcReceipt = ::ReceiptResponse; +/// Adapter for network specific error type. 
+pub type RpcError = ::Error; + /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. pub trait FullEthApiTypes: - EthApiTypes>> + EthApiTypes< + TransactionCompat: TransactionCompat< + Transaction = RpcTransaction, + Error = RpcError, + >, +> { } impl FullEthApiTypes for T where T: EthApiTypes< - TransactionCompat: TransactionCompat>, + TransactionCompat: TransactionCompat< + Transaction = RpcTransaction, + Error = RpcError, + >, > { } diff --git a/crates/rpc/rpc-eth-api/src/helpers/error.rs b/crates/rpc/rpc-eth-types/src/error/api.rs similarity index 87% rename from crates/rpc/rpc-eth-api/src/helpers/error.rs rename to crates/rpc/rpc-eth-types/src/error/api.rs index 1d991b8e65b..419f530c4e2 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/error.rs +++ b/crates/rpc/rpc-eth-types/src/error/api.rs @@ -1,9 +1,10 @@ //! Helper traits to wrap generic l1 errors, in network specific error type configured in -//! [`EthApiTypes`](crate::EthApiTypes). +//! `reth_rpc_eth_api::EthApiTypes`. -use reth_rpc_eth_types::EthApiError; use revm_primitives::EVMError; +use crate::EthApiError; + /// Helper trait to wrap core [`EthApiError`]. pub trait FromEthApiError: From { /// Converts from error via [`EthApiError`]. @@ -51,7 +52,7 @@ pub trait AsEthApiError { fn as_err(&self) -> Option<&EthApiError>; /// Returns `true` if error is - /// [`RpcInvalidTransactionError::GasTooHigh`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooHigh). + /// [`RpcInvalidTransactionError::GasTooHigh`](crate::RpcInvalidTransactionError::GasTooHigh). fn is_gas_too_high(&self) -> bool { if let Some(err) = self.as_err() { return err.is_gas_too_high() @@ -61,7 +62,7 @@ pub trait AsEthApiError { } /// Returns `true` if error is - /// [`RpcInvalidTransactionError::GasTooLow`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooLow). + /// [`RpcInvalidTransactionError::GasTooLow`](crate::RpcInvalidTransactionError::GasTooLow). 
fn is_gas_too_low(&self) -> bool { if let Some(err) = self.as_err() { return err.is_gas_too_low() diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs similarity index 99% rename from crates/rpc/rpc-eth-types/src/error.rs rename to crates/rpc/rpc-eth-types/src/error/mod.rs index 641cbc88291..99c41daea37 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -1,6 +1,9 @@ //! Implementation specific Errors for the `eth_` namespace. -use std::time::Duration; +pub mod api; +pub use api::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; + +use core::time::Duration; use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, U256}; diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index b2a9a5e62ed..91aaa25430e 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -21,8 +21,9 @@ use revm::{db::CacheDB, Database}; use revm_primitives::{keccak256, Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; use crate::{ - cache::db::StateProviderTraitObjWrapper, error::ToRpcError, EthApiError, RevertError, - RpcInvalidTransactionError, + cache::db::StateProviderTraitObjWrapper, + error::{api::FromEthApiError, ToRpcError}, + EthApiError, RevertError, RpcInvalidTransactionError, }; /// Errors which may occur during `eth_simulateV1` execution. @@ -170,7 +171,7 @@ where /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. 
#[expect(clippy::complexity)] -pub fn build_block( +pub fn build_block>( results: Vec<(Address, ExecutionResult)>, transactions: Vec, block_env: &BlockEnv, @@ -179,7 +180,7 @@ pub fn build_block( full_transactions: bool, db: &CacheDB>>, tx_resp_builder: &T, -) -> Result>, EthApiError> { +) -> Result>, T::Error> { let mut calls: Vec = Vec::with_capacity(results.len()); let mut senders = Vec::with_capacity(results.len()); let mut receipts = Vec::with_capacity(results.len()); @@ -272,7 +273,7 @@ pub fn build_block( } } - let state_root = db.db.state_root(hashed_state)?; + let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; let header = reth_primitives::Header { beneficiary: block_env.coinbase, diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index bfff1cafead..a4ede0a1a4e 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -41,7 +41,10 @@ impl TransactionSource { } /// Conversion into network specific transaction type. - pub fn into_transaction(self, resp_builder: &T) -> T::Transaction { + pub fn into_transaction( + self, + resp_builder: &T, + ) -> Result { match self { Self::Pool(tx) => from_recovered(tx, resp_builder), Self::Block { transaction, index, block_hash, block_number, base_fee } => { diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index 2e45d210d17..887986ada12 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -27,6 +27,7 @@ alloy-consensus.workspace = true # io serde.workspace = true +jsonrpsee-types.workspace = true [dev-dependencies] serde_json.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 41bd057dfd6..43086b311bd 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -1,15 +1,16 @@ //! 
Compatibility functions for rpc `Block` type. -use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; use alloy_consensus::Sealed; use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_eth::{ - Block, BlockError, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, + Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders}; +use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; + /// Converts the given primitive block into a [`Block`] response with the given /// [`BlockTransactionsKind`] /// @@ -20,7 +21,7 @@ pub fn from_block( kind: BlockTransactionsKind, block_hash: Option, tx_resp_builder: &T, -) -> Result, BlockError> { +) -> Result, T::Error> { match kind { BlockTransactionsKind::Hashes => { Ok(from_block_with_tx_hashes::(block, total_difficulty, block_hash)) @@ -63,7 +64,7 @@ pub fn from_block_full( total_difficulty: U256, block_hash: Option, tx_resp_builder: &T, -) -> Result, BlockError> { +) -> Result, T::Error> { let block_hash = block_hash.unwrap_or_else(|| block.block.header.hash_slow()); let block_number = block.block.number; let base_fee_per_gas = block.block.base_fee_per_gas; @@ -88,7 +89,7 @@ pub fn from_block_full( from_recovered_with_block_context::(signed_tx_ec_recovered, tx_info, tx_resp_builder) }) - .collect::>(); + .collect::, T::Error>>()?; Ok(from_block_with_transactions( block_length, diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs index cfbaaa622fb..9e8fae67096 100644 --- a/crates/rpc/rpc-types-compat/src/transaction.rs +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -1,5 +1,6 @@ //! Compatibility functions for rpc `Transaction` type. 
+use core::error; use std::fmt; use alloy_consensus::Transaction as _; @@ -19,7 +20,7 @@ pub fn from_recovered_with_block_context( tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, resp_builder: &T, -) -> T::Transaction { +) -> Result { resp_builder.fill(tx, tx_info) } @@ -28,7 +29,7 @@ pub fn from_recovered_with_block_context( pub fn from_recovered( tx: TransactionSignedEcRecovered, resp_builder: &T, -) -> T::Transaction { +) -> Result { resp_builder.fill(tx, TransactionInfo::default()) } @@ -43,9 +44,16 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { + Clone + fmt::Debug; + /// RPC transaction error type. + type Error: error::Error + Into>; + /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. - fn fill(&self, tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo) -> Self::Transaction; + fn fill( + &self, + tx: TransactionSignedEcRecovered, + tx_inf: TransactionInfo, + ) -> Result; /// Truncates the input of a transaction to only the first 4 bytes. // todo: remove in favour of using constructor on `TransactionResponse` or similar diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 589cb801e2c..3782780f5a6 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -34,7 +34,7 @@ use tokio::{ sync::{mpsc::Receiver, Mutex}, time::MissedTickBehavior, }; -use tracing::trace; +use tracing::{error, trace}; /// The maximum number of headers we read at once when handling a range filter. 
const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb @@ -625,10 +625,15 @@ where let mut prepared_stream = self.txs_stream.lock().await; while let Ok(tx) = prepared_stream.try_recv() { - pending_txs.push(from_recovered( - tx.transaction.to_recovered_transaction(), - &self.tx_resp_builder, - )) + match from_recovered(tx.transaction.to_recovered_transaction(), &self.tx_resp_builder) { + Ok(tx) => pending_txs.push(tx), + Err(err) => { + error!(target: "rpc", + %err, + "Failed to fill txn with block context" + ); + } + } } FilterChanges::Transactions(pending_txs) } diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 19ffc55b398..d1ce84bc0b7 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -4,6 +4,7 @@ use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; use alloy_network::{Ethereum, Network}; use alloy_rpc_types_eth::{Transaction, TransactionInfo}; use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_rpc_eth_types::EthApiError; use reth_rpc_types_compat::TransactionCompat; /// Builds RPC transaction response for l1. 
@@ -16,11 +17,13 @@ where { type Transaction = ::TransactionResponse; + type Error = EthApiError; + fn fill( &self, tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, - ) -> Self::Transaction { + ) -> Result { let from = tx.signer(); let TransactionSigned { transaction, signature, hash } = tx.into_signed(); @@ -54,14 +57,14 @@ where }) .unwrap_or_else(|| inner.max_fee_per_gas()); - Transaction { + Ok(Transaction { inner, block_hash, block_number, transaction_index, from, effective_gas_price: Some(effective_gas_price), - } + }) } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 0702e3147ce..8ea6d1f87c8 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -27,6 +27,7 @@ use tokio_stream::{ wrappers::{BroadcastStream, ReceiverStream}, Stream, }; +use tracing::error; /// `Eth` pubsub RPC implementation. /// @@ -146,11 +147,23 @@ where match params { Params::Bool(true) => { // full transaction objects requested - let stream = pubsub.full_pending_transaction_stream().map(|tx| { - EthSubscriptionResult::FullTransaction(Box::new(from_recovered( + let stream = pubsub.full_pending_transaction_stream().filter_map(|tx| { + let tx_value = match from_recovered( tx.transaction.to_recovered_transaction(), &tx_resp_builder, - ))) + ) { + Ok(tx) => { + Some(EthSubscriptionResult::FullTransaction(Box::new(tx))) + } + Err(err) => { + error!(target = "rpc", + %err, + "Failed to fill transaction with block context" + ); + None + } + }; + std::future::ready(tx_value) }); return pipe_from_stream(accepted_sink, stream).await } diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index d03e10ca75a..3e46183b466 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -1,3 +1,4 @@ +use core::fmt; use std::collections::BTreeMap; use alloy_consensus::Transaction; @@ -6,7 +7,7 @@ use alloy_rpc_types_txpool::{ 
TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus, }; use async_trait::async_trait; -use jsonrpsee::core::RpcResult as Result; +use jsonrpsee::core::RpcResult; use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_api::TxPoolApiServer; use reth_rpc_types_compat::{transaction::from_recovered, TransactionCompat}; @@ -35,33 +36,36 @@ where Pool: TransactionPool + 'static, Eth: TransactionCompat, { - fn content(&self) -> TxpoolContent { + fn content(&self) -> Result, Eth::Error> { #[inline] fn insert( tx: &Tx, content: &mut BTreeMap>, resp_builder: &RpcTxB, - ) where + ) -> Result<(), RpcTxB::Error> + where Tx: PoolTransaction>, RpcTxB: TransactionCompat, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), - from_recovered(tx.clone().into_consensus().into(), resp_builder), + from_recovered(tx.clone().into_consensus().into(), resp_builder)?, ); + + Ok(()) } let AllPoolTransactions { pending, queued } = self.pool.all_transactions(); let mut content = TxpoolContent { pending: BTreeMap::new(), queued: BTreeMap::new() }; for pending in pending { - insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder); + insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder)?; } for queued in queued { - insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder); + insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder)?; } - content + Ok(content) } } @@ -76,7 +80,7 @@ where /// Ref: [Here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_status) /// /// Handler for `txpool_status` - async fn txpool_status(&self) -> Result { + async fn txpool_status(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving txpool_status"); let all = self.pool.all_transactions(); Ok(TxpoolStatus { pending: all.pending.len() as u64, queued: all.queued.len() as u64 }) @@ -88,7 +92,7 @@ where /// See 
[here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_inspect) for more details /// /// Handler for `txpool_inspect` - async fn txpool_inspect(&self) -> Result { + async fn txpool_inspect(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving txpool_inspect"); #[inline] @@ -131,9 +135,9 @@ where async fn txpool_content_from( &self, from: Address, - ) -> Result> { + ) -> RpcResult> { trace!(target: "rpc::eth", ?from, "Serving txpool_contentFrom"); - Ok(self.content().remove_from(&from)) + Ok(self.content().map_err(Into::into)?.remove_from(&from)) } /// Returns the details of all transactions currently pending for inclusion in the next @@ -141,14 +145,14 @@ where /// /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content) for more details /// Handler for `txpool_content` - async fn txpool_content(&self) -> Result> { + async fn txpool_content(&self) -> RpcResult> { trace!(target: "rpc::eth", "Serving txpool_content"); - Ok(self.content()) + Ok(self.content().map_err(Into::into)?) 
} } -impl std::fmt::Debug for TxPoolApi { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Debug for TxPoolApi { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TxpoolApi").finish_non_exhaustive() } } From b5f7eca72f80ffc26602e962a37b7b79b797de1a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 12 Nov 2024 11:38:00 +0100 Subject: [PATCH 422/970] chore(sdk): make `ExecutionOutcome` generic over receipt (#12448) Co-authored-by: Federico Gimenez --- Cargo.lock | 1 + crates/engine/util/src/reorg.rs | 2 +- crates/evm/execution-types/Cargo.toml | 7 +- .../execution-types/src/execution_outcome.rs | 82 +++++++++++-------- crates/optimism/evm/src/lib.rs | 2 +- crates/primitives-traits/src/receipt.rs | 2 +- crates/storage/provider/src/writer/mod.rs | 9 +- 7 files changed, 61 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38a87ebf930..bd3a93eda7a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7518,6 +7518,7 @@ dependencies = [ "rand 0.8.5", "reth-execution-errors", "reth-primitives", + "reth-primitives-traits", "reth-trie", "revm", "serde", diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 69831389a65..169b6f5ede7 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -375,7 +375,7 @@ where // and 4788 contract call state.merge_transitions(BundleRetention::PlainState); - let outcome = ExecutionOutcome::new( + let outcome: ExecutionOutcome = ExecutionOutcome::new( state.take_bundle(), Receipts::from(vec![receipts]), reorg_target.number, diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index b6af3dee9af..13b0aef8ad4 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -14,6 +14,7 @@ workspace = true reth-primitives.workspace = true reth-execution-errors.workspace = true reth-trie.workspace = true +reth-primitives-traits.workspace = true 
revm.workspace = true @@ -43,14 +44,16 @@ serde = [ ] serde-bincode-compat = [ "reth-primitives/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", "reth-trie/serde-bincode-compat", "serde_with", - "alloy-eips/serde-bincode-compat" + "alloy-eips/serde-bincode-compat", ] std = [ "reth-primitives/std", "alloy-eips/std", "alloy-primitives/std", "revm/std", - "serde?/std" + "serde?/std", + "reth-primitives-traits/std", ] diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 026e6b37c42..c1d9c701650 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,13 +1,16 @@ -use crate::BlockExecutionOutput; +use std::collections::HashMap; + use alloy_eips::eip7685::Requests; use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; -use reth_primitives::{logs_bloom, Account, Bytecode, Receipt, Receipts, StorageEntry}; +use reth_primitives::{logs_bloom, Account, Bytecode, Receipts, StorageEntry}; +use reth_primitives_traits::Receipt; use reth_trie::HashedPostState; use revm::{ db::{states::BundleState, BundleAccount}, primitives::AccountInfo, }; -use std::collections::HashMap; + +use crate::BlockExecutionOutput; /// Represents a changed account #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -33,7 +36,7 @@ impl ChangedAccount { /// blocks, capturing the resulting state, receipts, and requests following the execution. #[derive(Default, Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct ExecutionOutcome { +pub struct ExecutionOutcome { /// Bundle state with reverts. pub bundle: BundleState, /// The collection of receipts. @@ -41,7 +44,7 @@ pub struct ExecutionOutcome { /// The inner vector stores receipts ordered by transaction number. /// /// If receipt is None it means it is pruned. 
- pub receipts: Receipts, + pub receipts: Receipts, /// First block of bundle state. pub first_block: BlockNumber, /// The collection of EIP-7685 requests. @@ -63,14 +66,14 @@ pub type AccountRevertInit = (Option>, Vec); /// Type used to initialize revms reverts. pub type RevertsInit = HashMap>; -impl ExecutionOutcome { +impl ExecutionOutcome { /// Creates a new `ExecutionOutcome`. /// /// This constructor initializes a new `ExecutionOutcome` instance with the provided /// bundle state, receipts, first block number, and EIP-7685 requests. pub const fn new( bundle: BundleState, - receipts: Receipts, + receipts: Receipts, first_block: BlockNumber, requests: Vec, ) -> Self { @@ -85,7 +88,7 @@ impl ExecutionOutcome { state_init: BundleStateInit, revert_init: RevertsInit, contracts_init: impl IntoIterator, - receipts: Receipts, + receipts: Receipts, first_block: BlockNumber, requests: Vec, ) -> Self { @@ -180,27 +183,33 @@ impl ExecutionOutcome { } /// Returns an iterator over all block logs. - pub fn logs(&self, block_number: BlockNumber) -> Option> { + pub fn logs(&self, block_number: BlockNumber) -> Option> + where + T: Receipt, + { let index = self.block_number_to_index(block_number)?; - Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs.iter())).flatten()) + Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs().iter())).flatten()) } /// Return blocks logs bloom - pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option { + pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option + where + T: Receipt, + { Some(logs_bloom(self.logs(block_number)?)) } /// Returns the receipt root for all recorded receipts. /// Note: this function calculated Bloom filters for every receipt and created merkle trees /// of receipt. This is a expensive operation. 
- pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option { + pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option + where + T: Receipt, + { #[cfg(feature = "optimism")] panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); #[cfg(not(feature = "optimism"))] - self.receipts.root_slow( - self.block_number_to_index(_block_number)?, - reth_primitives::proofs::calculate_receipt_root_no_memo, - ) + self.receipts.root_slow(self.block_number_to_index(_block_number)?, T::receipts_root) } /// Returns the receipt root for all recorded receipts. @@ -209,23 +218,23 @@ impl ExecutionOutcome { pub fn generic_receipts_root_slow( &self, block_number: BlockNumber, - f: impl FnOnce(&[&Receipt]) -> B256, + f: impl FnOnce(&[&T]) -> B256, ) -> Option { self.receipts.root_slow(self.block_number_to_index(block_number)?, f) } /// Returns reference to receipts. - pub const fn receipts(&self) -> &Receipts { + pub const fn receipts(&self) -> &Receipts { &self.receipts } /// Returns mutable reference to receipts. - pub fn receipts_mut(&mut self) -> &mut Receipts { + pub fn receipts_mut(&mut self) -> &mut Receipts { &mut self.receipts } /// Return all block receipts - pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option] { + pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option] { let Some(index) = self.block_number_to_index(block_number) else { return &[] }; &self.receipts[index] } @@ -277,7 +286,10 @@ impl ExecutionOutcome { /// # Panics /// /// If the target block number is not included in the state block range. - pub fn split_at(self, at: BlockNumber) -> (Option, Self) { + pub fn split_at(self, at: BlockNumber) -> (Option, Self) + where + T: Clone, + { if at == self.first_block { return (None, self) } @@ -329,7 +341,7 @@ impl ExecutionOutcome { } /// Create a new instance with updated receipts. 
- pub fn with_receipts(mut self, receipts: Receipts) -> Self { + pub fn with_receipts(mut self, receipts: Receipts) -> Self { self.receipts = receipts; self } @@ -352,8 +364,8 @@ impl ExecutionOutcome { } } -impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { - fn from(value: (BlockExecutionOutput, BlockNumber)) -> Self { +impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { + fn from(value: (BlockExecutionOutput, BlockNumber)) -> Self { Self { bundle: value.0.state, receipts: Receipts::from(value.0.receipts), @@ -385,7 +397,7 @@ mod tests { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], @@ -447,7 +459,7 @@ mod tests { fn test_block_number_to_index() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], @@ -482,7 +494,7 @@ mod tests { fn test_get_logs() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], @@ -514,7 +526,7 @@ mod tests { fn test_receipts_by_block() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], @@ -540,7 +552,7 @@ mod tests { // Assert that the receipts for block number 123 match the expected receipts assert_eq!( receipts_by_block, - vec![&Some(Receipt { + 
vec![&Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], @@ -554,7 +566,7 @@ mod tests { fn test_receipts_len() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], @@ -563,7 +575,7 @@ mod tests { }; // Create an empty Receipts object - let receipts_empty = Receipts { receipt_vec: vec![] }; + let receipts_empty: Receipts = Receipts { receipt_vec: vec![] }; // Define the first block number let first_block = 123; @@ -602,7 +614,7 @@ mod tests { #[cfg(not(feature = "optimism"))] fn test_revert_to() { // Create a random receipt object - let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], @@ -651,7 +663,7 @@ mod tests { #[cfg(not(feature = "optimism"))] fn test_extend_execution_outcome() { // Create a Receipt object with specific attributes. 
- let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], @@ -695,7 +707,7 @@ mod tests { #[cfg(not(feature = "optimism"))] fn test_split_at_execution_outcome() { // Create a random receipt object - let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], @@ -803,7 +815,7 @@ mod tests { }, ); - let execution_outcome = ExecutionOutcome { + let execution_outcome: ExecutionOutcome = ExecutionOutcome { bundle: bundle_state, receipts: Receipts::default(), first_block: 0, diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 8f0f75782f4..cfa7dfa5849 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -820,7 +820,7 @@ mod tests { }; // Create an empty Receipts object - let receipts_empty = Receipts { receipt_vec: vec![] }; + let receipts_empty = Receipts:: { receipt_vec: vec![] }; // Define the first block number let first_block = 123; diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index bfcd99b08ec..68917d62812 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -29,6 +29,6 @@ pub trait Receipt: /// Returns transaction type. fn tx_type(&self) -> u8; - /// Calculates the receipts root of all receipts in a block. + /// Calculates the receipts root of the given receipts. 
fn receipts_root(receipts: &[&Self]) -> B256; } diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 37092a5dd51..6ca024b0a9f 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -1129,7 +1129,8 @@ mod tests { let bundle = state.take_bundle(); - let outcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); + let outcome: ExecutionOutcome = + ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); let mut writer = UnifiedStorageWriter::from_database(&provider); writer .write_to_storage(outcome, OriginalValuesKnown::Yes) @@ -1375,7 +1376,7 @@ mod tests { #[test] fn revert_to_indices() { - let base = ExecutionOutcome { + let base: ExecutionOutcome = ExecutionOutcome { bundle: BundleState::default(), receipts: vec![vec![Some(Receipt::default()); 2]; 7].into(), first_block: 10, @@ -1441,7 +1442,7 @@ mod tests { assert_eq!( StateRoot::overlay_root( tx, - ExecutionOutcome::new( + ExecutionOutcome::::new( state.bundle_state.clone(), Receipts::default(), 0, @@ -1592,7 +1593,7 @@ mod tests { .build(); assert_eq!(previous_state.reverts.len(), 1); - let mut test = ExecutionOutcome { + let mut test: ExecutionOutcome = ExecutionOutcome { bundle: present_state, receipts: vec![vec![Some(Receipt::default()); 2]; 1].into(), first_block: 2, From a2e11977d8e873b2229af32db52432fc0382dadb Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 12 Nov 2024 11:52:26 +0100 Subject: [PATCH 423/970] chore(sdk): Add `InMemorySize` as super trait of data primitive traits (#12465) --- crates/primitives-traits/src/block/body.rs | 6 ++---- crates/primitives-traits/src/block/mod.rs | 6 ++---- crates/primitives-traits/src/transaction/mod.rs | 6 +++--- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 9b703c0d2f1..14941ffed0f 100644 --- 
a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -6,7 +6,7 @@ use alloy_consensus::{BlockHeader, Transaction, TxType}; use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; use alloy_primitives::{Address, B256}; -use crate::Block; +use crate::{Block, InMemorySize}; /// Abstraction for block's body. pub trait BlockBody: @@ -22,6 +22,7 @@ pub trait BlockBody: + for<'de> serde::Deserialize<'de> + alloy_rlp::Encodable + alloy_rlp::Decodable + + InMemorySize { /// Ordered list of signed transactions as committed in block. // todo: requires trait for signed transaction @@ -93,7 +94,4 @@ pub trait BlockBody: fn blob_versioned_hashes(&self) -> Vec<&B256> { self.blob_versioned_hashes_iter().collect() } - - /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. - fn size(&self) -> usize; } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 185b61e9782..cfc9e9a5503 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -8,7 +8,7 @@ use alloc::{fmt, vec::Vec}; use alloy_primitives::{Address, B256}; use reth_codecs::Compact; -use crate::{BlockBody, BlockHeader, FullBlockHeader}; +use crate::{BlockBody, BlockHeader, FullBlockHeader, InMemorySize}; /// Helper trait that unifies all behaviour required by block to support full node operations. pub trait FullBlock: Block + Compact {} @@ -32,6 +32,7 @@ pub trait Block: + for<'a> serde::Deserialize<'a> + From<(Self::Header, Self::Body)> + Into<(Self::Header, Self::Body)> + + InMemorySize { /// Header part of the block. type Header: BlockHeader; @@ -104,7 +105,4 @@ pub trait Block: // todo: can be default impl if sealed block type is made generic over header and body and // migrated to alloy fn with_recovered_senders(self) -> Option>; - - /// Calculates a heuristic for the in-memory size of the [`Block`]. 
- fn size(&self) -> usize; } diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 7fd0ec88b31..d5061ca3909 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -7,6 +7,8 @@ use alloy_primitives::{TxKind, B256}; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; +use crate::InMemorySize; + pub mod signed; #[allow(dead_code)] @@ -26,6 +28,7 @@ pub trait Transaction: + alloy_rlp::Decodable + for<'de> Deserialize<'de> + alloy_consensus::Transaction + + InMemorySize + MaybeArbitrary { /// Heavy operation that return signature hash over rlp encoded transaction. @@ -45,9 +48,6 @@ pub trait Transaction: /// This encodes the transaction _without_ the signature, and is only suitable for creating a /// hash intended for signing. fn encode_without_signature(&self, out: &mut dyn bytes::BufMut); - - /// Calculates a heuristic for the in-memory size of the [Transaction]. 
- fn size(&self) -> usize; } #[cfg(not(feature = "arbitrary"))] From 9f29107abb15b64d45843204e2e0fb32c65a5233 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 12 Nov 2024 12:11:37 +0100 Subject: [PATCH 424/970] chore(sdk): add `NodePrimitives::TxType` (#12332) --- crates/ethereum/node/src/node.rs | 3 ++- crates/optimism/node/src/node.rs | 3 ++- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives-traits/src/node.rs | 10 ++++++++-- crates/primitives-traits/src/tx_type.rs | 7 +++++++ 5 files changed, 20 insertions(+), 5 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 68ed879d223..b37d0227a78 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -25,7 +25,7 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Header, Receipt, TransactionSigned}; +use reth_primitives::{Block, Header, Receipt, TransactionSigned, TxType}; use reth_provider::CanonStateSubscriptions; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; @@ -44,6 +44,7 @@ pub struct EthPrimitives; impl NodePrimitives for EthPrimitives { type Block = Block; type SignedTx = TransactionSigned; + type TxType = TxType; type Receipt = Receipt; } diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 323148e276b..e97924d4b55 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -24,7 +24,7 @@ use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService, PayloadStore}; -use reth_primitives::{Block, Header, Receipt, TransactionSigned}; +use reth_primitives::{Block, Header, Receipt, TransactionSigned, 
TxType}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -47,6 +47,7 @@ pub struct OpPrimitives; impl NodePrimitives for OpPrimitives { type Block = Block; type SignedTx = TransactionSigned; + type TxType = TxType; type Receipt = Receipt; } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index babc0f42e0b..afcc74a894d 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -54,7 +54,7 @@ pub use storage::StorageEntry; /// Transaction types pub mod tx_type; -pub use tx_type::TxType; +pub use tx_type::{FullTxType, TxType}; /// Common header types pub mod header; diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 921942841d4..cebbbe202e8 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,6 +1,6 @@ use core::fmt; -use crate::{BlockBody, FullBlock, FullReceipt, FullSignedTx}; +use crate::{BlockBody, FullBlock, FullReceipt, FullSignedTx, FullTxType}; /// Configures all the primitive types of the node. pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { @@ -8,6 +8,8 @@ pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { type Block: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; /// Signed version of the transaction type. type SignedTx: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + /// Transaction envelope type ID. + type TxType: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; /// A receipt. 
type Receipt: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; } @@ -15,6 +17,7 @@ pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { impl NodePrimitives for () { type Block = (); type SignedTx = (); + type TxType = (); type Receipt = (); } @@ -24,15 +27,18 @@ pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug type Block: FullBlock>; /// Signed version of the transaction type. type SignedTx: FullSignedTx; + /// Transaction envelope type ID. + type TxType: FullTxType; /// A receipt. type Receipt: FullReceipt; } impl NodePrimitives for T where - T: FullNodePrimitives, + T: FullNodePrimitives, { type Block = T::Block; type SignedTx = T::SignedTx; + type TxType = T::TxType; type Receipt = T::Receipt; } diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs index 6ca55879442..e0bf28d2a99 100644 --- a/crates/primitives-traits/src/tx_type.rs +++ b/crates/primitives-traits/src/tx_type.rs @@ -3,6 +3,13 @@ use core::fmt; use alloy_eips::eip2718::Eip2718Error; use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; +use reth_codecs::Compact; + +/// Helper trait that unifies all behaviour required by transaction type ID to support full node +/// operations. +pub trait FullTxType: TxType + Compact {} + +impl FullTxType for T where T: TxType + Compact {} /// Trait representing the behavior of a transaction type. 
pub trait TxType: From 179aa047072a4b9d7fb51db555d6566a9122dd77 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 12 Nov 2024 12:29:51 +0100 Subject: [PATCH 425/970] test(trie): add ParallelProof unit test (#12413) --- crates/trie/common/src/proofs.rs | 4 +- crates/trie/parallel/src/proof.rs | 89 ++++++++++++++++++++++++++++++- 2 files changed, 89 insertions(+), 4 deletions(-) diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 108910a1384..f6eaf3960ec 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -17,7 +17,7 @@ use std::collections::HashMap; /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes /// in the paths of target accounts. -#[derive(Clone, Default, Debug)] +#[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct MultiProof { /// State trie multiproof for requested accounts. pub account_subtree: ProofNodes, @@ -79,7 +79,7 @@ impl MultiProof { } /// The merkle multiproof of storage trie. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct StorageMultiProof { /// Storage trie root. 
pub root: B256, diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 4cb99b50d0c..bafb9917c60 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -1,5 +1,8 @@ use crate::{root::ParallelStateRootError, stats::ParallelTrieTracker, StorageRootTargets}; -use alloy_primitives::{map::HashSet, B256}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, +}; use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; use reth_db::DatabaseError; @@ -18,7 +21,7 @@ use reth_trie::{ }; use reth_trie_common::proof::ProofRetainer; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use tracing::debug; #[cfg(feature = "metrics")] @@ -210,3 +213,85 @@ where Ok(MultiProof { account_subtree: hash_builder.take_proof_nodes(), storages }) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{keccak256, map::DefaultHashBuilder, Address, U256}; + use rand::Rng; + use reth_primitives::{Account, StorageEntry}; + use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; + use reth_trie::proof::Proof; + + #[test] + fn random_parallel_proof() { + let factory = create_test_provider_factory(); + let consistent_view = ConsistentDbView::new(factory.clone(), None); + + let mut rng = rand::thread_rng(); + let state = (0..100) + .map(|_| { + let address = Address::random(); + let account = + Account { balance: U256::from(rng.gen::()), ..Default::default() }; + let mut storage = HashMap::::default(); + let has_storage = rng.gen_bool(0.7); + if has_storage { + for _ in 0..100 { + storage.insert( + B256::from(U256::from(rng.gen::())), + U256::from(rng.gen::()), + ); + } + } + (address, (account, storage)) + }) + .collect::>(); + + { + let provider_rw = factory.provider_rw().unwrap(); + provider_rw + .insert_account_for_hashing( + state.iter().map(|(address, (account, _))| (*address, 
Some(*account))), + ) + .unwrap(); + provider_rw + .insert_storage_for_hashing(state.iter().map(|(address, (_, storage))| { + ( + *address, + storage + .iter() + .map(|(slot, value)| StorageEntry { key: *slot, value: *value }), + ) + })) + .unwrap(); + provider_rw.commit().unwrap(); + } + + let mut targets = + HashMap::, DefaultHashBuilder>::default(); + for (address, (_, storage)) in state.iter().take(10) { + let hashed_address = keccak256(*address); + let mut target_slots = HashSet::::default(); + + for (slot, _) in storage.iter().take(5) { + target_slots.insert(*slot); + } + + if !target_slots.is_empty() { + targets.insert(hashed_address, target_slots); + } + } + + let provider_rw = factory.provider_rw().unwrap(); + let trie_cursor_factory = DatabaseTrieCursorFactory::new(provider_rw.tx_ref()); + let hashed_cursor_factory = DatabaseHashedCursorFactory::new(provider_rw.tx_ref()); + + assert_eq!( + ParallelProof::new(consistent_view, Default::default()) + .multiproof(targets.clone()) + .unwrap(), + Proof::new(trie_cursor_factory, hashed_cursor_factory).multiproof(targets).unwrap() + ); + } +} From 6c1833de315eb9012a9581747cc309d0456afdca Mon Sep 17 00:00:00 2001 From: malik Date: Tue, 12 Nov 2024 12:48:39 +0100 Subject: [PATCH 426/970] chore: remove unessarcy clone (#12463) --- crates/chain-state/src/in_memory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index bfae8113e3e..8794bb393ca 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -269,7 +269,7 @@ impl CanonicalInMemoryState { // insert the new blocks for block in new_blocks { let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = BlockState::with_parent(block.clone(), parent); + let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); From 
c44edf5ce26cb326002169ec666f22ff4819bbeb Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Tue, 12 Nov 2024 06:15:28 -0600 Subject: [PATCH 427/970] make PayloadStore operate on PayloadBuilder (#12460) Co-authored-by: Matthias Seitz --- crates/node/builder/src/rpc.rs | 5 ++- crates/optimism/node/src/node.rs | 8 ++-- crates/payload/builder/src/service.rs | 22 +++++---- crates/payload/primitives/src/lib.rs | 2 +- crates/payload/primitives/src/traits.rs | 60 ++++++++++++++++++++++++- 5 files changed, 81 insertions(+), 16 deletions(-) diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 4530bbe7014..9680c221d7c 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -17,6 +17,7 @@ use reth_node_core::{ version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::PayloadStore; +use reth_payload_primitives::PayloadBuilder; use reth_provider::providers::ProviderNodeTypes; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, @@ -402,7 +403,7 @@ impl NodeAddOns for RpcAddOns where N: FullNodeComponents< Types: ProviderNodeTypes, - PayloadBuilder: Into::Engine>>, + PayloadBuilder: PayloadBuilder::Engine>, >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, @@ -426,7 +427,7 @@ where node.provider().clone(), config.chain.clone(), beacon_engine_handle, - node.payload_builder().clone().into(), + PayloadStore::new(node.payload_builder().clone()), node.pool().clone(), Box::new(node.task_executor().clone()), client, diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index e97924d4b55..efc8964ffab 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -7,7 +7,7 @@ use reth_chainspec::{EthChainSpec, Hardforks}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, 
NetworkManager, PeersInfo}; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, PayloadBuilder, }; use reth_node_builder::{ components::{ @@ -23,7 +23,7 @@ use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_rpc::OpEthApi; -use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService, PayloadStore}; +use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{Block, Header, Receipt, TransactionSigned, TxType}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; @@ -152,7 +152,7 @@ impl NodeAddOns for OpAddOns where N: FullNodeComponents< Types: NodeTypes, - PayloadBuilder: Into::Engine>>, + PayloadBuilder: PayloadBuilder::Engine>, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -170,7 +170,7 @@ impl RethRpcAddOns for OpAddOns where N: FullNodeComponents< Types: NodeTypes, - PayloadBuilder: Into::Engine>>, + PayloadBuilder: PayloadBuilder::Engine>, >, OpEngineValidator: EngineValidator<::Engine>, { diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 2c9975cb4c3..267a1e355b0 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -12,12 +12,13 @@ use futures_util::{future::FutureExt, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_payload_primitives::{ BuiltPayload, Events, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, - PayloadEvents, PayloadKind, PayloadTypes, + PayloadEvents, PayloadKind, PayloadStoreExt, PayloadTypes, }; use std::{ fmt, future::Future, pin::Pin, + sync::Arc, task::{Context, Poll}, }; use tokio::sync::{ @@ -30,13 +31,14 @@ use tracing::{debug, info, trace, 
warn}; type PayloadFuture

= Pin> + Send + Sync>>; /// A communication channel to the [`PayloadBuilderService`] that can retrieve payloads. +/// +/// This type is intended to be used to retrieve payloads from the service (e.g. from the engine +/// API). #[derive(Debug)] pub struct PayloadStore { - inner: PayloadBuilderHandle, + inner: Arc>, } -// === impl PayloadStore === - impl PayloadStore where T: PayloadTypes, @@ -82,12 +84,16 @@ where } } -impl Clone for PayloadStore +impl PayloadStore where T: PayloadTypes, { - fn clone(&self) -> Self { - Self { inner: self.inner.clone() } + /// Create a new instance + pub fn new

(inner: P) -> Self + where + P: PayloadStoreExt + 'static, + { + Self { inner: Arc::new(inner) } } } @@ -96,7 +102,7 @@ where T: PayloadTypes, { fn from(inner: PayloadBuilderHandle) -> Self { - Self { inner } + Self::new(inner) } } diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 7013d9fd913..3604ff5d8d8 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -20,7 +20,7 @@ pub use crate::events::{Events, PayloadEvents}; mod traits; pub use traits::{ BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilder, - PayloadBuilderAttributes, + PayloadBuilderAttributes, PayloadStoreExt, }; mod payload; diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 86f04a5b550..197a7fe3af9 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -7,11 +7,12 @@ use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use reth_chain_state::ExecutedBlock; use reth_primitives::SealedBlock; +use std::fmt::Debug; use tokio::sync::oneshot; /// A type that can request, subscribe to and resolve payloads. #[async_trait::async_trait] -pub trait PayloadBuilder: Send + Sync + Unpin { +pub trait PayloadBuilder: Debug + Send + Sync + Unpin { /// The Payload type for the builder. type PayloadType: PayloadTypes; /// The error type returned by the builder. @@ -58,6 +59,63 @@ pub trait PayloadBuilder: Send + Sync + Unpin { ) -> Option::PayloadBuilderAttributes, Self::Error>>; } +/// A helper trait for internal usage to retrieve and resolve payloads. +#[async_trait::async_trait] +pub trait PayloadStoreExt: Debug + Send + Sync + Unpin { + /// Resolves the payload job and returns the best payload that has been built so far. 
+ async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option>; + + /// Resolves the payload job as fast and possible and returns the best payload that has been + /// built so far. + async fn resolve(&self, id: PayloadId) -> Option> { + self.resolve_kind(id, PayloadKind::Earliest).await + } + + /// Returns the best payload for the given identifier. + async fn best_payload( + &self, + id: PayloadId, + ) -> Option>; + + /// Returns the payload attributes associated with the given identifier. + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option>; +} + +#[async_trait::async_trait] +impl PayloadStoreExt for P +where + P: PayloadBuilder, +{ + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option> { + Some(PayloadBuilder::resolve_kind(self, id, kind).await?.map_err(Into::into)) + } + + async fn best_payload( + &self, + id: PayloadId, + ) -> Option> { + Some(PayloadBuilder::best_payload(self, id).await?.map_err(Into::into)) + } + + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option> { + Some(PayloadBuilder::payload_attributes(self, id).await?.map_err(Into::into)) + } +} + /// Represents a built payload type that contains a built [`SealedBlock`] and can be converted into /// engine API execution payloads. 
pub trait BuiltPayload: Send + Sync + std::fmt::Debug { From a620d7c2f1b23c26973b6b07946ea7571bfc2efa Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 12 Nov 2024 14:00:06 +0100 Subject: [PATCH 428/970] chore(deps): bump alloy to 0.6.3 (#12468) --- Cargo.lock | 182 ++++++++++++----------- Cargo.toml | 56 +++---- crates/primitives/src/transaction/mod.rs | 13 ++ 3 files changed, 137 insertions(+), 114 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bd3a93eda7a..798852f8061 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,9 +91,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611cc2ae7d2e242c457e4be7f97036b8ad9ca152b499f53faf99b1ed8fc2553f" +checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" [[package]] name = "alloy-chains" @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b19fd285b55dd39ae0dbc37481ad9f5f48898726f76335a2d6167a85a5fa41da" +checksum = "ef11c6b2dfbf77dca7bafc6759860391395f07c04d5486f2a2e2563d2961639b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -131,9 +131,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f42b1cb3fa8cba51b45795097a0d58a34569ca5db9eda48f63230e22fbc5cb5" +checksum = "8faa407ef916bfe0677c52c9b2258ce0698c53e9e15a837d1501e3ae9e57421a" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -198,9 +198,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21aff0f2c921246398cad88e32a1d8ec14359b183afbc3dcb816873714cafc1a" +checksum = "33d6c0c1744a7af7d325dca6b5c5bb431a6307c0961088f7a236ca2694c4a87e" 
dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -219,9 +219,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76d899cfbfa13c5ed044383b7ae0e6a4d6ffcad3fd25e4acf71ff1c255ddae0" +checksum = "95a5a0a01ef6ec3cd3ebd52a7b3bc7f8a92b23e478e69c07abd94abf05e6b48e" dependencies = [ "alloy-primitives", "alloy-serde", @@ -242,9 +242,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e244937365749c09c403d3054de39cc7dd46e3c3a12e5b164106af4903011ab1" +checksum = "65fd0e2cff5ab68defc5050ff9e81cb053c5b52cf4809fc8786664898e29ae75" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -256,9 +256,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a28811461dc37e28db92b6d3a8c03a5883f2100b270a6294af00710bf4a0be4" +checksum = "96c9eca0c04ca8a663966ce7f5b19c03927f2b4d82910cb76cb4008490cfa838" dependencies = [ "alloy-consensus", "alloy-eips", @@ -279,9 +279,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e517c44a97e753f10dc0736215ba4677da5e2fbc1451e3e76902e02cd6cff12" +checksum = "e4c3050f19dc93a7f09fef670c8db04a15e7e2901494ca40decbce323be69643" dependencies = [ "alloy-consensus", "alloy-eips", @@ -292,9 +292,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15bf1a4b35b071c2d6f21fd3d32b8c5466cb7ed31fd4a4473a4e2ce180729121" +checksum = "b5ebd44d0ab30f1018dc1ff01686ea1a3ae732601841a4fb277c9d0b3a34bf50" dependencies = [ "alloy-genesis", "alloy-primitives", @@ 
-341,9 +341,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56befb85784c7eb4f163b9aed7cdcaba09d5b07f8e59d6c12ad0ce1acf67c0fd" +checksum = "df8e5a28e7c4c04afc0f20b2aecf6f9214d6cfd5009187c0b8616a8f8918739c" dependencies = [ "alloy-chains", "alloy-consensus", @@ -382,9 +382,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6480f9596064db2ca8e1a4b710ea9a4ef420534e68640296a461b71f6bfadc1" +checksum = "365dd813ec271a14febc31ea8ed64185856534f5644511f0c7a2961db060d878" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -423,9 +423,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb49d38b3279a07e864d973323534a2c4a845e16f2c0153a509a3abcc01da7b1" +checksum = "0336362936bb9fef88f27d51f2ede8c15cdfdb7f81b042e74257770052547101" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -448,9 +448,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90be9542c6c9bb0d21ac08104ca0a3d1fb83e56f1c704f5cdcf6fb9e01fcbd75" +checksum = "ac9a46bc01bc27dbf4dd27d46986eda661ffe99e78aea3078a77b8c064072b01" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -461,9 +461,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410e7b9d67489d19ad52439b940fbf482e0823190d8245242bfff1eec44290d5" +checksum = "82845a6f1ed33ef4edf79aa7cb091df31a532675921fb85041fbd8d6e029093d" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -473,9 +473,9 @@ dependencies = [ 
[[package]] name = "alloy-rpc-types-anvil" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "951f9106bb02ad00a2dc2eb7b400041a2c073d7fb8f33e2f1f29b2f71564f3f7" +checksum = "0e73c06c3e44866d304fe28e8cebc8354f99fe405cc7c9bd23ed92eaebca3c07" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -485,9 +485,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dab9821d5a73f56512ddd8e3db89a5bbb285353129b271c4ad6803a37c4e00ce" +checksum = "2f9f6f071674c62424b62e22307aa83a35a0b1b84820649cc82034a50389ddc6" dependencies = [ "alloy-eips", "alloy-primitives", @@ -499,9 +499,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebe68f35cafc465442862421ae2d123bb58c8df25f837d8866bf5fc278b74a52" +checksum = "63a857818fe47dacaa7cc7a9cdcfee212cf1ebf119ab7bd157065d434671892d" dependencies = [ "alloy-primitives", "serde", @@ -509,9 +509,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ed9e7b3233cb3e0aaeaedc4e21e1ea9d99e947a7206241a9f9521c138193978" +checksum = "2ee44332315ef1adde384e44db3b5724d74d0cd0e0856a681c4db2b4da3a423e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -530,9 +530,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be10f130b8be7c2351a3ea64b4bf07020fde5be8d1ac18db9a9a3496aa22bb19" +checksum = "d58fa055e02d04bc70443ecce984951fb5be02d2c843c640ca48237cdec66af1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -551,9 +551,9 @@ dependencies = [ [[package]] name = 
"alloy-rpc-types-mev" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "110f7dbee6f047915eb8915751d96402f6d02cb6e5f64286f10949eaa5bed841" +checksum = "debf779b847b058b7c9cdef576f5ef539bc3032c5f6e5c1c2f51820b4f74e6d9" dependencies = [ "alloy-eips", "alloy-primitives", @@ -564,9 +564,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d4f7f183d06db1457b58c6d618ff7ab92c97810138c148e09edb14ed2001069" +checksum = "1319edeae0e5f453424d658f8f450a5b1090b9ee6c0c014dc216b42f11c9dc57" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -578,9 +578,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f85580d4e78ffd765086ebf640004a773e3c335ebbfaa5666e13a0640c4957fe" +checksum = "5fcb4b823dcd7228a89be1be85a4fa8008ad6d91b169b61f75f36b6e7386f37b" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -590,9 +590,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1493df14770a23b1e32d22c66fa22508d09e0a99d6923a45f179ff7887ca0cef" +checksum = "feafd71e0e252b063fe4b07962beedf0445e66b07b4b44af178863d21e75b0fa" dependencies = [ "alloy-primitives", "arbitrary", @@ -602,9 +602,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebff64a3b4062eba217404700d1517b9bf3ff9a7a5b2dd03f1cf8aeec3e9a6b8" +checksum = "ebad84d52550351438ec7f151dbc551f870c31eecf23b473df5b779a91eee8ca" dependencies = [ "alloy-primitives", "async-trait", @@ -616,9 +616,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.6.2" 
+version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc1f6602be452e3bb5b6c2fe0fa0f966465f9e9bfd6ad7691bfe1bd8b74bf432" +checksum = "ed742d76943b5ebaabfdf3d0d8b69a4377fc2981c7955a807e33a3469aed0cdc" dependencies = [ "alloy-consensus", "alloy-network", @@ -704,9 +704,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64534da7f71ecca86b3449adec19b7942fb0905b9f392f60054a02a5f686f71f" +checksum = "da63700a2b3176b3009a6d3672d0c657280a517dcec7659c991c55e863a83165" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -724,9 +724,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "617b5ab96f4fb64ef697a84c68ec8534c062baafbdb0529c34aaee43324f0d5a" +checksum = "6613c3abc567b710217d241650ef73cfb8df9bcdc2ef23fdedabf363637e2a00" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -739,9 +739,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10043df9ea36e3a38056cdfc3a70138343caef4eec6df66d6cbfdd348d245828" +checksum = "7087a28734aac88a606884cdde8c89ad053bd1c0580c787e31f917a8e4a7cbdd" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -758,9 +758,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6a43ecdbc8f79cb5d7f54e2118626f873ded93c8c040fb714ce6be47dc5b526" +checksum = "672797b3f7bcbe67f712f9e8e5703b22f24594bd2b248a90916bdb58811b8b6e" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -1530,7 +1530,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" dependencies = [ "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "serde", ] @@ -1651,9 +1651,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.37" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" +checksum = "1aeb932158bd710538c73702db6945cb68a8fb08c519e6e12706b94263b36db8" dependencies = [ "jobserver", "libc", @@ -1998,9 +1998,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" dependencies = [ "libc", ] @@ -4111,6 +4111,12 @@ dependencies = [ "serde", ] +[[package]] +name = "indoc" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" + [[package]] name = "infer" version = "0.2.3" @@ -4167,10 +4173,14 @@ dependencies = [ [[package]] name = "instability" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" +checksum = "b829f37dead9dc39df40c2d3376c179fdfd2ac771f53f55d3c30dc096a3c0c6e" dependencies = [ + "darling", + "indoc", + "pretty_assertions", + "proc-macro2", "quote", "syn 2.0.87", ] @@ -5287,9 +5297,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33097177de330b1a83e0a882ae752ad55f23962b1e310176d1623655c18421e" +checksum = 
"3b5745eca869a0b476fbd34025ac40c06a15c46ffc10d6b1c40d21475b05f835" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5305,9 +5315,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2232ff799352932fc5484e1c63ee7bb1e74a79ac7b94a4f7318560fba21167de" +checksum = "aa6b2f26a84984213bc12649dfd8466a46ddeede3b8d2d936583000a8362b117" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5319,9 +5329,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f1021b644a8f0bf8d7f878aa5328da67c7d697e476c8e097d09e05585067713" +checksum = "67085a07a35e71db0a95ac923a2de2c186a37c5f376a1e4dee19b5ef8a6ffcaa" dependencies = [ "alloy-consensus", "alloy-network", @@ -5334,9 +5344,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a566c421638a3b655a2aaf59fbbdee017a7dce6acfbacead219861e14654b98d" +checksum = "880331b1b7718236a016eb7ac5530abcf7d5ca8b7ad78ac6c3dc8f73826ce9ee" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5354,9 +5364,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72298f3f9084773dc3feaf88b08db82ceb3e3e13f98280459d869accb3f14234" +checksum = "69b75a52c8659756cfe1119f7711e94749c8dec6ad82408f3c55641ae413fb83" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5373,9 +5383,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2a270e6370a0fa8a673e29bcd436cbb67b5dc88cefc1d00fbf2382673894f71" +checksum = 
"622eabdff1739ef163aeb8e8385d5936fa54c14cfa55b06f72f1c8faa987f715" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6225,7 +6235,7 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -6240,9 +6250,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -9660,9 +9670,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.39" +version = "0.38.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" +checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" dependencies = [ "bitflags 2.6.0", "errno", @@ -9950,18 +9960,18 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 7608756b12a..b06380de8d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -431,40 +431,40 @@ 
alloy-rlp = "0.3.4" alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.6.2", default-features = false } -alloy-contract = { version = "0.6.2", default-features = false } -alloy-eips = { version = "0.6.2", default-features = false } -alloy-genesis = { version = "0.6.2", default-features = false } -alloy-json-rpc = { version = "0.6.2", default-features = false } -alloy-network = { version = "0.6.2", default-features = false } -alloy-network-primitives = { version = "0.6.2", default-features = false } -alloy-node-bindings = { version = "0.6.2", default-features = false } -alloy-provider = { version = "0.6.2", features = [ +alloy-consensus = { version = "0.6.3", default-features = false } +alloy-contract = { version = "0.6.3", default-features = false } +alloy-eips = { version = "0.6.3", default-features = false } +alloy-genesis = { version = "0.6.3", default-features = false } +alloy-json-rpc = { version = "0.6.3", default-features = false } +alloy-network = { version = "0.6.3", default-features = false } +alloy-network-primitives = { version = "0.6.3", default-features = false } +alloy-node-bindings = { version = "0.6.3", default-features = false } +alloy-provider = { version = "0.6.3", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.6.2", default-features = false } -alloy-rpc-client = { version = "0.6.2", default-features = false } -alloy-rpc-types = { version = "0.6.2", features = [ +alloy-pubsub = { version = "0.6.3", default-features = false } +alloy-rpc-client = { version = "0.6.3", default-features = false } +alloy-rpc-types = { version = "0.6.3", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.6.2", default-features = false } -alloy-rpc-types-anvil = { version = "0.6.2", default-features = false } -alloy-rpc-types-beacon = { version = "0.6.2", default-features = false } -alloy-rpc-types-debug = { version = "0.6.2", 
default-features = false } -alloy-rpc-types-engine = { version = "0.6.2", default-features = false } -alloy-rpc-types-eth = { version = "0.6.2", default-features = false } -alloy-rpc-types-mev = { version = "0.6.2", default-features = false } -alloy-rpc-types-trace = { version = "0.6.2", default-features = false } -alloy-rpc-types-txpool = { version = "0.6.2", default-features = false } -alloy-serde = { version = "0.6.2", default-features = false } -alloy-signer = { version = "0.6.2", default-features = false } -alloy-signer-local = { version = "0.6.2", default-features = false } -alloy-transport = { version = "0.6.2" } -alloy-transport-http = { version = "0.6.2", features = [ +alloy-rpc-types-admin = { version = "0.6.3", default-features = false } +alloy-rpc-types-anvil = { version = "0.6.3", default-features = false } +alloy-rpc-types-beacon = { version = "0.6.3", default-features = false } +alloy-rpc-types-debug = { version = "0.6.3", default-features = false } +alloy-rpc-types-engine = { version = "0.6.3", default-features = false } +alloy-rpc-types-eth = { version = "0.6.3", default-features = false } +alloy-rpc-types-mev = { version = "0.6.3", default-features = false } +alloy-rpc-types-trace = { version = "0.6.3", default-features = false } +alloy-rpc-types-txpool = { version = "0.6.3", default-features = false } +alloy-serde = { version = "0.6.3", default-features = false } +alloy-signer = { version = "0.6.3", default-features = false } +alloy-signer-local = { version = "0.6.3", default-features = false } +alloy-transport = { version = "0.6.3" } +alloy-transport-http = { version = "0.6.3", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.6.2", default-features = false } -alloy-transport-ws = { version = "0.6.2", default-features = false } +alloy-transport-ipc = { version = "0.6.3", default-features = false } +alloy-transport-ws = { version = "0.6.3", default-features = false } # op op-alloy-rpc-types = 
"0.6.3" diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 485de92ea89..366f59696b2 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -751,6 +751,15 @@ impl alloy_consensus::Transaction for Transaction { } } + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(_) | Self::Eip2930(_) => false, + Self::Eip1559(_) | Self::Eip4844(_) | Self::Eip7702(_) => true, + #[cfg(feature = "optimism")] + Self::Deposit(_) => false, + } + } + fn value(&self) -> U256 { match self { Self::Legacy(tx) => tx.value(), @@ -1444,6 +1453,10 @@ impl alloy_consensus::Transaction for TransactionSigned { self.deref().priority_fee_or_price() } + fn is_dynamic_fee(&self) -> bool { + self.deref().is_dynamic_fee() + } + fn value(&self) -> U256 { self.deref().value() } From 3a337cd7d4ad8c14569d20141fdcc2b1c54dbdf3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 12 Nov 2024 15:04:07 +0100 Subject: [PATCH 429/970] chore(deps): Update op-alloy to 0.6.4 in manifest (#12472) --- Cargo.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b06380de8d7..cd7054ea1e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -467,10 +467,10 @@ alloy-transport-ipc = { version = "0.6.3", default-features = false } alloy-transport-ws = { version = "0.6.3", default-features = false } # op -op-alloy-rpc-types = "0.6.3" -op-alloy-rpc-types-engine = "0.6.3" -op-alloy-network = "0.6.3" -op-alloy-consensus = "0.6.3" +op-alloy-rpc-types = "0.6.4" +op-alloy-rpc-types-engine = "0.6.4" +op-alloy-network = "0.6.4" +op-alloy-consensus = "0.6.4" # misc aquamarine = "0.6" From aece53ae88990bb8d0849b60e9649f68781dd03e Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 12 Nov 2024 19:13:21 +0400 Subject: [PATCH 430/970] feat: make downloaders and clients generic over block parts (#12469) Co-authored-by: Matthias Seitz --- Cargo.lock | 6 +- bin/reth/Cargo.toml 
| 1 + bin/reth/src/commands/debug_cmd/execution.rs | 11 +- .../commands/debug_cmd/in_memory_merkle.rs | 4 +- crates/consensus/beacon/src/engine/mod.rs | 8 +- crates/consensus/beacon/src/engine/sync.rs | 8 +- .../consensus/beacon/src/engine/test_utils.rs | 12 +- crates/engine/service/src/service.rs | 8 +- crates/engine/tree/src/download.rs | 12 +- crates/net/downloaders/Cargo.toml | 1 + crates/net/downloaders/src/bodies/bodies.rs | 48 +++--- crates/net/downloaders/src/bodies/noop.rs | 5 +- crates/net/downloaders/src/bodies/queue.rs | 7 +- crates/net/downloaders/src/bodies/request.rs | 20 ++- crates/net/downloaders/src/bodies/task.rs | 30 ++-- crates/net/downloaders/src/file_client.rs | 2 + crates/net/downloaders/src/headers/noop.rs | 5 +- .../src/headers/reverse_headers.rs | 161 ++++++++++-------- crates/net/downloaders/src/headers/task.rs | 40 +++-- .../src/test_utils/bodies_client.rs | 1 + crates/net/eth-wire-types/Cargo.toml | 1 + crates/net/eth-wire-types/src/message.rs | 2 +- crates/net/eth-wire-types/src/primitives.rs | 4 +- crates/net/eth-wire/src/capability.rs | 11 +- crates/net/eth-wire/src/ethstream.rs | 102 ++++++----- crates/net/eth-wire/src/multiplex.rs | 16 +- crates/net/network-api/src/downloaders.rs | 7 +- crates/net/network-api/src/events.rs | 69 ++++++-- crates/net/network-api/src/lib.rs | 5 +- crates/net/network/Cargo.toml | 1 + crates/net/network/src/fetch/client.rs | 18 +- crates/net/network/src/fetch/mod.rs | 54 +++--- crates/net/network/src/message.rs | 27 +-- crates/net/network/src/network.rs | 9 +- crates/net/network/src/state.rs | 29 ++-- crates/net/p2p/Cargo.toml | 4 +- crates/net/p2p/src/bodies/client.rs | 13 +- crates/net/p2p/src/bodies/downloader.rs | 9 +- crates/net/p2p/src/bodies/response.rs | 11 +- crates/net/p2p/src/either.rs | 6 +- crates/net/p2p/src/error.rs | 9 +- crates/net/p2p/src/full_block.rs | 92 +++++----- crates/net/p2p/src/headers/client.rs | 10 +- crates/net/p2p/src/headers/downloader.rs | 25 ++- 
crates/net/p2p/src/headers/error.rs | 8 +- crates/net/p2p/src/lib.rs | 11 ++ crates/net/p2p/src/test_utils/bodies.rs | 1 + crates/net/p2p/src/test_utils/full_block.rs | 4 + crates/net/p2p/src/test_utils/headers.rs | 5 +- crates/node/builder/src/launch/common.rs | 2 +- crates/node/builder/src/setup.rs | 8 +- crates/node/core/Cargo.toml | 3 +- crates/node/core/src/node_config.rs | 11 +- crates/node/core/src/utils.rs | 21 ++- crates/primitives-traits/src/block/header.rs | 6 +- crates/primitives-traits/src/header/sealed.rs | 4 +- crates/primitives-traits/src/size.rs | 6 + crates/primitives/src/block.rs | 3 +- crates/stages/stages/src/stages/bodies.rs | 9 +- crates/stages/stages/src/stages/headers.rs | 14 +- 60 files changed, 631 insertions(+), 409 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 798852f8061..4792cb094de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6391,6 +6391,7 @@ dependencies = [ "reth-payload-primitives", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-revm", @@ -7029,6 +7030,7 @@ dependencies = [ name = "reth-downloaders" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", @@ -7829,6 +7831,7 @@ dependencies = [ name = "reth-network-p2p" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", @@ -7997,7 +8000,7 @@ dependencies = [ "reth-chainspec", "reth-cli-util", "reth-config", - "reth-consensus-common", + "reth-consensus", "reth-db", "reth-discv4", "reth-discv5", @@ -8006,6 +8009,7 @@ dependencies = [ "reth-network-p2p", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-rpc-eth-types", "reth-rpc-server-types", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ffd1998b24e..a152bea2681 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -19,6 +19,7 @@ reth-ethereum-cli.workspace = true reth-chainspec.workspace = true 
reth-config.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-db-api.workspace = true diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index cc584c89287..9056d3424c7 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -21,7 +21,7 @@ use reth_downloaders::{ use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_network_p2p::{headers::client::HeadersClient, BlockClient}; +use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ @@ -68,7 +68,7 @@ impl> Command { static_file_producer: StaticFileProducer>, ) -> eyre::Result> where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) @@ -137,11 +137,14 @@ impl> Command { Ok(network) } - async fn fetch_block_hash( + async fn fetch_block_hash( &self, client: Client, block: BlockNumber, - ) -> eyre::Result { + ) -> eyre::Result + where + Client: HeadersClient, + { info!(target: "reth::cli", ?block, "Fetching block from the network."); loop { match get_single_header(&client, BlockHashOrNumber::Number(block)).await { diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 2c56da9b4cf..bc36578a327 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -7,6 +7,7 @@ use crate::{ use alloy_eips::BlockHashOrNumber; use 
backon::{ConstantBuilder, Retryable}; use clap::Parser; +use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; @@ -124,7 +125,8 @@ impl> Command { let client = fetch_client.clone(); let chain = provider_factory.chain_spec(); - let block = (move || get_single_body(client.clone(), Arc::clone(&chain), header.clone())) + let consensus = Arc::new(EthBeaconConsensus::new(chain.clone())); + let block = (move || get_single_body(client.clone(), header.clone(), consensus.clone())) .retry(backoff) .notify( |err, _| warn!(target: "reth::cli", "Error requesting body: {err}. Retrying..."), diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 65904196e1c..8d385a64b8e 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -14,7 +14,7 @@ use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes, PayloadTypes} use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, - BlockClient, + EthBlockClient, }; use reth_node_types::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; @@ -174,7 +174,7 @@ type PendingForkchoiceUpdate = pub struct BeaconConsensusEngine where N: EngineNodeTypes, - Client: BlockClient, + Client: EthBlockClient, BT: BlockchainTreeEngine + BlockReader + BlockIdReader @@ -237,7 +237,7 @@ where + StageCheckpointReader + ChainSpecProvider + 'static, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance of the [`BeaconConsensusEngine`]. 
#[allow(clippy::too_many_arguments)] @@ -1799,7 +1799,7 @@ where impl Future for BeaconConsensusEngine where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, BT: BlockchainTreeEngine + BlockReader + BlockIdReader diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 9426ca19712..17d5d2281a3 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -8,7 +8,7 @@ use alloy_primitives::{BlockNumber, B256}; use futures::FutureExt; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, + EthBlockClient, }; use reth_primitives::SealedBlock; use reth_provider::providers::ProviderNodeTypes; @@ -34,7 +34,7 @@ use tracing::trace; pub(crate) struct EngineSyncController where N: ProviderNodeTypes, - Client: BlockClient, + Client: EthBlockClient, { /// A downloader that can download full blocks from the network. 
full_block_client: FullBlockClient, @@ -64,7 +64,7 @@ where impl EngineSyncController where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance pub(crate) fn new( @@ -522,7 +522,7 @@ mod tests { ) -> EngineSyncController> where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { let client = self .client diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 6e03aebfa8d..3c69e7f55c3 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -24,7 +24,9 @@ use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::{either::Either, test_utils::MockExecutorProvider}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; -use reth_network_p2p::{sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, BlockClient}; +use reth_network_p2p::{ + sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, EthBlockClient, +}; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::SealedHeader; use reth_provider::{ @@ -237,7 +239,7 @@ impl TestConsensusEngineBuilder { client: Client, ) -> NetworkedTestConsensusEngineBuilder where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { NetworkedTestConsensusEngineBuilder { base_config: self, client: Some(client) } } @@ -264,7 +266,7 @@ pub struct NetworkedTestConsensusEngineBuilder { impl NetworkedTestConsensusEngineBuilder where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Set the pipeline execution outputs to use for the test consensus engine. 
#[allow(dead_code)] @@ -319,7 +321,7 @@ where client: ClientType, ) -> NetworkedTestConsensusEngineBuilder where - ClientType: BlockClient + 'static, + ClientType: EthBlockClient + 'static, { NetworkedTestConsensusEngineBuilder { base_config: self.base_config, client: Some(client) } } @@ -450,7 +452,7 @@ pub fn spawn_consensus_engine( engine: TestBeaconConsensusEngine, ) -> oneshot::Receiver> where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { let (tx, rx) = oneshot::channel(); tokio::spawn(async move { diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 198438d457f..d383af6caa6 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -15,7 +15,7 @@ pub use reth_engine_tree::{ engine::EngineApiEvent, }; use reth_evm::execute::BlockExecutorProvider; -use reth_network_p2p::BlockClient; +use reth_network_p2p::EthBlockClient; use reth_node_types::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_validator::ExecutionPayloadValidator; @@ -49,7 +49,7 @@ type EngineServiceType = ChainOrchestrator< pub struct EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { orchestrator: EngineServiceType, @@ -59,7 +59,7 @@ where impl EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { /// Constructor for `EngineService`. 
@@ -124,7 +124,7 @@ where impl Stream for EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { type Item = ChainEvent; diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 9ecec70ae36..667808a4d62 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -6,12 +6,13 @@ use futures::FutureExt; use reth_consensus::Consensus; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, + BlockClient, EthBlockClient, }; use reth_primitives::{SealedBlock, SealedBlockWithSenders}; use std::{ cmp::{Ordering, Reverse}, collections::{binary_heap::PeekMut, BinaryHeap, HashSet, VecDeque}, + fmt::Debug, sync::Arc, task::{Context, Poll}, }; @@ -72,10 +73,13 @@ where impl BasicBlockDownloader where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance - pub fn new(client: Client, consensus: Arc) -> Self { + pub fn new( + client: Client, + consensus: Arc>, + ) -> Self { Self { full_block_client: FullBlockClient::new(client, consensus), inflight_full_block_requests: Vec::new(), @@ -182,7 +186,7 @@ where impl BlockDownloader for BasicBlockDownloader where - Client: BlockClient + 'static, + Client: EthBlockClient, { /// Handles incoming download actions. 
fn on_action(&mut self, action: DownloadAction) { diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 69a59f698de..38e46bb6011 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -28,6 +28,7 @@ reth-db-api = { workspace = true, optional = true } reth-testing-utils = { workspace = true, optional = true } # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index af113fdb38b..02a36c8e8cd 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -37,7 +37,7 @@ pub struct BodiesDownloader { /// The bodies client client: Arc, /// The consensus client - consensus: Arc, + consensus: Arc>, /// The database handle provider: Provider, /// The maximum number of non-empty blocks per one request @@ -57,16 +57,16 @@ pub struct BodiesDownloader { /// Requests in progress in_progress_queue: BodiesRequestQueue, /// Buffered responses - buffered_responses: BinaryHeap, + buffered_responses: BinaryHeap>, /// Queued body responses that can be returned for insertion into the database. - queued_bodies: Vec, + queued_bodies: Vec>, /// The bodies downloader metrics. metrics: BodyDownloaderMetrics, } impl BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. @@ -191,14 +191,14 @@ where } /// Queues bodies and sets the latest queued block number - fn queue_bodies(&mut self, bodies: Vec) { + fn queue_bodies(&mut self, bodies: Vec>) { self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number()); self.queued_bodies.extend(bodies); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); } /// Removes the next response from the buffer. 
- fn pop_buffered_response(&mut self) -> Option { + fn pop_buffered_response(&mut self) -> Option> { let resp = self.buffered_responses.pop()?; self.metrics.buffered_responses.decrement(1.); self.buffered_blocks_size_bytes -= resp.size(); @@ -208,10 +208,10 @@ where } /// Adds a new response to the internal buffer - fn buffer_bodies_response(&mut self, response: Vec) { + fn buffer_bodies_response(&mut self, response: Vec>) { // take into account capacity let size = response.iter().map(BlockResponse::size).sum::() + - response.capacity() * mem::size_of::(); + response.capacity() * mem::size_of::>(); let response = OrderedBodiesResponse { resp: response, size }; let response_len = response.len(); @@ -225,7 +225,7 @@ where } /// Returns a response if it's first block number matches the next expected. - fn try_next_buffered(&mut self) -> Option> { + fn try_next_buffered(&mut self) -> Option>> { if let Some(next) = self.buffered_responses.peek() { let expected = self.next_expected_block_number(); let next_block_range = next.block_range(); @@ -251,7 +251,7 @@ where /// Returns the next batch of block bodies that can be returned if we have enough buffered /// bodies - fn try_split_next_batch(&mut self) -> Option> { + fn try_split_next_batch(&mut self) -> Option>> { if self.queued_bodies.len() >= self.stream_batch_size { let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::>(); self.queued_bodies.shrink_to_fit(); @@ -283,12 +283,12 @@ where Self: BodyDownloader + 'static, { /// Spawns the downloader task via [`tokio::task::spawn`] - pub fn into_task(self) -> TaskDownloader { + pub fn into_task(self) -> TaskDownloader<::Body> { self.into_task_with(&TokioTaskExecutor::default()) } /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given spawner. 
- pub fn into_task_with(self, spawner: &S) -> TaskDownloader + pub fn into_task_with(self, spawner: &S) -> TaskDownloader<::Body> where S: TaskSpawner, { @@ -298,9 +298,11 @@ where impl BodyDownloader for BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { + type Body = B::Body; + /// Set a new download range (exclusive). /// /// This method will drain all queued bodies, filter out ones outside the range and put them @@ -346,10 +348,10 @@ where impl Stream for BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -431,13 +433,13 @@ where } #[derive(Debug)] -struct OrderedBodiesResponse { - resp: Vec, +struct OrderedBodiesResponse { + resp: Vec>, /// The total size of the response in bytes size: usize, } -impl OrderedBodiesResponse { +impl OrderedBodiesResponse { /// Returns the block number of the first element /// /// # Panics @@ -468,21 +470,21 @@ impl OrderedBodiesResponse { } } -impl PartialEq for OrderedBodiesResponse { +impl PartialEq for OrderedBodiesResponse { fn eq(&self, other: &Self) -> bool { self.first_block_number() == other.first_block_number() } } -impl Eq for OrderedBodiesResponse {} +impl Eq for OrderedBodiesResponse {} -impl PartialOrd for OrderedBodiesResponse { +impl PartialOrd for OrderedBodiesResponse { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedBodiesResponse { +impl Ord for OrderedBodiesResponse { fn cmp(&self, other: &Self) -> Ordering { self.first_block_number().cmp(&other.first_block_number()).reverse() } @@ -562,7 +564,7 @@ impl BodiesDownloaderBuilder { pub fn build( self, client: B, - consensus: Arc, + consensus: Arc>, provider: Provider, ) -> BodiesDownloader where diff 
--git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index e70c534a0e3..494a5f2ef2e 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -4,6 +4,7 @@ use reth_network_p2p::{ bodies::{downloader::BodyDownloader, response::BlockResponse}, error::{DownloadError, DownloadResult}, }; +use reth_primitives::BlockBody; use std::ops::RangeInclusive; /// A [`BodyDownloader`] implementation that does nothing. @@ -12,13 +13,15 @@ use std::ops::RangeInclusive; pub struct NoopBodiesDownloader; impl BodyDownloader for NoopBodiesDownloader { + type Body = BlockBody; + fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> { Ok(()) } } impl Stream for NoopBodiesDownloader { - type Item = Result, DownloadError>; + type Item = Result>, DownloadError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index db7ff71cfc9..54404d0da38 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -9,6 +9,7 @@ use reth_network_p2p::{ error::DownloadResult, }; use reth_primitives::SealedHeader; +use reth_primitives_traits::InMemorySize; use std::{ pin::Pin, sync::Arc, @@ -57,7 +58,7 @@ where pub(crate) fn push_new_request( &mut self, client: Arc, - consensus: Arc, + consensus: Arc>, request: Vec, ) { // Set last max requested block number @@ -77,9 +78,9 @@ where impl Stream for BodiesRequestQueue where - B: BodiesClient + 'static, + B: BodiesClient + 'static, { - type Item = DownloadResult>; + type Item = DownloadResult>>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().inner.poll_next_unpin(cx) diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 5ab44ed0811..7b99c81d89e 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ 
b/crates/net/downloaders/src/bodies/request.rs @@ -39,7 +39,7 @@ use std::{ /// and eventually disconnected. pub(crate) struct BodiesRequestFuture { client: Arc, - consensus: Arc, + consensus: Arc>, metrics: BodyDownloaderMetrics, /// Metrics for individual responses. This can be used to observe how the size (in bytes) of /// responses change while bodies are being downloaded. @@ -47,7 +47,7 @@ pub(crate) struct BodiesRequestFuture { // Headers to download. The collection is shrunk as responses are buffered. pending_headers: VecDeque, /// Internal buffer for all blocks - buffer: Vec, + buffer: Vec>, fut: Option, /// Tracks how many bodies we requested in the last request. last_request_len: Option, @@ -60,7 +60,7 @@ where /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( client: Arc, - consensus: Arc, + consensus: Arc>, metrics: BodyDownloaderMetrics, ) -> Self { Self { @@ -115,7 +115,10 @@ where /// Process block response. /// Returns an error if the response is invalid. - fn on_block_response(&mut self, response: WithPeerId>) -> DownloadResult<()> { + fn on_block_response(&mut self, response: WithPeerId>) -> DownloadResult<()> + where + B::Body: InMemorySize, + { let (peer_id, bodies) = response.split(); let request_len = self.last_request_len.unwrap_or_default(); let response_len = bodies.len(); @@ -158,7 +161,10 @@ where /// /// This method removes headers from the internal collection. /// If the response fails validation, then the header will be put back. 
- fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()> { + fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()> + where + B::Body: InMemorySize, + { let bodies_capacity = bodies.capacity(); let bodies_len = bodies.len(); let mut bodies = bodies.into_iter().peekable(); @@ -208,9 +214,9 @@ where impl Future for BodiesRequestFuture where - B: BodiesClient + 'static, + B: BodiesClient + 'static, { - type Output = DownloadResult>; + type Output = DownloadResult>>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index eeafb7ab121..2caf3199188 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -23,15 +23,15 @@ pub const BODIES_TASK_BUFFER_SIZE: usize = 4; /// A [BodyDownloader] that drives a spawned [BodyDownloader] on a spawned task. #[derive(Debug)] #[pin_project] -pub struct TaskDownloader { +pub struct TaskDownloader { #[pin] - from_downloader: ReceiverStream, + from_downloader: ReceiverStream>, to_downloader: UnboundedSender>, } // === impl TaskDownloader === -impl TaskDownloader { +impl TaskDownloader { /// Spawns the given `downloader` via [`tokio::task::spawn`] returns a [`TaskDownloader`] that's /// connected to that task. 
/// @@ -45,12 +45,16 @@ impl TaskDownloader { /// use reth_consensus::Consensus; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; /// use reth_network_p2p::bodies::client::BodiesClient; + /// use reth_primitives_traits::InMemorySize; /// use reth_storage_api::HeaderProvider; /// use std::sync::Arc; /// - /// fn t( + /// fn t< + /// B: BodiesClient + 'static, + /// Provider: HeaderProvider + Unpin + 'static, + /// >( /// client: Arc, - /// consensus: Arc, + /// consensus: Arc>, /// provider: Provider, /// ) { /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider); @@ -59,7 +63,7 @@ impl TaskDownloader { /// ``` pub fn spawn(downloader: T) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader + 'static, { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } @@ -68,7 +72,7 @@ impl TaskDownloader { /// that's connected to that task. pub fn spawn_with(downloader: T, spawner: &S) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader + 'static, S: TaskSpawner, { let (bodies_tx, bodies_rx) = mpsc::channel(BODIES_TASK_BUFFER_SIZE); @@ -86,15 +90,17 @@ impl TaskDownloader { } } -impl BodyDownloader for TaskDownloader { +impl BodyDownloader for TaskDownloader { + type Body = B; + fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()> { let _ = self.to_downloader.send(range); Ok(()) } } -impl Stream for TaskDownloader { - type Item = BodyDownloaderResult; +impl Stream for TaskDownloader { + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().from_downloader.poll_next(cx) @@ -102,9 +108,9 @@ impl Stream for TaskDownloader { } /// A [`BodyDownloader`] that runs on its own task -struct SpawnedDownloader { +struct SpawnedDownloader { updates: UnboundedReceiverStream>, - bodies_tx: PollSender, + bodies_tx: PollSender>, downloader: T, } diff --git 
a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 5b21c82fb3f..df35146e940 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -265,6 +265,7 @@ impl FromReader for FileClient { } impl HeadersClient for FileClient { + type Header = Header; type Output = HeadersFut; fn get_headers_with_priority( @@ -315,6 +316,7 @@ impl HeadersClient for FileClient { } impl BodiesClient for FileClient { + type Body = BlockBody; type Output = BodiesFut; fn get_block_bodies_with_priority( diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs index 210655f7e26..58da7312387 100644 --- a/crates/net/downloaders/src/headers/noop.rs +++ b/crates/net/downloaders/src/headers/noop.rs @@ -1,3 +1,4 @@ +use alloy_consensus::Header; use futures::Stream; use reth_network_p2p::headers::{ downloader::{HeaderDownloader, SyncTarget}, @@ -11,6 +12,8 @@ use reth_primitives::SealedHeader; pub struct NoopHeaderDownloader; impl HeaderDownloader for NoopHeaderDownloader { + type Header = Header; + fn update_local_head(&mut self, _: SealedHeader) {} fn update_sync_target(&mut self, _: SyncTarget) {} @@ -19,7 +22,7 @@ impl HeaderDownloader for NoopHeaderDownloader { } impl Stream for NoopHeaderDownloader { - type Item = Result, HeadersDownloaderError>; + type Item = Result, HeadersDownloaderError

>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index f0c28dc5d9f..9532b4b3a35 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -2,6 +2,7 @@ use super::task::TaskDownloader; use crate::metrics::HeaderDownloaderMetrics; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, Sealable, B256}; use futures::{stream::Stream, FutureExt}; @@ -19,7 +20,7 @@ use reth_network_p2p::{ priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{GotExpected, Header, SealedHeader}; +use reth_primitives::{GotExpected, SealedHeader}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::{Ordering, Reverse}, @@ -39,14 +40,14 @@ const REQUESTS_PER_PEER_MULTIPLIER: usize = 5; /// Wrapper for internal downloader errors. #[derive(Error, Debug)] -enum ReverseHeadersDownloaderError { +enum ReverseHeadersDownloaderError { #[error(transparent)] - Downloader(#[from] HeadersDownloaderError), + Downloader(#[from] HeadersDownloaderError), #[error(transparent)] Response(#[from] Box), } -impl From for ReverseHeadersDownloaderError { +impl From for ReverseHeadersDownloaderError { fn from(value: HeadersResponseError) -> Self { Self::Response(Box::new(value)) } @@ -66,17 +67,17 @@ impl From for ReverseHeadersDownloaderError { #[derive(Debug)] pub struct ReverseHeadersDownloader { /// Consensus client used to validate headers - consensus: Arc, + consensus: Arc>, /// Client used to download headers. client: Arc, /// The local head of the chain. - local_head: Option, + local_head: Option>, /// Block we want to close the gap to. sync_target: Option, /// The block number to use for requests. next_request_block_number: u64, /// Keeps track of the block we need to validate next. 
- lowest_validated_header: Option, + lowest_validated_header: Option>, /// Tip block number to start validating from (in reverse) next_chain_tip_block_number: u64, /// The batch size per one request @@ -97,11 +98,11 @@ pub struct ReverseHeadersDownloader { /// requests in progress in_progress_queue: FuturesUnordered>, /// Buffered, unvalidated responses - buffered_responses: BinaryHeap, + buffered_responses: BinaryHeap>, /// Buffered, _sorted_ and validated headers ready to be returned. /// /// Note: headers are sorted from high to low - queued_validated_headers: Vec, + queued_validated_headers: Vec>, /// Header downloader metrics. metrics: HeaderDownloaderMetrics, } @@ -110,7 +111,7 @@ pub struct ReverseHeadersDownloader { impl ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { /// Convenience method to create a [`ReverseHeadersDownloaderBuilder`] without importing it pub fn builder() -> ReverseHeadersDownloaderBuilder { @@ -120,7 +121,7 @@ where /// Returns the block number the local node is at. #[inline] fn local_block_number(&self) -> Option { - self.local_head.as_ref().map(|h| h.number) + self.local_head.as_ref().map(|h| h.number()) } /// Returns the existing local head block number @@ -130,7 +131,7 @@ where /// If the local head has not been set. #[inline] fn existing_local_block_number(&self) -> BlockNumber { - self.local_head.as_ref().expect("is initialized").number + self.local_head.as_ref().expect("is initialized").number() } /// Returns the existing sync target. @@ -197,14 +198,14 @@ where /// `lowest_validated_header`. /// /// This only returns `None` if we haven't fetched the initial chain tip yet. - fn lowest_validated_header(&self) -> Option<&SealedHeader> { + fn lowest_validated_header(&self) -> Option<&SealedHeader> { self.queued_validated_headers.last().or(self.lowest_validated_header.as_ref()) } /// Validate that the received header matches the expected sync target. 
fn validate_sync_target( &self, - header: &SealedHeader, + header: &SealedHeader, request: HeadersRequest, peer_id: PeerId, ) -> Result<(), Box> { @@ -220,12 +221,12 @@ where ), })) } - SyncTargetBlock::Number(number) if header.number != number => { + SyncTargetBlock::Number(number) if header.number() != number => { Err(Box::new(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::InvalidTipNumber(GotExpected { - got: header.number, + got: header.number(), expected: number, }), })) @@ -244,9 +245,9 @@ where fn process_next_headers( &mut self, request: HeadersRequest, - headers: Vec
, + headers: Vec, peer_id: PeerId, - ) -> Result<(), ReverseHeadersDownloaderError> { + ) -> Result<(), ReverseHeadersDownloaderError> { let mut validated = Vec::with_capacity(headers.len()); let sealed_headers = headers @@ -280,17 +281,17 @@ where if let Some((last_header, head)) = validated .last_mut() .zip(self.local_head.as_ref()) - .filter(|(last, head)| last.number == head.number + 1) + .filter(|(last, head)| last.number() == head.number() + 1) { // Every header must be valid on its own - if let Err(error) = self.consensus.validate_header(last_header) { + if let Err(error) = self.consensus.validate_header(&*last_header) { trace!(target: "downloaders::headers", %error, "Failed to validate header"); return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::HeaderValidation { hash: head.hash(), - number: head.number, + number: head.number(), error: Box::new(error), }, } @@ -299,9 +300,9 @@ where // If the header is valid on its own, but not against its parent, we return it as // detached head error. 
- if let Err(error) = self.consensus.validate_header_against_parent(last_header, head) { + if let Err(error) = self.consensus.validate_header_against_parent(&*last_header, head) { // Replace the last header with a detached variant - error!(target: "downloaders::headers", %error, number = last_header.number, hash = ?last_header.hash(), "Header cannot be attached to known canonical chain"); + error!(target: "downloaders::headers", %error, number = last_header.number(), hash = ?last_header.hash(), "Header cannot be attached to known canonical chain"); return Err(HeadersDownloaderError::DetachedHead { local_head: Box::new(head.clone()), header: Box::new(last_header.clone()), @@ -313,7 +314,7 @@ where // update tracked block info (falling block number) self.next_chain_tip_block_number = - validated.last().expect("exists").number.saturating_sub(1); + validated.last().expect("exists").number().saturating_sub(1); self.queued_validated_headers.extend(validated); Ok(()) @@ -345,7 +346,7 @@ where let skip = self .queued_validated_headers .iter() - .take_while(|last| last.number > target_block_number) + .take_while(|last| last.number() > target_block_number) .count(); // removes all headers that are higher than current target self.queued_validated_headers.drain(..skip); @@ -360,8 +361,8 @@ where /// Handles the response for the request for the sync target fn on_sync_target_outcome( &mut self, - response: HeadersRequestOutcome, - ) -> Result<(), ReverseHeadersDownloaderError> { + response: HeadersRequestOutcome, + ) -> Result<(), ReverseHeadersDownloaderError> { let sync_target = self.existing_sync_target(); let HeadersRequestOutcome { request, outcome } = response; match outcome { @@ -372,7 +373,7 @@ where self.metrics.total_downloaded.increment(headers.len() as u64); // sort headers from highest to lowest block number - headers.sort_unstable_by_key(|h| Reverse(h.number)); + headers.sort_unstable_by_key(|h| Reverse(h.number())); if headers.is_empty() { return 
Err(HeadersResponseError { @@ -401,12 +402,12 @@ where } } SyncTargetBlock::Number(number) => { - if target.number != number { + if target.number() != number { return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::InvalidTipNumber(GotExpected { - got: target.number, + got: target.number(), expected: number, }), } @@ -415,17 +416,17 @@ where } } - trace!(target: "downloaders::headers", head=?self.local_block_number(), hash=?target.hash(), number=%target.number, "Received sync target"); + trace!(target: "downloaders::headers", head=?self.local_block_number(), hash=?target.hash(), number=%target.number(), "Received sync target"); // This is the next block we need to start issuing requests from - let parent_block_number = target.number.saturating_sub(1); - self.on_block_number_update(target.number, parent_block_number); + let parent_block_number = target.number().saturating_sub(1); + self.on_block_number_update(target.number(), parent_block_number); self.queued_validated_headers.push(target); // try to validate all buffered responses blocked by this successful response self.try_validate_buffered() - .map(Err::<(), ReverseHeadersDownloaderError>) + .map(Err::<(), ReverseHeadersDownloaderError>) .transpose()?; Ok(()) @@ -439,8 +440,8 @@ where /// Invoked when we received a response fn on_headers_outcome( &mut self, - response: HeadersRequestOutcome, - ) -> Result<(), ReverseHeadersDownloaderError> { + response: HeadersRequestOutcome, + ) -> Result<(), ReverseHeadersDownloaderError> { let requested_block_number = response.block_number(); let HeadersRequestOutcome { request, outcome } = response; @@ -475,19 +476,19 @@ where } // sort headers from highest to lowest block number - headers.sort_unstable_by_key(|h| Reverse(h.number)); + headers.sort_unstable_by_key(|h| Reverse(h.number())); // validate the response let highest = &headers[0]; - trace!(target: "downloaders::headers", requested_block_number, highest=?highest.number, "Validating 
non-empty headers response"); + trace!(target: "downloaders::headers", requested_block_number, highest=?highest.number(), "Validating non-empty headers response"); - if highest.number != requested_block_number { + if highest.number() != requested_block_number { return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::HeadersResponseStartBlockMismatch(GotExpected { - got: highest.number, + got: highest.number(), expected: requested_block_number, }), } @@ -495,14 +496,14 @@ where } // check if the response is the next expected - if highest.number == self.next_chain_tip_block_number { + if highest.number() == self.next_chain_tip_block_number { // is next response, validate it self.process_next_headers(request, headers, peer_id)?; // try to validate all buffered responses blocked by this successful response self.try_validate_buffered() - .map(Err::<(), ReverseHeadersDownloaderError>) + .map(Err::<(), ReverseHeadersDownloaderError>) .transpose()?; - } else if highest.number > self.existing_local_block_number() { + } else if highest.number() > self.existing_local_block_number() { self.metrics.buffered_responses.increment(1.); // can't validate yet self.buffered_responses.push(OrderedHeadersResponse { @@ -549,7 +550,7 @@ where /// Attempts to validate the buffered responses /// /// Returns an error if the next expected response was popped, but failed validation. 
- fn try_validate_buffered(&mut self) -> Option { + fn try_validate_buffered(&mut self) -> Option> { loop { // Check to see if we've already received the next value let next_response = self.buffered_responses.peek_mut()?; @@ -598,7 +599,11 @@ where } /// Validate whether the header is valid in relation to it's parent - fn validate(&self, header: &SealedHeader, parent: &SealedHeader) -> DownloadResult<()> { + fn validate( + &self, + header: &SealedHeader, + parent: &SealedHeader, + ) -> DownloadResult<()> { validate_header_download(&self.consensus, header, parent) } @@ -614,7 +619,7 @@ where } /// Splits off the next batch of headers - fn split_next_batch(&mut self) -> Vec { + fn split_next_batch(&mut self) -> Vec> { let batch_size = self.stream_batch_size.min(self.queued_validated_headers.len()); let mut rem = self.queued_validated_headers.split_off(batch_size); std::mem::swap(&mut rem, &mut self.queued_validated_headers); @@ -644,12 +649,15 @@ where Self: HeaderDownloader + 'static, { /// Spawns the downloader task via [`tokio::task::spawn`] - pub fn into_task(self) -> TaskDownloader { + pub fn into_task(self) -> TaskDownloader<::Header> { self.into_task_with(&TokioTaskExecutor::default()) } /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given `spawner`. - pub fn into_task_with(self, spawner: &S) -> TaskDownloader + pub fn into_task_with( + self, + spawner: &S, + ) -> TaskDownloader<::Header> where S: TaskSpawner, { @@ -659,11 +667,17 @@ where impl HeaderDownloader for ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { - fn update_local_head(&mut self, head: SealedHeader) { + type Header = H::Header; + + fn update_local_head(&mut self, head: SealedHeader) { // ensure we're only yielding headers that are in range and follow the current local head. 
- while self.queued_validated_headers.last().is_some_and(|last| last.number <= head.number) { + while self + .queued_validated_headers + .last() + .is_some_and(|last| last.number() <= head.number()) + { // headers are sorted high to low self.queued_validated_headers.pop(); } @@ -686,7 +700,7 @@ where .queued_validated_headers .first() .filter(|h| h.hash() == tip) - .map(|h| h.number) + .map(|h| h.number()) { self.sync_target = Some(new_sync_target.with_number(target_number)); return @@ -740,9 +754,9 @@ where impl Stream for ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { - type Item = HeadersDownloaderResult>; + type Item = HeadersDownloaderResult>, H::Header>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -883,18 +897,18 @@ where } } -/// A future that returns a list of [`Header`] on success. +/// A future that returns a list of headers on success. #[derive(Debug)] struct HeadersRequestFuture { request: Option, fut: F, } -impl Future for HeadersRequestFuture +impl Future for HeadersRequestFuture where - F: Future>> + Sync + Send + Unpin, + F: Future>> + Sync + Send + Unpin, { - type Output = HeadersRequestOutcome; + type Output = HeadersRequestOutcome; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -906,14 +920,14 @@ where } /// The outcome of the [`HeadersRequestFuture`] -struct HeadersRequestOutcome { +struct HeadersRequestOutcome { request: HeadersRequest, - outcome: PeerRequestResult>, + outcome: PeerRequestResult>, } // === impl OrderedHeadersResponse === -impl HeadersRequestOutcome { +impl HeadersRequestOutcome { fn block_number(&self) -> u64 { self.request.start.as_number().expect("is number") } @@ -921,35 +935,35 @@ impl HeadersRequestOutcome { /// Wrapper type to order responses #[derive(Debug)] -struct OrderedHeadersResponse { - headers: Vec
, +struct OrderedHeadersResponse { + headers: Vec, request: HeadersRequest, peer_id: PeerId, } // === impl OrderedHeadersResponse === -impl OrderedHeadersResponse { +impl OrderedHeadersResponse { fn block_number(&self) -> u64 { self.request.start.as_number().expect("is number") } } -impl PartialEq for OrderedHeadersResponse { +impl PartialEq for OrderedHeadersResponse { fn eq(&self, other: &Self) -> bool { self.block_number() == other.block_number() } } -impl Eq for OrderedHeadersResponse {} +impl Eq for OrderedHeadersResponse {} -impl PartialOrd for OrderedHeadersResponse { +impl PartialOrd for OrderedHeadersResponse { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedHeadersResponse { +impl Ord for OrderedHeadersResponse { fn cmp(&self, other: &Self) -> Ordering { self.block_number().cmp(&other.block_number()) } @@ -1156,7 +1170,11 @@ impl ReverseHeadersDownloaderBuilder { /// Build [`ReverseHeadersDownloader`] with provided consensus /// and header client implementations - pub fn build(self, client: H, consensus: Arc) -> ReverseHeadersDownloader + pub fn build( + self, + client: H, + consensus: Arc>, + ) -> ReverseHeadersDownloader where H: HeadersClient + 'static, { @@ -1214,6 +1232,7 @@ fn calc_next_request( mod tests { use super::*; use crate::headers::test_utils::child_header; + use alloy_consensus::Header; use assert_matches::assert_matches; use reth_consensus::test_utils::TestConsensus; use reth_network_p2p::test_utils::TestHeadersClient; @@ -1296,7 +1315,7 @@ mod tests { assert!(downloader.sync_target_request.is_some()); downloader.sync_target_request.take(); - let target = SyncTarget::Gap(SealedHeader::new(Header::default(), B256::random())); + let target = SyncTarget::Gap(SealedHeader::new(Default::default(), B256::random())); downloader.update_sync_target(target); assert!(downloader.sync_target_request.is_none()); assert_matches!( @@ -1373,7 +1392,7 @@ mod tests { fn test_resp_order() { let mut heap = 
BinaryHeap::new(); let hi = 1u64; - heap.push(OrderedHeadersResponse { + heap.push(OrderedHeadersResponse::
{ headers: vec![], request: HeadersRequest { start: hi.into(), limit: 0, direction: Default::default() }, peer_id: Default::default(), diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index b3fa27fde59..81c4cd80da3 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -22,15 +22,15 @@ pub const HEADERS_TASK_BUFFER_SIZE: usize = 8; /// A [HeaderDownloader] that drives a spawned [HeaderDownloader] on a spawned task. #[derive(Debug)] #[pin_project] -pub struct TaskDownloader { +pub struct TaskDownloader { #[pin] - from_downloader: ReceiverStream>>, - to_downloader: UnboundedSender, + from_downloader: ReceiverStream>, H>>, + to_downloader: UnboundedSender>, } // === impl TaskDownloader === -impl TaskDownloader { +impl TaskDownloader { /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a [`TaskDownloader`] /// that's connected to that task. /// @@ -46,7 +46,8 @@ impl TaskDownloader { /// # use reth_downloaders::headers::task::TaskDownloader; /// # use reth_consensus::Consensus; /// # use reth_network_p2p::headers::client::HeadersClient; - /// # fn t(consensus:Arc, client: Arc) { + /// # use reth_primitives_traits::BlockHeader; + /// # fn t + 'static>(consensus:Arc>, client: Arc) { /// let downloader = ReverseHeadersDownloader::::builder().build( /// client, /// consensus @@ -55,7 +56,7 @@ impl TaskDownloader { /// # } pub fn spawn(downloader: T) -> Self where - T: HeaderDownloader + 'static, + T: HeaderDownloader
+ 'static, { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } @@ -64,7 +65,7 @@ impl TaskDownloader { /// that's connected to that task. pub fn spawn_with(downloader: T, spawner: &S) -> Self where - T: HeaderDownloader + 'static, + T: HeaderDownloader
+ 'static, S: TaskSpawner, { let (headers_tx, headers_rx) = mpsc::channel(HEADERS_TASK_BUFFER_SIZE); @@ -81,12 +82,14 @@ impl TaskDownloader { } } -impl HeaderDownloader for TaskDownloader { - fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { +impl HeaderDownloader for TaskDownloader { + type Header = H; + + fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { let _ = self.to_downloader.send(DownloaderUpdates::UpdateSyncGap(head, target)); } - fn update_local_head(&mut self, head: SealedHeader) { + fn update_local_head(&mut self, head: SealedHeader) { let _ = self.to_downloader.send(DownloaderUpdates::UpdateLocalHead(head)); } @@ -99,8 +102,8 @@ impl HeaderDownloader for TaskDownloader { } } -impl Stream for TaskDownloader { - type Item = HeadersDownloaderResult>; +impl Stream for TaskDownloader { + type Item = HeadersDownloaderResult>, H>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().from_downloader.poll_next(cx) @@ -108,9 +111,10 @@ impl Stream for TaskDownloader { } /// A [`HeaderDownloader`] that runs on its own task -struct SpawnedDownloader { - updates: UnboundedReceiverStream, - headers_tx: PollSender>>, +#[expect(clippy::complexity)] +struct SpawnedDownloader { + updates: UnboundedReceiverStream>, + headers_tx: PollSender>, T::Header>>, downloader: T, } @@ -170,9 +174,9 @@ impl Future for SpawnedDownloader { } /// Commands delegated tot the spawned [`HeaderDownloader`] -enum DownloaderUpdates { - UpdateSyncGap(SealedHeader, SyncTarget), - UpdateLocalHead(SealedHeader), +enum DownloaderUpdates { + UpdateSyncGap(SealedHeader, SyncTarget), + UpdateLocalHead(SealedHeader), UpdateSyncTarget(SyncTarget), SetBatchSize(usize), } diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index be8373f8235..d84d92363ee 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ 
b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -78,6 +78,7 @@ impl DownloadClient for TestBodiesClient { } impl BodiesClient for TestBodiesClient { + type Body = BlockBody; type Output = BodiesFut; fn get_block_bodies_with_priority( diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 1d2b5487245..582ab7557f3 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -18,6 +18,7 @@ reth-codecs-derive.workspace = true reth-primitives.workspace = true # ethereum +alloy-consensus.workspace = true alloy-chains = { workspace = true, features = ["rlp"] } alloy-eips.workspace = true alloy-primitives.workspace = true diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index cca6600d11e..2a6d973ffc3 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -148,7 +148,7 @@ impl From> for ProtocolMessage { /// Represents messages that can be sent to multiple peers. #[derive(Clone, Debug)] -pub struct ProtocolBroadcastMessage { +pub struct ProtocolBroadcastMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, /// The content of the message to be broadcasted, including specific data based on the message diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index ca85fa69ad6..04b8b429e2a 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -2,6 +2,7 @@ use std::fmt::Debug; +use alloy_consensus::BlockHeader; use alloy_rlp::{Decodable, Encodable}; /// Abstraction over primitive types which might appear in network messages. See @@ -10,7 +11,8 @@ pub trait NetworkPrimitives: Send + Sync + Unpin + Clone + Debug + PartialEq + Eq + 'static { /// The block header type. 
- type BlockHeader: Encodable + type BlockHeader: BlockHeader + + Encodable + Decodable + Send + Sync diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index d60e500744c..625971e0e7b 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -5,10 +5,11 @@ use crate::{ p2pstream::MAX_RESERVED_MESSAGE_ID, protocol::{ProtoVersion, Protocol}, version::ParseVersionError, - Capability, EthMessage, EthMessageID, EthVersion, + Capability, EthMessageID, EthVersion, }; use alloy_primitives::bytes::Bytes; use derive_more::{Deref, DerefMut}; +use reth_eth_wire_types::{EthMessage, EthNetworkPrimitives, NetworkPrimitives}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ @@ -30,9 +31,13 @@ pub struct RawCapabilityMessage { /// network. #[derive(Debug)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum CapabilityMessage { +pub enum CapabilityMessage { /// Eth sub-protocol message. - Eth(EthMessage), + #[cfg_attr( + feature = "serde", + serde(bound = "EthMessage: Serialize + serde::de::DeserializeOwned") + )] + Eth(EthMessage), /// Any other capability message. Other(RawCapabilityMessage), } diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 795dd630780..c971f6182ce 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -8,6 +8,7 @@ use crate::{ use alloy_primitives::bytes::{Bytes, BytesMut}; use futures::{ready, Sink, SinkExt, StreamExt}; use pin_project::pin_project; +use reth_eth_wire_types::NetworkPrimitives; use reth_primitives::{ForkFilter, GotExpected}; use std::{ pin::Pin, @@ -54,32 +55,32 @@ where /// Consumes the [`UnauthedEthStream`] and returns an [`EthStream`] after the `Status` /// handshake is completed successfully. This also returns the `Status` message sent by the /// remote peer. 
- pub async fn handshake( + pub async fn handshake( self, status: Status, fork_filter: ForkFilter, - ) -> Result<(EthStream, Status), EthStreamError> { + ) -> Result<(EthStream, Status), EthStreamError> { self.handshake_with_timeout(status, fork_filter, HANDSHAKE_TIMEOUT).await } /// Wrapper around handshake which enforces a timeout. - pub async fn handshake_with_timeout( + pub async fn handshake_with_timeout( self, status: Status, fork_filter: ForkFilter, timeout_limit: Duration, - ) -> Result<(EthStream, Status), EthStreamError> { + ) -> Result<(EthStream, Status), EthStreamError> { timeout(timeout_limit, Self::handshake_without_timeout(self, status, fork_filter)) .await .map_err(|_| EthStreamError::StreamTimeout)? } /// Handshake with no timeout - pub async fn handshake_without_timeout( + pub async fn handshake_without_timeout( mut self, status: Status, fork_filter: ForkFilter, - ) -> Result<(EthStream, Status), EthStreamError> { + ) -> Result<(EthStream, Status), EthStreamError> { trace!( %status, "sending eth status to peer" @@ -89,10 +90,8 @@ where // The max length for a status with TTD is: + self.inner .send( - alloy_rlp::encode(ProtocolMessage::from( - EthMessage::::Status(status), - )) - .into(), + alloy_rlp::encode(ProtocolMessage::::from(EthMessage::::Status(status))) + .into(), ) .await?; @@ -112,15 +111,14 @@ where } let version = status.version; - let msg: ProtocolMessage = - match ProtocolMessage::decode_message(version, &mut their_msg.as_ref()) { - Ok(m) => m, - Err(err) => { - debug!("decode error in eth handshake: msg={their_msg:x}"); - self.inner.disconnect(DisconnectReason::DisconnectRequested).await?; - return Err(EthStreamError::InvalidMessage(err)) - } - }; + let msg = match ProtocolMessage::::decode_message(version, &mut their_msg.as_ref()) { + Ok(m) => m, + Err(err) => { + debug!("decode error in eth handshake: msg={their_msg:x}"); + self.inner.disconnect(DisconnectReason::DisconnectRequested).await?; + return 
Err(EthStreamError::InvalidMessage(err)) + } + }; // The following checks should match the checks in go-ethereum: // https://github.com/ethereum/go-ethereum/blob/9244d5cd61f3ea5a7645fdf2a1a96d53421e412f/eth/protocols/eth/handshake.go#L87-L89 @@ -194,19 +192,21 @@ where /// compatible with eth-networking protocol messages, which get RLP encoded/decoded. #[pin_project] #[derive(Debug)] -pub struct EthStream { +pub struct EthStream { /// Negotiated eth version. version: EthVersion, #[pin] inner: S, + + _pd: std::marker::PhantomData, } -impl EthStream { +impl EthStream { /// Creates a new unauthed [`EthStream`] from a provided stream. You will need /// to manually handshake a peer. #[inline] pub const fn new(version: EthVersion, inner: S) -> Self { - Self { version, inner } + Self { version, inner, _pd: std::marker::PhantomData } } /// Returns the eth version. @@ -234,15 +234,16 @@ impl EthStream { } } -impl EthStream +impl EthStream where S: Sink + Unpin, EthStreamError: From, + N: NetworkPrimitives, { /// Same as [`Sink::start_send`] but accepts a [`EthBroadcastMessage`] instead. 
pub fn start_send_broadcast( &mut self, - item: EthBroadcastMessage, + item: EthBroadcastMessage, ) -> Result<(), EthStreamError> { self.inner.start_send_unpin(Bytes::from(alloy_rlp::encode( ProtocolBroadcastMessage::from(item), @@ -252,12 +253,13 @@ where } } -impl Stream for EthStream +impl Stream for EthStream where S: Stream> + Unpin, EthStreamError: From, + N: NetworkPrimitives, { - type Item = Result; + type Item = Result, EthStreamError>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); @@ -299,10 +301,11 @@ where } } -impl Sink for EthStream +impl Sink> for EthStream where S: CanDisconnect + Unpin, EthStreamError: From<>::Error>, + N: NetworkPrimitives, { type Error = EthStreamError; @@ -310,7 +313,7 @@ where self.project().inner.poll_ready(cx).map_err(Into::into) } - fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { + fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { if matches!(item, EthMessage::Status(_)) { // TODO: to disconnect here we would need to do something similar to P2PStream's // start_disconnect, which would ideally be a part of the CanDisconnect trait, or at @@ -340,10 +343,11 @@ where } } -impl CanDisconnect for EthStream +impl CanDisconnect> for EthStream where S: CanDisconnect + Send, EthStreamError: From<>::Error>, + N: NetworkPrimitives, { async fn disconnect(&mut self, reason: DisconnectReason) -> Result<(), EthStreamError> { self.inner.disconnect(reason).await.map_err(Into::into) @@ -365,6 +369,7 @@ mod tests { use futures::{SinkExt, StreamExt}; use reth_chainspec::NamedChain; use reth_ecies::stream::ECIESStream; + use reth_eth_wire_types::EthNetworkPrimitives; use reth_network_peers::pk2id; use reth_primitives::{ForkFilter, Head}; use secp256k1::{SecretKey, SECP256K1}; @@ -397,7 +402,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, 
their_status) = UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -409,8 +414,10 @@ mod tests { let sink = PassthroughCodec::default().framed(outgoing); // try to connect - let (_, their_status) = - UnauthedEthStream::new(sink).handshake(status, fork_filter).await.unwrap(); + let (_, their_status) = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await + .unwrap(); // their status is a clone of our status, these should be equal assert_eq!(their_status, status); @@ -444,7 +451,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, their_status) = UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -456,8 +463,10 @@ mod tests { let sink = PassthroughCodec::default().framed(outgoing); // try to connect - let (_, their_status) = - UnauthedEthStream::new(sink).handshake(status, fork_filter).await.unwrap(); + let (_, their_status) = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await + .unwrap(); // their status is a clone of our status, these should be equal assert_eq!(their_status, status); @@ -490,8 +499,9 @@ mod tests { // roughly based off of the design of tokio::net::TcpListener let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); - let handshake_res = - UnauthedEthStream::new(stream).handshake(status_clone, fork_filter_clone).await; + let handshake_res = UnauthedEthStream::new(stream) + .handshake::(status_clone, fork_filter_clone) + .await; // make sure the handshake fails due to td too high assert!(matches!( @@ -506,7 +516,9 @@ mod tests { let sink = PassthroughCodec::default().framed(outgoing); // try to connect - let handshake_res = UnauthedEthStream::new(sink).handshake(status, 
fork_filter).await; + let handshake_res = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await; // this handshake should also fail due to td too high assert!(matches!( @@ -524,7 +536,7 @@ mod tests { async fn can_write_and_read_cleartext() { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); - let test_msg = EthMessage::NewBlockHashes( + let test_msg: EthMessage = EthMessage::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -559,7 +571,7 @@ mod tests { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); let server_key = SecretKey::new(&mut rand::thread_rng()); - let test_msg = EthMessage::NewBlockHashes( + let test_msg: EthMessage = EthMessage::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -601,7 +613,7 @@ mod tests { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); let server_key = SecretKey::new(&mut rand::thread_rng()); - let test_msg = EthMessage::NewBlockHashes( + let test_msg: EthMessage = EthMessage::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -705,7 +717,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, their_status) = UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -718,7 +730,11 @@ mod tests { // try to connect let handshake_result = UnauthedEthStream::new(sink) - .handshake_with_timeout(status, fork_filter, Duration::from_secs(1)) + .handshake_with_timeout::( + status, + fork_filter, + Duration::from_secs(1), + ) 
.await; // Assert that a timeout error occurred diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index d1d977aba78..6f882f40887 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -24,6 +24,7 @@ use crate::{ }; use bytes::{Bytes, BytesMut}; use futures::{Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; +use reth_eth_wire_types::NetworkPrimitives; use reth_primitives::ForkFilter; use tokio::sync::{mpsc, mpsc::UnboundedSender}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -204,11 +205,11 @@ impl RlpxProtocolMultiplexer { /// Converts this multiplexer into a [`RlpxSatelliteStream`] with eth protocol as the given /// primary protocol. - pub async fn into_eth_satellite_stream( + pub async fn into_eth_satellite_stream( self, status: Status, fork_filter: ForkFilter, - ) -> Result<(RlpxSatelliteStream>, Status), EthStreamError> + ) -> Result<(RlpxSatelliteStream>, Status), EthStreamError> where St: Stream> + Sink + Unpin, { @@ -674,6 +675,7 @@ mod tests { }, UnauthedP2PStream, }; + use reth_eth_wire_types::EthNetworkPrimitives; use tokio::{net::TcpListener, sync::oneshot}; use tokio_util::codec::Decoder; @@ -693,7 +695,7 @@ mod tests { UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap(); let (_eth_stream, _) = UnauthedEthStream::new(p2p_stream) - .handshake(other_status, other_fork_filter) + .handshake::(other_status, other_fork_filter) .await .unwrap(); @@ -708,7 +710,9 @@ mod tests { .into_satellite_stream_with_handshake( eth.capability().as_ref(), move |proxy| async move { - UnauthedEthStream::new(proxy).handshake(status, fork_filter).await + UnauthedEthStream::new(proxy) + .handshake::(status, fork_filter) + .await }, ) .await @@ -731,7 +735,7 @@ mod tests { let (conn, _) = UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap(); let (mut st, _their_status) = RlpxProtocolMultiplexer::new(conn) - 
.into_eth_satellite_stream(other_status, other_fork_filter) + .into_eth_satellite_stream::(other_status, other_fork_filter) .await .unwrap(); @@ -762,7 +766,7 @@ mod tests { let conn = connect_passthrough(local_addr, test_hello().0).await; let (mut st, _their_status) = RlpxProtocolMultiplexer::new(conn) - .into_eth_satellite_stream(status, fork_filter) + .into_eth_satellite_stream::(status, fork_filter) .await .unwrap(); diff --git a/crates/net/network-api/src/downloaders.rs b/crates/net/network-api/src/downloaders.rs index f081c16ed81..cbfe816134e 100644 --- a/crates/net/network-api/src/downloaders.rs +++ b/crates/net/network-api/src/downloaders.rs @@ -1,5 +1,7 @@ //! API related to syncing blocks. +use std::fmt::Debug; + use futures::Future; use reth_network_p2p::BlockClient; use tokio::sync::oneshot; @@ -7,10 +9,13 @@ use tokio::sync::oneshot; /// Provides client for downloading blocks. #[auto_impl::auto_impl(&, Arc)] pub trait BlockDownloaderProvider { + /// The client this type can provide. + type Client: BlockClient + Send + Sync + Clone + 'static; + /// Returns a new [`BlockClient`], used for fetching blocks from peers. /// /// The client is the entrypoint for sending block requests to the network. 
fn fetch_client( &self, - ) -> impl Future> + Send; + ) -> impl Future> + Send; } diff --git a/crates/net/network-api/src/events.rs b/crates/net/network-api/src/events.rs index d2bd66d1fdd..af392b6f9ea 100644 --- a/crates/net/network-api/src/events.rs +++ b/crates/net/network-api/src/events.rs @@ -4,8 +4,9 @@ use std::{fmt, net::SocketAddr, sync::Arc}; use reth_eth_wire_types::{ message::RequestPair, BlockBodies, BlockHeaders, Capabilities, DisconnectReason, EthMessage, - EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData, GetPooledTransactions, GetReceipts, - NodeData, PooledTransactions, Receipts, Status, + EthNetworkPrimitives, EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData, + GetPooledTransactions, GetReceipts, NetworkPrimitives, NodeData, PooledTransactions, Receipts, + Status, }; use reth_ethereum_forks::ForkId; use reth_network_p2p::error::{RequestError, RequestResult}; @@ -30,8 +31,8 @@ pub trait NetworkEventListenerProvider: Send + Sync { /// /// This includes any event types that may be relevant to tasks, for metrics, keep track of peers /// etc. -#[derive(Debug, Clone)] -pub enum NetworkEvent { +#[derive(Debug)] +pub enum NetworkEvent { /// Closed the peer session. SessionClosed { /// The identifier of the peer to which a session was closed. @@ -50,7 +51,7 @@ pub enum NetworkEvent { /// Capabilities the peer announced capabilities: Arc, /// A request channel to the session task. - messages: PeerRequestSender, + messages: PeerRequestSender, /// The status of the peer to which a session was established. 
status: Arc, /// negotiated eth version of the session @@ -62,6 +63,35 @@ pub enum NetworkEvent { PeerRemoved(PeerId), } +impl Clone for NetworkEvent { + fn clone(&self) -> Self { + match self { + Self::SessionClosed { peer_id, reason } => { + Self::SessionClosed { peer_id: *peer_id, reason: *reason } + } + Self::SessionEstablished { + peer_id, + remote_addr, + client_version, + capabilities, + messages, + status, + version, + } => Self::SessionEstablished { + peer_id: *peer_id, + remote_addr: *remote_addr, + client_version: client_version.clone(), + capabilities: capabilities.clone(), + messages: messages.clone(), + status: status.clone(), + version: *version, + }, + Self::PeerAdded(peer) => Self::PeerAdded(*peer), + Self::PeerRemoved(peer) => Self::PeerRemoved(*peer), + } + } +} + /// Events produced by the `Discovery` manager. #[derive(Debug, Clone, PartialEq, Eq)] pub enum DiscoveryEvent { @@ -98,7 +128,7 @@ pub enum DiscoveredEvent { /// Protocol related request messages that expect a response #[derive(Debug)] -pub enum PeerRequest { +pub enum PeerRequest { /// Requests block headers from the peer. /// /// The response should be sent through the channel. @@ -106,7 +136,7 @@ pub enum PeerRequest { /// The request for block headers. request: GetBlockHeaders, /// The channel to send the response for block headers. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Requests block bodies from the peer. /// @@ -115,7 +145,7 @@ pub enum PeerRequest { /// The request for block bodies. request: GetBlockBodies, /// The channel to send the response for block bodies. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Requests pooled transactions from the peer. 
/// @@ -148,7 +178,7 @@ pub enum PeerRequest { // === impl PeerRequest === -impl PeerRequest { +impl PeerRequest { /// Invoked if we received a response which does not match the request pub fn send_bad_response(self) { self.send_err_response(RequestError::BadResponse) @@ -166,7 +196,7 @@ impl PeerRequest { } /// Returns the [`EthMessage`] for this type - pub fn create_request_message(&self, request_id: u64) -> EthMessage { + pub fn create_request_message(&self, request_id: u64) -> EthMessage { match self { Self::GetBlockHeaders { request, .. } => { EthMessage::GetBlockHeaders(RequestPair { request_id, message: *request }) @@ -199,24 +229,29 @@ impl PeerRequest { } /// A Cloneable connection for sending _requests_ directly to the session of a peer. -#[derive(Clone)] -pub struct PeerRequestSender { +pub struct PeerRequestSender { /// id of the remote node. pub peer_id: PeerId, /// The Sender half connected to a session. - pub to_session_tx: mpsc::Sender, + pub to_session_tx: mpsc::Sender, +} + +impl Clone for PeerRequestSender { + fn clone(&self) -> Self { + Self { peer_id: self.peer_id, to_session_tx: self.to_session_tx.clone() } + } } // === impl PeerRequestSender === -impl PeerRequestSender { +impl PeerRequestSender { /// Constructs a new sender instance that's wired to a session - pub const fn new(peer_id: PeerId, to_session_tx: mpsc::Sender) -> Self { + pub const fn new(peer_id: PeerId, to_session_tx: mpsc::Sender) -> Self { Self { peer_id, to_session_tx } } /// Attempts to immediately send a message on this Sender - pub fn try_send(&self, req: PeerRequest) -> Result<(), mpsc::error::TrySendError> { + pub fn try_send(&self, req: R) -> Result<(), mpsc::error::TrySendError> { self.to_session_tx.try_send(req) } @@ -226,7 +261,7 @@ impl PeerRequestSender { } } -impl fmt::Debug for PeerRequestSender { +impl fmt::Debug for PeerRequestSender { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PeerRequestSender").field("peer_id", 
&self.peer_id).finish_non_exhaustive() } diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 6163c873003..986d490c34f 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -36,6 +36,7 @@ pub use events::{ use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; use reth_eth_wire_types::{capability::Capabilities, DisconnectReason, EthVersion, Status}; +use reth_network_p2p::EthBlockClient; use reth_network_peers::NodeRecord; /// The `PeerId` type. @@ -43,7 +44,7 @@ pub type PeerId = alloy_primitives::B512; /// Helper trait that unifies network API needed to launch node. pub trait FullNetwork: - BlockDownloaderProvider + BlockDownloaderProvider + NetworkSyncUpdater + NetworkInfo + NetworkEventListenerProvider @@ -55,7 +56,7 @@ pub trait FullNetwork: } impl FullNetwork for T where - T: BlockDownloaderProvider + T: BlockDownloaderProvider + NetworkSyncUpdater + NetworkInfo + NetworkEventListenerProvider diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index f444aa7fe27..148eef34b36 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -34,6 +34,7 @@ reth-network-peers = { workspace = true, features = ["net"] } reth-network-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index c47ee5d234f..584c079b8d8 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -7,6 +7,7 @@ use std::sync::{ use alloy_primitives::B256; use futures::{future, future::Either}; +use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, @@ -17,7 +18,6 @@ use reth_network_p2p::{ }; use 
reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::Header; use tokio::sync::{mpsc::UnboundedSender, oneshot}; use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; @@ -30,16 +30,16 @@ use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; /// /// include_mmd!("docs/mermaid/fetch-client.mmd") #[derive(Debug, Clone)] -pub struct FetchClient { +pub struct FetchClient { /// Sender half of the request channel. - pub(crate) request_tx: UnboundedSender, + pub(crate) request_tx: UnboundedSender>, /// The handle to the peers pub(crate) peers_handle: PeersHandle, /// Number of active peer sessions the node's currently handling. pub(crate) num_active_peers: Arc, } -impl DownloadClient for FetchClient { +impl DownloadClient for FetchClient { fn report_bad_message(&self, peer_id: PeerId) { self.peers_handle.reputation_change(peer_id, ReputationChangeKind::BadMessage); } @@ -53,8 +53,9 @@ impl DownloadClient for FetchClient { // or an error. type HeadersClientFuture = Either, future::Ready>; -impl HeadersClient for FetchClient { - type Output = HeadersClientFuture>>; +impl HeadersClient for FetchClient { + type Header = N::BlockHeader; + type Output = HeadersClientFuture>>; /// Sends a `GetBlockHeaders` request to an available peer. fn get_headers_with_priority( @@ -75,8 +76,9 @@ impl HeadersClient for FetchClient { } } -impl BodiesClient for FetchClient { - type Output = BodiesFut; +impl BodiesClient for FetchClient { + type Body = N::BlockBody; + type Output = BodiesFut; /// Sends a `GetBlockBodies` request to an available peer. 
fn get_block_bodies_with_priority( diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index d37fa8b4f4a..8af6300b705 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -15,7 +15,7 @@ use std::{ use alloy_primitives::B256; use futures::StreamExt; -use reth_eth_wire::{GetBlockBodies, GetBlockHeaders}; +use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ error::{EthResponseValidator, PeerRequestResult, RequestError, RequestResult}, @@ -24,12 +24,14 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{BlockBody, Header}; use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; use crate::message::BlockRequest; +type InflightHeadersRequest = Request>>; +type InflightBodiesRequest = Request, PeerRequestResult>>; + /// Manages data fetching operations. /// /// This type is hooked into the staged sync pipeline and delegates download request to available @@ -37,13 +39,11 @@ use crate::message::BlockRequest; /// /// This type maintains a list of connected peers that are available for requests. #[derive(Debug)] -pub struct StateFetcher { +pub struct StateFetcher { /// Currently active [`GetBlockHeaders`] requests - inflight_headers_requests: - HashMap>>>, + inflight_headers_requests: HashMap>, /// Currently active [`GetBlockBodies`] requests - inflight_bodies_requests: - HashMap, PeerRequestResult>>>, + inflight_bodies_requests: HashMap>, /// The list of _available_ peers for requests. peers: HashMap, /// The handle to the peers manager @@ -51,16 +51,16 @@ pub struct StateFetcher { /// Number of active peer sessions the node's currently handling. 
num_active_peers: Arc, /// Requests queued for processing - queued_requests: VecDeque, + queued_requests: VecDeque>, /// Receiver for new incoming download requests - download_requests_rx: UnboundedReceiverStream, + download_requests_rx: UnboundedReceiverStream>, /// Sender for download requests, used to detach a [`FetchClient`] - download_requests_tx: UnboundedSender, + download_requests_tx: UnboundedSender>, } // === impl StateSyncer === -impl StateFetcher { +impl StateFetcher { pub(crate) fn new(peers_handle: PeersHandle, num_active_peers: Arc) -> Self { let (download_requests_tx, download_requests_rx) = mpsc::unbounded_channel(); Self { @@ -217,7 +217,7 @@ impl StateFetcher { /// Handles a new request to a peer. /// /// Caution: this assumes the peer exists and is idle - fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest) -> BlockRequest { + fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest) -> BlockRequest { // update the peer's state if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = req.peer_state(); @@ -260,7 +260,7 @@ impl StateFetcher { pub(crate) fn on_block_headers_response( &mut self, peer_id: PeerId, - res: RequestResult>, + res: RequestResult>, ) -> Option { let is_error = res.is_err(); let maybe_reputation_change = res.reputation_change_err(); @@ -296,7 +296,7 @@ impl StateFetcher { pub(crate) fn on_block_bodies_response( &mut self, peer_id: PeerId, - res: RequestResult>, + res: RequestResult>, ) -> Option { let is_likely_bad_response = res.as_ref().map_or(true, |bodies| bodies.is_empty()); @@ -315,7 +315,7 @@ impl StateFetcher { } /// Returns a new [`FetchClient`] that can send requests to this type. 
- pub(crate) fn client(&self) -> FetchClient { + pub(crate) fn client(&self) -> FetchClient { FetchClient { request_tx: self.download_requests_tx.clone(), peers_handle: self.peers_handle.clone(), @@ -405,24 +405,24 @@ struct Request { /// Requests that can be sent to the Syncer from a [`FetchClient`] #[derive(Debug)] -pub(crate) enum DownloadRequest { +pub(crate) enum DownloadRequest { /// Download the requested headers and send response through channel GetBlockHeaders { request: HeadersRequest, - response: oneshot::Sender>>, + response: oneshot::Sender>>, priority: Priority, }, /// Download the requested headers and send response through channel GetBlockBodies { request: Vec, - response: oneshot::Sender>>, + response: oneshot::Sender>>, priority: Priority, }, } // === impl DownloadRequest === -impl DownloadRequest { +impl DownloadRequest { /// Returns the corresponding state for a peer that handles the request. const fn peer_state(&self) -> PeerState { match self { @@ -472,13 +472,14 @@ pub(crate) enum BlockResponseOutcome { mod tests { use super::*; use crate::{peers::PeersManager, PeersConfig}; + use alloy_consensus::Header; use alloy_primitives::B512; use std::future::poll_fn; #[tokio::test(flavor = "multi_thread")] async fn test_poll_fetcher() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); poll_fn(move |cx| { assert!(fetcher.poll(cx).is_pending()); @@ -498,7 +499,7 @@ mod tests { #[tokio::test] async fn test_peer_rotation() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -521,7 +522,7 @@ mod tests { #[tokio::test] async fn 
test_peer_prioritization() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -546,7 +547,7 @@ mod tests { #[tokio::test] async fn test_on_block_headers_response() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); let peer_id = B512::random(); assert_eq!(fetcher.on_block_headers_response(peer_id, Ok(vec![Header::default()])), None); @@ -576,7 +577,7 @@ mod tests { #[tokio::test] async fn test_header_response_outcome() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); let peer_id = B512::random(); let request_pair = || { @@ -610,7 +611,10 @@ mod tests { let outcome = fetcher.on_block_headers_response(peer_id, Err(RequestError::Timeout)).unwrap(); - assert!(EthResponseValidator::reputation_change_err(&Err(RequestError::Timeout)).is_some()); + assert!(EthResponseValidator::reputation_change_err(&Err::, _>( + RequestError::Timeout + )) + .is_some()); match outcome { BlockResponseOutcome::BadResponse(peer, _) => { diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 6b8287fe51c..bdb13875f12 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -12,12 +12,13 @@ use alloy_primitives::{Bytes, B256}; use futures::FutureExt; use reth_eth_wire::{ capability::RawCapabilityMessage, message::RequestPair, BlockBodies, BlockHeaders, EthMessage, - GetBlockBodies, GetBlockHeaders, NewBlock, NewBlockHashes, 
NewPooledTransactionHashes, - NodeData, PooledTransactions, Receipts, SharedTransactions, Transactions, + EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives, NewBlock, + NewBlockHashes, NewPooledTransactionHashes, NodeData, PooledTransactions, Receipts, + SharedTransactions, Transactions, }; use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; -use reth_primitives::{BlockBody, Header, PooledTransactionsElement, ReceiptWithBloom}; +use reth_primitives::{PooledTransactionsElement, ReceiptWithBloom}; use tokio::sync::oneshot; /// Internal form of a `NewBlock` message @@ -74,16 +75,16 @@ pub enum BlockRequest { /// Corresponding variant for [`PeerRequest`]. #[derive(Debug)] -pub enum PeerResponse { +pub enum PeerResponse { /// Represents a response to a request for block headers. BlockHeaders { /// The receiver channel for the response to a block headers request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for block bodies. BlockBodies { /// The receiver channel for the response to a block bodies request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for pooled transactions. PooledTransactions { @@ -104,9 +105,9 @@ pub enum PeerResponse { // === impl PeerResponse === -impl PeerResponse { +impl PeerResponse { /// Polls the type to completion. - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { macro_rules! poll_request { ($response:ident, $item:ident, $cx:ident) => { match ready!($response.poll_unpin($cx)) { @@ -139,11 +140,11 @@ impl PeerResponse { /// All response variants for [`PeerResponse`] #[derive(Debug)] -pub enum PeerResponseResult { +pub enum PeerResponseResult { /// Represents a result containing block headers or an error. 
- BlockHeaders(RequestResult>), + BlockHeaders(RequestResult>), /// Represents a result containing block bodies or an error. - BlockBodies(RequestResult>), + BlockBodies(RequestResult>), /// Represents a result containing pooled transactions or an error. PooledTransactions(RequestResult>), /// Represents a result containing node data or an error. @@ -154,9 +155,9 @@ pub enum PeerResponseResult { // === impl PeerResponseResult === -impl PeerResponseResult { +impl PeerResponseResult { /// Converts this response into an [`EthMessage`] - pub fn try_into_message(self, id: u64) -> RequestResult { + pub fn try_into_message(self, id: u64) -> RequestResult> { macro_rules! to_message { ($response:ident, $item:ident, $request_id:ident) => { match $response { diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 594ad4d155d..4175757e0cf 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -18,10 +18,7 @@ use reth_network_api::{ NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerInfo, PeerRequest, Peers, PeersInfo, }; -use reth_network_p2p::{ - sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}, - BlockClient, -}; +use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::{PeerAddr, PeerKind, Reputation, ReputationChangeKind}; use reth_primitives::{Head, TransactionSigned}; @@ -400,7 +397,9 @@ impl NetworkSyncUpdater for NetworkHandle { } impl BlockDownloaderProvider for NetworkHandle { - async fn fetch_client(&self) -> Result { + type Client = FetchClient; + + async fn fetch_client(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self.manager().send(NetworkHandleMessage::FetchClient(tx)); rx.await diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 5caa656a98e..9ad7b53518b 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs 
@@ -14,7 +14,10 @@ use std::{ use alloy_primitives::B256; use rand::seq::SliceRandom; -use reth_eth_wire::{BlockHashNumber, Capabilities, DisconnectReason, NewBlockHashes, Status}; +use reth_eth_wire::{ + BlockHashNumber, Capabilities, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, + NewBlockHashes, Status, +}; use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use reth_network_types::{PeerAddr, PeerKind}; @@ -69,9 +72,9 @@ impl Deref for BlockNumReader { /// /// This type is also responsible for responding for received request. #[derive(Debug)] -pub struct NetworkState { +pub struct NetworkState { /// All active peers and their state. - active_peers: HashMap, + active_peers: HashMap>, /// Manages connections to peers. peers_manager: PeersManager, /// Buffered messages until polled. @@ -88,10 +91,10 @@ pub struct NetworkState { /// The fetcher streams `RLPx` related requests on a per-peer basis to this type. This type /// will then queue in the request and notify the fetcher once the result has been /// received. - state_fetcher: StateFetcher, + state_fetcher: StateFetcher, } -impl NetworkState { +impl NetworkState { /// Create a new state instance with the given params pub(crate) fn new( client: BlockNumReader, @@ -126,7 +129,7 @@ impl NetworkState { } /// Returns a new [`FetchClient`] - pub(crate) fn fetch_client(&self) -> FetchClient { + pub(crate) fn fetch_client(&self) -> FetchClient { self.state_fetcher.client() } @@ -144,7 +147,7 @@ impl NetworkState { peer: PeerId, capabilities: Arc, status: Arc, - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, timeout: Arc, ) { debug_assert!(!self.active_peers.contains_key(&peer), "Already connected; not possible"); @@ -399,7 +402,11 @@ impl NetworkState { /// Delegates the response result to the fetcher which may return an outcome specific /// instruction that needs to be handled in [`Self::on_block_response_outcome`]. 
This could be /// a follow-up request or an instruction to slash the peer's reputation. - fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) -> Option { + fn on_eth_response( + &mut self, + peer: PeerId, + resp: PeerResponseResult, + ) -> Option { match resp { PeerResponseResult::BlockHeaders(res) => { let outcome = self.state_fetcher.on_block_headers_response(peer, res)?; @@ -492,16 +499,16 @@ impl NetworkState { /// /// For example known blocks,so we can decide what to announce. #[derive(Debug)] -pub(crate) struct ActivePeer { +pub(crate) struct ActivePeer { /// Best block of the peer. pub(crate) best_hash: B256, /// The capabilities of the remote peer. #[allow(dead_code)] pub(crate) capabilities: Arc, /// A communication channel directly to the session task. - pub(crate) request_tx: PeerRequestSender, + pub(crate) request_tx: PeerRequestSender>, /// The response receiver for a currently active request to that peer. - pub(crate) pending_response: Option, + pub(crate) pending_response: Option>, /// Blocks we know the peer has. 
pub(crate) blocks: LruCache, } diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index 89855396925..9348bf2d041 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -22,6 +22,7 @@ reth-network-types.workspace = true reth-storage-errors.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true @@ -55,5 +56,6 @@ std = [ "reth-primitives/std", "alloy-eips/std", "alloy-primitives/std", - "reth-primitives-traits/std" + "reth-primitives-traits/std", + "alloy-consensus/std", ] diff --git a/crates/net/p2p/src/bodies/client.rs b/crates/net/p2p/src/bodies/client.rs index 2a4b57c2345..d48fccc6d00 100644 --- a/crates/net/p2p/src/bodies/client.rs +++ b/crates/net/p2p/src/bodies/client.rs @@ -9,13 +9,16 @@ use futures::{Future, FutureExt}; use reth_primitives::BlockBody; /// The bodies future type -pub type BodiesFut = Pin>> + Send + Sync>>; +pub type BodiesFut = + Pin>> + Send + Sync>>; /// A client capable of downloading block bodies. #[auto_impl::auto_impl(&, Arc, Box)] pub trait BodiesClient: DownloadClient { + /// The body type this client fetches. + type Body: Send + Sync + Unpin + 'static; /// The output of the request future for querying block bodies. - type Output: Future>> + Sync + Send + Unpin; + type Output: Future>> + Sync + Send + Unpin; /// Fetches the block body for the requested block. 
fn get_block_bodies(&self, hashes: Vec) -> Self::Output { @@ -49,11 +52,11 @@ pub struct SingleBodyRequest { fut: Fut, } -impl Future for SingleBodyRequest +impl Future for SingleBodyRequest where - Fut: Future>> + Sync + Send + Unpin, + Fut: Future>> + Sync + Send + Unpin, { - type Output = PeerRequestResult>; + type Output = PeerRequestResult>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let resp = ready!(self.get_mut().fut.poll_unpin(cx)); diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index b55229fa242..f335b21438b 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -5,14 +5,19 @@ use futures::Stream; use std::ops::RangeInclusive; /// Body downloader return type. -pub type BodyDownloaderResult = DownloadResult>; +pub type BodyDownloaderResult = DownloadResult>>; /// A downloader capable of fetching and yielding block bodies from block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block bodies, /// while a [`BodiesClient`][crate::bodies::client::BodiesClient] represents a client capable of /// fulfilling these requests. -pub trait BodyDownloader: Send + Sync + Stream + Unpin { +pub trait BodyDownloader: + Send + Sync + Stream> + Unpin +{ + /// The type of the body that is being downloaded. + type Body: Send + Sync + Unpin + 'static; + /// Method for setting the download range. 
fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()>; } diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 0a45008acd8..8737647bd79 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -1,17 +1,17 @@ use alloy_primitives::{BlockNumber, U256}; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; use reth_primitives_traits::InMemorySize; /// The block response #[derive(PartialEq, Eq, Debug, Clone)] -pub enum BlockResponse { +pub enum BlockResponse { /// Full block response (with transactions or ommers) - Full(SealedBlock), + Full(SealedBlock), /// The empty block response Empty(SealedHeader), } -impl BlockResponse { +impl BlockResponse { /// Return the reference to the response header pub const fn header(&self) -> &SealedHeader { match self { @@ -34,8 +34,7 @@ impl BlockResponse { } } -impl InMemorySize for BlockResponse { - /// Calculates a heuristic for the in-memory size of the [`BlockResponse`]. +impl InMemorySize for BlockResponse { #[inline] fn size(&self) -> usize { match self { diff --git a/crates/net/p2p/src/either.rs b/crates/net/p2p/src/either.rs index 30650069b91..3f1182bd482 100644 --- a/crates/net/p2p/src/either.rs +++ b/crates/net/p2p/src/either.rs @@ -32,8 +32,9 @@ where impl BodiesClient for Either where A: BodiesClient, - B: BodiesClient, + B: BodiesClient, { + type Body = A::Body; type Output = Either; fn get_block_bodies_with_priority( @@ -51,8 +52,9 @@ where impl HeadersClient for Either where A: HeadersClient, - B: HeadersClient, + B: HeadersClient
, { + type Header = A::Header; type Output = Either; fn get_headers_with_priority( diff --git a/crates/net/p2p/src/error.rs b/crates/net/p2p/src/error.rs index 9394a9fdf6c..181a0b96b3c 100644 --- a/crates/net/p2p/src/error.rs +++ b/crates/net/p2p/src/error.rs @@ -1,13 +1,14 @@ use std::ops::RangeInclusive; use super::headers::client::HeadersRequest; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use derive_more::{Display, Error}; use reth_consensus::ConsensusError; use reth_network_peers::WithPeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{GotExpected, GotExpectedBoxed, Header}; +use reth_primitives::{GotExpected, GotExpectedBoxed}; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; use tokio::sync::{mpsc, oneshot}; @@ -26,7 +27,7 @@ pub trait EthResponseValidator { fn reputation_change_err(&self) -> Option; } -impl EthResponseValidator for RequestResult> { +impl EthResponseValidator for RequestResult> { fn is_likely_bad_headers_response(&self, request: &HeadersRequest) -> bool { match self { Ok(headers) => { @@ -38,7 +39,7 @@ impl EthResponseValidator for RequestResult> { match request.start { BlockHashOrNumber::Number(block_number) => { - headers.first().is_some_and(|header| block_number != header.number) + headers.first().is_some_and(|header| block_number != header.number()) } BlockHashOrNumber::Hash(_) => { // we don't want to hash the header @@ -216,6 +217,8 @@ impl From for DownloadError { #[cfg(test)] mod tests { + use alloy_consensus::Header; + use super::*; #[test] diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index a61d4ea126d..8f176f8da8a 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -5,16 +5,18 @@ use crate::{ headers::client::{HeadersClient, SingleHeaderRequest}, BlockClient, }; +use alloy_consensus::BlockHeader; use alloy_primitives::{Sealable, B256}; use 
reth_consensus::Consensus; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::WithPeerId; -use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader}; +use reth_primitives::{SealedBlock, SealedHeader}; use std::{ cmp::Reverse, collections::{HashMap, VecDeque}, fmt::Debug, future::Future, + hash::Hash, pin::Pin, sync::Arc, task::{ready, Context, Poll}, @@ -23,14 +25,23 @@ use tracing::debug; /// A Client that can fetch full blocks from the network. #[derive(Debug, Clone)] -pub struct FullBlockClient { +pub struct FullBlockClient +where + Client: BlockClient, +{ client: Client, - consensus: Arc, + consensus: Arc>, } -impl FullBlockClient { +impl FullBlockClient +where + Client: BlockClient, +{ /// Creates a new instance of `FullBlockClient`. - pub fn new(client: Client, consensus: Arc) -> Self { + pub fn new( + client: Client, + consensus: Arc>, + ) -> Self { Self { client, consensus } } @@ -111,16 +122,16 @@ where Client: BlockClient, { client: Client, - consensus: Arc, + consensus: Arc>, hash: B256, request: FullBlockRequest, - header: Option, - body: Option, + header: Option>, + body: Option>, } impl FetchFullBlockFuture where - Client: BlockClient, + Client: BlockClient, { /// Returns the hash of the block being requested. pub const fn hash(&self) -> &B256 { @@ -129,11 +140,11 @@ where /// If the header request is already complete, this returns the block number pub fn block_number(&self) -> Option { - self.header.as_ref().map(|h| h.number) + self.header.as_ref().map(|h| h.number()) } /// Returns the [`SealedBlock`] if the request is complete and valid. 
- fn take_block(&mut self) -> Option { + fn take_block(&mut self) -> Option> { if self.header.is_none() || self.body.is_none() { return None } @@ -157,7 +168,7 @@ where } } - fn on_block_response(&mut self, resp: WithPeerId) { + fn on_block_response(&mut self, resp: WithPeerId) { if let Some(ref header) = self.header { if let Err(err) = self.consensus.validate_body_against_header(resp.data(), header) { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body"); @@ -173,9 +184,9 @@ where impl Future for FetchFullBlockFuture where - Client: BlockClient + 'static, + Client: BlockClient + 'static, { - type Output = SealedBlock; + type Output = SealedBlock; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -252,7 +263,7 @@ where impl Debug for FetchFullBlockFuture where - Client: BlockClient, + Client: BlockClient, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("FetchFullBlockFuture") @@ -275,7 +286,7 @@ impl FullBlockRequest where Client: BlockClient, { - fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { if let Some(fut) = Pin::new(&mut self.header).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { self.header = None; @@ -296,18 +307,18 @@ where /// The result of a request for a single header or body. This is yielded by the `FullBlockRequest` /// future. -enum ResponseResult { - Header(PeerRequestResult>), - Body(PeerRequestResult>), +enum ResponseResult { + Header(PeerRequestResult>), + Body(PeerRequestResult>), } /// The response of a body request. #[derive(Debug)] -enum BodyResponse { +enum BodyResponse { /// Already validated against transaction root of header - Validated(BlockBody), + Validated(B), /// Still needs to be validated against header - PendingValidation(WithPeerId), + PendingValidation(WithPeerId), } /// A future that downloads a range of full blocks from the network. 
/// @@ -330,7 +341,7 @@ where /// The client used to fetch headers and bodies. client: Client, /// The consensus instance used to validate the blocks. - consensus: Arc, + consensus: Arc>, /// The block hash to start fetching from (inclusive). start_hash: B256, /// How many blocks to fetch: `len([start_hash, ..]) == count` @@ -338,16 +349,16 @@ where /// Requests for headers and bodies that are in progress. request: FullBlockRangeRequest, /// Fetched headers. - headers: Option>, + headers: Option>>, /// The next headers to request bodies for. This is drained as responses are received. - pending_headers: VecDeque, + pending_headers: VecDeque>, /// The bodies that have been received so far. - bodies: HashMap, + bodies: HashMap, BodyResponse>, } impl FetchFullBlockRangeFuture where - Client: BlockClient, + Client: BlockClient, { /// Returns the block hashes for the given range, if they are available. pub fn range_block_hashes(&self) -> Option> { @@ -362,14 +373,14 @@ where /// Inserts a block body, matching it with the `next_header`. /// /// Note: this assumes the response matches the next header in the queue. - fn insert_body(&mut self, body_response: BodyResponse) { + fn insert_body(&mut self, body_response: BodyResponse) { if let Some(header) = self.pending_headers.pop_front() { self.bodies.insert(header, body_response); } } /// Inserts multiple block bodies. - fn insert_bodies(&mut self, bodies: impl IntoIterator) { + fn insert_bodies(&mut self, bodies: impl IntoIterator>) { for body in bodies { self.insert_body(body); } @@ -388,7 +399,7 @@ where /// /// These are returned in falling order starting with the requested `hash`, i.e. with /// descending block numbers. 
- fn take_blocks(&mut self) -> Option> { + fn take_blocks(&mut self) -> Option>> { if !self.is_bodies_complete() { // not done with bodies yet return None @@ -445,7 +456,7 @@ where Some(valid_responses) } - fn on_headers_response(&mut self, headers: WithPeerId>) { + fn on_headers_response(&mut self, headers: WithPeerId>) { let (peer, mut headers_falling) = headers .map(|h| { h.into_iter() @@ -461,7 +472,7 @@ where // fill in the response if it's the correct length if headers_falling.len() == self.count as usize { // sort headers from highest to lowest block number - headers_falling.sort_unstable_by_key(|h| Reverse(h.number)); + headers_falling.sort_unstable_by_key(|h| Reverse(h.number())); // check the starting hash if headers_falling[0].hash() == self.start_hash { @@ -512,9 +523,9 @@ where impl Future for FetchFullBlockRangeFuture where - Client: BlockClient + 'static, + Client: BlockClient + 'static, { - type Output = Vec; + type Output = Vec>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -621,7 +632,10 @@ impl FullBlockRangeRequest where Client: BlockClient, { - fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> { if let Some(fut) = Pin::new(&mut self.headers).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { self.headers = None; @@ -642,13 +656,15 @@ where // The result of a request for headers or block bodies. This is yielded by the // `FullBlockRangeRequest` future. 
-enum RangeResponseResult { - Header(PeerRequestResult>), - Body(PeerRequestResult>), +enum RangeResponseResult { + Header(PeerRequestResult>), + Body(PeerRequestResult>), } #[cfg(test)] mod tests { + use reth_primitives::BlockBody; + use super::*; use crate::test_utils::TestFullBlockClient; use std::ops::Range; diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index b73ea4e925f..585f2ab18a0 100644 --- a/crates/net/p2p/src/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -27,8 +27,10 @@ pub type HeadersFut = Pin> /// The block headers downloader client #[auto_impl::auto_impl(&, Arc, Box)] pub trait HeadersClient: DownloadClient { + /// The header type this client fetches. + type Header: Send + Sync + Unpin; /// The headers future type - type Output: Future>> + Sync + Send + Unpin; + type Output: Future>> + Sync + Send + Unpin; /// Sends the header request to the p2p network and returns the header response received from a /// peer. @@ -73,11 +75,11 @@ pub struct SingleHeaderRequest { fut: Fut, } -impl Future for SingleHeaderRequest +impl Future for SingleHeaderRequest where - Fut: Future>> + Sync + Send + Unpin, + Fut: Future>> + Sync + Send + Unpin, { - type Output = PeerRequestResult>; + type Output = PeerRequestResult>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let resp = ready!(self.get_mut().fut.poll_unpin(cx)); diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index 5565880ed39..59ecb58b84d 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -1,5 +1,6 @@ use super::error::HeadersDownloaderResult; use crate::error::{DownloadError, DownloadResult}; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::Stream; @@ -13,19 +14,25 @@ use reth_primitives::SealedHeader; /// /// A [`HeaderDownloader`] is a [Stream] that returns batches of 
headers. pub trait HeaderDownloader: - Send + Sync + Stream>> + Unpin + Send + + Sync + + Stream>, Self::Header>> + + Unpin { + /// The header type being downloaded. + type Header: Send + Sync + Unpin + 'static; + /// Updates the gap to sync which ranges from local head to the sync target /// /// See also [`HeaderDownloader::update_sync_target`] and /// [`HeaderDownloader::update_local_head`] - fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { + fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { self.update_local_head(head); self.update_sync_target(target); } /// Updates the block number of the local database - fn update_local_head(&mut self, head: SealedHeader); + fn update_local_head(&mut self, head: SealedHeader); /// Updates the target we want to sync to fn update_sync_target(&mut self, target: SyncTarget); @@ -74,23 +81,23 @@ impl SyncTarget { /// Validate whether the header is valid in relation to it's parent /// /// Returns Ok(false) if the -pub fn validate_header_download( - consensus: &dyn Consensus, - header: &SealedHeader, - parent: &SealedHeader, +pub fn validate_header_download( + consensus: &dyn Consensus, + header: &SealedHeader, + parent: &SealedHeader, ) -> DownloadResult<()> { // validate header against parent consensus.validate_header_against_parent(header, parent).map_err(|error| { DownloadError::HeaderValidation { hash: header.hash(), - number: header.number, + number: header.number(), error: Box::new(error), } })?; // validate header standalone consensus.validate_header(header).map_err(|error| DownloadError::HeaderValidation { hash: header.hash(), - number: header.number, + number: header.number(), error: Box::new(error), })?; Ok(()) diff --git a/crates/net/p2p/src/headers/error.rs b/crates/net/p2p/src/headers/error.rs index b22aae9248e..8757bb215f5 100644 --- a/crates/net/p2p/src/headers/error.rs +++ b/crates/net/p2p/src/headers/error.rs @@ -3,19 +3,19 @@ use reth_consensus::ConsensusError; use 
reth_primitives::SealedHeader; /// Header downloader result -pub type HeadersDownloaderResult = Result; +pub type HeadersDownloaderResult = Result>; /// Error variants that can happen when sending requests to a session. #[derive(Debug, Clone, Eq, PartialEq, Display, Error)] -pub enum HeadersDownloaderError { +pub enum HeadersDownloaderError { /// The downloaded header cannot be attached to the local head, /// but is valid otherwise. #[display("valid downloaded header cannot be attached to the local head: {error}")] DetachedHead { /// The local head we attempted to attach to. - local_head: Box, + local_head: Box>, /// The header we attempted to attach. - header: Box, + header: Box>, /// The error that occurred when attempting to attach the header. #[error(source)] error: Box, diff --git a/crates/net/p2p/src/lib.rs b/crates/net/p2p/src/lib.rs index 2ba8012f0ae..98d83c2d1a8 100644 --- a/crates/net/p2p/src/lib.rs +++ b/crates/net/p2p/src/lib.rs @@ -52,3 +52,14 @@ pub use headers::client::HeadersClient; pub trait BlockClient: HeadersClient + BodiesClient + Unpin + Clone {} impl BlockClient for T where T: HeadersClient + BodiesClient + Unpin + Clone {} + +/// The [`BlockClient`] providing Ethereum block parts. +pub trait EthBlockClient: + BlockClient
+{ +} + +impl EthBlockClient for T where + T: BlockClient
+{ +} diff --git a/crates/net/p2p/src/test_utils/bodies.rs b/crates/net/p2p/src/test_utils/bodies.rs index cfd29212916..0689d403f2c 100644 --- a/crates/net/p2p/src/test_utils/bodies.rs +++ b/crates/net/p2p/src/test_utils/bodies.rs @@ -36,6 +36,7 @@ impl BodiesClient for TestBodiesClient where F: Fn(Vec) -> PeerRequestResult> + Send + Sync, { + type Body = BlockBody; type Output = BodiesFut; fn get_block_bodies_with_priority( diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs index 8a13f69325d..97d867531ad 100644 --- a/crates/net/p2p/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -40,6 +40,7 @@ impl DownloadClient for NoopFullBlockClient { /// Implements the `BodiesClient` trait for the `NoopFullBlockClient` struct. impl BodiesClient for NoopFullBlockClient { + type Body = BlockBody; /// Defines the output type of the function. type Output = futures::future::Ready>>; @@ -65,6 +66,7 @@ impl BodiesClient for NoopFullBlockClient { } impl HeadersClient for NoopFullBlockClient { + type Header = Header; /// The output type representing a future containing a peer request result with a vector of /// headers. type Output = futures::future::Ready>>; @@ -152,6 +154,7 @@ impl DownloadClient for TestFullBlockClient { /// Implements the `HeadersClient` trait for the `TestFullBlockClient` struct. impl HeadersClient for TestFullBlockClient { + type Header = Header; /// Specifies the associated output type. type Output = futures::future::Ready>>; @@ -205,6 +208,7 @@ impl HeadersClient for TestFullBlockClient { /// Implements the `BodiesClient` trait for the `TestFullBlockClient` struct. impl BodiesClient for TestFullBlockClient { + type Body = BlockBody; /// Defines the output type of the function. 
type Output = futures::future::Ready>>; diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index 4f603f6339b..d8d4bbc6b7a 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -62,6 +62,8 @@ impl TestHeaderDownloader { } impl HeaderDownloader for TestHeaderDownloader { + type Header = Header; + fn update_local_head(&mut self, _head: SealedHeader) {} fn update_sync_target(&mut self, _target: SyncTarget) {} @@ -72,7 +74,7 @@ impl HeaderDownloader for TestHeaderDownloader { } impl Stream for TestHeaderDownloader { - type Item = HeadersDownloaderResult>; + type Item = HeadersDownloaderResult, Header>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -229,6 +231,7 @@ impl DownloadClient for TestHeadersClient { } impl HeadersClient for TestHeadersClient { + type Header = Header; type Output = TestHeadersFut; fn get_headers_with_priority( diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 856f86c6fe0..ec4912fdd86 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -760,7 +760,7 @@ where /// necessary pub async fn max_block(&self, client: C) -> eyre::Result> where - C: HeadersClient, + C: HeadersClient
, { self.node_config().max_block(client, self.provider_factory().clone()).await } diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 3591868ddad..d8405dad77f 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -12,7 +12,7 @@ use reth_downloaders::{ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network_p2p::{ - bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient, + bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, EthBlockClient, }; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; @@ -38,7 +38,7 @@ pub fn build_networked_pipeline( ) -> eyre::Result> where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, Executor: BlockExecutorProvider, { // building network downloaders using the fetch client @@ -84,8 +84,8 @@ pub fn build_pipeline( ) -> eyre::Result> where N: ProviderNodeTypes, - H: HeaderDownloader + 'static, - B: BodyDownloader + 'static, + H: HeaderDownloader
+ 'static, + B: BodyDownloader + 'static, Executor: BlockExecutorProvider, { let mut builder = Pipeline::::builder(); diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 1c6c9d98c80..c667a56293c 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -13,7 +13,9 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-consensus.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-cli-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-storage-errors.workspace = true @@ -30,7 +32,6 @@ reth-discv4.workspace = true reth-discv5.workspace = true reth-net-nat.workspace = true reth-network-peers.workspace = true -reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 3848772c415..24d5588b688 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -8,6 +8,7 @@ use crate::{ dirs::{ChainPath, DataDirPath}, utils::get_single_header, }; +use alloy_consensus::BlockHeader; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; @@ -273,7 +274,7 @@ impl NodeConfig { ) -> eyre::Result> where Provider: HeaderProvider, - Client: HeadersClient, + Client: HeadersClient, { let max_block = if let Some(block) = self.debug.max_block { Some(block) @@ -332,7 +333,7 @@ impl NodeConfig { ) -> ProviderResult where Provider: HeaderProvider, - Client: HeadersClient, + Client: HeadersClient, { let header = provider.header_by_hash_or_number(tip.into())?; @@ -342,7 +343,7 @@ impl NodeConfig { return Ok(header.number) } - Ok(self.fetch_tip_from_network(client, tip.into()).await.number) + Ok(self.fetch_tip_from_network(client, tip.into()).await.number()) } /// Attempt to look up the block with the given number and return the 
header. @@ -352,9 +353,9 @@ impl NodeConfig { &self, client: Client, tip: BlockHashOrNumber, - ) -> SealedHeader + ) -> SealedHeader where - Client: HeadersClient, + Client: HeadersClient, { info!(target: "reth::cli", ?tip, "Fetching tip block from the network."); let mut fetch_failures = 0; diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index a04d4e324e1..7aeb14c4c0e 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -1,12 +1,12 @@ //! Utility functions for node startup and shutdown, for example path parsing and retrieving single //! blocks from the network. +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::Sealable; use alloy_rpc_types_engine::{JwtError, JwtSecret}; use eyre::Result; -use reth_chainspec::ChainSpec; -use reth_consensus_common::validation::validate_block_pre_execution; +use reth_consensus::Consensus; use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::{HeadersClient, HeadersDirection, HeadersRequest}, @@ -16,7 +16,6 @@ use reth_primitives::{SealedBlock, SealedHeader}; use std::{ env::VarError, path::{Path, PathBuf}, - sync::Arc, }; use tracing::{debug, info}; @@ -41,9 +40,9 @@ pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result( client: Client, id: BlockHashOrNumber, -) -> Result +) -> Result> where - Client: HeadersClient, + Client: HeadersClient, { let request = HeadersRequest { direction: HeadersDirection::Rising, limit: 1, start: id }; @@ -61,7 +60,7 @@ where let valid = match id { BlockHashOrNumber::Hash(hash) => header.hash() == hash, - BlockHashOrNumber::Number(number) => header.number == number, + BlockHashOrNumber::Number(number) => header.number() == number, }; if !valid { @@ -77,11 +76,11 @@ where } /// Get a body from network based on header -pub async fn get_single_body( +pub async fn get_single_body( client: Client, - chain_spec: Arc, - header: SealedHeader, -) -> Result + header: SealedHeader, + 
consensus: impl Consensus, +) -> Result> where Client: BodiesClient, { @@ -95,7 +94,7 @@ where let body = response.unwrap(); let block = SealedBlock { header, body }; - validate_block_pre_execution(&block, &chain_spec)?; + consensus.validate_block_pre_execution(&block)?; Ok(block) } diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 8ad85a5961a..7ab76f24987 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -5,6 +5,8 @@ use core::fmt; use alloy_primitives::Sealable; use reth_codecs::Compact; +use crate::InMemorySize; + /// Helper trait that unifies all behaviour required by block header to support full node /// operations. pub trait FullBlockHeader: BlockHeader + Compact {} @@ -21,12 +23,11 @@ pub trait BlockHeader: + fmt::Debug + PartialEq + Eq - + serde::Serialize - + for<'de> serde::Deserialize<'de> + alloy_rlp::Encodable + alloy_rlp::Decodable + alloy_consensus::BlockHeader + Sealable + + InMemorySize { } @@ -45,5 +46,6 @@ impl BlockHeader for T where + alloy_rlp::Decodable + alloy_consensus::BlockHeader + Sealable + + InMemorySize { } diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 995c13748b3..b0fe4434298 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -56,10 +56,10 @@ impl SealedHeader { } } -impl SealedHeader { +impl SealedHeader { /// Return the number hash tuple. pub fn num_hash(&self) -> BlockNumHash { - BlockNumHash::new(self.number, self.hash) + BlockNumHash::new(self.number(), self.hash) } } diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs index 173f8cedc9e..0c250688e05 100644 --- a/crates/primitives-traits/src/size.rs +++ b/crates/primitives-traits/src/size.rs @@ -3,3 +3,9 @@ pub trait InMemorySize { /// Returns a heuristic for the in-memory size of a struct. 
fn size(&self) -> usize; } + +impl InMemorySize for alloy_consensus::Header { + fn size(&self) -> usize { + self.size() + } +} diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 275f86c5b45..c0586ed6a8f 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -428,8 +428,7 @@ impl SealedBlock { } } -impl InMemorySize for SealedBlock { - /// Calculates a heuristic for the in-memory size of the [`SealedBlock`]. +impl InMemorySize for SealedBlock { #[inline] fn size(&self) -> usize { self.header.size() + self.body.size() diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 06a5250913e..021a9ab192c 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -60,7 +60,7 @@ pub struct BodyStage { /// The body downloader. downloader: D, /// Block response buffer. - buffer: Option>, + buffer: Option>>, } impl BodyStage { @@ -70,9 +70,10 @@ impl BodyStage { } } -impl Stage for BodyStage +impl Stage for BodyStage where Provider: DBProvider + StaticFileProviderFactory + StatsReader + BlockReader, + D: BodyDownloader, { /// Return the id of the stage fn id(&self) -> StageId { @@ -889,6 +890,8 @@ mod tests { } impl BodyDownloader for TestBodyDownloader { + type Body = BlockBody; + fn set_download_range( &mut self, range: RangeInclusive, @@ -909,7 +912,7 @@ mod tests { } impl Stream for TestBodyDownloader { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 49e687a96a1..2be78b88169 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -194,7 +194,7 @@ where impl Stage for HeaderStage where P: HeaderSyncGapProvider, - D: HeaderDownloader, + D: 
HeaderDownloader
, Provider: DBProvider + StaticFileProviderFactory, { /// Return the id of the stage @@ -441,7 +441,9 @@ mod tests { } } - impl StageTestRunner for HeadersTestRunner { + impl + 'static> StageTestRunner + for HeadersTestRunner + { type S = HeaderStage, D>; fn db(&self) -> &TestStageDB { @@ -459,7 +461,9 @@ mod tests { } } - impl ExecuteStageTestRunner for HeadersTestRunner { + impl + 'static> ExecuteStageTestRunner + for HeadersTestRunner + { type Seed = Vec; fn seed_execution(&mut self, input: ExecInput) -> Result { @@ -537,7 +541,9 @@ mod tests { } } - impl UnwindStageTestRunner for HeadersTestRunner { + impl + 'static> UnwindStageTestRunner + for HeadersTestRunner + { fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> { self.check_no_header_entry_above(input.unwind_to) } From 0cd34f911ca693a08d31d37e54abc575efdf9696 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Tue, 12 Nov 2024 22:27:28 +0700 Subject: [PATCH 431/970] feat: add ovm `BlockFileCodec` (#12247) Co-authored-by: Emilia Hane --- Cargo.lock | 4 + crates/consensus/beacon/Cargo.toml | 17 +- crates/net/downloaders/Cargo.toml | 13 +- crates/net/downloaders/src/file_client.rs | 3 +- crates/optimism/cli/Cargo.toml | 26 +- crates/optimism/cli/src/lib.rs | 5 + crates/optimism/cli/src/ovm_file_codec.rs | 382 ++++++++++++++++++++++ crates/primitives/src/transaction/mod.rs | 12 +- 8 files changed, 439 insertions(+), 23 deletions(-) create mode 100644 crates/optimism/cli/src/ovm_file_codec.rs diff --git a/Cargo.lock b/Cargo.lock index 4792cb094de..7d9772a3909 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8158,9 +8158,12 @@ dependencies = [ name = "reth-optimism-cli" version = "1.1.1" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "clap", + "derive_more 1.0.0", "eyre", "futures-util", "op-alloy-consensus", @@ -8194,6 +8197,7 @@ dependencies = [ "reth-static-file", 
"reth-static-file-types", "reth-tracing", + "serde", "tempfile", "tokio", "tokio-util", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index d3aa5124668..0139be2f680 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -73,15 +73,16 @@ reth-exex-types.workspace = true reth-prune-types.workspace = true reth-chainspec.workspace = true alloy-genesis.workspace = true - assert_matches.workspace = true [features] optimism = [ - "reth-chainspec", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-blockchain-tree/optimism", - "reth-db/optimism", - "reth-db-api/optimism", -] + "reth-blockchain-tree/optimism", + "reth-chainspec", + "reth-db-api/optimism", + "reth-db/optimism", + "reth-downloaders/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-downloaders/optimism" +] \ No newline at end of file diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 38e46bb6011..f4cc134ec48 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -46,9 +46,9 @@ reth-metrics.workspace = true metrics.workspace = true # misc -tracing.workspace = true rayon.workspace = true thiserror.workspace = true +tracing.workspace = true tempfile = { workspace = true, optional = true } itertools.workspace = true @@ -72,9 +72,16 @@ rand.workspace = true tempfile.workspace = true [features] +optimism = [ + "reth-primitives/optimism", + "reth-db?/optimism", + "reth-db-api?/optimism", + "reth-provider/optimism" +] + test-utils = [ - "dep:tempfile", - "dep:reth-db-api", + "tempfile", + "reth-db-api", "reth-db/test-utils", "reth-consensus/test-utils", "reth-network-p2p/test-utils", diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index df35146e940..9f539a5774d 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -19,9 +19,8 @@ 
use tokio_stream::StreamExt; use tokio_util::codec::FramedRead; use tracing::{debug, trace, warn}; -use crate::receipt_file_client::FromReceiptReader; - use super::file_codec::BlockFileCodec; +use crate::receipt_file_client::FromReceiptReader; /// Default byte length of chunk to read from chain file. /// diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index a2ba71214f5..198e5377ec4 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -47,11 +47,15 @@ reth-node-builder.workspace = true reth-tracing.workspace = true # eth +alloy-eips.workspace = true +alloy-consensus = { workspace = true, optional = true } alloy-primitives.workspace = true alloy-rlp.workspace = true # misc futures-util.workspace = true +derive_more = { workspace = true, optional = true } +serde = { workspace = true, optional = true } clap = { workspace = true, features = ["derive", "env"] } @@ -67,9 +71,7 @@ eyre.workspace = true # reth test-vectors proptest = { workspace = true, optional = true } -op-alloy-consensus = { workspace = true, features = [ - "arbitrary", -], optional = true } +op-alloy-consensus = { workspace = true, optional = true } [dev-dependencies] @@ -80,6 +82,10 @@ reth-cli-commands.workspace = true [features] optimism = [ + "op-alloy-consensus", + "alloy-consensus", + "dep:derive_more", + "dep:serde", "reth-primitives/optimism", "reth-optimism-evm/optimism", "reth-provider/optimism", @@ -87,7 +93,8 @@ optimism = [ "reth-optimism-node/optimism", "reth-execution-types/optimism", "reth-db/optimism", - "reth-db-api/optimism" + "reth-db-api/optimism", + "reth-downloaders/optimism" ] asm-keccak = [ "alloy-primitives/asm-keccak", @@ -104,6 +111,13 @@ jemalloc = [ dev = [ "dep:proptest", - "reth-cli-commands/arbitrary", - "op-alloy-consensus" + "reth-cli-commands/arbitrary" +] +serde = [ + "alloy-consensus?/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "op-alloy-consensus?/serde", + "reth-execution-types/serde", + 
"reth-provider/serde" ] diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index b3c7c86d1d1..23eaa99b521 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -27,6 +27,11 @@ pub mod commands; /// made for op-erigon's import needs). pub mod receipt_file_codec; +/// OVM block, same as EVM block at bedrock, except for signature of deposit transaction +/// not having a signature back then. +/// Enables decoding and encoding `Block` types within file contexts. +pub mod ovm_file_codec; + pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; use reth_optimism_chainspec::OpChainSpec; diff --git a/crates/optimism/cli/src/ovm_file_codec.rs b/crates/optimism/cli/src/ovm_file_codec.rs new file mode 100644 index 00000000000..624305c4b6e --- /dev/null +++ b/crates/optimism/cli/src/ovm_file_codec.rs @@ -0,0 +1,382 @@ +use alloy_consensus::{ + transaction::{from_eip155_value, RlpEcdsaTx}, + Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip4895::Withdrawals, +}; +use alloy_primitives::{ + bytes::{Buf, BytesMut}, + keccak256, PrimitiveSignature as Signature, TxHash, B256, U256, +}; +use alloy_rlp::{Decodable, Error as RlpError, RlpDecodable}; +use derive_more::{AsRef, Deref}; +use op_alloy_consensus::TxDeposit; +use reth_downloaders::file_client::FileClientError; +use reth_primitives::transaction::{Transaction, TxType}; +use serde::{Deserialize, Serialize}; +use tokio_util::codec::Decoder; + +#[allow(dead_code)] +/// Specific codec for reading raw block bodies from a file +/// with optimism-specific signature handling +pub(crate) struct OvmBlockFileCodec; + +impl Decoder for OvmBlockFileCodec { + type Item = Block; + type Error = FileClientError; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if src.is_empty() { + return Ok(None); + } + + let buf_slice = 
&mut src.as_ref(); + let body = + Block::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; + src.advance(src.len() - buf_slice.len()); + + Ok(Some(body)) + } +} + +/// OVM block, same as EVM block but with different transaction signature handling +/// Pre-bedrock system transactions on Optimism were sent from the zero address +/// with an empty signature, +#[derive(Debug, Clone, PartialEq, Eq, RlpDecodable)] +pub struct Block { + /// Block header + pub header: Header, + /// Block body + pub body: BlockBody, +} + +impl Block { + /// Decodes a `Block` from the given byte slice. + pub fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let header = Header::decode(buf)?; + let body = BlockBody::decode(buf)?; + Ok(Self { header, body }) + } +} + +/// The body of a block for OVM +#[derive(Debug, Clone, PartialEq, Eq, Default, RlpDecodable)] +#[rlp(trailing)] +pub struct BlockBody { + /// Transactions in the block + pub transactions: Vec, + /// Uncle headers for the given block + pub ommers: Vec
, + /// Withdrawals in the block. + pub withdrawals: Option, +} + +/// Signed transaction. +#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] +pub struct TransactionSigned { + /// Transaction hash + pub hash: TxHash, + /// The transaction signature values + pub signature: Signature, + /// Raw transaction info + #[deref] + #[as_ref] + pub transaction: Transaction, +} + +impl Default for TransactionSigned { + fn default() -> Self { + Self { + hash: Default::default(), + signature: Signature::test_signature(), + transaction: Default::default(), + } + } +} + +impl AsRef for TransactionSigned { + fn as_ref(&self) -> &Self { + self + } +} + +// === impl TransactionSigned === +impl TransactionSigned { + /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with + /// tx type. + pub fn recalculate_hash(&self) -> B256 { + keccak256(self.encoded_2718()) + } + + /// Create a new signed transaction from a transaction and its signature. + /// + /// This will also calculate the transaction hash using its encoding. + pub fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { + let mut initial_tx = Self { transaction, hash: Default::default(), signature }; + initial_tx.hash = initial_tx.recalculate_hash(); + initial_tx + } + + /// Decodes legacy transaction from the data buffer into a tuple. + /// + /// This expects `rlp(legacy_tx)` + /// + /// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact + /// format expected. 
+ pub(crate) fn decode_rlp_legacy_transaction_tuple( + data: &mut &[u8], + ) -> alloy_rlp::Result<(TxLegacy, TxHash, Signature)> { + let original_encoding = *data; + + let header = alloy_rlp::Header::decode(data)?; + let remaining_len = data.len(); + + let transaction_payload_len = header.payload_length; + + if transaction_payload_len > remaining_len { + return Err(RlpError::InputTooShort); + } + + let mut transaction = TxLegacy { + nonce: Decodable::decode(data)?, + gas_price: Decodable::decode(data)?, + gas_limit: Decodable::decode(data)?, + to: Decodable::decode(data)?, + value: Decodable::decode(data)?, + input: Decodable::decode(data)?, + chain_id: None, + }; + + let v: u64 = Decodable::decode(data)?; + let r: U256 = Decodable::decode(data)?; + let s: U256 = Decodable::decode(data)?; + + let tx_length = header.payload_length + header.length(); + let hash = keccak256(&original_encoding[..tx_length]); + + // Handle both pre-bedrock and regular cases + let (signature, chain_id) = if v == 0 && r.is_zero() && s.is_zero() { + // Pre-bedrock system transactions case + (Signature::new(r, s, false), None) + } else { + // Regular transaction case + let (parity, chain_id) = from_eip155_value(v) + .ok_or(alloy_rlp::Error::Custom("invalid parity for legacy transaction"))?; + (Signature::new(r, s, parity), chain_id) + }; + + // Set chain ID and verify length + transaction.chain_id = chain_id; + let decoded = remaining_len - data.len(); + if decoded != transaction_payload_len { + return Err(RlpError::UnexpectedLength); + } + + Ok((transaction, hash, signature)) + } + + /// Decodes legacy transaction from the data buffer. + /// + /// This should be used _only_ be used in general transaction decoding methods, which have + /// already ensured that the input is a legacy transaction with the following format: + /// `rlp(legacy_tx)` + /// + /// Legacy transactions are encoded as lists, so the input should start with a RLP list header. 
+ /// + /// This expects `rlp(legacy_tx)` + // TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`, + // so decoding methods do not need to manually advance the buffer + pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result { + let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?; + let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature }; + Ok(signed) + } +} + +impl Decodable for TransactionSigned { + /// This `Decodable` implementation only supports decoding rlp encoded transactions as it's used + /// by p2p. + /// + /// The p2p encoding format always includes an RLP header, although the type RLP header depends + /// on whether or not the transaction is a legacy transaction. + /// + /// If the transaction is a legacy transaction, it is just encoded as a RLP list: + /// `rlp(tx-data)`. + /// + /// If the transaction is a typed transaction, it is encoded as a RLP string: + /// `rlp(tx-type || rlp(tx-data))` + /// + /// This can be used for decoding all signed transactions in p2p `BlockBodies` responses. + /// + /// This cannot be used for decoding EIP-4844 transactions in p2p `PooledTransactions`, since + /// the EIP-4844 variant of [`TransactionSigned`] does not include the blob sidecar. + /// + /// For a method suitable for decoding pooled transactions, see \[`PooledTransactionsElement`\]. + /// + /// CAUTION: Due to a quirk in [`Header::decode`], this method will succeed even if a typed + /// transaction is encoded in this format, and does not start with a RLP header: + /// `tx-type || rlp(tx-data)`. + /// + /// This is because [`Header::decode`] does not advance the buffer, and returns a length-1 + /// string header if the first byte is less than `0xf7`. 
+ fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + Self::network_decode(buf).map_err(Into::into) + } +} + +impl Encodable2718 for TransactionSigned { + fn type_flag(&self) -> Option { + match self.transaction.tx_type() { + TxType::Legacy => None, + tx_type => Some(tx_type as u8), + } + } + + fn encode_2718_len(&self) -> usize { + match &self.transaction { + Transaction::Legacy(legacy_tx) => legacy_tx.eip2718_encoded_length(&self.signature), + Transaction::Eip2930(access_list_tx) => { + access_list_tx.eip2718_encoded_length(&self.signature) + } + Transaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.eip2718_encoded_length(&self.signature) + } + Transaction::Eip4844(blob_tx) => blob_tx.eip2718_encoded_length(&self.signature), + Transaction::Eip7702(set_code_tx) => { + set_code_tx.eip2718_encoded_length(&self.signature) + } + Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), + } + } + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + self.transaction.eip2718_encode(&self.signature, out) + } +} + +impl Decodable2718 for TransactionSigned { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { + match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? 
{ + TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), + TxType::Eip2930 => { + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) + } + TxType::Eip1559 => { + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) + } + TxType::Eip7702 => { + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) + } + TxType::Eip4844 => { + let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) + } + TxType::Deposit => Ok(Self::from_transaction_and_signature( + Transaction::Deposit(TxDeposit::rlp_decode(buf)?), + TxDeposit::signature(), + )), + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { + Ok(Self::decode_rlp_legacy_transaction(buf)?) 
+ } +} + +#[cfg(test)] +mod tests { + use crate::ovm_file_codec::TransactionSigned; + use alloy_primitives::{address, hex, TxKind, B256, U256}; + use reth_primitives::transaction::Transaction; + const DEPOSIT_FUNCTION_SELECTOR: [u8; 4] = [0xb6, 0xb5, 0x5f, 0x25]; + use alloy_rlp::Decodable; + + #[test] + fn test_decode_legacy_transactions() { + // Test Case 1: contract deposit - regular L2 transaction calling deposit() function + // tx: https://optimistic.etherscan.io/getRawTx?tx=0x7860252963a2df21113344f323035ef59648638a571eef742e33d789602c7a1c + let deposit_tx_bytes = hex!("f88881f0830f481c830c6e4594a75127121d28a9bf848f3b70e7eea26570aa770080a4b6b55f2500000000000000000000000000000000000000000000000000000000000710b238a0d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45fa02c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def"); + let deposit_decoded = TransactionSigned::decode(&mut &deposit_tx_bytes[..]).unwrap(); + + // Verify deposit transaction + let deposit_tx = match &deposit_decoded.transaction { + Transaction::Legacy(ref tx) => tx, + _ => panic!("Expected legacy transaction for NFT deposit"), + }; + + assert_eq!( + deposit_tx.to, + TxKind::Call(address!("a75127121d28a9bf848f3b70e7eea26570aa7700")) + ); + assert_eq!(deposit_tx.nonce, 240); + assert_eq!(deposit_tx.gas_price, 1001500); + assert_eq!(deposit_tx.gas_limit, 814661); + assert_eq!(deposit_tx.value, U256::ZERO); + assert_eq!(&deposit_tx.input.as_ref()[0..4], DEPOSIT_FUNCTION_SELECTOR); + assert_eq!(deposit_tx.chain_id, Some(10)); + assert_eq!( + deposit_decoded.signature.r(), + U256::from_str_radix( + "d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45f", + 16 + ) + .unwrap() + ); + assert_eq!( + deposit_decoded.signature.s(), + U256::from_str_radix( + "2c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def", + 16 + ) + .unwrap() + ); + + // Test Case 2: pre-bedrock system transaction from block 105235052 + // tx: 
https://optimistic.etherscan.io/getRawTx?tx=0xe20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e + let system_tx_bytes = hex!("f9026c830d899383124f808302a77e94a0cc33dd6f4819d473226257792afe230ec3c67f80b902046c459a280000000000000000000000004d73adb72bc3dd368966edd0f0b2148401a178e2000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000647fac7f00000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000084704316e5000000000000000000000000000000000000000000000000000000000000006e10975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000001410975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082a39325251d44e11f3b6d92f9382438eb6c8b5068d4a488d4f177b26f2ca20db34ae53467322852afcc779f25eafd124c5586f54b9026497ba934403d4c578e3c1b5aa754c918ee2ecd25402df656c2419717e4017a7aecb84af3914fd3c7bf6930369c4e6ff76950246b98e354821775f02d33cdbee5ef6aed06c15b75691692d31c00000000000000000000000000000000000000000000000000000000000038a0e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbea013ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4"); + let system_decoded = TransactionSigned::decode(&mut &system_tx_bytes[..]).unwrap(); + + // Verify system transaction + assert!(system_decoded.is_legacy()); + + let system_tx = match &system_decoded.transaction { + Transaction::Legacy(ref tx) => tx, + _ => panic!("Expected Legacy transaction"), + }; + + assert_eq!(system_tx.nonce, 887187); + assert_eq!(system_tx.gas_price, 1200000); + assert_eq!(system_tx.gas_limit, 173950); + assert_eq!( + system_tx.to, + TxKind::Call(address!("a0cc33dd6f4819d473226257792afe230ec3c67f")) + ); + assert_eq!(system_tx.value, U256::ZERO); + 
assert_eq!(system_tx.chain_id, Some(10)); + + assert_eq!( + system_decoded.signature.r(), + U256::from_str_radix( + "e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbe", + 16 + ) + .unwrap() + ); + assert_eq!( + system_decoded.signature.s(), + U256::from_str_radix( + "13ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4", + 16 + ) + .unwrap() + ); + assert_eq!( + system_decoded.hash, + B256::from(hex!("e20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e")) + ); + } +} diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 366f59696b2..26815fc38de 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -48,8 +48,12 @@ mod error; mod meta; mod pooled; mod sidecar; -mod signature; mod tx_type; + +/// Handling transaction signature operations, including signature recovery, +/// applying chain IDs, and EIP-2 validation. +pub mod signature; + pub(crate) mod util; mod variant; @@ -70,9 +74,9 @@ use revm_primitives::{AuthorizationList, TxEnv}; /// Either a transaction hash or number. pub type TxHashOrNumber = BlockHashOrNumber; -// Expected number of transactions where we can expect a speed-up by recovering the senders in -// parallel. -pub(crate) static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = +/// Expected number of transactions where we can expect a speed-up by recovering the senders in +/// parallel. 
+pub static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = LazyLock::new(|| match rayon::current_num_threads() { 0..=1 => usize::MAX, 2..=8 => 10, From e6a6fc4c2e188c71b6dc67e67549cf38d0bb1d46 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 12 Nov 2024 16:37:21 +0100 Subject: [PATCH 432/970] chore: only fetch deposit info for deposit tx (#12474) --- crates/optimism/rpc/src/eth/transaction.rs | 24 ++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 3ff7cb10df1..20aa379a0c1 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -85,6 +85,7 @@ where ) -> Result { let from = tx.signer(); let TransactionSigned { transaction, signature, hash } = tx.into_signed(); + let mut deposit_receipt_version = None; let inner = match transaction { reth_primitives::Transaction::Legacy(tx) => { @@ -100,16 +101,23 @@ where reth_primitives::Transaction::Eip7702(tx) => { Signed::new_unchecked(tx, signature, hash).into() } - reth_primitives::Transaction::Deposit(tx) => OpTxEnvelope::Deposit(tx), + reth_primitives::Transaction::Deposit(tx) => { + let deposit_info = self + .inner + .provider() + .receipt_by_hash(hash) + .map_err(Self::Error::from_eth_err)? + .and_then(|receipt| receipt.deposit_receipt_version.zip(receipt.deposit_nonce)); + + if let Some((version, _)) = deposit_info { + deposit_receipt_version = Some(version); + // TODO: set nonce + } + + OpTxEnvelope::Deposit(tx) + } }; - let deposit_receipt_version = self - .inner - .provider() - .receipt_by_hash(hash) - .map_err(Self::Error::from_eth_err)? - .and_then(|receipt| receipt.deposit_receipt_version); - let TransactionInfo { block_hash, block_number, index: transaction_index, base_fee, .. 
} = tx_info; From 4a8eb7a0c0a08504f1241d421b85d588e7bc3915 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 12 Nov 2024 18:19:06 +0100 Subject: [PATCH 433/970] chore: add DUPSORT trait const (#12477) --- crates/storage/db-api/src/table.rs | 3 +++ crates/storage/db/src/tables/mod.rs | 1 + crates/storage/db/src/tables/raw.rs | 2 ++ 3 files changed, 6 insertions(+) diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index 963457af05c..acdc8efc78f 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -88,6 +88,9 @@ pub trait Table: Send + Sync + Debug + 'static { /// The table's name. const NAME: &'static str; + /// Whether the table is also a `DUPSORT` table. + const DUPSORT: bool; + /// Key element of `Table`. /// /// Sorting should be taken into account when encoding this. diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index c697c319909..cf7d23a1272 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -140,6 +140,7 @@ macro_rules! 
tables { $value: reth_db_api::table::Value + 'static { const NAME: &'static str = table_names::$name; + const DUPSORT: bool = tables!(@bool $($subkey)?); type Key = $key; type Value = $value; diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 6b6de41613e..453116ee5e3 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -14,6 +14,7 @@ pub struct RawTable { impl Table for RawTable { const NAME: &'static str = T::NAME; + const DUPSORT: bool = false; type Key = RawKey; type Value = RawValue; @@ -28,6 +29,7 @@ pub struct RawDupSort { impl Table for RawDupSort { const NAME: &'static str = T::NAME; + const DUPSORT: bool = true; type Key = RawKey; type Value = RawValue; From fa5daef07dd262eb7fc0f78f07752b8bedbc3fa7 Mon Sep 17 00:00:00 2001 From: Panagiotis Ganelis <50522617+PanGan21@users.noreply.github.com> Date: Tue, 12 Nov 2024 19:35:51 +0100 Subject: [PATCH 434/970] chore: Move `HistoryWriter` trait to `storage-api` and reexport it from old `provider` crate (#12480) --- Cargo.lock | 1 + crates/storage/provider/src/lib.rs | 3 +++ crates/storage/provider/src/traits/mod.rs | 3 --- crates/storage/storage-api/Cargo.toml | 1 + .../{provider/src/traits => storage-api/src}/history.rs | 0 crates/storage/storage-api/src/lib.rs | 3 +++ 6 files changed, 8 insertions(+), 3 deletions(-) rename crates/storage/{provider/src/traits => storage-api/src}/history.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 7d9772a3909..0ad5c1acd6a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9120,6 +9120,7 @@ dependencies = [ "alloy-primitives", "auto_impl", "reth-chainspec", + "reth-db", "reth-db-api", "reth-db-models", "reth-execution-types", diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 894a41620c5..2b002fe11ec 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -46,6 +46,9 @@ pub use reth_chain_state::{ CanonStateNotifications, 
CanonStateSubscriptions, }; +// reexport HistoryWriter trait +pub use reth_storage_api::HistoryWriter; + pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { std::ops::Bound::Included(&v) => v, diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index c31c7c1e2f2..722721525bf 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -26,9 +26,6 @@ pub use hashing::HashingWriter; mod trie; pub use trie::{StorageTrieWriter, TrieWriter}; -mod history; -pub use history::HistoryWriter; - mod static_file_provider; pub use static_file_provider::StaticFileProviderFactory; diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 0ae8b284588..32aadc1922d 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -22,6 +22,7 @@ reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie.workspace = true +reth-db.workspace = true # ethereum alloy-eips.workspace = true diff --git a/crates/storage/provider/src/traits/history.rs b/crates/storage/storage-api/src/history.rs similarity index 100% rename from crates/storage/provider/src/traits/history.rs rename to crates/storage/storage-api/src/history.rs diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 4e589242a91..21d02325afe 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -53,3 +53,6 @@ mod database_provider; pub use database_provider::*; pub mod noop; + +mod history; +pub use history::*; From b39957612a88640272a051858bad5cab840d6287 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 12 Nov 2024 23:29:42 +0400 Subject: [PATCH 435/970] feat: make more network components generic over primitives (#12481) --- Cargo.lock | 2 + crates/net/eth-wire-types/Cargo.toml 
| 5 +- crates/net/eth-wire-types/src/primitives.rs | 5 +- crates/net/network/Cargo.toml | 4 +- crates/net/network/src/import.rs | 22 ++-- crates/net/network/src/message.rs | 15 +-- crates/net/network/src/session/active.rs | 109 ++++++++++---------- crates/net/network/src/session/conn.rs | 34 +++--- crates/net/network/src/session/handle.rs | 22 ++-- crates/net/network/src/session/mod.rs | 54 +++++----- crates/net/network/src/state.rs | 25 +++-- crates/net/network/src/swarm.rs | 42 ++++---- crates/primitives-traits/src/block/body.rs | 7 +- crates/primitives-traits/src/block/mod.rs | 73 +------------ crates/primitives/src/block.rs | 13 +++ 15 files changed, 197 insertions(+), 235 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0ad5c1acd6a..5ce7b0c0ec7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7349,6 +7349,7 @@ dependencies = [ "reth-chainspec", "reth-codecs-derive", "reth-primitives", + "reth-primitives-traits", "serde", "thiserror", ] @@ -7784,6 +7785,7 @@ dependencies = [ "reth-network-peers", "reth-network-types", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 582ab7557f3..9ce712bf87a 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -16,9 +16,9 @@ workspace = true reth-chainspec.workspace = true reth-codecs-derive.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true # ethereum -alloy-consensus.workspace = true alloy-chains = { workspace = true, features = ["rlp"] } alloy-eips.workspace = true alloy-primitives.workspace = true @@ -54,7 +54,8 @@ arbitrary = [ "reth-chainspec/arbitrary", "alloy-consensus/arbitrary", "alloy-eips/arbitrary", - "alloy-primitives/arbitrary" + "alloy-primitives/arbitrary", + "reth-primitives-traits/arbitrary", ] serde = [ "dep:serde", diff --git a/crates/net/eth-wire-types/src/primitives.rs 
b/crates/net/eth-wire-types/src/primitives.rs index 04b8b429e2a..15cfaaff0a2 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -2,8 +2,8 @@ use std::fmt::Debug; -use alloy_consensus::BlockHeader; use alloy_rlp::{Decodable, Encodable}; +use reth_primitives_traits::{Block, BlockHeader}; /// Abstraction over primitive types which might appear in network messages. See /// [`crate::EthMessage`] for more context. @@ -34,7 +34,8 @@ pub trait NetworkPrimitives: + Eq + 'static; /// Full block type. - type Block: Encodable + type Block: Block
+ + Encodable + Decodable + Send + Sync diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 148eef34b36..09f81e63e54 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-chainspec.workspace = true reth-fs-util.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-net-banlist.workspace = true reth-network-api.workspace = true reth-network-p2p.workspace = true @@ -130,7 +131,8 @@ test-utils = [ "reth-discv4/test-utils", "reth-network/test-utils", "reth-network-p2p/test-utils", - "reth-primitives/test-utils" + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", ] [[bench]] diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index 201dc3e4f78..749b3c347b3 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -7,7 +7,7 @@ use reth_network_peers::PeerId; use crate::message::NewBlockMessage; /// Abstraction over block import. -pub trait BlockImport: std::fmt::Debug + Send + Sync { +pub trait BlockImport: std::fmt::Debug + Send + Sync { /// Invoked for a received `NewBlock` broadcast message from the peer. /// /// > When a `NewBlock` announcement message is received from a peer, the client first verifies @@ -15,35 +15,35 @@ pub trait BlockImport: std::fmt::Debug + Send + Sync { /// /// This is supposed to start verification. The results are then expected to be returned via /// [`BlockImport::poll`]. - fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage); + fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage); /// Returns the results of a [`BlockImport::on_new_block`] - fn poll(&mut self, cx: &mut Context<'_>) -> Poll; + fn poll(&mut self, cx: &mut Context<'_>) -> Poll>; } /// Outcome of the [`BlockImport`]'s block handling. 
#[derive(Debug)] -pub struct BlockImportOutcome { +pub struct BlockImportOutcome { /// Sender of the `NewBlock` message. pub peer: PeerId, /// The result after validating the block - pub result: Result, + pub result: Result, BlockImportError>, } /// Represents the successful validation of a received `NewBlock` message. #[derive(Debug)] -pub enum BlockValidation { +pub enum BlockValidation { /// Basic Header validity check, after which the block should be relayed to peers via a /// `NewBlock` message ValidHeader { /// received block - block: NewBlockMessage, + block: NewBlockMessage, }, /// Successfully imported: state-root matches after execution. The block should be relayed via /// `NewBlockHashes` ValidBlock { /// validated block. - block: NewBlockMessage, + block: NewBlockMessage, }, } @@ -62,10 +62,10 @@ pub enum BlockImportError { #[non_exhaustive] pub struct ProofOfStakeBlockImport; -impl BlockImport for ProofOfStakeBlockImport { - fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockMessage) {} +impl BlockImport for ProofOfStakeBlockImport { + fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockMessage) {} - fn poll(&mut self, _cx: &mut Context<'_>) -> Poll { + fn poll(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Pending } } diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index bdb13875f12..3040577415c 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -8,6 +8,7 @@ use std::{ task::{ready, Context, Poll}, }; +use alloy_consensus::BlockHeader; use alloy_primitives::{Bytes, B256}; use futures::FutureExt; use reth_eth_wire::{ @@ -23,30 +24,30 @@ use tokio::sync::oneshot; /// Internal form of a `NewBlock` message #[derive(Debug, Clone)] -pub struct NewBlockMessage { +pub struct NewBlockMessage { /// Hash of the block pub hash: B256, /// Raw received message - pub block: Arc, + pub block: Arc>, } // === impl NewBlockMessage === -impl NewBlockMessage 
{ +impl NewBlockMessage { /// Returns the block number of the block pub fn number(&self) -> u64 { - self.block.block.header.number + self.block.block.header().number() } } /// All Bi-directional eth-message variants that can be sent to a session or received from a /// session. #[derive(Debug)] -pub enum PeerMessage { +pub enum PeerMessage { /// Announce new block hashes NewBlockHashes(NewBlockHashes), /// Broadcast new block. - NewBlock(NewBlockMessage), + NewBlock(NewBlockMessage), /// Received transactions _from_ the peer ReceivedTransaction(Transactions), /// Broadcast transactions _from_ local _to_ a peer. @@ -54,7 +55,7 @@ pub enum PeerMessage { /// Send new pooled transactions PooledTransactions(NewPooledTransactionHashes), /// All `eth` request variants. - EthRequest(PeerRequest), + EthRequest(PeerRequest), /// Other than eth namespace message Other(RawCapabilityMessage), } diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 10048823c54..f979a912cd4 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -11,18 +11,20 @@ use std::{ time::{Duration, Instant}, }; +use alloy_primitives::Sealable; use futures::{stream::Fuse, SinkExt, StreamExt}; use metrics::Gauge; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PStreamError}, message::{EthBroadcastMessage, RequestPair}, - Capabilities, DisconnectP2P, DisconnectReason, EthMessage, + Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, }; use reth_metrics::common::mpsc::MeteredPollSender; use reth_network_api::PeerRequest; use reth_network_p2p::error::RequestError; use reth_network_peers::PeerId; use reth_network_types::session::config::INITIAL_REQUEST_TIMEOUT; +use reth_primitives_traits::Block; use rustc_hash::FxHashMap; use tokio::{ sync::{mpsc::error::TrySendError, oneshot}, @@ -62,11 +64,11 @@ const TIMEOUT_SCALING: u32 = 3; /// - incoming requests/broadcasts _from 
remote_ via the connection /// - responses for handled ETH requests received from the remote peer. #[allow(dead_code)] -pub(crate) struct ActiveSession { +pub(crate) struct ActiveSession { /// Keeps track of request ids. pub(crate) next_id: u64, /// The underlying connection. - pub(crate) conn: EthRlpxConnection, + pub(crate) conn: EthRlpxConnection, /// Identifier of the node we're connected to. pub(crate) remote_peer_id: PeerId, /// The address we're connected to. @@ -76,19 +78,19 @@ pub(crate) struct ActiveSession { /// Internal identifier of this session pub(crate) session_id: SessionId, /// Incoming commands from the manager - pub(crate) commands_rx: ReceiverStream, + pub(crate) commands_rx: ReceiverStream>, /// Sink to send messages to the [`SessionManager`](super::SessionManager). - pub(crate) to_session_manager: MeteredPollSender, + pub(crate) to_session_manager: MeteredPollSender>, /// A message that needs to be delivered to the session manager - pub(crate) pending_message_to_session: Option, + pub(crate) pending_message_to_session: Option>, /// Incoming internal requests which are delegated to the remote peer. - pub(crate) internal_request_tx: Fuse>, + pub(crate) internal_request_tx: Fuse>>, /// All requests sent to the remote peer we're waiting on a response - pub(crate) inflight_requests: FxHashMap, + pub(crate) inflight_requests: FxHashMap>>, /// All requests that were sent by the remote peer and we're waiting on an internal response - pub(crate) received_requests_from_remote: Vec, + pub(crate) received_requests_from_remote: Vec>, /// Buffered messages that should be handled and sent to the peer. - pub(crate) queued_outgoing: QueuedOutgoingMessages, + pub(crate) queued_outgoing: QueuedOutgoingMessages, /// The maximum time we wait for a response from a peer. pub(crate) internal_request_timeout: Arc, /// Interval when to check for timed out requests. 
@@ -97,10 +99,11 @@ pub(crate) struct ActiveSession { /// considered a protocol violation and the session will initiate a drop. pub(crate) protocol_breach_request_timeout: Duration, /// Used to reserve a slot to guarantee that the termination message is delivered - pub(crate) terminate_message: Option<(PollSender, ActiveSessionMessage)>, + pub(crate) terminate_message: + Option<(PollSender>, ActiveSessionMessage)>, } -impl ActiveSession { +impl ActiveSession { /// Returns `true` if the session is currently in the process of disconnecting fn is_disconnecting(&self) -> bool { self.conn.inner().is_disconnecting() @@ -122,7 +125,7 @@ impl ActiveSession { /// Handle a message read from the connection. /// /// Returns an error if the message is considered to be in violation of the protocol. - fn on_incoming_message(&mut self, msg: EthMessage) -> OnIncomingMessageOutcome { + fn on_incoming_message(&mut self, msg: EthMessage) -> OnIncomingMessageOutcome { /// A macro that handles an incoming request /// This creates a new channel and tries to send the sender half to the session while /// storing the receiver half internally so the pending response can be polled. @@ -182,7 +185,7 @@ impl ActiveSession { } EthMessage::NewBlock(msg) => { let block = - NewBlockMessage { hash: msg.block.header.hash_slow(), block: Arc::new(*msg) }; + NewBlockMessage { hash: msg.block.header().hash_slow(), block: Arc::new(*msg) }; self.try_emit_broadcast(PeerMessage::NewBlock(block)).into() } EthMessage::Transactions(msg) => { @@ -238,7 +241,7 @@ impl ActiveSession { } /// Handle an internal peer request that will be sent to the remote. 
- fn on_internal_peer_request(&mut self, request: PeerRequest, deadline: Instant) { + fn on_internal_peer_request(&mut self, request: PeerRequest, deadline: Instant) { let request_id = self.next_id(); let msg = request.create_request_message(request_id); self.queued_outgoing.push_back(msg.into()); @@ -251,7 +254,7 @@ impl ActiveSession { } /// Handle a message received from the internal network - fn on_internal_peer_message(&mut self, msg: PeerMessage) { + fn on_internal_peer_message(&mut self, msg: PeerMessage) { match msg { PeerMessage::NewBlockHashes(msg) => { self.queued_outgoing.push_back(EthMessage::NewBlockHashes(msg).into()); @@ -289,7 +292,7 @@ impl ActiveSession { /// Handle a Response to the peer /// /// This will queue the response to be sent to the peer - fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult) { + fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult) { match resp.try_into_message(id) { Ok(msg) => { self.queued_outgoing.push_back(msg.into()); @@ -304,7 +307,7 @@ impl ActiveSession { /// /// Returns the message if the bounded channel is currently unable to handle this message. #[allow(clippy::result_large_err)] - fn try_emit_broadcast(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { + fn try_emit_broadcast(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) }; match sender @@ -330,7 +333,7 @@ impl ActiveSession { /// /// Returns the message if the bounded channel is currently unable to handle this message. 
#[allow(clippy::result_large_err)] - fn try_emit_request(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { + fn try_emit_request(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) }; match sender @@ -470,7 +473,7 @@ impl ActiveSession { } } -impl Future for ActiveSession { +impl Future for ActiveSession { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -656,20 +659,20 @@ impl Future for ActiveSession { } /// Tracks a request received from the peer -pub(crate) struct ReceivedRequest { +pub(crate) struct ReceivedRequest { /// Protocol Identifier request_id: u64, /// Receiver half of the channel that's supposed to receive the proper response. - rx: PeerResponse, + rx: PeerResponse, /// Timestamp when we read this msg from the wire. #[allow(dead_code)] received: Instant, } /// A request that waits for a response from the peer -pub(crate) struct InflightRequest { +pub(crate) struct InflightRequest { /// Request we sent to peer and the internal response channel - request: RequestState, + request: RequestState, /// Instant when the request was sent timestamp: Instant, /// Time limit for the response @@ -678,7 +681,7 @@ pub(crate) struct InflightRequest { // === impl InflightRequest === -impl InflightRequest { +impl InflightRequest> { /// Returns true if the request is timedout #[inline] fn is_timed_out(&self, now: Instant) -> bool { @@ -703,17 +706,19 @@ impl InflightRequest { } /// All outcome variants when handling an incoming message -enum OnIncomingMessageOutcome { +enum OnIncomingMessageOutcome { /// Message successfully handled. 
Ok, /// Message is considered to be in violation of the protocol - BadMessage { error: EthStreamError, message: EthMessage }, + BadMessage { error: EthStreamError, message: EthMessage }, /// Currently no capacity to handle the message - NoCapacity(ActiveSessionMessage), + NoCapacity(ActiveSessionMessage), } -impl From> for OnIncomingMessageOutcome { - fn from(res: Result<(), ActiveSessionMessage>) -> Self { +impl From>> + for OnIncomingMessageOutcome +{ + fn from(res: Result<(), ActiveSessionMessage>) -> Self { match res { Ok(_) => Self::Ok, Err(msg) => Self::NoCapacity(msg), @@ -721,29 +726,29 @@ impl From> for OnIncomingMessageOutcome { } } -enum RequestState { +enum RequestState { /// Waiting for the response - Waiting(PeerRequest), + Waiting(R), /// Request already timed out TimedOut, } /// Outgoing messages that can be sent over the wire. -pub(crate) enum OutgoingMessage { +pub(crate) enum OutgoingMessage { /// A message that is owned. - Eth(EthMessage), + Eth(EthMessage), /// A message that may be shared by multiple sessions. 
- Broadcast(EthBroadcastMessage), + Broadcast(EthBroadcastMessage), } -impl From for OutgoingMessage { - fn from(value: EthMessage) -> Self { +impl From> for OutgoingMessage { + fn from(value: EthMessage) -> Self { Self::Eth(value) } } -impl From for OutgoingMessage { - fn from(value: EthBroadcastMessage) -> Self { +impl From> for OutgoingMessage { + fn from(value: EthBroadcastMessage) -> Self { Self::Broadcast(value) } } @@ -760,22 +765,22 @@ fn calculate_new_timeout(current_timeout: Duration, estimated_rtt: Duration) -> } /// A helper struct that wraps the queue of outgoing messages and a metric to track their count -pub(crate) struct QueuedOutgoingMessages { - messages: VecDeque, +pub(crate) struct QueuedOutgoingMessages { + messages: VecDeque>, count: Gauge, } -impl QueuedOutgoingMessages { +impl QueuedOutgoingMessages { pub(crate) const fn new(metric: Gauge) -> Self { Self { messages: VecDeque::new(), count: metric } } - pub(crate) fn push_back(&mut self, message: OutgoingMessage) { + pub(crate) fn push_back(&mut self, message: OutgoingMessage) { self.messages.push_back(message); self.count.increment(1); } - pub(crate) fn pop_front(&mut self) -> Option { + pub(crate) fn pop_front(&mut self) -> Option> { self.messages.pop_front().inspect(|_| self.count.decrement(1)) } @@ -791,8 +796,8 @@ mod tests { use reth_chainspec::MAINNET; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ - EthStream, GetBlockBodies, HelloMessageWithProtocols, P2PStream, Status, StatusBuilder, - UnauthedEthStream, UnauthedP2PStream, + EthNetworkPrimitives, EthStream, GetBlockBodies, HelloMessageWithProtocols, P2PStream, + Status, StatusBuilder, UnauthedEthStream, UnauthedP2PStream, }; use reth_network_peers::pk2id; use reth_network_types::session::config::PROTOCOL_BREACH_REQUEST_TIMEOUT; @@ -808,11 +813,11 @@ mod tests { HelloMessageWithProtocols::builder(pk2id(&server_key.public_key(SECP256K1))).build() } - struct SessionBuilder { + struct SessionBuilder { _remote_capabilities: 
Arc, - active_session_tx: mpsc::Sender, - active_session_rx: ReceiverStream, - to_sessions: Vec>, + active_session_tx: mpsc::Sender>, + active_session_rx: ReceiverStream>, + to_sessions: Vec>>, secret_key: SecretKey, local_peer_id: PeerId, hello: HelloMessageWithProtocols, @@ -821,7 +826,7 @@ mod tests { next_id: usize, } - impl SessionBuilder { + impl SessionBuilder { fn next_id(&mut self) -> SessionId { let id = self.next_id; self.next_id += 1; @@ -858,7 +863,7 @@ mod tests { }) } - async fn connect_incoming(&mut self, stream: TcpStream) -> ActiveSession { + async fn connect_incoming(&mut self, stream: TcpStream) -> ActiveSession { let remote_addr = stream.local_addr().unwrap(); let session_id = self.next_id(); let (_disconnect_tx, disconnect_rx) = oneshot::channel(); diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 628c880c8ea..5329f01028b 100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -11,16 +11,16 @@ use reth_eth_wire::{ errors::EthStreamError, message::EthBroadcastMessage, multiplex::{ProtocolProxy, RlpxSatelliteStream}, - EthMessage, EthStream, EthVersion, P2PStream, + EthMessage, EthNetworkPrimitives, EthStream, EthVersion, NetworkPrimitives, P2PStream, }; use tokio::net::TcpStream; /// The type of the underlying peer network connection. -pub type EthPeerConnection = EthStream>>; +pub type EthPeerConnection = EthStream>, N>; /// Various connection types that at least support the ETH protocol. -pub type EthSatelliteConnection = - RlpxSatelliteStream, EthStream>; +pub type EthSatelliteConnection = + RlpxSatelliteStream, EthStream>; /// Connection types that support the ETH protocol. /// @@ -30,14 +30,14 @@ pub type EthSatelliteConnection = // This type is boxed because the underlying stream is ~6KB, // mostly coming from `P2PStream`'s `snap::Encoder` (2072), and `ECIESStream` (3600). 
#[derive(Debug)] -pub enum EthRlpxConnection { +pub enum EthRlpxConnection { /// A connection that only supports the ETH protocol. - EthOnly(Box), + EthOnly(Box>), /// A connection that supports the ETH protocol and __at least one other__ `RLPx` protocol. - Satellite(Box), + Satellite(Box>), } -impl EthRlpxConnection { +impl EthRlpxConnection { /// Returns the negotiated ETH version. #[inline] pub(crate) const fn version(&self) -> EthVersion { @@ -78,7 +78,7 @@ impl EthRlpxConnection { #[inline] pub fn start_send_broadcast( &mut self, - item: EthBroadcastMessage, + item: EthBroadcastMessage, ) -> Result<(), EthStreamError> { match self { Self::EthOnly(conn) => conn.start_send_broadcast(item), @@ -87,16 +87,16 @@ impl EthRlpxConnection { } } -impl From for EthRlpxConnection { +impl From> for EthRlpxConnection { #[inline] - fn from(conn: EthPeerConnection) -> Self { + fn from(conn: EthPeerConnection) -> Self { Self::EthOnly(Box::new(conn)) } } -impl From for EthRlpxConnection { +impl From> for EthRlpxConnection { #[inline] - fn from(conn: EthSatelliteConnection) -> Self { + fn from(conn: EthSatelliteConnection) -> Self { Self::Satellite(Box::new(conn)) } } @@ -112,22 +112,22 @@ macro_rules! 
delegate_call { } } -impl Stream for EthRlpxConnection { - type Item = Result; +impl Stream for EthRlpxConnection { + type Item = Result, EthStreamError>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { delegate_call!(self.poll_next(cx)) } } -impl Sink for EthRlpxConnection { +impl Sink> for EthRlpxConnection { type Error = EthStreamError; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { delegate_call!(self.poll_ready(cx)) } - fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { + fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { delegate_call!(self.start_send(item)) } diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index a022e670419..f80428630d9 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -5,7 +5,7 @@ use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use reth_ecies::ECIESError; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, - EthVersion, Status, + EthVersion, NetworkPrimitives, Status, }; use reth_network_api::PeerInfo; use reth_network_peers::{NodeRecord, PeerId}; @@ -54,7 +54,7 @@ impl PendingSessionHandle { /// Within an active session that supports the `Ethereum Wire Protocol `, three high-level tasks can /// be performed: chain synchronization, block propagation and transaction exchange. #[derive(Debug)] -pub struct ActiveSessionHandle { +pub struct ActiveSessionHandle { /// The direction of the session pub(crate) direction: Direction, /// The assigned id for this session @@ -68,7 +68,7 @@ pub struct ActiveSessionHandle { /// Announced capabilities of the peer. 
pub(crate) capabilities: Arc, /// Sender half of the command channel used send commands _to_ the spawned session - pub(crate) commands_to_session: mpsc::Sender, + pub(crate) commands_to_session: mpsc::Sender>, /// The client's name and version pub(crate) client_version: Arc, /// The address we're connected to @@ -81,7 +81,7 @@ pub struct ActiveSessionHandle { // === impl ActiveSessionHandle === -impl ActiveSessionHandle { +impl ActiveSessionHandle { /// Sends a disconnect command to the session. pub fn disconnect(&self, reason: Option) { // Note: we clone the sender which ensures the channel has capacity to send the message @@ -93,7 +93,7 @@ impl ActiveSessionHandle { pub async fn try_disconnect( &self, reason: Option, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.commands_to_session.clone().send(SessionCommand::Disconnect { reason }).await } @@ -162,7 +162,7 @@ impl ActiveSessionHandle { /// /// A session starts with a `Handshake`, followed by a `Hello` message which #[derive(Debug)] -pub enum PendingSessionEvent { +pub enum PendingSessionEvent { /// Represents a successful `Hello` and `Status` exchange: Established { /// An internal identifier for the established session @@ -179,7 +179,7 @@ pub enum PendingSessionEvent { status: Arc, /// The actual connection stream which can be used to send and receive `eth` protocol /// messages - conn: EthRlpxConnection, + conn: EthRlpxConnection, /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, /// The remote node's user agent, usually containing the client name and version @@ -222,20 +222,20 @@ pub enum PendingSessionEvent { /// Commands that can be sent to the spawned session. 
#[derive(Debug)] -pub enum SessionCommand { +pub enum SessionCommand { /// Disconnect the connection Disconnect { /// Why the disconnect was initiated reason: Option, }, /// Sends a message to the peer - Message(PeerMessage), + Message(PeerMessage), } /// Message variants an active session can produce and send back to the /// [`SessionManager`](crate::session::SessionManager) #[derive(Debug)] -pub enum ActiveSessionMessage { +pub enum ActiveSessionMessage { /// Session was gracefully disconnected. Disconnected { /// The remote node's public key @@ -257,7 +257,7 @@ pub enum ActiveSessionMessage { /// Identifier of the remote peer. peer_id: PeerId, /// Message received from the peer. - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidMessage { diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 30b1cda9da9..a95f0e88910 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -28,11 +28,11 @@ use futures::{future::Either, io, FutureExt, StreamExt}; use reth_ecies::{stream::ECIESStream, ECIESError}; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, multiplex::RlpxProtocolMultiplexer, - Capabilities, DisconnectReason, EthVersion, HelloMessageWithProtocols, Status, - UnauthedEthStream, UnauthedP2PStream, + Capabilities, DisconnectReason, EthVersion, HelloMessageWithProtocols, NetworkPrimitives, + Status, UnauthedEthStream, UnauthedP2PStream, }; use reth_metrics::common::mpsc::MeteredPollSender; -use reth_network_api::PeerRequestSender; +use reth_network_api::{PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use reth_network_types::SessionsConfig; use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head}; @@ -62,7 +62,7 @@ pub struct SessionId(usize); /// Manages a set of sessions. 
#[must_use = "Session Manager must be polled to process session events."] #[derive(Debug)] -pub struct SessionManager { +pub struct SessionManager { /// Tracks the identifier for the next session. next_id: usize, /// Keeps track of all sessions @@ -93,21 +93,21 @@ pub struct SessionManager { /// session is authenticated, it can be moved to the `active_session` set. pending_sessions: FxHashMap, /// All active sessions that are ready to exchange messages. - active_sessions: HashMap, + active_sessions: HashMap>, /// The original Sender half of the [`PendingSessionEvent`] channel. /// /// When a new (pending) session is created, the corresponding [`PendingSessionHandle`] will /// get a clone of this sender half. - pending_sessions_tx: mpsc::Sender, + pending_sessions_tx: mpsc::Sender>, /// Receiver half that listens for [`PendingSessionEvent`] produced by pending sessions. - pending_session_rx: ReceiverStream, + pending_session_rx: ReceiverStream>, /// The original Sender half of the [`ActiveSessionMessage`] channel. /// /// When active session state is reached, the corresponding [`ActiveSessionHandle`] will get a /// clone of this sender half. - active_session_tx: MeteredPollSender, + active_session_tx: MeteredPollSender>, /// Receiver half that listens for [`ActiveSessionMessage`] produced by pending sessions. - active_session_rx: ReceiverStream, + active_session_rx: ReceiverStream>, /// Additional `RLPx` sub-protocols to be used by the session manager. extra_protocols: RlpxSubProtocols, /// Tracks the ongoing graceful disconnections attempts for incoming connections. @@ -118,7 +118,7 @@ pub struct SessionManager { // === impl SessionManager === -impl SessionManager { +impl SessionManager { /// Creates a new empty [`SessionManager`]. #[allow(clippy::too_many_arguments)] pub fn new( @@ -182,7 +182,7 @@ impl SessionManager { } /// Returns a borrowed reference to the active sessions. 
- pub const fn active_sessions(&self) -> &HashMap { + pub const fn active_sessions(&self) -> &HashMap> { &self.active_sessions } @@ -348,7 +348,7 @@ impl SessionManager { } /// Sends a message to the peer's session - pub fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) { + pub fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) { if let Some(session) = self.active_sessions.get_mut(peer_id) { let _ = session.commands_to_session.try_send(SessionCommand::Message(msg)).inspect_err( |e| { @@ -373,7 +373,7 @@ impl SessionManager { } /// Removes the [`PendingSessionHandle`] if it exists. - fn remove_active_session(&mut self, id: &PeerId) -> Option { + fn remove_active_session(&mut self, id: &PeerId) -> Option> { let session = self.active_sessions.remove(id)?; self.counter.dec_active(&session.direction); Some(session) @@ -411,7 +411,7 @@ impl SessionManager { /// This polls all the session handles and returns [`SessionEvent`]. /// /// Active sessions are prioritized. - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { // Poll events from active sessions match self.active_session_rx.poll_next_unpin(cx) { Poll::Pending => {} @@ -663,7 +663,7 @@ impl DisconnectionsCounter { /// Events produced by the [`SessionManager`] #[derive(Debug)] -pub enum SessionEvent { +pub enum SessionEvent { /// A new session was successfully authenticated. /// /// This session is now able to exchange data. 
@@ -681,7 +681,7 @@ pub enum SessionEvent { /// The Status message the peer sent during the `eth` handshake status: Arc, /// The channel for sending messages to the peer with the session - messages: PeerRequestSender, + messages: PeerRequestSender>, /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, /// The maximum time that the session waits for a response from the peer before timing out @@ -702,7 +702,7 @@ pub enum SessionEvent { /// The remote node's public key peer_id: PeerId, /// Message received from the peer. - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidMessage { @@ -797,12 +797,12 @@ impl PendingSessionHandshakeError { pub struct ExceedsSessionLimit(pub(crate) u32); /// Starts a pending session authentication with a timeout. -pub(crate) async fn pending_session_with_timeout( +pub(crate) async fn pending_session_with_timeout( timeout: Duration, session_id: SessionId, remote_addr: SocketAddr, direction: Direction, - events: mpsc::Sender, + events: mpsc::Sender>, f: F, ) where F: Future, @@ -823,11 +823,11 @@ pub(crate) async fn pending_session_with_timeout( /// /// This will wait for the _incoming_ handshake request and answer it. #[allow(clippy::too_many_arguments)] -pub(crate) async fn start_pending_incoming_session( +pub(crate) async fn start_pending_incoming_session( disconnect_rx: oneshot::Receiver<()>, session_id: SessionId, stream: TcpStream, - events: mpsc::Sender, + events: mpsc::Sender>, remote_addr: SocketAddr, secret_key: SecretKey, hello: HelloMessageWithProtocols, @@ -854,9 +854,9 @@ pub(crate) async fn start_pending_incoming_session( /// Starts the authentication process for a connection initiated by a remote peer. 
#[instrument(skip_all, fields(%remote_addr, peer_id), target = "net")] #[allow(clippy::too_many_arguments)] -async fn start_pending_outbound_session( +async fn start_pending_outbound_session( disconnect_rx: oneshot::Receiver<()>, - events: mpsc::Sender, + events: mpsc::Sender>, session_id: SessionId, remote_addr: SocketAddr, remote_peer_id: PeerId, @@ -903,9 +903,9 @@ async fn start_pending_outbound_session( /// Authenticates a session #[allow(clippy::too_many_arguments)] -async fn authenticate( +async fn authenticate( disconnect_rx: oneshot::Receiver<()>, - events: mpsc::Sender, + events: mpsc::Sender>, stream: TcpStream, session_id: SessionId, remote_addr: SocketAddr, @@ -986,7 +986,7 @@ async fn get_ecies_stream( /// If additional [`RlpxSubProtocolHandlers`] are provided, the hello message will be updated to /// also negotiate the additional protocols. #[allow(clippy::too_many_arguments)] -async fn authenticate_stream( +async fn authenticate_stream( stream: UnauthedP2PStream>, session_id: SessionId, remote_addr: SocketAddr, @@ -996,7 +996,7 @@ async fn authenticate_stream( mut status: Status, fork_filter: ForkFilter, mut extra_handlers: RlpxSubProtocolHandlers, -) -> PendingSessionEvent { +) -> PendingSessionEvent { // Add extra protocols to the hello message extra_handlers.retain(|handler| hello.try_add_protocol(handler.protocol()).is_ok()); diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 9ad7b53518b..3bafbf25856 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -12,6 +12,7 @@ use std::{ task::{Context, Poll}, }; +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use rand::seq::SliceRandom; use reth_eth_wire::{ @@ -22,6 +23,7 @@ use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequest use reth_network_peers::PeerId; use reth_network_types::{PeerAddr, PeerKind}; use reth_primitives::ForkId; +use reth_primitives_traits::Block; use tokio::sync::oneshot; use 
tracing::{debug, trace}; @@ -78,7 +80,7 @@ pub struct NetworkState { /// Manages connections to peers. peers_manager: PeersManager, /// Buffered messages until polled. - queued_messages: VecDeque, + queued_messages: VecDeque>, /// The client type that can interact with the chain. /// /// This type is used to fetch the block number after we established a session and received the @@ -185,12 +187,12 @@ impl NetworkState { /// > the total number of peers) using the `NewBlock` message. /// /// See also - pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) { + pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) { // send a `NewBlock` message to a fraction of the connected peers (square root of the total // number of peers) let num_propagate = (self.active_peers.len() as f64).sqrt() as u64 + 1; - let number = msg.block.block.header.number; + let number = msg.block.block.header().number(); let mut count = 0; // Shuffle to propagate to a random sample of peers on every block announcement @@ -227,8 +229,8 @@ impl NetworkState { /// Completes the block propagation process started in [`NetworkState::announce_new_block()`] /// but sending `NewBlockHash` broadcast to all peers that haven't seen it yet. - pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) { - let number = msg.block.block.header.number; + pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) { + let number = msg.block.block.header().number(); let hashes = NewBlockHashes(vec![BlockHashNumber { hash: msg.hash, number }]); for (peer_id, peer) in &mut self.active_peers { if peer.blocks.contains(&msg.hash) { @@ -385,7 +387,10 @@ impl NetworkState { } /// Handle the outcome of processed response, for example directly queue another request. 
- fn on_block_response_outcome(&mut self, outcome: BlockResponseOutcome) -> Option { + fn on_block_response_outcome( + &mut self, + outcome: BlockResponseOutcome, + ) -> Option> { match outcome { BlockResponseOutcome::Request(peer, request) => { self.handle_block_request(peer, request); @@ -406,7 +411,7 @@ impl NetworkState { &mut self, peer: PeerId, resp: PeerResponseResult, - ) -> Option { + ) -> Option> { match resp { PeerResponseResult::BlockHeaders(res) => { let outcome = self.state_fetcher.on_block_headers_response(peer, res)?; @@ -421,7 +426,7 @@ impl NetworkState { } /// Advances the state - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { loop { // drain buffered messages if let Some(message) = self.queued_messages.pop_front() { @@ -515,13 +520,13 @@ pub(crate) struct ActivePeer { /// Message variants triggered by the [`NetworkState`] #[derive(Debug)] -pub(crate) enum StateAction { +pub(crate) enum StateAction { /// Dispatch a `NewBlock` message to the peer NewBlock { /// Target of the message peer_id: PeerId, /// The `NewBlock` message - block: NewBlockMessage, + block: NewBlockMessage, }, NewBlockHashes { /// Target of the message diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index c1fe9f9e231..655934f207a 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -9,9 +9,9 @@ use std::{ use futures::Stream; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, - EthVersion, Status, + EthNetworkPrimitives, EthVersion, NetworkPrimitives, Status, }; -use reth_network_api::PeerRequestSender; +use reth_network_api::{PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use tracing::trace; @@ -50,23 +50,23 @@ use crate::{ /// `include_mmd!("docs/mermaid/swarm.mmd`") #[derive(Debug)] #[must_use = "Swarm does nothing unless polled"] -pub(crate) struct Swarm { 
+pub(crate) struct Swarm { /// Listens for new incoming connections. incoming: ConnectionListener, /// All sessions. - sessions: SessionManager, + sessions: SessionManager, /// Tracks the entire state of the network and handles events received from the sessions. - state: NetworkState, + state: NetworkState, } // === impl Swarm === -impl Swarm { +impl Swarm { /// Configures a new swarm instance. pub(crate) const fn new( incoming: ConnectionListener, - sessions: SessionManager, - state: NetworkState, + sessions: SessionManager, + state: NetworkState, ) -> Self { Self { incoming, sessions, state } } @@ -77,12 +77,12 @@ impl Swarm { } /// Access to the state. - pub(crate) const fn state(&self) -> &NetworkState { + pub(crate) const fn state(&self) -> &NetworkState { &self.state } /// Mutable access to the state. - pub(crate) fn state_mut(&mut self) -> &mut NetworkState { + pub(crate) fn state_mut(&mut self) -> &mut NetworkState { &mut self.state } @@ -92,17 +92,17 @@ impl Swarm { } /// Access to the [`SessionManager`]. - pub(crate) const fn sessions(&self) -> &SessionManager { + pub(crate) const fn sessions(&self) -> &SessionManager { &self.sessions } /// Mutable access to the [`SessionManager`]. - pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager { + pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager { &mut self.sessions } } -impl Swarm { +impl Swarm { /// Triggers a new outgoing connection to the given node pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: PeerId) { self.sessions.dial_outbound(remote_addr, remote_id) @@ -112,7 +112,7 @@ impl Swarm { /// /// This either updates the state or produces a new [`SwarmEvent`] that is bubbled up to the /// manager. 
- fn on_session_event(&mut self, event: SessionEvent) -> Option { + fn on_session_event(&mut self, event: SessionEvent) -> Option> { match event { SessionEvent::SessionEstablished { peer_id, @@ -181,7 +181,7 @@ impl Swarm { /// Callback for events produced by [`ConnectionListener`]. /// /// Depending on the event, this will produce a new [`SwarmEvent`]. - fn on_connection(&mut self, event: ListenerEvent) -> Option { + fn on_connection(&mut self, event: ListenerEvent) -> Option> { match event { ListenerEvent::Error(err) => return Some(SwarmEvent::TcpListenerError(err)), ListenerEvent::ListenerClosed { local_address: address } => { @@ -229,7 +229,7 @@ impl Swarm { } /// Hook for actions pulled from the state - fn on_state_action(&mut self, event: StateAction) -> Option { + fn on_state_action(&mut self, event: StateAction) -> Option> { match event { StateAction::Connect { remote_addr, peer_id } => { self.dial_outbound(remote_addr, peer_id); @@ -286,8 +286,8 @@ impl Swarm { } } -impl Stream for Swarm { - type Item = SwarmEvent; +impl Stream for Swarm { + type Item = SwarmEvent; /// This advances all components. /// @@ -338,13 +338,13 @@ impl Stream for Swarm { /// All events created or delegated by the [`Swarm`] that represents changes to the state of the /// network. -pub(crate) enum SwarmEvent { +pub(crate) enum SwarmEvent { /// Events related to the actual network protocol. ValidMessage { /// The peer that sent the message peer_id: PeerId, /// Message received from the peer - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. 
InvalidCapabilityMessage { @@ -394,7 +394,7 @@ pub(crate) enum SwarmEvent { capabilities: Arc, /// negotiated eth version version: EthVersion, - messages: PeerRequestSender, + messages: PeerRequestSender>, status: Arc, direction: Direction, }, diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 14941ffed0f..6ec184a2154 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -6,7 +6,7 @@ use alloy_consensus::{BlockHeader, Transaction, TxType}; use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; use alloy_primitives::{Address, B256}; -use crate::{Block, InMemorySize}; +use crate::InMemorySize; /// Abstraction for block's body. pub trait BlockBody: @@ -47,11 +47,6 @@ pub trait BlockBody: /// Returns [`Requests`] in block, if any. fn requests(&self) -> Option<&Requests>; - /// Create a [`Block`] from the body and its header. - fn into_block>(self, header: Self::Header) -> T { - T::from((header, self)) - } - /// Calculate the transaction root for the block body. fn calculate_tx_root(&self) -> B256; diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index cfc9e9a5503..33008c4381d 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -3,12 +3,11 @@ pub mod body; pub mod header; -use alloc::{fmt, vec::Vec}; +use alloc::fmt; -use alloy_primitives::{Address, B256}; use reth_codecs::Compact; -use crate::{BlockBody, BlockHeader, FullBlockHeader, InMemorySize}; +use crate::{BlockHeader, FullBlockHeader, InMemorySize}; /// Helper trait that unifies all behaviour required by block to support full node operations. pub trait FullBlock: Block + Compact {} @@ -30,79 +29,17 @@ pub trait Block: + Eq + serde::Serialize + for<'a> serde::Deserialize<'a> - + From<(Self::Header, Self::Body)> - + Into<(Self::Header, Self::Body)> + InMemorySize { /// Header part of the block. 
type Header: BlockHeader; /// The block's body contains the transactions in the block. - type Body: BlockBody; + type Body: Send + Sync + Unpin + 'static; - /// A block and block hash. - type SealedBlock; - - /// A block and addresses of senders of transactions in it. - type BlockWithSenders; - - /// Returns reference to [`BlockHeader`] type. + /// Returns reference to block header. fn header(&self) -> &Self::Header; - /// Returns reference to [`BlockBody`] type. + /// Returns reference to block body. fn body(&self) -> &Self::Body; - - /// Calculate the header hash and seal the block so that it can't be changed. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn seal_slow(self) -> Self::SealedBlock; - - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn seal(self, hash: B256) -> Self::SealedBlock; - - /// Expensive operation that recovers transaction signer. See - /// `SealedBlockWithSenders`. - fn senders(&self) -> Option> { - self.body().recover_signers() - } - - /// Transform into a `BlockWithSenders`. - /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - fn with_senders_unchecked(self, senders: Vec
) -> Self::BlockWithSenders { - self.try_with_senders_unchecked(senders).expect("stored block is valid") - } - - /// Transform into a `BlockWithSenders` using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also `SignedTransaction::recover_signer_unchecked`. - /// - /// Returns an error if a signature is invalid. - // todo: can be default impl if block with senders type is made generic over block and migrated - // to alloy - #[track_caller] - fn try_with_senders_unchecked( - self, - senders: Vec
, - ) -> Result, Self>; - - /// **Expensive**. Transform into a `BlockWithSenders` by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn with_recovered_senders(self) -> Option>; } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index c0586ed6a8f..6743cab3dc3 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -87,6 +87,19 @@ impl Block { } } +impl reth_primitives_traits::Block for Block { + type Header = Header; + type Body = BlockBody; + + fn body(&self) -> &Self::Body { + &self.body + } + + fn header(&self) -> &Self::Header { + &self.header + } +} + impl InMemorySize for Block { /// Calculates a heuristic for the in-memory size of the [`Block`]. #[inline] From 98841676847980245107f03500a48fa5e8fb0436 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 12 Nov 2024 20:40:29 +0100 Subject: [PATCH 436/970] chore: move standalone types to types crate (#12483) --- crates/static-file/static-file/src/lib.rs | 4 +- .../static-file/src/static_file_producer.rs | 36 +---------------- .../{static-file => types}/src/event.rs | 2 +- crates/static-file/types/src/lib.rs | 40 +++++++++++++++++++ 4 files changed, 43 insertions(+), 39 deletions(-) rename crates/static-file/{static-file => types}/src/event.rs (87%) diff --git a/crates/static-file/static-file/src/lib.rs b/crates/static-file/static-file/src/lib.rs index 1bfe4134e95..6c95baaae92 100644 --- a/crates/static-file/static-file/src/lib.rs +++ b/crates/static-file/static-file/src/lib.rs @@ -7,14 +7,12 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod event; pub mod segments; mod static_file_producer; -pub use event::StaticFileProducerEvent; pub use static_file_producer::{ StaticFileProducer, StaticFileProducerInner, StaticFileProducerResult, - 
StaticFileProducerWithResult, StaticFileTargets, + StaticFileProducerWithResult, }; // Re-export for convenience. diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 2c442aedfa3..0f07ec32821 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -10,7 +10,7 @@ use reth_provider::{ }; use reth_prune_types::PruneModes; use reth_stages_types::StageId; -use reth_static_file_types::HighestStaticFiles; +use reth_static_file_types::{HighestStaticFiles, StaticFileTargets}; use reth_storage_errors::provider::ProviderResult; use reth_tokio_util::{EventSender, EventStream}; use std::{ @@ -66,40 +66,6 @@ pub struct StaticFileProducerInner { event_sender: EventSender, } -/// Static File targets, per data segment, measured in [`BlockNumber`]. -#[derive(Debug, Clone, Eq, PartialEq)] -pub struct StaticFileTargets { - headers: Option>, - receipts: Option>, - transactions: Option>, -} - -impl StaticFileTargets { - /// Returns `true` if any of the targets are [Some]. - pub const fn any(&self) -> bool { - self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() - } - - // Returns `true` if all targets are either [`None`] or has beginning of the range equal to the - // highest static_file. 
- fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool { - [ - (self.headers.as_ref(), static_files.headers), - (self.receipts.as_ref(), static_files.receipts), - (self.transactions.as_ref(), static_files.transactions), - ] - .iter() - .all(|(target_block_range, highest_static_fileted_block)| { - target_block_range.map_or(true, |target_block_range| { - *target_block_range.start() == - highest_static_fileted_block.map_or(0, |highest_static_fileted_block| { - highest_static_fileted_block + 1 - }) - }) - }) - } -} - impl StaticFileProducerInner { fn new(provider: Provider, prune_modes: PruneModes) -> Self { Self { provider, prune_modes, event_sender: Default::default() } diff --git a/crates/static-file/static-file/src/event.rs b/crates/static-file/types/src/event.rs similarity index 87% rename from crates/static-file/static-file/src/event.rs rename to crates/static-file/types/src/event.rs index a11333ce53a..1e5d2cb6032 100644 --- a/crates/static-file/static-file/src/event.rs +++ b/crates/static-file/types/src/event.rs @@ -1,7 +1,7 @@ use crate::StaticFileTargets; use std::time::Duration; -/// An event emitted by a [`StaticFileProducer`][crate::StaticFileProducer]. +/// An event emitted by the static file producer. #[derive(Debug, PartialEq, Eq, Clone)] pub enum StaticFileProducerEvent { /// Emitted when static file producer started running. diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 6e954a781b7..4e9bf90f1c9 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -9,11 +9,14 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod compression; +mod event; mod segment; use alloy_primitives::BlockNumber; pub use compression::Compression; +pub use event::StaticFileProducerEvent; pub use segment::{SegmentConfig, SegmentHeader, SegmentRangeInclusive, StaticFileSegment}; +use std::ops::RangeInclusive; /// Default static file block count. 
pub const DEFAULT_BLOCKS_PER_STATIC_FILE: u64 = 500_000; @@ -62,6 +65,43 @@ impl HighestStaticFiles { } } +/// Static File targets, per data segment, measured in [`BlockNumber`]. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct StaticFileTargets { + /// Targeted range of headers. + pub headers: Option>, + /// Targeted range of receipts. + pub receipts: Option>, + /// Targeted range of transactions. + pub transactions: Option>, +} + +impl StaticFileTargets { + /// Returns `true` if any of the targets are [Some]. + pub const fn any(&self) -> bool { + self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() + } + + /// Returns `true` if all targets are either [`None`] or has beginning of the range equal to the + /// highest static file. + pub fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool { + [ + (self.headers.as_ref(), static_files.headers), + (self.receipts.as_ref(), static_files.receipts), + (self.transactions.as_ref(), static_files.transactions), + ] + .iter() + .all(|(target_block_range, highest_static_fileted_block)| { + target_block_range.map_or(true, |target_block_range| { + *target_block_range.start() == + highest_static_fileted_block.map_or(0, |highest_static_fileted_block| { + highest_static_fileted_block + 1 + }) + }) + }) + } +} + /// Each static file has a fixed number of blocks. This gives out the range where the requested /// block is positioned. Used for segment filename. 
pub const fn find_fixed_range( From b6558f6bcf83e3ea7c1cf755492406f0cc3c2577 Mon Sep 17 00:00:00 2001 From: clabby Date: Tue, 12 Nov 2024 15:57:41 -0500 Subject: [PATCH 437/970] chore: Update Holocene timestamp for Sepolia (#12479) --- crates/optimism/chainspec/src/lib.rs | 12 ++++++------ crates/optimism/hardforks/src/hardfork.rs | 6 +++--- crates/optimism/node/src/engine.rs | 10 +++++----- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index a835b02bd1d..c110c4b0821 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -573,11 +573,11 @@ mod tests { ), ( Head { number: 0, timestamp: 1723478400, ..Default::default() }, - ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 1732201200 }, + ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 1732633200 }, ), ( - Head { number: 0, timestamp: 1732201200, ..Default::default() }, - ForkId { hash: ForkHash([0x98, 0x1c, 0x21, 0x69]), next: 0 }, + Head { number: 0, timestamp: 1732633200, ..Default::default() }, + ForkId { hash: ForkHash([0x4a, 0x1c, 0x79, 0x2e]), next: 0 }, ), ], ); @@ -644,11 +644,11 @@ mod tests { ), ( Head { number: 0, timestamp: 1723478400, ..Default::default() }, - ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 1732201200 }, + ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 1732633200 }, ), ( - Head { number: 0, timestamp: 1732201200, ..Default::default() }, - ForkId { hash: ForkHash([0x59, 0x5e, 0x2e, 0x6e]), next: 0 }, + Head { number: 0, timestamp: 1732633200, ..Default::default() }, + ForkId { hash: ForkHash([0x8b, 0x5e, 0x76, 0x29]), next: 0 }, ), ], ); diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 91b2584e4f9..661816ae5fe 100644 --- a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -158,7 +158,7 @@ impl OpHardfork { Self::Ecotone => 
Some(1708534800), Self::Fjord => Some(1716998400), Self::Granite => Some(1723478400), - Self::Holocene => Some(1732201200), + Self::Holocene => Some(1732633200), }, ) } @@ -257,7 +257,7 @@ impl OpHardfork { (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), (Self::Granite.boxed(), ForkCondition::Timestamp(1723478400)), - (Self::Holocene.boxed(), ForkCondition::Timestamp(1732201200)), + (Self::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), ]) } @@ -289,7 +289,7 @@ impl OpHardfork { (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), (Self::Granite.boxed(), ForkCondition::Timestamp(1723478400)), - (Self::Holocene.boxed(), ForkCondition::Timestamp(1732201200)), + (Self::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), ]) } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 69755d10446..dd4d0c13f24 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -214,7 +214,7 @@ mod test { #[test] fn test_well_formed_attributes_pre_holocene() { let validator = OpEngineValidator::new(get_chainspec()); - let attributes = get_attributes(None, 1732201199); + let attributes = get_attributes(None, 1732633199); let result = Date: Tue, 12 Nov 2024 21:58:16 +0100 Subject: [PATCH 438/970] chore: Move`StatsReader` trait to storage-api and reexport it from old provider crate (#12485) --- crates/storage/provider/src/lib.rs | 4 ++-- crates/storage/provider/src/traits/mod.rs | 3 --- crates/storage/storage-api/src/lib.rs | 3 +++ .../storage/{provider/src/traits => storage-api/src}/stats.rs | 0 4 files changed, 5 insertions(+), 5 deletions(-) rename crates/storage/{provider/src/traits => storage-api/src}/stats.rs (100%) diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 2b002fe11ec..deccdea2831 100644 --- 
a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -46,8 +46,8 @@ pub use reth_chain_state::{ CanonStateNotifications, CanonStateSubscriptions, }; -// reexport HistoryWriter trait -pub use reth_storage_api::HistoryWriter; +// reexport traits to avoid breaking changes +pub use reth_storage_api::{HistoryWriter, StatsReader}; pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 722721525bf..3034eda8044 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -29,9 +29,6 @@ pub use trie::{StorageTrieWriter, TrieWriter}; mod static_file_provider; pub use static_file_provider::StaticFileProviderFactory; -mod stats; -pub use stats::StatsReader; - mod full; pub use full::{FullProvider, FullRpcProvider}; diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 21d02325afe..4980335066f 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -56,3 +56,6 @@ pub mod noop; mod history; pub use history::*; + +mod stats; +pub use stats::*; diff --git a/crates/storage/provider/src/traits/stats.rs b/crates/storage/storage-api/src/stats.rs similarity index 100% rename from crates/storage/provider/src/traits/stats.rs rename to crates/storage/storage-api/src/stats.rs From 3c5668600073f07138797bb3d87e589b4f5d0184 Mon Sep 17 00:00:00 2001 From: Ayodeji Akinola Date: Wed, 13 Nov 2024 00:12:45 +0100 Subject: [PATCH 439/970] feat(rpc): add compression to JSON-RPC (#12352) --- Cargo.lock | 10 +- Cargo.toml | 4 +- crates/rpc/rpc-builder/src/lib.rs | 19 +- crates/rpc/rpc-layer/Cargo.toml | 3 +- crates/rpc/rpc-layer/src/compression_layer.rs | 169 ++++++++++++++++++ crates/rpc/rpc-layer/src/lib.rs | 2 + 6 files changed, 195 insertions(+), 12 deletions(-) create mode 100644 
crates/rpc/rpc-layer/src/compression_layer.rs diff --git a/Cargo.lock b/Cargo.lock index 5ce7b0c0ec7..ab53c0862ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8944,12 +8944,14 @@ version = "1.1.1" dependencies = [ "alloy-rpc-types-engine", "http", + "http-body-util", "jsonrpsee", "jsonrpsee-http-client", "pin-project", "reqwest", "tokio", "tower 0.4.13", + "tower-http", "tracing", ] @@ -10904,12 +10906,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ "async-compression", - "base64 0.21.7", + "base64 0.22.1", "bitflags 2.6.0", "bytes", "futures-core", @@ -10926,7 +10928,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower 0.4.13", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", diff --git a/Cargo.toml b/Cargo.toml index cd7054ea1e7..9f46533014a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -552,7 +552,8 @@ hyper-util = "0.1.5" pin-project = "1.0.12" reqwest = { version = "0.12", default-features = false } tower = "0.4" -tower-http = "0.5" +tower-http = "0.6" + # p2p discv5 = "0.8.0" @@ -567,6 +568,7 @@ jsonrpsee-types = "0.24" # http http = "1.0" http-body = "1.0" +http-body-util = "0.1.2" jsonwebtoken = "9" proptest-arbitrary-interop = "0.1.0" diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 385b92af3d0..40e40962349 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -166,6 +166,7 @@ use std::{ time::{Duration, SystemTime, UNIX_EPOCH}, }; +use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; use error::{ConflictingModules, RpcError, ServerKind}; use eth::DynEthApiBuilder; use http::{header::AUTHORIZATION, HeaderMap}; @@ -197,15 +198,13 @@ use 
reth_rpc_eth_api::{ EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcReceipt, RpcTransaction, }; use reth_rpc_eth_types::{EthConfig, EthStateCache, EthSubscriptionIdProvider}; -use reth_rpc_layer::{AuthLayer, Claims, JwtAuthValidator, JwtSecret}; +use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; use serde::{Deserialize, Serialize}; use tower::Layer; use tower_http::cors::CorsLayer; -use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; - pub use cors::CorsDomainError; // re-export for convenience @@ -1647,6 +1646,12 @@ impl RpcServerConfig { jwt_secret.map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) } + /// Returns a [`CompressionLayer`] that adds compression support (gzip, deflate, brotli, zstd) + /// based on the client's `Accept-Encoding` header + fn maybe_compression_layer() -> Option { + Some(CompressionLayer::new()) + } + /// Builds and starts the configured server(s): http, ws, ipc. /// /// If both http and ws are on the same port, they are combined into one server. @@ -1711,7 +1716,8 @@ impl RpcServerConfig { .set_http_middleware( tower::ServiceBuilder::new() .option_layer(Self::maybe_cors_layer(cors)?) - .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)) + .option_layer(Self::maybe_compression_layer()), ) .set_rpc_middleware( self.rpc_middleware.clone().layer( @@ -1783,8 +1789,9 @@ impl RpcServerConfig { .http_only() .set_http_middleware( tower::ServiceBuilder::new() - .option_layer(Self::maybe_cors_layer(self.http_cors_domains.clone())?) - .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), + .option_layer(Self::maybe_cors_layer(self.ws_cors_domains.clone())?) 
+ .option_layer(Self::maybe_jwt_layer(self.jwt_secret)) + .option_layer(Self::maybe_compression_layer()), ) .set_rpc_middleware( self.rpc_middleware.clone().layer( diff --git a/crates/rpc/rpc-layer/Cargo.toml b/crates/rpc/rpc-layer/Cargo.toml index ec8dcb8229e..d44e5e89f01 100644 --- a/crates/rpc/rpc-layer/Cargo.toml +++ b/crates/rpc/rpc-layer/Cargo.toml @@ -17,10 +17,11 @@ http.workspace = true jsonrpsee-http-client.workspace = true pin-project.workspace = true tower.workspace = true - +tower-http = { workspace = true, features = ["full"] } tracing.workspace = true [dev-dependencies] reqwest.workspace = true tokio = { workspace = true, features = ["macros"] } jsonrpsee = { workspace = true, features = ["server"] } +http-body-util.workspace=true diff --git a/crates/rpc/rpc-layer/src/compression_layer.rs b/crates/rpc/rpc-layer/src/compression_layer.rs new file mode 100644 index 00000000000..cf15f04aa78 --- /dev/null +++ b/crates/rpc/rpc-layer/src/compression_layer.rs @@ -0,0 +1,169 @@ +use jsonrpsee_http_client::{HttpBody, HttpRequest, HttpResponse}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use tower::{Layer, Service}; +use tower_http::compression::{Compression, CompressionLayer as TowerCompressionLayer}; + +/// This layer is a wrapper around [`tower_http::compression::CompressionLayer`] that integrates +/// with jsonrpsee's HTTP types. It automatically compresses responses based on the client's +/// Accept-Encoding header. +#[allow(missing_debug_implementations)] +#[derive(Clone)] +pub struct CompressionLayer { + inner_layer: TowerCompressionLayer, +} + +impl CompressionLayer { + /// Creates a new compression layer with zstd, gzip, brotli and deflate enabled. + pub fn new() -> Self { + Self { + inner_layer: TowerCompressionLayer::new().gzip(true).br(true).deflate(true).zstd(true), + } + } +} + +impl Default for CompressionLayer { + /// Creates a new compression layer with default settings. 
+ /// See [`CompressionLayer::new`] for details. + fn default() -> Self { + Self::new() + } +} + +impl Layer for CompressionLayer { + type Service = CompressionService; + + fn layer(&self, inner: S) -> Self::Service { + CompressionService { compression: self.inner_layer.layer(inner) } + } +} + +/// Service that performs response compression. +/// +/// Created by [`CompressionLayer`]. +#[allow(missing_debug_implementations)] +#[derive(Clone)] +pub struct CompressionService { + compression: Compression, +} + +impl Service for CompressionService +where + S: Service, + S::Future: Send + 'static, +{ + type Response = HttpResponse; + type Error = S::Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.compression.poll_ready(cx) + } + + fn call(&mut self, req: HttpRequest) -> Self::Future { + let fut = self.compression.call(req); + + Box::pin(async move { + let resp = fut.await?; + let (parts, compressed_body) = resp.into_parts(); + let http_body = HttpBody::new(compressed_body); + + Ok(Self::Response::from_parts(parts, http_body)) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::header::{ACCEPT_ENCODING, CONTENT_ENCODING}; + use http_body_util::BodyExt; + use jsonrpsee_http_client::{HttpRequest, HttpResponse}; + use std::{convert::Infallible, future::ready}; + + const TEST_DATA: &str = "compress test data "; + const REPEAT_COUNT: usize = 1000; + + #[derive(Clone)] + struct MockRequestService; + + impl Service for MockRequestService { + type Response = HttpResponse; + type Error = Infallible; + type Future = std::future::Ready>; + + fn poll_ready( + &mut self, + _: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + std::task::Poll::Ready(Ok(())) + } + + fn call(&mut self, _: HttpRequest) -> Self::Future { + let body = HttpBody::from(TEST_DATA.repeat(REPEAT_COUNT)); + let response = HttpResponse::builder().body(body).unwrap(); + ready(Ok(response)) + } + } + + fn setup_compression_service( + ) 
-> impl Service { + CompressionLayer::new().layer(MockRequestService) + } + + async fn get_response_size(response: HttpResponse) -> usize { + // Get the total size of the response body + response.into_body().collect().await.unwrap().to_bytes().len() + } + + #[tokio::test] + async fn test_gzip_compression() { + let mut service = setup_compression_service(); + let request = + HttpRequest::builder().header(ACCEPT_ENCODING, "gzip").body(HttpBody::empty()).unwrap(); + + let uncompressed_len = TEST_DATA.repeat(REPEAT_COUNT).len(); + + // Make the request + let response = service.call(request).await.unwrap(); + + // Verify the response has gzip content-encoding + assert_eq!( + response.headers().get(CONTENT_ENCODING).unwrap(), + "gzip", + "Response should be gzip encoded" + ); + + // Verify the response body is actually compressed (should be smaller than original) + let compressed_size = get_response_size(response).await; + assert!( + compressed_size < uncompressed_len, + "Compressed size ({compressed_size}) should be smaller than original size ({uncompressed_len})" + ); + } + + #[tokio::test] + async fn test_no_compression_when_not_requested() { + // Create a service with compression + let mut service = setup_compression_service(); + let request = HttpRequest::builder().body(HttpBody::empty()).unwrap(); + + let response = service.call(request).await.unwrap(); + assert!( + response.headers().get(CONTENT_ENCODING).is_none(), + "Response should not be compressed when not requested" + ); + + let uncompressed_len = TEST_DATA.repeat(REPEAT_COUNT).len(); + + // Verify the response body matches the original size + let response_size = get_response_size(response).await; + assert!( + response_size == uncompressed_len, + "Response size ({response_size}) should equal original size ({uncompressed_len})" + ); + } +} diff --git a/crates/rpc/rpc-layer/src/lib.rs b/crates/rpc/rpc-layer/src/lib.rs index 8387bb160e8..540daf5592b 100644 --- a/crates/rpc/rpc-layer/src/lib.rs +++ 
b/crates/rpc/rpc-layer/src/lib.rs @@ -13,9 +13,11 @@ use jsonrpsee_http_client::HttpResponse; mod auth_client_layer; mod auth_layer; +mod compression_layer; mod jwt_validator; pub use auth_layer::{AuthService, ResponseFuture}; +pub use compression_layer::CompressionLayer; // Export alloy JWT types pub use alloy_rpc_types_engine::{Claims, JwtError, JwtSecret}; From 115a20ea6a6f42fd6abcf23d2cdaebb3cf25b51e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 00:12:59 +0100 Subject: [PATCH 440/970] fix: deposit tx gasPrice must be 0 (#12486) --- crates/optimism/rpc/src/eth/transaction.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 20aa379a0c1..39fa288feed 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -122,11 +122,18 @@ where block_hash, block_number, index: transaction_index, base_fee, .. 
} = tx_info; - let effective_gas_price = base_fee - .map(|base_fee| { - inner.effective_tip_per_gas(base_fee as u64).unwrap_or_default() + base_fee - }) - .unwrap_or_else(|| inner.max_fee_per_gas()); + let effective_gas_price = if inner.is_deposit() { + // For deposits, we must always set the `gasPrice` field to 0 in rpc + // deposit tx don't have a gas price field, but serde of `Transaction` will take care of + // it + 0 + } else { + base_fee + .map(|base_fee| { + inner.effective_tip_per_gas(base_fee as u64).unwrap_or_default() + base_fee + }) + .unwrap_or_else(|| inner.max_fee_per_gas()) + }; Ok(Transaction { inner: alloy_rpc_types_eth::Transaction { From a71dd9c91f1f27b40bf289fbac332ad495c97fb6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 00:13:11 +0100 Subject: [PATCH 441/970] chore: introduce tuple type for pruned info (#12484) --- crates/prune/prune/src/event.rs | 8 ++------ crates/prune/prune/src/pruner.rs | 18 +++++++++++------- crates/prune/types/src/lib.rs | 3 ++- crates/prune/types/src/pruner.rs | 14 ++++++++++++-- 4 files changed, 27 insertions(+), 16 deletions(-) diff --git a/crates/prune/prune/src/event.rs b/crates/prune/prune/src/event.rs index 95a90d7628c..4f5806e592e 100644 --- a/crates/prune/prune/src/event.rs +++ b/crates/prune/prune/src/event.rs @@ -1,5 +1,5 @@ use alloy_primitives::BlockNumber; -use reth_prune_types::{PruneProgress, PruneSegment}; +use reth_prune_types::PrunedSegmentInfo; use std::time::Duration; /// An event emitted by a [Pruner][crate::Pruner]. @@ -8,9 +8,5 @@ pub enum PrunerEvent { /// Emitted when pruner started running. Started { tip_block_number: BlockNumber }, /// Emitted when pruner finished running. 
- Finished { - tip_block_number: BlockNumber, - elapsed: Duration, - stats: Vec<(PruneSegment, usize, PruneProgress)>, - }, + Finished { tip_block_number: BlockNumber, elapsed: Duration, stats: Vec }, } diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs index d21560cae60..0ad149bb654 100644 --- a/crates/prune/prune/src/pruner.rs +++ b/crates/prune/prune/src/pruner.rs @@ -9,7 +9,7 @@ use reth_exex_types::FinishedExExHeight; use reth_provider::{ DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, }; -use reth_prune_types::{PruneLimiter, PruneProgress, PruneSegment, PrunerOutput}; +use reth_prune_types::{PruneLimiter, PruneProgress, PrunedSegmentInfo, PrunerOutput}; use reth_tokio_util::{EventSender, EventStream}; use std::time::{Duration, Instant}; use tokio::sync::watch; @@ -21,8 +21,6 @@ pub type PrunerResult = Result; /// The pruner type itself with the result of [`Pruner::run`] pub type PrunerWithResult = (Pruner, PrunerResult); -type PrunerStats = Vec<(PruneSegment, usize, PruneProgress)>; - /// Pruner with preset provider factory. pub type PrunerWithFactory = Pruner<::ProviderRW, PF>; @@ -174,14 +172,15 @@ where /// be pruned according to the highest `static_files`. Segments are parts of the database that /// represent one or more tables. /// - /// Returns [`PrunerStats`], total number of entries pruned, and [`PruneProgress`]. + /// Returns a list of stats per pruned segment, total number of entries pruned, and + /// [`PruneProgress`]. 
fn prune_segments( &mut self, provider: &Provider, tip_block_number: BlockNumber, limiter: &mut PruneLimiter, - ) -> Result<(PrunerStats, usize, PrunerOutput), PrunerError> { - let mut stats = PrunerStats::new(); + ) -> Result<(Vec, usize, PrunerOutput), PrunerError> { + let mut stats = Vec::with_capacity(self.segments.len()); let mut pruned = 0; let mut output = PrunerOutput { progress: PruneProgress::Finished, @@ -249,7 +248,12 @@ where if segment_output.pruned > 0 { limiter.increment_deleted_entries_count_by(segment_output.pruned); pruned += segment_output.pruned; - stats.push((segment.segment(), segment_output.pruned, segment_output.progress)); + let info = PrunedSegmentInfo { + segment: segment.segment(), + pruned: segment_output.pruned, + progress: segment_output.progress, + }; + stats.push(info); } } else { debug!(target: "pruner", segment = ?segment.segment(), purpose = ?segment.purpose(), "Nothing to prune for the segment"); diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 8483b7b7370..0722e760faf 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -19,7 +19,8 @@ pub use checkpoint::PruneCheckpoint; pub use limiter::PruneLimiter; pub use mode::PruneMode; pub use pruner::{ - PruneInterruptReason, PruneProgress, PrunerOutput, SegmentOutput, SegmentOutputCheckpoint, + PruneInterruptReason, PruneProgress, PrunedSegmentInfo, PrunerOutput, SegmentOutput, + SegmentOutputCheckpoint, }; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; use serde::{Deserialize, Serialize}; diff --git a/crates/prune/types/src/pruner.rs b/crates/prune/types/src/pruner.rs index dbfafff639e..3046dda0679 100644 --- a/crates/prune/types/src/pruner.rs +++ b/crates/prune/types/src/pruner.rs @@ -1,6 +1,5 @@ -use alloy_primitives::{BlockNumber, TxNumber}; - use crate::{PruneCheckpoint, PruneLimiter, PruneMode, PruneSegment}; +use alloy_primitives::{BlockNumber, TxNumber}; /// Pruner run output. 
#[derive(Debug)] @@ -17,6 +16,17 @@ impl From for PrunerOutput { } } +/// Represents information of a pruner run for a segment. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PrunedSegmentInfo { + /// The pruned segment + pub segment: PruneSegment, + /// Number of pruned entries + pub pruned: usize, + /// Prune progress + pub progress: PruneProgress, +} + /// Segment pruning output. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct SegmentOutput { From ce50370ba5855bb1c2b925e2df63496c849a6972 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 13 Nov 2024 00:15:50 +0100 Subject: [PATCH 442/970] primitives: rm alloy `Withdrawal` reexport (#12487) --- crates/primitives-traits/src/lib.rs | 1 - crates/primitives-traits/src/withdrawal.rs | 6 +----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index afcc74a894d..6848da45814 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -41,7 +41,6 @@ pub use block::{ }; mod withdrawal; -pub use withdrawal::Withdrawal; mod error; pub use error::{GotExpected, GotExpectedBoxed}; diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 699229684ec..0849ab6202e 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -1,12 +1,8 @@ //! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. -/// Re-export from `alloy_eips`. 
-#[doc(inline)] -pub use alloy_eips::eip4895::Withdrawal; - #[cfg(test)] mod tests { - use super::*; + use alloy_eips::eip4895::Withdrawal; use alloy_primitives::Address; use alloy_rlp::{RlpDecodable, RlpEncodable}; use proptest::proptest; From a96dee17f595c9d8d704f008a64f57ce9c8a2e84 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 01:06:25 +0100 Subject: [PATCH 443/970] chore: bump alloy 064 (#12488) --- Cargo.lock | 168 +++++++++--------- Cargo.toml | 64 +++---- crates/optimism/cli/src/ovm_file_codec.rs | 2 +- crates/optimism/rpc/src/eth/transaction.rs | 31 ++-- crates/primitives/src/transaction/mod.rs | 63 +++---- .../primitives/src/transaction/signature.rs | 2 +- 6 files changed, 170 insertions(+), 160 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab53c0862ee..9748712e245 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef11c6b2dfbf77dca7bafc6759860391395f07c04d5486f2a2e2563d2961639b" +checksum = "ae09ffd7c29062431dd86061deefe4e3c6f07fa0d674930095f8dcedb0baf02c" dependencies = [ "alloy-eips", "alloy-primitives", @@ -131,9 +131,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8faa407ef916bfe0677c52c9b2258ce0698c53e9e15a837d1501e3ae9e57421a" +checksum = "66430a72d5bf5edead101c8c2f0a24bada5ec9f3cf9909b3e08b6d6899b4803e" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -198,9 +198,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d6c0c1744a7af7d325dca6b5c5bb431a6307c0961088f7a236ca2694c4a87e" +checksum = "5b6aa3961694b30ba53d41006131a2fca3bdab22e4c344e46db2c639e7c2dfdd" dependencies = [ "alloy-eip2930", 
"alloy-eip7702", @@ -219,9 +219,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a5a0a01ef6ec3cd3ebd52a7b3bc7f8a92b23e478e69c07abd94abf05e6b48e" +checksum = "e53f7877ded3921d18a0a9556d55bedf84535567198c9edab2aa23106da91855" dependencies = [ "alloy-primitives", "alloy-serde", @@ -230,9 +230,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded610181f3dad5810f6ff12d1a99994cf9b42d2fcb7709029352398a5da5ae6" +checksum = "b84c506bf264110fa7e90d9924f742f40ef53c6572ea56a0b0bd714a567ed389" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -242,9 +242,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fd0e2cff5ab68defc5050ff9e81cb053c5b52cf4809fc8786664898e29ae75" +checksum = "3694b7e480728c0b3e228384f223937f14c10caef5a4c766021190fc8f283d35" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -256,9 +256,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c9eca0c04ca8a663966ce7f5b19c03927f2b4d82910cb76cb4008490cfa838" +checksum = "ea94b8ceb5c75d7df0a93ba0acc53b55a22b47b532b600a800a87ef04eb5b0b4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -279,9 +279,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c3050f19dc93a7f09fef670c8db04a15e7e2901494ca40decbce323be69643" +checksum = "df9f3e281005943944d15ee8491534a1c7b3cbf7a7de26f8c433b842b93eb5f9" dependencies = [ "alloy-consensus", "alloy-eips", @@ -292,9 +292,9 @@ dependencies 
= [ [[package]] name = "alloy-node-bindings" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5ebd44d0ab30f1018dc1ff01686ea1a3ae732601841a4fb277c9d0b3a34bf50" +checksum = "c9805d126f24be459b958973c0569c73e1aadd27d4535eee82b2b6764aa03616" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -309,9 +309,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd58d377699e6cfeab52c4a9d28bdc4ef37e2bd235ff2db525071fe37a2e9af5" +checksum = "9fce5dbd6a4f118eecc4719eaa9c7ffc31c315e6c5ccde3642db927802312425" dependencies = [ "alloy-rlp", "arbitrary", @@ -341,9 +341,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8e5a28e7c4c04afc0f20b2aecf6f9214d6cfd5009187c0b8616a8f8918739c" +checksum = "40c1f9eede27bf4c13c099e8e64d54efd7ce80ef6ea47478aa75d5d74e2dba3b" dependencies = [ "alloy-chains", "alloy-consensus", @@ -382,9 +382,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "365dd813ec271a14febc31ea8ed64185856534f5644511f0c7a2961db060d878" +checksum = "90f1f34232f77341076541c405482e4ae12f0ee7153d8f9969fc1691201b2247" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -423,9 +423,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336362936bb9fef88f27d51f2ede8c15cdfdb7f81b042e74257770052547101" +checksum = "374dbe0dc3abdc2c964f36b3d3edf9cdb3db29d16bda34aa123f03d810bec1dd" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -448,9 +448,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.6.3" 
+version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac9a46bc01bc27dbf4dd27d46986eda661ffe99e78aea3078a77b8c064072b01" +checksum = "c74832aa474b670309c20fffc2a869fa141edab7c79ff7963fad0a08de60bae1" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -461,9 +461,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82845a6f1ed33ef4edf79aa7cb091df31a532675921fb85041fbd8d6e029093d" +checksum = "6bfd9b2cc3a1985f1f6da5afc41120256f9f9316fcd89e054cea99dbb10172f6" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -473,9 +473,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e73c06c3e44866d304fe28e8cebc8354f99fe405cc7c9bd23ed92eaebca3c07" +checksum = "5ca97963132f78ddfc60e43a017348e6d52eea983925c23652f5b330e8e02291" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -485,9 +485,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f9f6f071674c62424b62e22307aa83a35a0b1b84820649cc82034a50389ddc6" +checksum = "922fa76678d2f9f07ea1b19309b5cfbf244c6029dcba3515227b515fdd6ed4a7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -499,9 +499,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63a857818fe47dacaa7cc7a9cdcfee212cf1ebf119ab7bd157065d434671892d" +checksum = "ba2253bee958658ebd614c07a61c40580e09dd1fad3f017684314442332ab753" dependencies = [ "alloy-primitives", "serde", @@ -509,9 +509,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.6.3" +version = "0.6.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ee44332315ef1adde384e44db3b5724d74d0cd0e0856a681c4db2b4da3a423e" +checksum = "3f56294dce86af23ad6ee8df46cf8b0d292eb5d1ff67dc88a0886051e32b1faf" dependencies = [ "alloy-consensus", "alloy-eips", @@ -530,9 +530,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58fa055e02d04bc70443ecce984951fb5be02d2c843c640ca48237cdec66af1" +checksum = "a8a477281940d82d29315846c7216db45b15e90bcd52309da9f54bcf7ad94a11" dependencies = [ "alloy-consensus", "alloy-eips", @@ -551,9 +551,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "debf779b847b058b7c9cdef576f5ef539bc3032c5f6e5c1c2f51820b4f74e6d9" +checksum = "8647f8135ee3d5de1cf196706c905c05728a4e38bb4a5b61a7214bd1ba8f60a6" dependencies = [ "alloy-eips", "alloy-primitives", @@ -564,9 +564,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1319edeae0e5f453424d658f8f450a5b1090b9ee6c0c014dc216b42f11c9dc57" +checksum = "ecd8b4877ef520c138af702097477cdd19504a8e1e4675ba37e92ba40f2d3c6f" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -578,9 +578,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fcb4b823dcd7228a89be1be85a4fa8008ad6d91b169b61f75f36b6e7386f37b" +checksum = "1d4ab49acf90a71f7fb894dc5fd485f1f07a1e348966c714c4d1e0b7478850a8" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -590,9 +590,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.6.3" +version = "0.6.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "feafd71e0e252b063fe4b07962beedf0445e66b07b4b44af178863d21e75b0fa" +checksum = "4dfa4a7ccf15b2492bb68088692481fd6b2604ccbee1d0d6c44c21427ae4df83" dependencies = [ "alloy-primitives", "arbitrary", @@ -602,9 +602,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebad84d52550351438ec7f151dbc551f870c31eecf23b473df5b779a91eee8ca" +checksum = "2e10aec39d60dc27edcac447302c7803d2371946fb737245320a05b78eb2fafd" dependencies = [ "alloy-primitives", "async-trait", @@ -616,9 +616,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed742d76943b5ebaabfdf3d0d8b69a4377fc2981c7955a807e33a3469aed0cdc" +checksum = "d8396f6dff60700bc1d215ee03d86ff56de268af96e2bf833a14d0bafcab9882" dependencies = [ "alloy-consensus", "alloy-network", @@ -634,9 +634,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a1b42ac8f45e2f49f4bcdd72cbfde0bb148f5481d403774ffa546e48b83efc1" +checksum = "9343289b4a7461ed8bab8618504c995c049c082b70c7332efd7b32125633dc05" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -648,9 +648,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06318f1778e57f36333e850aa71bd1bb5e560c10279e236622faae0470c50412" +checksum = "4222d70bec485ceccc5d8fd4f2909edd65b5d5e43d4aca0b5dcee65d519ae98f" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -666,9 +666,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.8.11" +version = "0.8.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaebb9b0ad61a41345a22c9279975c0cdd231b97947b10d7aad1cf0a7181e4a5" +checksum = "2e17f2677369571b976e51ea1430eb41c3690d344fef567b840bfc0b01b6f83a" dependencies = [ "const-hex", "dunce", @@ -681,9 +681,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12c71028bfbfec210e24106a542aad3def7caf1a70e2c05710e92a98481980d3" +checksum = "aa64d80ae58ffaafdff9d5d84f58d03775f66c84433916dc9a64ed16af5755da" dependencies = [ "serde", "winnow", @@ -691,9 +691,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374d7fb042d68ddfe79ccb23359de3007f6d4d53c13f703b64fb0db422132111" +checksum = "6520d427d4a8eb7aa803d852d7a52ceb0c519e784c292f64bb339e636918cf27" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -704,9 +704,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da63700a2b3176b3009a6d3672d0c657280a517dcec7659c991c55e863a83165" +checksum = "f99acddb34000d104961897dbb0240298e8b775a7efffb9fda2a1a3efedd65b3" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -724,9 +724,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6613c3abc567b710217d241650ef73cfb8df9bcdc2ef23fdedabf363637e2a00" +checksum = "5dc013132e34eeadaa0add7e74164c1503988bfba8bae885b32e0918ba85a8a6" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -739,9 +739,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7087a28734aac88a606884cdde8c89ad053bd1c0580c787e31f917a8e4a7cbdd" +checksum = "063edc0660e81260653cc6a95777c29d54c2543a668aa5da2359fb450d25a1ba" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -758,9 +758,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "672797b3f7bcbe67f712f9e8e5703b22f24594bd2b248a90916bdb58811b8b6e" +checksum = "abd170e600801116d5efe64f74a4fc073dbbb35c807013a7d0a388742aeebba0" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -5297,9 +5297,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b5745eca869a0b476fbd34025ac40c06a15c46ffc10d6b1c40d21475b05f835" +checksum = "bff54d1d790eca1f3aedbd666162e9c42eceff90b9f9d24b352ed9c2df1e901a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5315,9 +5315,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa6b2f26a84984213bc12649dfd8466a46ddeede3b8d2d936583000a8362b117" +checksum = "ae84fd64fbc53b3e958ea5a96d7f5633e4a111092e41c51672c2d91835c09efb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5329,9 +5329,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67085a07a35e71db0a95ac923a2de2c186a37c5f376a1e4dee19b5ef8a6ffcaa" +checksum = "d71e777450ee3e9c5177e00865e9b4496472b623c50f146fc907b667c6b4ab37" dependencies = [ "alloy-consensus", "alloy-network", @@ -5344,9 +5344,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "880331b1b7718236a016eb7ac5530abcf7d5ca8b7ad78ac6c3dc8f73826ce9ee" +checksum = "1e854d2d4958d0a213731560172e8455536329ee9574473ff79fa953da91eb6a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5364,9 +5364,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b75a52c8659756cfe1119f7711e94749c8dec6ad82408f3c55641ae413fb83" +checksum = "981b7f8ab11fe85ba3c1723702f000429b8d0c16b5883c93d577895f262cbac6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5383,9 +5383,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622eabdff1739ef163aeb8e8385d5936fa54c14cfa55b06f72f1c8faa987f715" +checksum = "a227b16c9c5df68b112c8db9d268ebf46b3e26c744b4d59d4949575cd603a292" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10452,9 +10452,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edf42e81491fb8871b74df3d222c64ae8cbc1269ea509fa768a3ed3e1b0ac8cb" +checksum = "f76fe0a3e1476bdaa0775b9aec5b869ed9520c2b2fedfe9c6df3618f8ea6290b" dependencies = [ "paste", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 9f46533014a..3f65bceb4bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -431,46 +431,46 @@ alloy-rlp = "0.3.4" alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.6.3", default-features = false } -alloy-contract = { version = "0.6.3", default-features = false } -alloy-eips = { version = "0.6.3", default-features = false } -alloy-genesis = { version = "0.6.3", default-features = false } -alloy-json-rpc = { version = "0.6.3", default-features = false } -alloy-network = { version = "0.6.3", default-features = 
false } -alloy-network-primitives = { version = "0.6.3", default-features = false } -alloy-node-bindings = { version = "0.6.3", default-features = false } -alloy-provider = { version = "0.6.3", features = [ +alloy-consensus = { version = "0.6.4", default-features = false } +alloy-contract = { version = "0.6.4", default-features = false } +alloy-eips = { version = "0.6.4", default-features = false } +alloy-genesis = { version = "0.6.4", default-features = false } +alloy-json-rpc = { version = "0.6.4", default-features = false } +alloy-network = { version = "0.6.4", default-features = false } +alloy-network-primitives = { version = "0.6.4", default-features = false } +alloy-node-bindings = { version = "0.6.4", default-features = false } +alloy-provider = { version = "0.6.4", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.6.3", default-features = false } -alloy-rpc-client = { version = "0.6.3", default-features = false } -alloy-rpc-types = { version = "0.6.3", features = [ +alloy-pubsub = { version = "0.6.4", default-features = false } +alloy-rpc-client = { version = "0.6.4", default-features = false } +alloy-rpc-types = { version = "0.6.4", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.6.3", default-features = false } -alloy-rpc-types-anvil = { version = "0.6.3", default-features = false } -alloy-rpc-types-beacon = { version = "0.6.3", default-features = false } -alloy-rpc-types-debug = { version = "0.6.3", default-features = false } -alloy-rpc-types-engine = { version = "0.6.3", default-features = false } -alloy-rpc-types-eth = { version = "0.6.3", default-features = false } -alloy-rpc-types-mev = { version = "0.6.3", default-features = false } -alloy-rpc-types-trace = { version = "0.6.3", default-features = false } -alloy-rpc-types-txpool = { version = "0.6.3", default-features = false } -alloy-serde = { version = "0.6.3", default-features = false } -alloy-signer = { version = 
"0.6.3", default-features = false } -alloy-signer-local = { version = "0.6.3", default-features = false } -alloy-transport = { version = "0.6.3" } -alloy-transport-http = { version = "0.6.3", features = [ +alloy-rpc-types-admin = { version = "0.6.4", default-features = false } +alloy-rpc-types-anvil = { version = "0.6.4", default-features = false } +alloy-rpc-types-beacon = { version = "0.6.4", default-features = false } +alloy-rpc-types-debug = { version = "0.6.4", default-features = false } +alloy-rpc-types-engine = { version = "0.6.4", default-features = false } +alloy-rpc-types-eth = { version = "0.6.4", default-features = false } +alloy-rpc-types-mev = { version = "0.6.4", default-features = false } +alloy-rpc-types-trace = { version = "0.6.4", default-features = false } +alloy-rpc-types-txpool = { version = "0.6.4", default-features = false } +alloy-serde = { version = "0.6.4", default-features = false } +alloy-signer = { version = "0.6.4", default-features = false } +alloy-signer-local = { version = "0.6.4", default-features = false } +alloy-transport = { version = "0.6.4" } +alloy-transport-http = { version = "0.6.4", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.6.3", default-features = false } -alloy-transport-ws = { version = "0.6.3", default-features = false } +alloy-transport-ipc = { version = "0.6.4", default-features = false } +alloy-transport-ws = { version = "0.6.4", default-features = false } # op -op-alloy-rpc-types = "0.6.4" -op-alloy-rpc-types-engine = "0.6.4" -op-alloy-network = "0.6.4" -op-alloy-consensus = "0.6.4" +op-alloy-rpc-types = "0.6.5" +op-alloy-rpc-types-engine = "0.6.5" +op-alloy-network = "0.6.5" +op-alloy-consensus = "0.6.5" # misc aquamarine = "0.6" diff --git a/crates/optimism/cli/src/ovm_file_codec.rs b/crates/optimism/cli/src/ovm_file_codec.rs index 624305c4b6e..b29d30093ec 100644 --- a/crates/optimism/cli/src/ovm_file_codec.rs +++ 
b/crates/optimism/cli/src/ovm_file_codec.rs @@ -149,7 +149,7 @@ impl TransactionSigned { chain_id: None, }; - let v: u64 = Decodable::decode(data)?; + let v = Decodable::decode(data)?; let r: U256 = Decodable::decode(data)?; let s: U256 = Decodable::decode(data)?; diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 39fa288feed..11e33817229 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,7 +1,7 @@ //! Loads and formats OP transaction RPC response. use alloy_consensus::{Signed, Transaction as _}; -use alloy_primitives::{Bytes, B256}; +use alloy_primitives::{Bytes, Sealable, Sealed, B256}; use alloy_rpc_types_eth::TransactionInfo; use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; @@ -86,6 +86,7 @@ where let from = tx.signer(); let TransactionSigned { transaction, signature, hash } = tx.into_signed(); let mut deposit_receipt_version = None; + let mut deposit_nonce = None; let inner = match transaction { reth_primitives::Transaction::Legacy(tx) => { @@ -102,19 +103,16 @@ where Signed::new_unchecked(tx, signature, hash).into() } reth_primitives::Transaction::Deposit(tx) => { - let deposit_info = self - .inner + self.inner .provider() .receipt_by_hash(hash) .map_err(Self::Error::from_eth_err)? 
- .and_then(|receipt| receipt.deposit_receipt_version.zip(receipt.deposit_nonce)); + .inspect(|receipt| { + deposit_receipt_version = receipt.deposit_receipt_version; + deposit_nonce = receipt.deposit_nonce; + }); - if let Some((version, _)) = deposit_info { - deposit_receipt_version = Some(version); - // TODO: set nonce - } - - OpTxEnvelope::Deposit(tx) + OpTxEnvelope::Deposit(tx.seal_unchecked(hash)) } }; @@ -144,6 +142,7 @@ where from, effective_gas_price: Some(effective_gas_price), }, + deposit_nonce, deposit_receipt_version, }) } @@ -154,7 +153,17 @@ where OpTxEnvelope::Eip2930(tx) => &mut tx.tx_mut().input, OpTxEnvelope::Legacy(tx) => &mut tx.tx_mut().input, OpTxEnvelope::Eip7702(tx) => &mut tx.tx_mut().input, - OpTxEnvelope::Deposit(tx) => &mut tx.input, + OpTxEnvelope::Deposit(tx) => { + let (mut deposit, hash) = std::mem::replace( + tx, + Sealed::new_unchecked(Default::default(), Default::default()), + ) + .split(); + deposit.input = deposit.input.slice(..4); + let mut deposit = deposit.seal_unchecked(hash); + std::mem::swap(tx, &mut deposit); + return + } _ => return, }; *input = input.slice(..4); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 26815fc38de..f0caa2863aa 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -330,21 +330,6 @@ impl Transaction { self.as_eip4844().map(TxEip4844::blob_gas) } - /// Returns the effective gas price for the given base fee. - /// - /// If the transaction is a legacy or EIP2930 transaction, the gas price is returned. 
- pub const fn effective_gas_price(&self, base_fee: Option) -> u128 { - match self { - Self::Legacy(tx) => tx.gas_price, - Self::Eip2930(tx) => tx.gas_price, - Self::Eip1559(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), - Self::Eip4844(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), - Self::Eip7702(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), - #[cfg(feature = "optimism")] - Self::Deposit(_) => 0, - } - } - /// Returns the effective miner gas tip cap (`gasTipCap`) for the given base fee: /// `min(maxFeePerGas - baseFee, maxPriorityFeePerGas)` /// @@ -755,6 +740,18 @@ impl alloy_consensus::Transaction for Transaction { } } + fn effective_gas_price(&self, base_fee: Option) -> u128 { + match self { + Self::Legacy(tx) => tx.effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.effective_gas_price(base_fee), + Self::Eip4844(tx) => tx.effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.effective_gas_price(base_fee), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.effective_gas_price(base_fee), + } + } + fn is_dynamic_fee(&self) -> bool { match self { Self::Legacy(_) | Self::Eip2930(_) => false, @@ -764,6 +761,18 @@ impl alloy_consensus::Transaction for Transaction { } } + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.kind(), + Self::Eip2930(tx) => tx.kind(), + Self::Eip1559(tx) => tx.kind(), + Self::Eip4844(tx) => tx.kind(), + Self::Eip7702(tx) => tx.kind(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.kind(), + } + } + fn value(&self) -> U256 { match self { Self::Legacy(tx) => tx.value(), @@ -835,18 +844,6 @@ impl alloy_consensus::Transaction for Transaction { Self::Deposit(tx) => tx.authorization_list(), } } - - fn kind(&self) -> TxKind { - match self { - Self::Legacy(tx) => tx.kind(), - Self::Eip2930(tx) => tx.kind(), - Self::Eip1559(tx) => tx.kind(), - Self::Eip4844(tx) => tx.kind(), - Self::Eip7702(tx) => tx.kind(), - 
#[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.kind(), - } - } } /// Signed transaction without its Hash. Used type for inserting into the DB. @@ -1457,10 +1454,18 @@ impl alloy_consensus::Transaction for TransactionSigned { self.deref().priority_fee_or_price() } + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.deref().effective_gas_price(base_fee) + } + fn is_dynamic_fee(&self) -> bool { self.deref().is_dynamic_fee() } + fn kind(&self) -> TxKind { + self.deref().kind() + } + fn value(&self) -> U256 { self.deref().value() } @@ -1484,10 +1489,6 @@ impl alloy_consensus::Transaction for TransactionSigned { fn authorization_list(&self) -> Option<&[SignedAuthorization]> { self.deref().authorization_list() } - - fn kind(&self) -> TxKind { - self.deref().kind() - } } impl From for TransactionSigned { diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index ef4fab0fccb..8fab719947a 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -15,7 +15,7 @@ const SECP256K1N_HALF: U256 = U256::from_be_bytes([ pub(crate) fn decode_with_eip155_chain_id( buf: &mut &[u8], ) -> alloy_rlp::Result<(Signature, Option)> { - let v: u64 = Decodable::decode(buf)?; + let v = Decodable::decode(buf)?; let r: U256 = Decodable::decode(buf)?; let s: U256 = Decodable::decode(buf)?; From b0a39e8abcb9a117510d1dd281fa308253b92c3a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 01:06:33 +0100 Subject: [PATCH 444/970] chore: import static file types directly (#12490) --- Cargo.lock | 3 +-- crates/node/events/Cargo.toml | 3 +-- crates/node/events/src/node.rs | 5 ++--- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9748712e245..b6744e1f1ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8089,13 +8089,12 @@ dependencies = [ "humantime", "pin-project", "reth-beacon-consensus", - "reth-network", 
"reth-network-api", "reth-primitives-traits", "reth-provider", "reth-prune", "reth-stages", - "reth-static-file", + "reth-static-file-types", "tokio", "tracing", ] diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 6af3d8cbeb4..cc754d58320 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -14,11 +14,10 @@ workspace = true # reth reth-provider.workspace = true reth-beacon-consensus.workspace = true -reth-network = { workspace = true, features = ["serde"] } reth-network-api.workspace = true reth-stages.workspace = true reth-prune.workspace = true -reth-static-file.workspace = true +reth-static-file-types.workspace = true reth-primitives-traits.workspace = true # ethereum diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index fb0f4d48d77..39c6355e36e 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -8,12 +8,11 @@ use futures::Stream; use reth_beacon_consensus::{ BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, }; -use reth_network::NetworkEvent; -use reth_network_api::PeersInfo; +use reth_network_api::{NetworkEvent, PeersInfo}; use reth_primitives_traits::{format_gas, format_gas_throughput}; use reth_prune::PrunerEvent; use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId}; -use reth_static_file::StaticFileProducerEvent; +use reth_static_file_types::StaticFileProducerEvent; use std::{ fmt::{Display, Formatter}, future::Future, From cef7ec80c13acac11ac4c2b93bf6bd19f4298d8c Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Tue, 12 Nov 2024 19:00:07 -0600 Subject: [PATCH 445/970] Move CanonChainTracker to storage-api (#12491) --- Cargo.lock | 1 + crates/storage/provider/src/traits/mod.rs | 3 --- crates/storage/storage-api/Cargo.toml | 1 + .../{provider/src/traits => storage-api/src}/chain_info.rs | 0 crates/storage/storage-api/src/lib.rs 
| 3 +++ 5 files changed, 5 insertions(+), 3 deletions(-) rename crates/storage/{provider/src/traits => storage-api/src}/chain_info.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index b6744e1f1ec..1525d53b8ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9121,6 +9121,7 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", + "alloy-rpc-types-engine", "auto_impl", "reth-chainspec", "reth-db", diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 3034eda8044..5542ea168ab 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -9,9 +9,6 @@ pub use reth_evm::provider::EvmEnvProvider; mod block; pub use block::*; -mod chain_info; -pub use chain_info::CanonChainTracker; - mod header_sync_gap; pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider}; diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 32aadc1922d..2b13f6332f8 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -28,5 +28,6 @@ reth-db.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-rpc-types-engine.workspace = true auto_impl.workspace = true diff --git a/crates/storage/provider/src/traits/chain_info.rs b/crates/storage/storage-api/src/chain_info.rs similarity index 100% rename from crates/storage/provider/src/traits/chain_info.rs rename to crates/storage/storage-api/src/chain_info.rs diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 4980335066f..13a44b482a6 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -46,6 +46,9 @@ pub use transactions::*; mod trie; pub use trie::*; +mod chain_info; +pub use chain_info::*; + mod withdrawals; pub use withdrawals::*; From 1ce067b77bd5a1c2ec7a03c0bdde363afc602bcc Mon Sep 17 00:00:00 2001 From: 
Matthias Seitz Date: Wed, 13 Nov 2024 02:42:47 +0100 Subject: [PATCH 446/970] chore: rm direct reth-provider dep (#12492) --- Cargo.lock | 2 +- crates/node/events/Cargo.toml | 2 +- crates/node/events/src/cl.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1525d53b8ec..15c627e8437 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8091,10 +8091,10 @@ dependencies = [ "reth-beacon-consensus", "reth-network-api", "reth-primitives-traits", - "reth-provider", "reth-prune", "reth-stages", "reth-static-file-types", + "reth-storage-api", "tokio", "tracing", ] diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index cc754d58320..7a5b1cf3b02 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] # reth -reth-provider.workspace = true +reth-storage-api.workspace = true reth-beacon-consensus.workspace = true reth-network-api.workspace = true reth-stages.workspace = true diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index 6d29c9bbfa2..bf0d4a59b21 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -1,7 +1,7 @@ //! Events related to Consensus Layer health. 
use futures::Stream; -use reth_provider::CanonChainTracker; +use reth_storage_api::CanonChainTracker; use std::{ fmt, pin::Pin, From 2f794b6b990d58f500d614ddcdbaebb622bcb063 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 03:06:13 +0100 Subject: [PATCH 447/970] chore: rm unhinged attributes ordering (#12498) --- crates/net/p2p/src/error.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/net/p2p/src/error.rs b/crates/net/p2p/src/error.rs index 181a0b96b3c..45d34fc04ec 100644 --- a/crates/net/p2p/src/error.rs +++ b/crates/net/p2p/src/error.rs @@ -80,24 +80,24 @@ impl EthResponseValidator for RequestResult> { #[derive(Clone, Debug, Eq, PartialEq, Display, Error)] pub enum RequestError { /// Closed channel to the peer. - #[display("closed channel to the peer")] /// Indicates the channel to the peer is closed. + #[display("closed channel to the peer")] ChannelClosed, /// Connection to a peer dropped while handling the request. - #[display("connection to a peer dropped while handling the request")] /// Represents a dropped connection while handling the request. + #[display("connection to a peer dropped while handling the request")] ConnectionDropped, /// Capability message is not supported by the remote peer. - #[display("capability message is not supported by remote peer")] /// Indicates an unsupported capability message from the remote peer. + #[display("capability message is not supported by remote peer")] UnsupportedCapability, /// Request timed out while awaiting response. - #[display("request timed out while awaiting response")] /// Represents a timeout while waiting for a response. + #[display("request timed out while awaiting response")] Timeout, /// Received bad response. - #[display("received bad response")] /// Indicates a bad response was received. 
+ #[display("received bad response")] BadResponse, } From b7e8d5aa3e81cfadd92f2c816732d4ad1a7b011c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 09:34:20 +0100 Subject: [PATCH 448/970] chore: add SealedHeader::seal (#12497) --- crates/primitives-traits/src/header/sealed.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index b0fe4434298..145e2722bfa 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -56,6 +56,14 @@ impl SealedHeader { } } +impl SealedHeader { + /// Hashes the header and creates a sealed header. + pub fn seal(header: H) -> Self { + let hash = header.hash_slow(); + Self::new(header, hash) + } +} + impl SealedHeader { /// Return the number hash tuple. pub fn num_hash(&self) -> BlockNumHash { From 5c62d68dd8a5107a9fde7f8c809090d3da8aeb59 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 10:14:55 +0100 Subject: [PATCH 449/970] chore: rm unused error variants (#12499) --- crates/payload/primitives/src/error.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index 16446255c35..ab222f5f6ef 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -21,9 +21,6 @@ pub enum PayloadBuilderError { /// If there's no payload to resolve. #[error("missing payload")] MissingPayload, - /// Build cancelled - #[error("build outcome cancelled")] - BuildOutcomeCancelled, /// Error occurring in the blob store. #[error(transparent)] BlobStore(#[from] BlobStoreError), @@ -33,9 +30,6 @@ pub enum PayloadBuilderError { /// Unrecoverable error during evm execution. #[error("evm execution error: {0}")] EvmExecutionError(EVMError), - /// Thrown if the payload requests withdrawals before Shanghai activation. 
- #[error("withdrawals set before Shanghai activation")] - WithdrawalsBeforeShanghai, /// Any other payload building errors. #[error(transparent)] Other(Box), From 39392e95f1675f09adce68b21af499b33f539fd3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 10:15:14 +0100 Subject: [PATCH 450/970] chore: only issue single header request (#12496) --- crates/node/core/src/utils.rs | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index 7aeb14c4c0e..3ea8fba2668 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -8,9 +8,7 @@ use alloy_rpc_types_engine::{JwtError, JwtSecret}; use eyre::Result; use reth_consensus::Consensus; use reth_network_p2p::{ - bodies::client::BodiesClient, - headers::client::{HeadersClient, HeadersDirection, HeadersRequest}, - priority::Priority, + bodies::client::BodiesClient, headers::client::HeadersClient, priority::Priority, }; use reth_primitives::{SealedBlock, SealedHeader}; use std::{ @@ -44,17 +42,13 @@ pub async fn get_single_header( where Client: HeadersClient, { - let request = HeadersRequest { direction: HeadersDirection::Rising, limit: 1, start: id }; + let (peer_id, response) = client.get_header_with_priority(id, Priority::High).await?.split(); - let (peer_id, response) = - client.get_headers_with_priority(request, Priority::High).await?.split(); - - if response.len() != 1 { + let Some(sealed_header) = response.map(|block| block.seal_slow()) else { client.report_bad_message(peer_id); - eyre::bail!("Invalid number of headers received. Expected: 1. Received: {}", response.len()) - } + eyre::bail!("Invalid number of headers received. Expected: 1. 
Received: 0") + }; - let sealed_header = response.into_iter().next().unwrap().seal_slow(); let (header, seal) = sealed_header.into_parts(); let header = SealedHeader::new(header, seal); From 03f3646355529554e2a38f2a821881425bc04531 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 10:15:34 +0100 Subject: [PATCH 451/970] chore: use let some notation (#12494) --- crates/node/core/src/utils.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index 3ea8fba2668..45281dff0bd 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -80,14 +80,12 @@ where { let (peer_id, response) = client.get_block_body(header.hash()).await?.split(); - if response.is_none() { + let Some(body) = response else { client.report_bad_message(peer_id); eyre::bail!("Invalid number of bodies received. Expected: 1. Received: 0") - } + }; - let body = response.unwrap(); let block = SealedBlock { header, body }; - consensus.validate_block_pre_execution(&block)?; Ok(block) From bf44c9724f68d4aabc9ff1e27d278f36328b8d8f Mon Sep 17 00:00:00 2001 From: Ashutosh Varma Date: Wed, 13 Nov 2024 16:45:32 +0700 Subject: [PATCH 452/970] feat: add support for `eth_signTransaction` (#12500) --- crates/rpc/rpc-builder/tests/it/http.rs | 14 ++++----- crates/rpc/rpc-eth-api/src/core.rs | 5 ++-- .../rpc-eth-api/src/helpers/transaction.rs | 30 ++++++++++++------- 3 files changed, 30 insertions(+), 19 deletions(-) diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index b5faa71cc5e..8393d9427a6 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -278,7 +278,13 @@ where .await .unwrap(); EthApiClient::::syncing(client).await.unwrap(); - EthApiClient::::send_transaction(client, transaction_request) + EthApiClient::::send_transaction( + client, + transaction_request.clone(), + ) + .await + .unwrap_err(); 
+ EthApiClient::::sign_transaction(client, transaction_request) .await .unwrap_err(); EthApiClient::::hashrate(client).await.unwrap(); @@ -318,12 +324,6 @@ where .err() .unwrap() )); - assert!(is_unimplemented( - EthApiClient::::sign_transaction(client, call_request.clone()) - .await - .err() - .unwrap() - )); } async fn test_basic_debug_calls(client: &C) diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 8072021d990..9cd9ba2921a 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -780,8 +780,9 @@ where } /// Handler for: `eth_signTransaction` - async fn sign_transaction(&self, _transaction: TransactionRequest) -> RpcResult { - Err(internal_rpc_err("unimplemented")) + async fn sign_transaction(&self, request: TransactionRequest) -> RpcResult { + trace!(target: "rpc::eth", ?request, "Serving eth_signTransaction"); + Ok(EthTransactions::sign_transaction(self, request).await?) } /// Handler for: `eth_signTypedData` diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index ca4b0322e72..e041b8c4605 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -400,16 +400,10 @@ pub trait EthTransactions: LoadTransaction { txn: TransactionRequest, ) -> impl Future> + Send { async move { - let signers: Vec<_> = self.signers().read().iter().cloned().collect(); - for signer in signers { - if signer.is_signer_for(from) { - return match signer.sign_transaction(txn, from).await { - Ok(tx) => Ok(tx), - Err(e) => Err(e.into_eth_err()), - } - } - } - Err(EthApiError::InvalidTransactionSignature.into()) + self.find_signer(from)? 
+ .sign_transaction(txn, from) + .await + .map_err(Self::Error::from_eth_err) } } @@ -430,6 +424,22 @@ pub trait EthTransactions: LoadTransaction { } } + /// Signs a transaction request using the given account in request + /// Returns the EIP-2718 encoded signed transaction. + fn sign_transaction( + &self, + request: TransactionRequest, + ) -> impl Future> + Send { + async move { + let from = match request.from { + Some(from) => from, + None => return Err(SignError::NoAccount.into_eth_err()), + }; + + Ok(self.sign_request(&from, request).await?.encoded_2718().into()) + } + } + /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. fn sign_typed_data(&self, data: &TypedData, account: Address) -> Result { Ok(self From 281c415cb02ea661b2d20ac64f2d4cb1a325347d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 11:28:28 +0100 Subject: [PATCH 453/970] chore: reorder validation items (#12503) --- crates/rpc/rpc/src/validation.rs | 109 ++++++++++++++++--------------- 1 file changed, 55 insertions(+), 54 deletions(-) diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index c3f2aab70bb..b997dec1e01 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -30,60 +30,6 @@ use serde::{Deserialize, Serialize}; use std::{collections::HashSet, sync::Arc}; use tokio::sync::RwLock; -/// Configuration for validation API. -#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] -pub struct ValidationApiConfig { - /// Disallowed addresses. - pub disallow: HashSet
, -} - -#[derive(Debug, thiserror::Error)] -pub enum ValidationApiError { - #[error("block gas limit mismatch: {_0}")] - GasLimitMismatch(GotExpected), - #[error("block gas used mismatch: {_0}")] - GasUsedMismatch(GotExpected), - #[error("block parent hash mismatch: {_0}")] - ParentHashMismatch(GotExpected), - #[error("block hash mismatch: {_0}")] - BlockHashMismatch(GotExpected), - #[error("missing latest block in database")] - MissingLatestBlock, - #[error("could not verify proposer payment")] - ProposerPayment, - #[error("invalid blobs bundle")] - InvalidBlobsBundle, - #[error("block accesses blacklisted address: {_0}")] - Blacklist(Address), - #[error(transparent)] - Blob(#[from] BlobTransactionValidationError), - #[error(transparent)] - Consensus(#[from] ConsensusError), - #[error(transparent)] - Provider(#[from] ProviderError), - #[error(transparent)] - Execution(#[from] BlockExecutionError), -} - -#[derive(Debug)] -pub struct ValidationApiInner { - /// The provider that can interact with the chain. - provider: Provider, - /// Consensus implementation. - consensus: Arc, - /// Execution payload validator. - payload_validator: ExecutionPayloadValidator, - /// Block executor factory. - executor_provider: E, - /// Set of disallowed addresses - disallow: HashSet
, - /// Cached state reads to avoid redundant disk I/O across multiple validation attempts - /// targeting the same state. Stores a tuple of (`block_hash`, `cached_reads`) for the - /// latest head block state. Uses async `RwLock` to safely handle concurrent validation - /// requests. - cached_state: RwLock<(B256, CachedReads)>, -} - /// The type that implements the `validation` rpc namespace trait #[derive(Debug, derive_more::Deref)] pub struct ValidationApi { @@ -486,3 +432,58 @@ where .to_rpc_result() } } + +#[derive(Debug)] +pub struct ValidationApiInner { + /// The provider that can interact with the chain. + provider: Provider, + /// Consensus implementation. + consensus: Arc, + /// Execution payload validator. + payload_validator: ExecutionPayloadValidator, + /// Block executor factory. + executor_provider: E, + /// Set of disallowed addresses + disallow: HashSet
, + /// Cached state reads to avoid redundant disk I/O across multiple validation attempts + /// targeting the same state. Stores a tuple of (`block_hash`, `cached_reads`) for the + /// latest head block state. Uses async `RwLock` to safely handle concurrent validation + /// requests. + cached_state: RwLock<(B256, CachedReads)>, +} + +/// Configuration for validation API. +#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] +pub struct ValidationApiConfig { + /// Disallowed addresses. + pub disallow: HashSet
, +} + +/// Errors thrown by the validation API. +#[derive(Debug, thiserror::Error)] +pub enum ValidationApiError { + #[error("block gas limit mismatch: {_0}")] + GasLimitMismatch(GotExpected), + #[error("block gas used mismatch: {_0}")] + GasUsedMismatch(GotExpected), + #[error("block parent hash mismatch: {_0}")] + ParentHashMismatch(GotExpected), + #[error("block hash mismatch: {_0}")] + BlockHashMismatch(GotExpected), + #[error("missing latest block in database")] + MissingLatestBlock, + #[error("could not verify proposer payment")] + ProposerPayment, + #[error("invalid blobs bundle")] + InvalidBlobsBundle, + #[error("block accesses blacklisted address: {_0}")] + Blacklist(Address), + #[error(transparent)] + Blob(#[from] BlobTransactionValidationError), + #[error(transparent)] + Consensus(#[from] ConsensusError), + #[error(transparent)] + Provider(#[from] ProviderError), + #[error(transparent)] + Execution(#[from] BlockExecutionError), +} From 68e7ad6fe595a98564d2b2d51ec836d4939ec869 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 13 Nov 2024 12:21:58 +0100 Subject: [PATCH 454/970] chore(dep): bump alloy-trie (#12511) --- Cargo.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 15c627e8437..9e1d2330091 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -776,9 +776,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40d8e28db02c006f7abb20f345ffb3cc99c465e36f676ba262534e654ae76042" +checksum = "b6b2e366c0debf0af77766c23694a3f863b02633050e71e096e257ffbd395e50" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -4601,7 +4601,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -11086,7 
+11086,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3637e734239e12ab152cd269302500bd063f37624ee210cd04b4936ed671f3b1" dependencies = [ "cc", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -11561,7 +11561,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] From 527767cc34f4ebac19281fe6ed7b73c360168c12 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 13:15:42 +0100 Subject: [PATCH 455/970] chore: remove unused trait bound for evmenv (#12505) --- crates/rpc/rpc/src/eth/filter.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 3782780f5a6..132d99a5c1a 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -18,7 +18,7 @@ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSignedEcRecovered}; -use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider, ProviderError}; +use reth_provider::{BlockIdReader, BlockReader, ProviderError}; use reth_rpc_eth_api::{ EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat, }; @@ -144,7 +144,7 @@ where impl EthFilter where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, + Provider: BlockReader + BlockIdReader + 'static, Pool: TransactionPool + 'static, Eth: FullEthApiTypes, { @@ -244,7 +244,7 @@ where impl EthFilterApiServer> for EthFilter where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, + Provider: BlockReader + BlockIdReader + 'static, Pool: TransactionPool + 'static, Eth: FullEthApiTypes + 'static, { @@ -367,7 +367,7 @@ struct 
EthFilterInner { impl EthFilterInner where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, + Provider: BlockReader + BlockIdReader + 'static, Pool: TransactionPool + 'static, { /// Returns logs matching given filter object. From 9313737dbbf0f46fa497e63d452b427c7dafe498 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 13 Nov 2024 13:41:56 +0100 Subject: [PATCH 456/970] primitives: use `SealedHeader::seal` (#12507) --- crates/blockchain-tree/src/blockchain_tree.rs | 10 +++---- crates/chain-state/src/test_utils.rs | 7 ++--- .../beacon/src/engine/invalid_headers.rs | 6 ++-- crates/consensus/beacon/src/engine/sync.rs | 13 +++----- .../consensus/beacon/src/engine/test_utils.rs | 7 ++--- crates/consensus/common/src/validation.rs | 23 +++++--------- crates/engine/tree/src/backfill.rs | 10 +++---- crates/engine/tree/src/download.rs | 9 ++---- crates/engine/tree/src/test_utils.rs | 6 ++-- crates/engine/tree/src/tree/mod.rs | 7 ++--- crates/ethereum/consensus/src/lib.rs | 10 +++---- crates/net/downloaders/src/file_client.rs | 8 ++--- .../src/headers/reverse_headers.rs | 17 +++-------- .../net/downloaders/src/headers/test_utils.rs | 5 +--- crates/net/p2p/src/full_block.rs | 28 ++++------------- crates/net/p2p/src/test_utils/headers.rs | 13 ++------ crates/node/core/src/utils.rs | 6 ++-- crates/primitives-traits/src/header/sealed.rs | 4 +-- crates/primitives/src/block.rs | 6 ++-- crates/rpc/rpc-engine-api/tests/it/payload.rs | 6 ++-- crates/stages/stages/benches/setup/mod.rs | 10 ++----- crates/stages/stages/src/stages/execution.rs | 7 ++--- crates/stages/stages/src/stages/headers.rs | 6 ++-- crates/stages/stages/src/stages/merkle.rs | 16 +++------- .../provider/src/providers/consistent.rs | 30 +++++-------------- crates/storage/provider/src/providers/mod.rs | 30 +++++-------------- .../storage/provider/src/test_utils/blocks.rs | 23 ++++---------- .../storage/provider/src/test_utils/mock.rs | 24 
+++------------ crates/storage/storage-api/src/block.rs | 11 ++----- crates/transaction-pool/src/maintain.rs | 6 ++-- examples/db-access/src/main.rs | 6 ++-- testing/testing-utils/src/generators.rs | 15 ++++------ 32 files changed, 108 insertions(+), 277 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 20d1cfe9f1d..0a7dd2a1178 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1377,7 +1377,7 @@ mod tests { use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip4895::Withdrawals}; use alloy_genesis::{Genesis, GenesisAccount}; - use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, Sealable, B256}; + use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, B256}; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; @@ -1598,7 +1598,7 @@ mod tests { // receipts root computation is different for OP let receipts_root = calculate_receipt_root(&receipts); - let sealed = Header { + let header = Header { number, parent_hash: parent.unwrap_or_default(), gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, @@ -1620,13 +1620,11 @@ mod tests { ), )])), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; SealedBlockWithSenders::new( SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions: body.clone().into_iter().map(|tx| tx.into_signed()).collect(), ommers: Vec::new(), diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 564df9fe341..650fcc3bbce 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -4,7 +4,7 @@ use crate::{ }; use alloy_consensus::{Transaction as _, TxEip1559, 
EMPTY_ROOT_HASH}; use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7685::Requests}; -use alloy_primitives::{Address, BlockNumber, Sealable, B256, U256}; +use alloy_primitives::{Address, BlockNumber, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use rand::{thread_rng, Rng}; @@ -160,11 +160,8 @@ impl TestBlockBuilder { ..Default::default() }; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let block = SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions: transactions.into_iter().map(|tx| tx.into_signed()).collect(), ommers: Vec::new(), diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index 8a1c95d73ce..5bcf0cae7e9 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -106,14 +106,12 @@ struct InvalidHeaderCacheMetrics { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::Sealable; #[test] fn test_hit_eviction() { let mut cache = InvalidHeaderCache::new(10); - let sealed = Header::default().seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + let header = Header::default(); + let header = SealedHeader::seal(header); cache.insert(header.clone()); assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, 0); diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 17d5d2281a3..d91280eac88 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -410,7 +410,6 @@ impl PipelineState { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::Sealable; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; @@ -599,9 +598,7 @@ mod tests { 
header.parent_hash = hash; header.number += 1; header.timestamp += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } } @@ -617,14 +614,12 @@ mod tests { ); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..10); // set up a pipeline diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 3c69e7f55c3..0ad4c595f1b 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -4,7 +4,7 @@ use crate::{ BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, }; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; @@ -402,9 +402,8 @@ where BlockchainTree::new(externals, BlockchainTreeConfig::new(1, 2, 3, 2)) .expect("failed to create tree"), )); - let sealed = self.base_config.chain_spec.genesis_header().clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - let genesis_block = SealedHeader::new(header, seal); + let header = self.base_config.chain_spec.genesis_header().clone(); + let genesis_block = SealedHeader::seal(header); let blockchain_provider = BlockchainProvider::with_blocks( provider_factory.clone(), diff --git 
a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index c10116f2276..af1bbfdbdd3 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -326,7 +326,7 @@ mod tests { }; use alloy_primitives::{ hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, PrimitiveSignature as Signature, - Sealable, U256, + U256, }; use mockall::mock; use rand::Rng; @@ -495,12 +495,9 @@ mod tests { let ommers = Vec::new(); let transactions = Vec::new(); - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - ( SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions, ommers, withdrawals: None }, }, parent, @@ -519,15 +516,13 @@ mod tests { .collect(), ); - let sealed = Header { + let header = Header { withdrawals_root: Some(proofs::calculate_withdrawals_root(&withdrawals)), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { withdrawals: Some(withdrawals), ..Default::default() }, } }; @@ -558,16 +553,14 @@ mod tests { // create a tx with 10 blobs let transaction = mock_blob_tx(1, 10); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), blob_gas_used: Some(1), transactions_root: proofs::calculate_transaction_root(&[transaction.clone()]), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); let body = BlockBody { transactions: vec![transaction], diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index 78e21a7b5ef..c267203d851 100644 --- a/crates/engine/tree/src/backfill.rs +++ b/crates/engine/tree/src/backfill.rs 
@@ -230,7 +230,7 @@ impl PipelineState { mod tests { use super::*; use crate::test_utils::{insert_headers_into_client, TestPipelineBuilder}; - use alloy_primitives::{BlockNumber, Sealable, B256}; + use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpecBuilder, MAINNET}; @@ -267,14 +267,12 @@ mod tests { let pipeline_sync = PipelineSync::new(pipeline, Box::::default()); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..total_blocks); let tip = client.highest_block().expect("there should be blocks here").hash(); diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 667808a4d62..cb43be3c4de 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -309,7 +309,6 @@ impl BlockDownloader for NoopBlockDownloader { mod tests { use super::*; use crate::test_utils::insert_headers_into_client; - use alloy_primitives::Sealable; use assert_matches::assert_matches; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; @@ -333,14 +332,12 @@ mod tests { ); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..total_blocks); let consensus = Arc::new(EthBeaconConsensus::new(chain_spec)); diff --git a/crates/engine/tree/src/test_utils.rs 
b/crates/engine/tree/src/test_utils.rs index f17766a43ed..c1b534ebf5e 100644 --- a/crates/engine/tree/src/test_utils.rs +++ b/crates/engine/tree/src/test_utils.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Sealable, B256}; +use alloy_primitives::B256; use reth_chainspec::ChainSpec; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::{BlockBody, SealedHeader}; @@ -76,9 +76,7 @@ pub fn insert_headers_into_client( header.parent_hash = hash; header.number += 1; header.timestamp += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index e89960d9870..755ee1106a6 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2597,7 +2597,7 @@ pub enum AdvancePersistenceError { mod tests { use super::*; use crate::persistence::PersistenceAction; - use alloy_primitives::{Bytes, Sealable}; + use alloy_primitives::Bytes; use alloy_rlp::Decodable; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar}; use assert_matches::assert_matches; @@ -2709,9 +2709,8 @@ mod tests { let (from_tree_tx, from_tree_rx) = unbounded_channel(); - let sealed = chain_spec.genesis_header().clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + let header = chain_spec.genesis_header().clone(); + let header = SealedHeader::seal(header); let engine_api_tree_state = EngineApiTreeState::new(10, 10, header.num_hash()); let canonical_in_memory_state = CanonicalInMemoryState::with_head(header, None, None); diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index d5cf692928f..3dab9849f6c 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ 
b/crates/ethereum/consensus/src/lib.rs @@ -236,7 +236,7 @@ impl Consensu #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{Sealable, B256}; + use alloy_primitives::B256; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_primitives::proofs; @@ -321,16 +321,14 @@ mod tests { // that the header is valid let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; assert_eq!( - EthBeaconConsensus::new(chain_spec).validate_header(&SealedHeader::new(header, seal)), + EthBeaconConsensus::new(chain_spec).validate_header(&SealedHeader::seal(header,)), Ok(()) ); } diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 9f539a5774d..f0104032aa0 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -1,7 +1,7 @@ use std::{collections::HashMap, io, path::Path}; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockHash, BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockHash, BlockNumber, B256}; use futures::Future; use itertools::Either; use reth_network_p2p::{ @@ -114,11 +114,7 @@ impl FileClient { /// Clones and returns the highest header of this client has or `None` if empty. Seals header /// before returning. 
pub fn tip_header(&self) -> Option { - self.headers.get(&self.max_block()?).map(|h| { - let sealed = h.clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) + self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal(h.clone())) } /// Returns true if all blocks are canonical (no gaps) diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 9532b4b3a35..125eef6d3eb 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -4,7 +4,7 @@ use super::task::TaskDownloader; use crate::metrics::HeaderDownloaderMetrics; use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use futures::{stream::Stream, FutureExt}; use futures_util::{stream::FuturesUnordered, StreamExt}; use rayon::prelude::*; @@ -250,15 +250,7 @@ where ) -> Result<(), ReverseHeadersDownloaderError> { let mut validated = Vec::with_capacity(headers.len()); - let sealed_headers = headers - .into_par_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - - SealedHeader::new(header, seal) - }) - .collect::>(); + let sealed_headers = headers.into_par_iter().map(SealedHeader::seal).collect::>(); for parent in sealed_headers { // Validate that the header is the parent header of the last validated header. if let Some(validated_header) = @@ -384,9 +376,8 @@ where .into()) } - let sealed_target = headers.swap_remove(0).seal_slow(); - let (header, seal) = sealed_target.into_parts(); - let target = SealedHeader::new(header, seal); + let header = headers.swap_remove(0); + let target = SealedHeader::seal(header); match sync_target { SyncTargetBlock::Hash(hash) | SyncTargetBlock::HashAndNumber { hash, .. 
} => { diff --git a/crates/net/downloaders/src/headers/test_utils.rs b/crates/net/downloaders/src/headers/test_utils.rs index 923ad996937..baea409f20e 100644 --- a/crates/net/downloaders/src/headers/test_utils.rs +++ b/crates/net/downloaders/src/headers/test_utils.rs @@ -2,7 +2,6 @@ #![allow(dead_code)] -use alloy_primitives::Sealable; use reth_primitives::SealedHeader; /// Returns a new [`SealedHeader`] that's the child header of the given `parent`. @@ -10,7 +9,5 @@ pub(crate) fn child_header(parent: &SealedHeader) -> SealedHeader { let mut child = parent.as_ref().clone(); child.number += 1; child.parent_hash = parent.hash_slow(); - let sealed = child.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) + SealedHeader::seal(child) } diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 8f176f8da8a..8fcacd140b0 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -199,15 +199,8 @@ where ResponseResult::Header(res) => { match res { Ok(maybe_header) => { - let (peer, maybe_header) = maybe_header - .map(|h| { - h.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - }) - .split(); + let (peer, maybe_header) = + maybe_header.map(|h| h.map(SealedHeader::seal)).split(); if let Some(header) = maybe_header { if header.hash() == this.hash { this.header = Some(header); @@ -457,17 +450,8 @@ where } fn on_headers_response(&mut self, headers: WithPeerId>) { - let (peer, mut headers_falling) = headers - .map(|h| { - h.into_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - .collect::>() - }) - .split(); + let (peer, mut headers_falling) = + headers.map(|h| h.into_iter().map(SealedHeader::seal).collect::>()).split(); // fill in the response if it's the correct length if headers_falling.len() == self.count as usize { @@ 
-707,9 +691,7 @@ mod tests { header.parent_hash = hash; header.number += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index d8d4bbc6b7a..8892a010b43 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -10,7 +10,6 @@ use crate::{ }, priority::Priority, }; -use alloy_primitives::Sealable; use futures::{Future, FutureExt, Stream, StreamExt}; use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_eth_wire_types::HeadersDirection; @@ -160,16 +159,8 @@ impl Stream for TestDownload { match ready!(this.get_or_init_fut().poll_unpin(cx)) { Ok(resp) => { // Skip head and seal headers - let mut headers = resp - .1 - .into_iter() - .skip(1) - .map(|header| { - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - .collect::>(); + let mut headers = + resp.1.into_iter().skip(1).map(SealedHeader::seal).collect::>(); headers.sort_unstable_by_key(|h| h.number); headers.into_iter().for_each(|h| this.buffer.push(h)); this.done = true; diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index 45281dff0bd..e52af4b46fe 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -3,7 +3,6 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::Sealable; use alloy_rpc_types_engine::{JwtError, JwtSecret}; use eyre::Result; use reth_consensus::Consensus; @@ -44,13 +43,12 @@ where { let (peer_id, response) = client.get_header_with_priority(id, Priority::High).await?.split(); - let Some(sealed_header) = response.map(|block| block.seal_slow()) else { + let Some(header) = response else { 
client.report_bad_message(peer_id); eyre::bail!("Invalid number of headers received. Expected: 1. Received: 0") }; - let (header, seal) = sealed_header.into_parts(); - let header = SealedHeader::new(header, seal); + let header = SealedHeader::seal(header); let valid = match id { BlockHashOrNumber::Hash(hash) => header.hash() == hash, diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 145e2722bfa..8364b85b3aa 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -156,9 +156,7 @@ impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { let header = Header::arbitrary(u)?; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Self::new(header, seal)) + Ok(Self::seal(header)) } } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 6743cab3dc3..3ce4947ccc2 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,7 +1,7 @@ use crate::{GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered}; use alloc::vec::Vec; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; -use alloy_primitives::{Address, Bytes, Sealable, B256}; +use alloy_primitives::{Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] @@ -25,9 +25,7 @@ pub struct Block { impl Block { /// Calculate the header hash and seal the block so that it can't be changed. pub fn seal_slow(self) -> SealedBlock { - let sealed = self.header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedBlock { header: SealedHeader::new(header, seal), body: self.body } + SealedBlock { header: SealedHeader::seal(self.header), body: self.body } } /// Seal the block with a known hash. 
diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index f341fd0474c..78b0351d4a5 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -1,7 +1,7 @@ //! Some payload tests use alloy_eips::eip4895::Withdrawals; -use alloy_primitives::{Bytes, Sealable, U256}; +use alloy_primitives::{Bytes, U256}; use alloy_rlp::{Decodable, Error as RlpError}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadSidecar, ExecutionPayloadV1, @@ -24,10 +24,8 @@ fn transform_block Block>(src: SealedBlock, f: F) -> Executi transformed.header.transactions_root = proofs::calculate_transaction_root(&transformed.body.transactions); transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.body.ommers); - let sealed = transformed.header.seal_slow(); - let (header, seal) = sealed.into_parts(); block_to_payload(SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(transformed.header), body: transformed.body, }) } diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index e6ae33f9c29..c1c3ff89d72 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -1,5 +1,5 @@ #![allow(unreachable_pub)] -use alloy_primitives::{Address, Sealable, B256, U256}; +use alloy_primitives::{Address, B256, U256}; use itertools::concat; use reth_db::{tables, test_utils::TempDatabase, Database, DatabaseEnv}; use reth_db_api::{ @@ -147,9 +147,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let cloned_second = second_block.clone(); let mut updated_header = cloned_second.header.unseal(); updated_header.state_root = root; - let sealed = updated_header.seal_slow(); - let (header, seal) = sealed.into_parts(); - *second_block = SealedBlock { header: SealedHeader::new(header, seal), ..cloned_second }; + *second_block = 
SealedBlock { header: SealedHeader::seal(updated_header), ..cloned_second }; let offset = transitions.len() as u64; @@ -182,9 +180,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let cloned_last = last_block.clone(); let mut updated_header = cloned_last.header.unseal(); updated_header.state_root = root; - let sealed = updated_header.seal_slow(); - let (header, seal) = sealed.into_parts(); - *last_block = SealedBlock { header: SealedHeader::new(header, seal), ..cloned_last }; + *last_block = SealedBlock { header: SealedHeader::seal(updated_header), ..cloned_last }; db.insert_blocks(blocks.iter(), StorageKind::Static).unwrap(); diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 88d5f830378..d49a2975ad0 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,5 +1,5 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; -use alloy_primitives::{BlockNumber, Sealable}; +use alloy_primitives::BlockNumber; use num_traits::Zero; use reth_config::config::ExecutionConfig; use reth_db::{static_file::HeaderMask, tables}; @@ -276,11 +276,8 @@ where let execute_start = Instant::now(); self.metrics.metered_one((&block, td).into(), |input| { - let sealed = block.header.clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - executor.execute_and_verify_one(input).map_err(|error| StageError::Block { - block: Box::new(SealedHeader::new(header, seal)), + block: Box::new(SealedHeader::seal(block.header.clone())), error: BlockErrorKind::Execution(error), }) })?; diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 2be78b88169..613e73194f4 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -392,7 +392,7 @@ mod tests { use crate::test_utils::{ stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; - 
use alloy_primitives::{Sealable, B256}; + use alloy_primitives::B256; use assert_matches::assert_matches; use reth_execution_types::ExecutionOutcome; use reth_primitives::{BlockBody, SealedBlock, SealedBlockWithSenders}; @@ -509,9 +509,7 @@ mod tests { // validate the header let header = provider.header_by_number(block_num)?; assert!(header.is_some()); - let sealed = header.unwrap().seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + let header = SealedHeader::seal(header.unwrap()); assert_eq!(header.hash(), hash); // validate the header total difficulty diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index d1d3496d917..2d2503b5391 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use reth_codecs::Compact; use reth_consensus::ConsensusError; use reth_db::tables; @@ -276,10 +276,7 @@ where // Reset the checkpoint self.save_execution_checkpoint(provider, None)?; - let sealed = target_block.seal_slow(); - let (header, seal) = sealed.into_parts(); - - validate_state_root(trie_root, SealedHeader::new(header, seal), to_block)?; + validate_state_root(trie_root, SealedHeader::seal(target_block), to_block)?; Ok(ExecOutput { checkpoint: StageCheckpoint::new(to_block) @@ -332,10 +329,7 @@ where .header_by_number(input.unwind_to)? .ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?; - let sealed = target.seal_slow(); - let (header, seal) = sealed.into_parts(); - - validate_state_root(block_root, SealedHeader::new(header, seal), input.unwind_to)?; + validate_state_root(block_root, SealedHeader::seal(target), input.unwind_to)?; // Validation passed, apply unwind changes to the database. 
provider.write_trie_updates(&updates)?; @@ -538,9 +532,7 @@ mod tests { .into_iter() .map(|(address, account)| (address, (account, std::iter::empty()))), ); - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let sealed_head = SealedBlock { header: SealedHeader::new(header, seal), body }; + let sealed_head = SealedBlock { header: SealedHeader::seal(header), body }; let head_hash = sealed_head.hash(); let mut blocks = vec![sealed_head]; diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 98f7820e34a..0eb88c1f9ef 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -10,7 +10,7 @@ use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, }; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_db::models::BlockNumberAddress; @@ -1324,34 +1324,20 @@ impl BlockReaderIdExt for ConsistentProvider { Ok(self.canonical_in_memory_state.get_finalized_header()) } BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Earliest => self + .header_by_number(0)? 
+ .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Number(num) => self + .header_by_number(num)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), } } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index c859ddba8a5..d3dde5b0d3b 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -11,7 +11,7 @@ use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, }; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, @@ -847,34 +847,20 @@ where BlockNumberOrTag::Latest => Ok(Some(self.chain_info.get_canonical_head())), BlockNumberOrTag::Finalized => Ok(self.chain_info.get_finalized_header()), BlockNumberOrTag::Safe => Ok(self.chain_info.get_safe_header()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let 
sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Earliest => self + .header_by_number(0)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), BlockNumberOrTag::Pending => Ok(self.tree.pending_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Number(num) => self + .header_by_number(num)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), } } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 19a6cbf6a5c..9afc77ef701 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -2,8 +2,7 @@ use crate::{DBProvider, DatabaseProviderRW, ExecutionOutcome}; use alloy_consensus::{TxLegacy, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{ - b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, Sealable, TxKind, B256, - U256, + b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, TxKind, B256, U256, }; use alloy_eips::eip4895::{Withdrawal, Withdrawals}; @@ -233,9 +232,7 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { header.number = number; header.state_root = state_root; header.parent_hash = B256::ZERO; - let sealed = header.seal_slow(); - let (header, 
seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x30; 20])] }, execution_outcome) } @@ -299,9 +296,7 @@ fn block2( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -365,9 +360,7 @@ fn block3( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -456,9 +449,7 @@ fn block4( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -544,9 +535,7 @@ fn block5( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 6e4331566db..9bc75f53d18 100644 --- 
a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -13,8 +13,7 @@ use alloy_eips::{ use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, - Address, BlockHash, BlockNumber, Bytes, Sealable, StorageKey, StorageValue, TxHash, TxNumber, - B256, U256, + Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; use parking_lot::Mutex; use reth_chainspec::{ChainInfo, ChainSpec}; @@ -218,11 +217,7 @@ impl HeaderProvider for MockEthProvider { } fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - Ok(self.header_by_number(number)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - })) + Ok(self.header_by_number(number)?.map(SealedHeader::seal)) } fn sealed_headers_while( @@ -233,11 +228,7 @@ impl HeaderProvider for MockEthProvider { Ok(self .headers_range(range)? .into_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) + .map(SealedHeader::seal) .take_while(|h| predicate(h)) .collect()) } @@ -566,14 +557,7 @@ impl BlockReaderIdExt for MockEthProvider { } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { - self.header_by_id(id)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ) + self.header_by_id(id)?.map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) } fn header_by_id(&self, id: BlockId) -> ProviderResult> { diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 01238be745e..c78ec5f8b80 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -3,7 +3,7 @@ use crate::{ TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; -use 
alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; use reth_primitives::{ Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, @@ -243,14 +243,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { ) -> ProviderResult> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))? - .map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ) + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) } /// Returns the sealed header with the matching `BlockId` from the database. diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 608f8d5745a..91b91fe8157 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -8,7 +8,7 @@ use crate::{ BlockInfo, PoolTransaction, }; use alloy_eips::BlockNumberOrTag; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable}; +use alloy_primitives::{Address, BlockHash, BlockNumber}; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -106,9 +106,7 @@ pub async fn maintain_transaction_pool( let MaintainPoolConfig { max_update_depth, max_reload_accounts, .. 
} = config; // ensure the pool points to latest state if let Ok(Some(latest)) = client.header_by_number_or_tag(BlockNumberOrTag::Latest) { - let sealed = latest.seal_slow(); - let (header, seal) = sealed.into_parts(); - let latest = SealedHeader::new(header, seal); + let latest = SealedHeader::seal(latest); let chain_spec = client.chain_spec(); let info = BlockInfo { block_gas_limit: latest.gas_limit, diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index c3e30fa1cee..0f7d1a269f3 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Address, Sealable, B256}; +use alloy_primitives::{Address, B256}; use alloy_rpc_types_eth::{Filter, FilteredParams}; use reth_chainspec::ChainSpecBuilder; use reth_db::{open_db_read_only, DatabaseEnv}; @@ -63,9 +63,7 @@ fn header_provider_example(provider: T, number: u64) -> eyre: // We can convert a header to a sealed header which contains the hash w/o needing to re-compute // it every time. - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let sealed_header = SealedHeader::new(header, seal); + let sealed_header = SealedHeader::seal(header); // Can also query the header by hash! 
let header_by_hash = diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index c24840a2633..3457eb5f203 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -2,7 +2,7 @@ use alloy_consensus::{Transaction as _, TxLegacy}; use alloy_eips::eip4895::{Withdrawal, Withdrawals}; -use alloy_primitives::{Address, BlockNumber, Bytes, Sealable, TxKind, B256, U256}; +use alloy_primitives::{Address, BlockNumber, Bytes, TxKind, B256, U256}; pub use rand::Rng; use rand::{ distributions::uniform::SampleRange, rngs::StdRng, seq::SliceRandom, thread_rng, SeedableRng, @@ -106,9 +106,7 @@ pub fn random_header(rng: &mut R, number: u64, parent: Option) -> parent_hash: parent.unwrap_or_default(), ..Default::default() }; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) + SealedHeader::seal(header) } /// Generates a random legacy [Transaction]. @@ -203,7 +201,7 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) }); let withdrawals_root = withdrawals.as_ref().map(|w| proofs::calculate_withdrawals_root(w)); - let sealed = Header { + let header = Header { parent_hash: block_params.parent.unwrap_or_default(), number, gas_used: total_gas, @@ -215,13 +213,10 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) requests_hash: None, withdrawals_root, ..Default::default() - } - .seal_slow(); - - let (header, seal) = sealed.into_parts(); + }; SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions, ommers, withdrawals: withdrawals.map(Withdrawals::new) }, } } From 9e77d916e1b6758313d33ac0d6874476e1cbf7eb Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Nov 2024 14:35:59 +0100 Subject: [PATCH 457/970] chore(sdk): improve usability tx primitive traits (#12437) --- crates/primitives-traits/src/lib.rs | 14 +++- 
.../primitives-traits/src/transaction/mod.rs | 72 ++++++++++--------- .../src/transaction/signed.rs | 29 +++++--- crates/primitives-traits/src/tx_type.rs | 18 +++-- crates/primitives/Cargo.toml | 1 + crates/primitives/src/transaction/mod.rs | 18 ++++- crates/primitives/src/transaction/tx_type.rs | 21 +++++- 7 files changed, 118 insertions(+), 55 deletions(-) diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 6848da45814..ab3985158d8 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -27,7 +27,7 @@ pub use receipt::{FullReceipt, Receipt}; pub mod transaction; pub use transaction::{ signed::{FullSignedTx, SignedTransaction}, - FullTransaction, Transaction, + FullTransaction, Transaction, TransactionExt, }; mod integer_list; @@ -80,3 +80,15 @@ pub use size::InMemorySize; /// Node traits pub mod node; pub use node::{FullNodePrimitives, NodePrimitives}; + +/// Helper trait that requires arbitrary implementation if the feature is enabled. +#[cfg(any(feature = "test-utils", feature = "arbitrary"))] +pub trait MaybeArbitrary: for<'a> arbitrary::Arbitrary<'a> {} +/// Helper trait that requires arbitrary implementation if the feature is enabled. +#[cfg(not(any(feature = "test-utils", feature = "arbitrary")))] +pub trait MaybeArbitrary {} + +#[cfg(any(feature = "test-utils", feature = "arbitrary"))] +impl MaybeArbitrary for T where T: for<'a> arbitrary::Arbitrary<'a> {} +#[cfg(not(any(feature = "test-utils", feature = "arbitrary")))] +impl MaybeArbitrary for T {} diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index d5061ca3909..bb6f6a711e3 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -1,17 +1,20 @@ //! 
Transaction abstraction -use core::{fmt, hash::Hash}; +pub mod signed; -use alloy_primitives::{TxKind, B256}; +use core::{fmt, hash::Hash}; +use alloy_primitives::B256; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; -use crate::InMemorySize; +use crate::{InMemorySize, MaybeArbitrary, TxType}; -pub mod signed; +/// Helper trait that unifies all behaviour required by transaction to support full node operations. +pub trait FullTransaction: Transaction + Compact {} + +impl FullTransaction for T where T: Transaction + Compact {} -#[allow(dead_code)] /// Abstraction of a transaction. pub trait Transaction: Send @@ -24,41 +27,42 @@ pub trait Transaction: + PartialEq + Hash + Serialize - + alloy_rlp::Encodable - + alloy_rlp::Decodable + for<'de> Deserialize<'de> - + alloy_consensus::Transaction + + TransactionExt + InMemorySize + MaybeArbitrary { - /// Heavy operation that return signature hash over rlp encoded transaction. - /// It is only for signature signing or signer recovery. - fn signature_hash(&self) -> B256; - - /// Gets the transaction's [`TxKind`], which is the address of the recipient or - /// [`TxKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> TxKind; - - /// Returns true if the tx supports dynamic fees - fn is_dynamic_fee(&self) -> bool; - - /// Returns the effective gas price for the given base fee. - fn effective_gas_price(&self, base_fee: Option) -> u128; - - /// This encodes the transaction _without_ the signature, and is only suitable for creating a - /// hash intended for signing. - fn encode_without_signature(&self, out: &mut dyn bytes::BufMut); } -#[cfg(not(feature = "arbitrary"))] -/// Helper trait that requires arbitrary implementation if the feature is enabled. 
-pub trait MaybeArbitrary {} +impl Transaction for T where + T: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + Eq + + PartialEq + + Hash + + Serialize + + for<'de> Deserialize<'de> + + TransactionExt + + InMemorySize + + MaybeArbitrary +{ +} -#[cfg(feature = "arbitrary")] -/// Helper trait that requires arbitrary implementation if the feature is enabled. -pub trait MaybeArbitrary: for<'a> arbitrary::Arbitrary<'a> {} +/// Extension trait of [`alloy_consensus::Transaction`]. +pub trait TransactionExt: alloy_consensus::Transaction { + /// Transaction envelope type ID. + type Type: TxType; -/// Helper trait that unifies all behaviour required by transaction to support full node operations. -pub trait FullTransaction: Transaction + Compact {} + /// Heavy operation that return signature hash over rlp encoded transaction. + /// It is only for signature signing or signer recovery. + fn signature_hash(&self) -> B256; -impl FullTransaction for T where T: Transaction + Compact {} + /// Returns the transaction type. 
+ fn tx_type(&self) -> Self::Type { + Self::Type::try_from(self.ty()).expect("should decode tx type id") + } +} diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 02e908aec6c..455a9886eb8 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -2,17 +2,18 @@ use alloc::fmt; use core::hash::Hash; -use reth_codecs::Compact; -use alloy_consensus::Transaction; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; -use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, TxHash, B256}; +use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; +use reth_codecs::Compact; use revm_primitives::TxEnv; +use crate::{transaction::TransactionExt, FullTransaction, MaybeArbitrary, Transaction}; + /// Helper trait that unifies all behaviour required by block to support full node operations. -pub trait FullSignedTx: SignedTransaction + Compact {} +pub trait FullSignedTx: SignedTransaction + Compact {} -impl FullSignedTx for T where T: SignedTransaction + Compact {} +impl FullSignedTx for T where T: SignedTransaction + Compact {} /// A signed transaction. pub trait SignedTransaction: @@ -31,6 +32,8 @@ pub trait SignedTransaction: + alloy_rlp::Decodable + Encodable2718 + Decodable2718 + + TransactionExt + + MaybeArbitrary { /// Transaction type that is signed. type Transaction: Transaction; @@ -42,7 +45,7 @@ pub trait SignedTransaction: fn transaction(&self) -> &Self::Transaction; /// Returns reference to signature. - fn signature(&self) -> &Signature; + fn signature(&self) -> &PrimitiveSignature; /// Recover signer from signature and hash. /// @@ -65,8 +68,10 @@ pub trait SignedTransaction: /// Create a new signed transaction from a transaction and its signature. /// /// This will also calculate the transaction hash using its encoding. 
- fn from_transaction_and_signature(transaction: Self::Transaction, signature: Signature) - -> Self; + fn from_transaction_and_signature( + transaction: Self::Transaction, + signature: PrimitiveSignature, + ) -> Self; /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with /// tx type. @@ -77,3 +82,11 @@ pub trait SignedTransaction: /// Fills [`TxEnv`] with an [`Address`] and transaction. fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); } + +impl TransactionExt for T { + type Type = ::Type; + + fn signature_hash(&self) -> B256 { + self.transaction().signature_hash() + } +} diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs index e0bf28d2a99..078d8ac947b 100644 --- a/crates/primitives-traits/src/tx_type.rs +++ b/crates/primitives-traits/src/tx_type.rs @@ -1,8 +1,6 @@ use core::fmt; -use alloy_eips::eip2718::Eip2718Error; use alloy_primitives::{U64, U8}; -use alloy_rlp::{Decodable, Encodable}; use reth_codecs::Compact; /// Helper trait that unifies all behaviour required by transaction type ID to support full node @@ -26,11 +24,11 @@ pub trait TxType: + PartialEq + Into + Into - + TryFrom - + TryFrom + + TryFrom + + TryFrom + TryFrom - + Encodable - + Decodable + + alloy_rlp::Encodable + + alloy_rlp::Decodable { } @@ -48,10 +46,10 @@ impl TxType for T where + PartialEq + Into + Into - + TryFrom - + TryFrom + + TryFrom + + TryFrom + TryFrom - + Encodable - + Decodable + + alloy_rlp::Encodable + + alloy_rlp::Decodable { } diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 1a4c33c7180..34d04c94edc 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -150,6 +150,7 @@ test-utils = [ "reth-chainspec/test-utils", "reth-codecs?/test-utils", "reth-trie-common/test-utils", + "arbitrary", ] serde-bincode-compat = [ "alloy-consensus/serde-bincode-compat", diff --git a/crates/primitives/src/transaction/mod.rs 
b/crates/primitives/src/transaction/mod.rs index f0caa2863aa..d1a95b09be2 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -68,7 +68,7 @@ use tx_type::{ }; use alloc::vec::Vec; -use reth_primitives_traits::SignedTransaction; +use reth_primitives_traits::{transaction::TransactionExt, SignedTransaction}; use revm_primitives::{AuthorizationList, TxEnv}; /// Either a transaction hash or number. @@ -846,6 +846,22 @@ impl alloy_consensus::Transaction for Transaction { } } +impl TransactionExt for Transaction { + type Type = TxType; + + fn signature_hash(&self) -> B256 { + match self { + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip4844(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), + #[cfg(feature = "optimism")] + _ => todo!("use op type for op"), + } + } +} + /// Signed transaction without its Hash. Used type for inserting into the DB. /// /// This can by converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`]. diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 0cfb2ff9d67..46e37086113 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -4,6 +4,7 @@ use alloy_consensus::constants::{ }; use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; +use derive_more::Display; use serde::{Deserialize, Serialize}; /// Identifier parameter for legacy transaction @@ -36,24 +37,42 @@ pub const DEPOSIT_TX_TYPE_ID: u8 = 126; /// /// Other required changes when adding a new type can be seen on [PR#3953](https://github.com/paradigmxyz/reth/pull/3953/files). 
#[derive( - Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Serialize, Deserialize, Hash, + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Default, + Serialize, + Deserialize, + Hash, + Display, )] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[display("tx type: {_variant}")] pub enum TxType { /// Legacy transaction pre EIP-2929 #[default] + #[display("legacy (0)")] Legacy = 0_isize, /// AccessList transaction + #[display("eip2930 (1)")] Eip2930 = 1_isize, /// Transaction with Priority fee + #[display("eip1559 (2)")] Eip1559 = 2_isize, /// Shard Blob Transactions - EIP-4844 + #[display("eip4844 (3)")] Eip4844 = 3_isize, /// EOA Contract Code Transactions - EIP-7702 + #[display("eip7702 (4)")] Eip7702 = 4_isize, /// Optimism Deposit transaction. #[cfg(feature = "optimism")] + #[display("deposit (126)")] Deposit = 126_isize, } From e6f3191c62cf92339dd682f1654ba2b450a115be Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 15:16:16 +0100 Subject: [PATCH 458/970] chore: rm cfg imports (#12518) --- crates/primitives-traits/src/header/sealed.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 8364b85b3aa..5dd9fcf0d5f 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,11 +1,8 @@ -use crate::InMemorySize; - use super::Header; +use crate::InMemorySize; use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; use alloy_primitives::{keccak256, BlockHash, Sealable}; -#[cfg(any(test, feature = "test-utils"))] -use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; use core::mem; @@ -130,17 +127,17 @@ impl SealedHeader { } /// Updates the block number. 
- pub fn set_block_number(&mut self, number: BlockNumber) { + pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { self.header.number = number; } /// Updates the block state root. - pub fn set_state_root(&mut self, state_root: B256) { + pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) { self.header.state_root = state_root; } /// Updates the block difficulty. - pub fn set_difficulty(&mut self, difficulty: U256) { + pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { self.header.difficulty = difficulty; } } From 001f3899fdeaa01bb3acf9bb7258dffbbc89f050 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 13 Nov 2024 17:41:25 +0100 Subject: [PATCH 459/970] primitives: rm alloy `Header` reexport (#12515) --- Cargo.lock | 28 +++++-- crates/blockchain-tree/src/block_indices.rs | 3 +- crates/blockchain-tree/src/blockchain_tree.rs | 4 +- crates/chain-state/Cargo.toml | 19 +++-- crates/chain-state/src/in_memory.rs | 3 +- crates/chain-state/src/test_utils.rs | 6 +- crates/chainspec/src/api.rs | 2 +- crates/chainspec/src/spec.rs | 7 +- crates/cli/commands/Cargo.toml | 38 +++++----- crates/cli/commands/src/db/get.rs | 2 +- .../commands/src/init_state/without_evm.rs | 3 +- .../cli/commands/src/test_vectors/tables.rs | 3 +- crates/consensus/beacon/Cargo.toml | 19 ++--- .../beacon/src/engine/invalid_headers.rs | 3 +- crates/consensus/beacon/src/engine/mod.rs | 3 +- crates/consensus/beacon/src/engine/sync.rs | 3 +- crates/consensus/common/src/validation.rs | 6 +- crates/consensus/consensus/Cargo.toml | 12 +-- crates/consensus/consensus/src/lib.rs | 3 +- crates/engine/invalid-block-hooks/Cargo.toml | 1 + .../engine/invalid-block-hooks/src/witness.rs | 3 +- crates/engine/tree/Cargo.toml | 37 +++++----- crates/engine/tree/src/backfill.rs | 3 +- crates/engine/tree/src/download.rs | 3 +- crates/engine/tree/src/tree/mod.rs | 5 +- crates/engine/util/src/reorg.rs | 4 +- 
crates/ethereum/consensus/src/lib.rs | 4 +- crates/ethereum/evm/src/lib.rs | 10 +-- crates/ethereum/node/Cargo.toml | 28 +++---- crates/ethereum/node/src/node.rs | 3 +- crates/ethereum/payload/src/lib.rs | 4 +- crates/evm/src/provider.rs | 2 +- crates/evm/src/system_calls/eip2935.rs | 2 +- crates/evm/src/system_calls/eip4788.rs | 2 +- crates/evm/src/system_calls/eip7002.rs | 2 +- crates/evm/src/system_calls/eip7251.rs | 2 +- crates/evm/src/system_calls/mod.rs | 3 +- crates/exex/exex/src/backfill/test_utils.rs | 4 +- crates/net/downloaders/src/bodies/bodies.rs | 4 +- crates/net/downloaders/src/bodies/queue.rs | 2 +- crates/net/downloaders/src/bodies/request.rs | 6 +- crates/net/downloaders/src/bodies/task.rs | 2 +- crates/net/downloaders/src/file_client.rs | 3 +- crates/net/eth-wire-types/Cargo.toml | 34 ++++----- crates/net/eth-wire-types/src/blocks.rs | 8 +- crates/net/eth-wire-types/src/header.rs | 3 +- crates/net/eth-wire-types/src/primitives.rs | 2 +- crates/net/network/src/eth_requests.rs | 3 +- crates/net/network/src/state.rs | 3 +- crates/net/network/tests/it/requests.rs | 4 +- crates/net/p2p/src/bodies/response.rs | 2 +- crates/net/p2p/src/headers/client.rs | 2 +- crates/net/p2p/src/lib.rs | 4 +- crates/net/p2p/src/test_utils/full_block.rs | 3 +- crates/net/p2p/src/test_utils/headers.rs | 3 +- crates/node/api/Cargo.toml | 4 +- crates/node/api/src/node.rs | 2 +- crates/node/builder/Cargo.toml | 33 +++++---- crates/node/builder/src/components/builder.rs | 2 +- crates/node/builder/src/components/execute.rs | 2 +- crates/node/builder/src/components/mod.rs | 2 +- crates/node/builder/src/launch/common.rs | 2 +- crates/node/builder/src/setup.rs | 2 +- crates/optimism/chainspec/src/lib.rs | 2 +- crates/optimism/consensus/src/lib.rs | 6 +- crates/optimism/evm/src/execute.rs | 4 +- crates/optimism/evm/src/l1.rs | 3 +- crates/optimism/evm/src/lib.rs | 7 +- crates/optimism/node/Cargo.toml | 69 +++++++++--------- crates/optimism/node/src/node.rs | 3 +- 
crates/optimism/payload/src/builder.rs | 4 +- crates/optimism/primitives/Cargo.toml | 2 - crates/optimism/primitives/src/bedrock.rs | 3 +- crates/optimism/rpc/src/eth/call.rs | 6 +- crates/optimism/rpc/src/eth/mod.rs | 2 +- crates/optimism/rpc/src/eth/pending_block.rs | 3 +- crates/payload/builder/Cargo.toml | 9 ++- crates/payload/builder/src/lib.rs | 3 +- crates/primitives-traits/src/header/mod.rs | 3 +- .../src/header/test_utils.rs | 2 +- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives/src/block.rs | 3 +- crates/primitives/src/lib.rs | 4 +- crates/primitives/src/proofs.rs | 4 +- crates/rpc/rpc-builder/Cargo.toml | 4 +- crates/rpc/rpc-builder/src/eth.rs | 2 +- crates/rpc/rpc-builder/src/lib.rs | 10 +-- crates/rpc/rpc-eth-api/src/helpers/block.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 4 +- .../rpc-eth-api/src/helpers/pending_block.rs | 5 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 3 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 3 +- crates/rpc/rpc-eth-types/src/cache/mod.rs | 3 +- crates/rpc/rpc-eth-types/src/simulate.rs | 2 +- .../rpc-types-compat/src/engine/payload.rs | 4 +- crates/rpc/rpc/src/eth/core.rs | 3 +- crates/rpc/rpc/src/eth/helpers/call.rs | 2 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 2 +- crates/rpc/rpc/src/eth/helpers/trace.rs | 2 +- crates/rpc/rpc/src/trace.rs | 2 +- crates/stages/stages/Cargo.toml | 39 +++++----- crates/stages/stages/src/stages/bodies.rs | 3 +- crates/stages/stages/src/stages/execution.rs | 3 +- crates/stages/stages/src/stages/headers.rs | 8 +- crates/storage/db-api/Cargo.toml | 45 ++++++------ crates/storage/db-api/src/models/blocks.rs | 2 +- crates/storage/db-api/src/models/mod.rs | 5 +- crates/storage/db/Cargo.toml | 41 +++++------ .../storage/db/src/implementation/mdbx/mod.rs | 3 +- crates/storage/db/src/static_file/masks.rs | 2 +- crates/storage/db/src/tables/mod.rs | 3 +- crates/storage/provider/Cargo.toml | 73 +++++++++---------- .../src/providers/blockchain_provider.rs | 5 +- 
.../provider/src/providers/consistent.rs | 5 +- .../provider/src/providers/database/mod.rs | 3 +- .../src/providers/database/provider.rs | 7 +- crates/storage/provider/src/providers/mod.rs | 5 +- .../provider/src/providers/static_file/jar.rs | 3 +- .../src/providers/static_file/manager.rs | 3 +- .../provider/src/providers/static_file/mod.rs | 4 +- .../src/providers/static_file/writer.rs | 3 +- .../storage/provider/src/test_utils/blocks.rs | 5 +- .../storage/provider/src/test_utils/mock.rs | 4 +- .../storage/provider/src/test_utils/noop.rs | 6 +- crates/storage/provider/src/writer/mod.rs | 3 +- crates/storage/storage-api/src/block.rs | 3 +- crates/storage/storage-api/src/header.rs | 3 +- examples/custom-evm/Cargo.toml | 1 + examples/custom-evm/src/main.rs | 3 +- examples/stateful-precompile/Cargo.toml | 1 + examples/stateful-precompile/src/main.rs | 3 +- testing/ef-tests/Cargo.toml | 1 + testing/ef-tests/src/models.rs | 5 +- testing/testing-utils/src/generators.rs | 6 +- 134 files changed, 493 insertions(+), 439 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e1d2330091..ca5465d37bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -776,9 +776,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.7.4" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b2e366c0debf0af77766c23694a3f863b02633050e71e096e257ffbd395e50" +checksum = "40d8e28db02c006f7abb20f345ffb3cc99c465e36f676ba262534e654ae76042" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -2611,6 +2611,7 @@ dependencies = [ name = "ef-tests" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", @@ -2878,6 +2879,7 @@ dependencies = [ name = "example-custom-evm" version = "0.0.0" dependencies = [ + "alloy-consensus", "alloy-genesis", "alloy-primitives", "eyre", @@ -3063,6 +3065,7 @@ dependencies = [ name = "example-stateful-precompile" version = "0.0.0" dependencies = [ + "alloy-consensus", 
"alloy-genesis", "alloy-primitives", "eyre", @@ -4601,7 +4604,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -6445,6 +6448,7 @@ dependencies = [ name = "reth-beacon-consensus" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", @@ -6647,6 +6651,7 @@ name = "reth-cli-commands" version = "1.1.1" dependencies = [ "ahash", + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", @@ -6788,6 +6793,7 @@ dependencies = [ name = "reth-consensus" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", @@ -6838,6 +6844,7 @@ dependencies = [ name = "reth-db" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-primitives", "arbitrary", "assert_matches", @@ -6878,6 +6885,7 @@ dependencies = [ name = "reth-db-api" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-genesis", "alloy-primitives", "arbitrary", @@ -7206,6 +7214,7 @@ dependencies = [ name = "reth-engine-tree" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", @@ -7643,6 +7652,7 @@ dependencies = [ name = "reth-invalid-block-hooks" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -7902,6 +7912,7 @@ dependencies = [ name = "reth-node-api" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-rpc-types-engine", "eyre", "reth-beacon-consensus", @@ -7912,7 +7923,6 @@ dependencies = [ "reth-node-core", "reth-node-types", "reth-payload-primitives", - "reth-primitives", "reth-provider", "reth-tasks", "reth-transaction-pool", @@ -7922,6 +7932,7 @@ dependencies = [ name = "reth-node-builder" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-primitives", 
"alloy-rpc-types", "aquamarine", @@ -8354,8 +8365,6 @@ dependencies = [ "bytes", "derive_more 1.0.0", "op-alloy-consensus", - "reth-primitives", - "reth-primitives-traits", ] [[package]] @@ -8412,6 +8421,7 @@ dependencies = [ name = "reth-payload-builder" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types", "async-trait", @@ -8770,6 +8780,7 @@ dependencies = [ name = "reth-rpc-builder" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", @@ -8991,6 +9002,7 @@ dependencies = [ name = "reth-stages" version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", "assert_matches", @@ -11086,7 +11098,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3637e734239e12ab152cd269302500bd063f37624ee210cd04b4936ed671f3b1" dependencies = [ "cc", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -11561,7 +11573,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 0c48b3b9ce8..7778fb9262c 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -377,8 +377,9 @@ impl BlockIndices { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; use alloy_primitives::B256; - use reth_primitives::{Header, SealedBlock, SealedHeader}; + use reth_primitives::{SealedBlock, SealedHeader}; #[test] fn pending_block_num_hash_returns_none_if_no_fork() { diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 0a7dd2a1178..c48e1548434 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ 
b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1374,7 +1374,7 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; + use alloy_consensus::{Header, TxEip1559, EMPTY_ROOT_HASH}; use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip4895::Withdrawals}; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, B256}; @@ -1389,7 +1389,7 @@ mod tests { use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, revm_primitives::AccountInfo, - Account, BlockBody, Header, Transaction, TransactionSigned, TransactionSignedEcRecovered, + Account, BlockBody, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use reth_provider::{ test_utils::{ diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 9a88a3c54bc..0a2f53715ff 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -24,6 +24,7 @@ reth-trie.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } @@ -42,7 +43,6 @@ pin-project.workspace = true # optional deps for test-utils alloy-signer = { workspace = true, optional = true } alloy-signer-local = { workspace = true, optional = true } -alloy-consensus = { workspace = true, optional = true } rand = { workspace = true, optional = true } revm = { workspace = true, optional = true } @@ -56,13 +56,12 @@ revm.workspace = true [features] test-utils = [ - "alloy-signer", - "alloy-signer-local", - "alloy-consensus", - "rand", - "revm", - "reth-chainspec/test-utils", - "reth-primitives/test-utils", - "reth-trie/test-utils", - "revm?/test-utils" + "alloy-signer", + "alloy-signer-local", + "rand", + "revm", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-trie/test-utils", + "revm?/test-utils", ] diff --git 
a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 8794bb393ca..47443b36c67 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -4,6 +4,7 @@ use crate::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, ChainInfoTracker, MemoryOverlayStateProvider, }; +use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, Address, TxHash, B256}; use parking_lot::RwLock; @@ -11,7 +12,7 @@ use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, + BlockWithSenders, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, }; use reth_storage_api::StateProviderBox; diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 650fcc3bbce..60a90e43fee 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -2,7 +2,7 @@ use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, }; -use alloy_consensus::{Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; +use alloy_consensus::{Header, Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7685::Requests}; use alloy_primitives::{Address, BlockNumber, B256, U256}; use alloy_signer::SignerSync; @@ -12,8 +12,8 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, - Transaction, TransactionSigned, 
TransactionSignedEcRecovered, + BlockBody, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + TransactionSigned, TransactionSignedEcRecovered, }; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index ee25f72bae8..f0cc31bb44d 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,12 +1,12 @@ use crate::{ChainSpec, DepositContract}; use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; +use alloy_consensus::Header; use alloy_eips::eip1559::BaseFeeParams; use alloy_genesis::Genesis; use alloy_primitives::B256; use core::fmt::{Debug, Display}; use reth_network_peers::NodeRecord; -use reth_primitives_traits::Header; /// Trait representing type configuring a chain spec. #[auto_impl::auto_impl(&, Arc)] diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 779eb8a3757..fdaad948f26 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -11,7 +11,10 @@ use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use derive_more::From; -use alloy_consensus::constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; +use alloy_consensus::{ + constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}, + Header, +}; use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use reth_ethereum_forks::{ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, @@ -21,7 +24,7 @@ use reth_network_peers::{ base_nodes, base_testnet_nodes, holesky_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, sepolia_nodes, NodeRecord, }; -use reth_primitives_traits::{constants::HOLESKY_GENESIS_HASH, Header, SealedHeader}; +use reth_primitives_traits::{constants::HOLESKY_GENESIS_HASH, SealedHeader}; use 
reth_trie_common::root::state_root_ref_unhashed; use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec, LazyLock, OnceLock}; diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index a0bc5147700..7e27d9b4e2e 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -52,6 +52,7 @@ reth-trie-common = { workspace = true, optional = true } alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true +alloy-consensus.workspace = true itertools.workspace = true futures.workspace = true @@ -94,22 +95,23 @@ reth-discv4.workspace = true [features] default = [] arbitrary = [ - "dep:proptest", - "dep:arbitrary", - "dep:proptest-arbitrary-interop", - "reth-primitives/arbitrary", - "reth-db-api/arbitrary", - "reth-eth-wire/arbitrary", - "reth-db/arbitrary", - "reth-chainspec/arbitrary", - "alloy-eips/arbitrary", - "alloy-primitives/arbitrary", - "reth-codecs/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils", - "reth-trie-common/test-utils", - "reth-codecs?/arbitrary", - "reth-prune-types?/arbitrary", - "reth-stages-types?/arbitrary", - "reth-trie-common?/arbitrary" + "dep:proptest", + "dep:arbitrary", + "dep:proptest-arbitrary-interop", + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + "reth-eth-wire/arbitrary", + "reth-db/arbitrary", + "reth-chainspec/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-codecs/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", + "reth-trie-common/test-utils", + "reth-codecs?/arbitrary", + "reth-prune-types?/arbitrary", + "reth-stages-types?/arbitrary", + "reth-trie-common?/arbitrary", + "alloy-consensus/arbitrary", ] diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 4006d1660aa..94c0f63dd30 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,3 +1,4 @@ +use 
alloy_consensus::Header; use alloy_primitives::{hex, BlockHash}; use clap::Parser; use reth_db::{ @@ -7,7 +8,6 @@ use reth_db::{ use reth_db_api::table::{Decompress, DupSort, Table}; use reth_db_common::DbTool; use reth_node_builder::NodeTypesWithDB; -use reth_primitives::Header; use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use tracing::error; diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index 187996653c3..29fc2aec60e 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,8 +1,9 @@ use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::Decodable; +use alloy_consensus::Header; use reth_primitives::{ - BlockBody, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, + BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, }; use reth_provider::{ providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileWriter, diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 6b523c6edd1..fd7d3b3799d 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -1,3 +1,4 @@ +use alloy_consensus::Header; use alloy_primitives::{hex, private::getrandom::getrandom}; use arbitrary::Arbitrary; use eyre::Result; @@ -10,7 +11,7 @@ use proptest_arbitrary_interop::arb; use reth_db::tables; use reth_db_api::table::{DupSort, Table, TableRow}; use reth_fs_util as fs; -use reth_primitives::{Header, TransactionSignedNoHash}; +use reth_primitives::TransactionSignedNoHash; use std::collections::HashSet; use tracing::error; diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 0139be2f680..d926fc09c35 100644 --- a/crates/consensus/beacon/Cargo.toml +++ 
b/crates/consensus/beacon/Cargo.toml @@ -34,6 +34,7 @@ reth-chainspec = { workspace = true, optional = true } alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["std"] } alloy-eips.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -77,12 +78,12 @@ assert_matches.workspace = true [features] optimism = [ - "reth-blockchain-tree/optimism", - "reth-chainspec", - "reth-db-api/optimism", - "reth-db/optimism", - "reth-downloaders/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-downloaders/optimism" -] \ No newline at end of file + "reth-blockchain-tree/optimism", + "reth-chainspec", + "reth-db-api/optimism", + "reth-db/optimism", + "reth-downloaders/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-downloaders/optimism", +] diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index 5bcf0cae7e9..b8d80b0ceea 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -1,9 +1,10 @@ +use alloy_consensus::Header; use alloy_primitives::B256; use reth_metrics::{ metrics::{Counter, Gauge}, Metrics, }; -use reth_primitives::{Header, SealedHeader}; +use reth_primitives::SealedHeader; use schnellru::{ByLength, LruMap}; use std::sync::Arc; use tracing::warn; diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 8d385a64b8e..03f7bf08b1e 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,3 +1,4 @@ +use alloy_consensus::Header; use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ @@ -20,7 +21,7 @@ use reth_node_types::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use 
reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Head, Header, SealedBlock, SealedHeader}; +use reth_primitives::{Head, SealedBlock, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index d91280eac88..b6e75f802e3 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -410,11 +410,12 @@ impl PipelineState { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient}; - use reth_primitives::{BlockBody, Header, SealedHeader}; + use reth_primitives::{BlockBody, SealedHeader}; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, ExecutionOutcome, diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index af1bbfdbdd3..62357b4b9b1 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,12 +1,10 @@ //! Collection of methods for block validation. 
-use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header}; use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{ - BlockBody, EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader, -}; +use reth_primitives::{BlockBody, EthereumHardfork, GotExpected, SealedBlock, SealedHeader}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 2faf3f2ac71..d120d268bd9 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -17,6 +17,7 @@ reth-primitives.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # misc auto_impl.workspace = true @@ -25,10 +26,9 @@ derive_more.workspace = true [features] default = ["std"] std = [ - "reth-primitives/std", - "alloy-primitives/std", - "alloy-eips/std" -] -test-utils = [ - "reth-primitives/test-utils" + "reth-primitives/std", + "alloy-primitives/std", + "alloy-eips/std", + "alloy-consensus/std", ] +test-utils = ["reth-primitives/test-utils"] diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 91ec42608c1..a8f0a01f22b 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -12,11 +12,12 @@ extern crate alloc; use alloc::{fmt::Debug, vec::Vec}; +use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_primitives::{ constants::MINIMUM_GAS_LIMIT, BlockBody, BlockWithSenders, GotExpected, GotExpectedBoxed, - Header, InvalidTransactionError, Receipt, SealedBlock, SealedHeader, + 
InvalidTransactionError, Receipt, SealedBlock, SealedHeader, }; /// A consensus implementation that does nothing. diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml index b33b8c00a1c..462f0762a9e 100644 --- a/crates/engine/invalid-block-hooks/Cargo.toml +++ b/crates/engine/invalid-block-hooks/Cargo.toml @@ -26,6 +26,7 @@ reth-trie = { workspace = true, features = ["serde"] } alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-rpc-types-debug.workspace = true +alloy-consensus.workspace = true # async futures.workspace = true diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 416c4adb40f..4e92411ea12 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; +use alloy_consensus::Header; use alloy_primitives::{keccak256, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use eyre::OptionExt; @@ -9,7 +10,7 @@ use reth_engine_primitives::InvalidBlockHook; use reth_evm::{ state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, }; -use reth_primitives::{Header, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 2ce18aa0e7d..fb259d08560 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -38,6 +38,7 @@ reth-trie-parallel.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true revm-primitives.workspace = true @@ -83,22 +84,22 @@ 
assert_matches.workspace = true [features] test-utils = [ - "reth-db/test-utils", - "reth-chain-state/test-utils", - "reth-network-p2p/test-utils", - "reth-prune-types", - "reth-stages/test-utils", - "reth-static-file", - "reth-tracing", - "reth-blockchain-tree/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-evm/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - "reth-stages-api/test-utils", - "reth-provider/test-utils", - "reth-trie/test-utils", - "reth-prune-types?/test-utils" + "reth-db/test-utils", + "reth-chain-state/test-utils", + "reth-network-p2p/test-utils", + "reth-prune-types", + "reth-stages/test-utils", + "reth-static-file", + "reth-tracing", + "reth-blockchain-tree/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-stages-api/test-utils", + "reth-provider/test-utils", + "reth-trie/test-utils", + "reth-prune-types?/test-utils", ] diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index c267203d851..2ed0e758d50 100644 --- a/crates/engine/tree/src/backfill.rs +++ b/crates/engine/tree/src/backfill.rs @@ -230,12 +230,13 @@ impl PipelineState { mod tests { use super::*; use crate::test_utils::{insert_headers_into_client, TestPipelineBuilder}; + use alloy_consensus::Header; use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::{Header, SealedHeader}; + use reth_primitives::SealedHeader; use reth_provider::test_utils::MockNodeTypesWithDB; use reth_stages::ExecOutput; use reth_stages_api::StageCheckpoint; diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 
cb43be3c4de..8a7ea583f0f 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -309,11 +309,12 @@ impl BlockDownloader for NoopBlockDownloader { mod tests { use super::*; use crate::test_utils::insert_headers_into_client; + use alloy_consensus::Header; use assert_matches::assert_matches; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::{Header, SealedHeader}; + use reth_primitives::SealedHeader; use std::{future::poll_fn, sync::Arc}; struct TestHarness { diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 755ee1106a6..adc9230af34 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -4,6 +4,7 @@ use crate::{ engine::{DownloadRequest, EngineApiEvent, FromEngine}, persistence::PersistenceHandle, }; +use alloy_consensus::Header; use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -32,9 +33,7 @@ use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{ - Block, GotExpected, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, -}; +use reth_primitives::{Block, GotExpected, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, ProviderError, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 169b6f5ede7..0a4dd8d496f 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -1,6 +1,6 @@ //! Stream wrapper that simulates reorgs. 
-use alloy_consensus::Transaction; +use alloy_consensus::{Header, Transaction}; use alloy_primitives::U256; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, @@ -16,7 +16,7 @@ use reth_evm::{ ConfigureEvm, }; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{proofs, Block, BlockBody, Header, Receipt, Receipts}; +use reth_primitives::{proofs, Block, BlockBody, Receipt, Receipts}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 3dab9849f6c..3dc7a02af8b 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,7 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; @@ -19,7 +19,7 @@ use reth_consensus_common::validation::{ validate_header_base_fee, validate_header_extradata, validate_header_gas, }; use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockBody, BlockWithSenders, Header, SealedBlock, SealedHeader, + constants::MINIMUM_GAS_LIMIT, BlockBody, BlockWithSenders, SealedBlock, SealedHeader, }; use std::{fmt::Debug, sync::Arc, time::SystemTime}; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 1c340c0927b..11e4acd4bfc 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -20,10 +20,11 @@ extern crate alloc; use core::convert::Infallible; use alloc::{sync::Arc, vec::Vec}; +use alloy_consensus::Header; use alloy_primitives::{Address, Bytes, 
TxKind, U256}; use reth_chainspec::{ChainSpec, Head}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; -use reth_primitives::{transaction::FillTxEnv, Header, TransactionSigned}; +use reth_primitives::{transaction::FillTxEnv, TransactionSigned}; use revm_primitives::{ AnalysisKind, BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, Env, SpecId, TxEnv, }; @@ -195,15 +196,12 @@ impl ConfigureEvm for EthEvmConfig { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::constants::KECCAK_EMPTY; + use alloy_consensus::{constants::KECCAK_EMPTY, Header}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; use reth_chainspec::{Chain, ChainSpec, MAINNET}; use reth_evm::execute::ProviderError; - use reth_primitives::{ - revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Header, - }; + use reth_primitives::revm_primitives::{BlockEnv, CfgEnv, SpecId}; use reth_revm::{ db::{CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 69bbeeb5b43..6ecd5437bfb 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -32,6 +32,8 @@ reth-primitives.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-trie-db.workspace = true +alloy-consensus.workspace = true + # revm with required ethereum features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -63,17 +65,17 @@ alloy-rpc-types-beacon.workspace = true [features] default = [] test-utils = [ - "reth-node-builder/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-network/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - "reth-db/test-utils", - "reth-provider/test-utils", - "reth-transaction-pool/test-utils", - "reth-trie-db/test-utils", - "revm/test-utils", - "reth-evm/test-utils" + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + 
"reth-consensus/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-evm/test-utils", ] diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index b37d0227a78..5265329f19a 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; @@ -25,7 +26,7 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Header, Receipt, TransactionSigned, TxType}; +use reth_primitives::{Block, Receipt, TransactionSigned, TxType}; use reth_provider::CanonStateSubscriptions; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 87ceb4200b1..c94cd8bb728 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -9,7 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::Requests, merge::BEACON_NONCE}; use alloy_primitives::U256; use reth_basic_payload_builder::{ @@ -27,7 +27,7 @@ use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ proofs::{self}, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, 
EthereumHardforks, Header, Receipt, + Block, BlockBody, EthereumHardforks, Receipt, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index 84c38db0dc5..0d4f45c4d9d 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -1,8 +1,8 @@ //! Provider trait for populating the EVM environment. use crate::ConfigureEvmEnv; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; -use reth_primitives::Header; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; diff --git a/crates/evm/src/system_calls/eip2935.rs b/crates/evm/src/system_calls/eip2935.rs index edb71c8b4e0..4848feb7281 100644 --- a/crates/evm/src/system_calls/eip2935.rs +++ b/crates/evm/src/system_calls/eip2935.rs @@ -4,10 +4,10 @@ use alloc::{boxed::Box, string::ToString}; use alloy_eips::eip2935::HISTORY_STORAGE_ADDRESS; use crate::ConfigureEvm; +use alloy_consensus::Header; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::ResultAndState; diff --git a/crates/evm/src/system_calls/eip4788.rs b/crates/evm/src/system_calls/eip4788.rs index bc535809680..2ad02c26eb9 100644 --- a/crates/evm/src/system_calls/eip4788.rs +++ b/crates/evm/src/system_calls/eip4788.rs @@ -2,11 +2,11 @@ use alloc::{boxed::Box, string::ToString}; use crate::ConfigureEvm; +use alloy_consensus::Header; use alloy_eips::eip4788::BEACON_ROOTS_ADDRESS; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::ResultAndState; diff --git 
a/crates/evm/src/system_calls/eip7002.rs b/crates/evm/src/system_calls/eip7002.rs index 5e36f2bdeb9..f20b7a54c08 100644 --- a/crates/evm/src/system_calls/eip7002.rs +++ b/crates/evm/src/system_calls/eip7002.rs @@ -1,10 +1,10 @@ //! [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) system call implementation. use crate::ConfigureEvm; use alloc::{boxed::Box, format}; +use alloy_consensus::Header; use alloy_eips::eip7002::WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS; use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; diff --git a/crates/evm/src/system_calls/eip7251.rs b/crates/evm/src/system_calls/eip7251.rs index 7a55c7a5aea..112f724df76 100644 --- a/crates/evm/src/system_calls/eip7251.rs +++ b/crates/evm/src/system_calls/eip7251.rs @@ -1,10 +1,10 @@ //! [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) system call implementation. 
use crate::ConfigureEvm; use alloc::{boxed::Box, format}; +use alloy_consensus::Header; use alloy_eips::eip7251::CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS; use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 7fdb31d967d..47fd59d735f 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -2,12 +2,13 @@ use crate::ConfigureEvm; use alloc::{boxed::Box, sync::Arc, vec}; +use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::Bytes; use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_execution_errors::BlockExecutionError; -use reth_primitives::{Block, Header}; +use reth_primitives::Block; use revm::{Database, DatabaseCommit, Evm}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 0a8bde24245..a1e88c7f428 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use alloy_consensus::{constants::ETH_TO_WEI, TxEip2930}; +use alloy_consensus::{constants::ETH_TO_WEI, Header, TxEip2930}; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{b256, Address, TxKind, U256}; use eyre::OptionExt; @@ -10,7 +10,7 @@ use reth_evm::execute::{ }; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::{ - Block, BlockBody, BlockWithSenders, Header, Receipt, SealedBlockWithSenders, Transaction, + Block, BlockBody, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction, }; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, 
LatestStateProviderRef, diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 02a36c8e8cd..bebc51ad772 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -37,7 +37,7 @@ pub struct BodiesDownloader { /// The bodies client client: Arc, /// The consensus client - consensus: Arc>, + consensus: Arc>, /// The database handle provider: Provider, /// The maximum number of non-empty blocks per one request @@ -564,7 +564,7 @@ impl BodiesDownloaderBuilder { pub fn build( self, client: B, - consensus: Arc>, + consensus: Arc>, provider: Provider, ) -> BodiesDownloader where diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index 54404d0da38..aa6ec9e4af0 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -58,7 +58,7 @@ where pub(crate) fn push_new_request( &mut self, client: Arc, - consensus: Arc>, + consensus: Arc>, request: Vec, ) { // Set last max requested block number diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 7b99c81d89e..66287624f89 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -27,7 +27,7 @@ use std::{ /// It then proceeds to verify the downloaded bodies. In case of an validation error, /// the future will start over. /// -/// The future will filter out any empty headers (see [`reth_primitives::Header::is_empty`]) from +/// The future will filter out any empty headers (see [`alloy_consensus::Header::is_empty`]) from /// the request. If [`BodiesRequestFuture`] was initialized with all empty headers, no request will /// be dispatched and they will be immediately returned upon polling. /// @@ -39,7 +39,7 @@ use std::{ /// and eventually disconnected. 
pub(crate) struct BodiesRequestFuture { client: Arc, - consensus: Arc>, + consensus: Arc>, metrics: BodyDownloaderMetrics, /// Metrics for individual responses. This can be used to observe how the size (in bytes) of /// responses change while bodies are being downloaded. @@ -60,7 +60,7 @@ where /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( client: Arc, - consensus: Arc>, + consensus: Arc>, metrics: BodyDownloaderMetrics, ) -> Self { Self { diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 2caf3199188..de1638f3e66 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -54,7 +54,7 @@ impl TaskDownloader { /// Provider: HeaderProvider + Unpin + 'static, /// >( /// client: Arc, - /// consensus: Arc>, + /// consensus: Arc>, /// provider: Provider, /// ) { /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider); diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index f0104032aa0..486d4a05127 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, io, path::Path}; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, B256}; use futures::Future; @@ -12,7 +13,7 @@ use reth_network_p2p::{ priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, Header, SealedHeader}; +use reth_primitives::{BlockBody, SealedHeader}; use thiserror::Error; use tokio::{fs::File, io::AsyncReadExt}; use tokio_stream::StreamExt; diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 9ce712bf87a..f9759ffc25a 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -23,6 +23,7 @@ alloy-chains 
= { workspace = true, features = ["rlp"] } alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } +alloy-consensus.workspace = true bytes.workspace = true derive_more.workspace = true @@ -42,27 +43,26 @@ arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true rand.workspace = true -alloy-consensus.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", - "alloy-chains/arbitrary", - "dep:arbitrary", - "dep:proptest", - "dep:proptest-arbitrary-interop", - "reth-chainspec/arbitrary", - "alloy-consensus/arbitrary", - "alloy-eips/arbitrary", - "alloy-primitives/arbitrary", + "reth-primitives/arbitrary", + "alloy-chains/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", "reth-primitives-traits/arbitrary", ] serde = [ - "dep:serde", - "alloy-chains/serde", - "alloy-consensus/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "bytes/serde", - "rand/serde" + "dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", ] diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index a7835ae8641..1eb71082d81 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -41,12 +41,12 @@ pub struct GetBlockHeaders { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -pub struct BlockHeaders( +pub struct BlockHeaders( /// The requested headers. 
pub Vec, ); -generate_tests!(#[rlp, 10] BlockHeaders, EthBlockHeadersTests); +generate_tests!(#[rlp, 10] BlockHeaders, EthBlockHeadersTests); impl From> for BlockHeaders { fn from(headers: Vec) -> Self { @@ -94,11 +94,11 @@ mod tests { message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, HeadersDirection, }; - use alloy_consensus::TxLegacy; + use alloy_consensus::{Header, TxLegacy}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{hex, PrimitiveSignature as Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::{BlockBody, Header, Transaction, TransactionSigned}; + use reth_primitives::{BlockBody, Transaction, TransactionSigned}; use std::str::FromStr; #[test] diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs index 8c11bfa82bb..9fa3b150d9e 100644 --- a/crates/net/eth-wire-types/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -87,10 +87,9 @@ impl From for bool { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; + use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, hex, Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::Header; use std::str::FromStr; // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index 15cfaaff0a2..eab36c3b6a7 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -78,7 +78,7 @@ pub trait NetworkPrimitives: pub struct EthNetworkPrimitives; impl NetworkPrimitives for EthNetworkPrimitives { - type BlockHeader = reth_primitives::Header; + type BlockHeader = alloy_consensus::Header; type BlockBody = reth_primitives::BlockBody; type Block = reth_primitives::Block; type BroadcastedTransaction = 
reth_primitives::TransactionSigned; diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index f0c355b174a..1f20be53967 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -7,6 +7,7 @@ use std::{ time::Duration, }; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_rlp::Encodable; use futures::StreamExt; @@ -17,7 +18,7 @@ use reth_eth_wire::{ use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::error::RequestResult; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, Header}; +use reth_primitives::BlockBody; use reth_storage_api::{BlockReader, HeaderProvider, ReceiptProvider}; use tokio::sync::{mpsc::Receiver, oneshot}; use tokio_stream::wrappers::ReceiverStream; diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 3bafbf25856..c51f115c52f 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -563,12 +563,13 @@ mod tests { sync::{atomic::AtomicU64, Arc}, }; + use alloy_consensus::Header; use alloy_primitives::B256; use reth_eth_wire::{BlockBodies, Capabilities, Capability, EthVersion}; use reth_network_api::PeerRequestSender; use reth_network_p2p::{bodies::client::BodiesClient, error::RequestError}; use reth_network_peers::PeerId; - use reth_primitives::{BlockBody, Header}; + use reth_primitives::BlockBody; use reth_provider::test_utils::NoopProvider; use tokio::sync::mpsc; use tokio_stream::{wrappers::ReceiverStream, StreamExt}; diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 58e46e3fb09..54e1f4e12b4 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -3,7 +3,7 @@ use std::sync::Arc; -use alloy_consensus::TxEip2930; +use alloy_consensus::{Header, TxEip2930}; use alloy_primitives::{Bytes, PrimitiveSignature as Signature, TxKind, U256}; use rand::Rng; use 
reth_eth_wire::HeadersDirection; @@ -16,7 +16,7 @@ use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::{HeadersClient, HeadersRequest}, }; -use reth_primitives::{Block, Header, Transaction, TransactionSigned}; +use reth_primitives::{Block, Transaction, TransactionSigned}; use reth_provider::test_utils::MockEthProvider; /// Returns a new [`TransactionSigned`] with some random parameters diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 8737647bd79..153d7d39d4e 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -6,7 +6,7 @@ use reth_primitives_traits::InMemorySize; #[derive(PartialEq, Eq, Debug, Clone)] pub enum BlockResponse { /// Full block response (with transactions or ommers) - Full(SealedBlock), + Full(SealedBlock), /// The empty block response Empty(SealedHeader), } diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index 585f2ab18a0..bb879784499 100644 --- a/crates/net/p2p/src/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -1,8 +1,8 @@ use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use futures::{Future, FutureExt}; pub use reth_eth_wire_types::{BlockHeaders, HeadersDirection}; -use reth_primitives::Header; use std::{ fmt::Debug, pin::Pin, diff --git a/crates/net/p2p/src/lib.rs b/crates/net/p2p/src/lib.rs index 98d83c2d1a8..7dcb77671d4 100644 --- a/crates/net/p2p/src/lib.rs +++ b/crates/net/p2p/src/lib.rs @@ -55,11 +55,11 @@ impl BlockClient for T where T: HeadersClient + BodiesClient + Unpin + Clone /// The [`BlockClient`] providing Ethereum block parts. pub trait EthBlockClient: - BlockClient
+ BlockClient
{ } impl EthBlockClient for T where - T: BlockClient
+ T: BlockClient
{ } diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs index 97d867531ad..ee65bcb3f07 100644 --- a/crates/net/p2p/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -5,12 +5,13 @@ use crate::{ headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; +use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::B256; use parking_lot::Mutex; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; use std::{collections::HashMap, sync::Arc}; /// A headers+bodies client implementation that does nothing. diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index 8892a010b43..bc5262abef4 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -10,11 +10,12 @@ use crate::{ }, priority::Priority, }; +use alloy_consensus::Header; use futures::{Future, FutureExt, Stream, StreamExt}; use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{Header, SealedHeader}; +use reth_primitives::SealedHeader; use std::{ fmt, pin::Pin, diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index b2bf001862e..a4cc0eb7eb6 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -22,9 +22,9 @@ reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-network-api.workspace = true reth-node-types.workspace = true -reth-primitives.workspace = true reth-node-core.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true -eyre.workspace = true \ No newline at end of file +eyre.workspace = true diff --git 
a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 253145ea9eb..90b9e2999bf 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,6 +1,7 @@ //! Traits for configuring a node. use crate::ConfigureEvm; +use alloy_consensus::Header; use alloy_rpc_types_engine::JwtSecret; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_consensus::Consensus; @@ -9,7 +10,6 @@ use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; use reth_payload_primitives::PayloadBuilder; -use reth_primitives::Header; use reth_provider::FullProvider; use reth_tasks::TaskExecutor; use reth_transaction_pool::TransactionPool; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 4ef2b0728e0..09bdd8b2269 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -62,6 +62,7 @@ reth-transaction-pool.workspace = true ## ethereum alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } +alloy-consensus.workspace = true ## async futures.workspace = true @@ -96,20 +97,20 @@ tempfile.workspace = true [features] default = [] test-utils = [ - "reth-db/test-utils", - "reth-blockchain-tree/test-utils", - "reth-chain-state/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-engine-tree/test-utils", - "reth-evm/test-utils", - "reth-downloaders/test-utils", - "reth-network/test-utils", - "reth-network-p2p/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-stages/test-utils", - "reth-db-api/test-utils", - "reth-provider/test-utils", - "reth-transaction-pool/test-utils" + "reth-db/test-utils", + "reth-blockchain-tree/test-utils", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-engine-tree/test-utils", + "reth-evm/test-utils", + 
"reth-downloaders/test-utils", + "reth-network/test-utils", + "reth-network-p2p/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-stages/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", ] diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 41ce36858d8..95c0c764b5c 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -7,11 +7,11 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; +use alloy_consensus::Header; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_node_api::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; -use reth_primitives::Header; use reth_transaction_pool::TransactionPool; use std::{future::Future, marker::PhantomData}; diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 90cff588f7c..4e8f63f412b 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -1,8 +1,8 @@ //! EVM component for the node builder. use crate::{BuilderContext, FullNodeTypes}; +use alloy_consensus::Header; use reth_evm::execute::BlockExecutorProvider; use reth_node_api::ConfigureEvm; -use reth_primitives::Header; use std::future::Future; /// A type that knows how to build the executor types. 
diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 29b667d5409..1fe35e554d5 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -22,13 +22,13 @@ pub use payload::*; pub use pool::*; use crate::{ConfigureEvm, FullNodeTypes}; +use alloy_consensus::Header; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; use reth_node_api::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; -use reth_primitives::Header; use reth_transaction_pool::TransactionPool; /// An abstraction over the components of a node, consisting of: diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index ec4912fdd86..f9106296323 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -760,7 +760,7 @@ where /// necessary pub async fn max_block(&self, client: C) -> eyre::Result> where - C: HeadersClient
, + C: HeadersClient
, { self.node_config().max_block(client, self.provider_factory().clone()).await } diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index d8405dad77f..db188402ca8 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -84,7 +84,7 @@ pub fn build_pipeline( ) -> eyre::Result> where N: ProviderNodeTypes, - H: HeaderDownloader
+ 'static, + H: HeaderDownloader
+ 'static, B: BodyDownloader + 'static, Executor: BlockExecutorProvider, { diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index c110c4b0821..d552d08f18c 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -19,6 +19,7 @@ mod op_sepolia; use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; +use alloy_consensus::Header; use alloy_genesis::Genesis; use alloy_primitives::{Bytes, B256, U256}; pub use base::BASE_MAINNET; @@ -36,7 +37,6 @@ use reth_chainspec::{ use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; use reth_network_peers::NodeRecord; use reth_optimism_forks::OpHardforks; -use reth_primitives_traits::Header; #[cfg(feature = "std")] pub(crate) use std::sync::LazyLock; diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 476b259529e..72f67dcb450 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,7 +9,7 @@ // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")] -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; @@ -21,9 +21,7 @@ use reth_consensus_common::validation::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardforks; -use reth_primitives::{ - BlockBody, BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader, -}; +use reth_primitives::{BlockBody, BlockWithSenders, GotExpected, SealedBlock, SealedHeader}; use std::{sync::Arc, time::SystemTime}; mod proof; diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 2b004e6eb9d..b4c2e16f593 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -2,7 +2,7 @@ use crate::{l1::ensure_create2_deployer, OpBlockExecutionError, OpEvmConfig}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use alloy_consensus::Transaction as _; +use alloy_consensus::{Header, Transaction as _}; use alloy_eips::eip7685::Requests; use core::fmt::Display; use op_alloy_consensus::DepositTransaction; @@ -20,7 +20,7 @@ use reth_evm::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OpHardfork; -use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; +use reth_primitives::{BlockWithSenders, Receipt, TxType}; use reth_revm::{Database, State}; use revm_primitives::{ db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 143399a5ab7..9d3e76fb442 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -304,8 +304,9 @@ mod tests { #[test] fn sanity_l1_block() { + use alloy_consensus::Header; use alloy_primitives::{hex_literal::hex, Bytes}; - use 
reth_primitives::{Header, TransactionSigned}; + use reth_primitives::TransactionSigned; let bytes = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); let l1_info_tx = TransactionSigned::decode_2718(&mut bytes.as_ref()).unwrap(); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index cfa7dfa5849..dafb1676ebd 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -13,13 +13,14 @@ extern crate alloc; use alloc::{sync::Arc, vec::Vec}; +use alloy_consensus::Header; use alloy_primitives::{Address, U256}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; use reth_optimism_chainspec::{DecodeError, OpChainSpec}; use reth_primitives::{ revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, transaction::FillTxEnv, - Head, Header, TransactionSigned, + Head, TransactionSigned, }; use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; @@ -200,7 +201,7 @@ impl ConfigureEvm for OpEvmConfig { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::constants::KECCAK_EMPTY; + use alloy_consensus::{constants::KECCAK_EMPTY, Header}; use alloy_eips::eip7685::Requests; use alloy_genesis::Genesis; use alloy_primitives::{bytes, Address, LogData, B256, U256}; @@ -212,7 +213,7 @@ mod tests { use 
reth_optimism_chainspec::BASE_MAINNET; use reth_primitives::{ revm_primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, - Account, Header, Log, Receipt, Receipts, SealedBlockWithSenders, TxType, + Account, Log, Receipt, Receipts, SealedBlockWithSenders, TxType, }; use reth_revm::{ diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index c1e23e3d571..fb8cc27787e 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -45,6 +45,7 @@ alloy-eips.workspace = true alloy-primitives.workspace = true op-alloy-rpc-types-engine.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true # misc clap.workspace = true @@ -76,42 +77,42 @@ futures.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-optimism-evm/optimism", - "reth-optimism-payload-builder/optimism", - "reth-beacon-consensus/optimism", - "revm/optimism", - "reth-optimism-rpc/optimism", - "reth-engine-local/optimism", - "reth-optimism-consensus/optimism", - "reth-db/optimism", - "reth-optimism-node/optimism" + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-optimism-evm/optimism", + "reth-optimism-payload-builder/optimism", + "reth-beacon-consensus/optimism", + "revm/optimism", + "reth-optimism-rpc/optimism", + "reth-engine-local/optimism", + "reth-optimism-consensus/optimism", + "reth-db/optimism", + "reth-optimism-node/optimism", ] asm-keccak = [ - "reth-primitives/asm-keccak", - "reth/asm-keccak", - "alloy-primitives/asm-keccak", - "revm/asm-keccak", - "reth-optimism-node/asm-keccak" + "reth-primitives/asm-keccak", + "reth/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak", + "reth-optimism-node/asm-keccak", ] test-utils = [ - "reth", - "reth-e2e-test-utils", - "alloy-genesis", - "tokio", - "reth-node-builder/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-evm/test-utils", - "reth-network/test-utils", - 
"reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - "reth-db/test-utils", - "reth-provider/test-utils", - "reth-transaction-pool/test-utils", - "reth-trie-db/test-utils", - "revm/test-utils", - "reth-optimism-node/test-utils" + "reth", + "reth-e2e-test-utils", + "alloy-genesis", + "tokio", + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-optimism-node/test-utils", ] diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index efc8964ffab..0c2186c7268 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::{EthChainSpec, Hardforks}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; @@ -24,7 +25,7 @@ use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Header, Receipt, TransactionSigned, TxType}; +use reth_primitives::{Block, Receipt, TransactionSigned, TxType}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 47ef376b705..beb9a5c4ae2 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -2,7 +2,7 @@ use 
std::{fmt::Display, sync::Arc}; -use alloy_consensus::{Transaction, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{Header, Transaction, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::merge::BEACON_NONCE; use alloy_primitives::{Address, Bytes, U256}; use alloy_rpc_types_engine::PayloadId; @@ -18,7 +18,7 @@ use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ proofs, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Header, Receipt, SealedHeader, TransactionSigned, TxType, + Block, BlockBody, Receipt, SealedHeader, TransactionSigned, TxType, }; use reth_provider::{ProviderError, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index bc11c358504..a6f36732672 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -12,8 +12,6 @@ description = "OP primitive types" workspace = true [dependencies] -reth-primitives.workspace = true -reth-primitives-traits.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true op-alloy-consensus.workspace = true diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index 7153ae3155c..204b34d3378 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ b/crates/optimism/primitives/src/bedrock.rs @@ -1,8 +1,7 @@ //! OP mainnet bedrock related data. 
-use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, B256, B64, U256}; -use reth_primitives::Header; /// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, /// replayed in blocks: diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 9ddf7b3855b..a76c25916f3 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,10 +1,8 @@ +use alloy_consensus::Header; use alloy_primitives::{Bytes, TxKind, U256}; use alloy_rpc_types_eth::transaction::TransactionRequest; use reth_evm::ConfigureEvm; -use reth_primitives::{ - revm_primitives::{BlockEnv, OptimismFields, TxEnv}, - Header, -}; +use reth_primitives::revm_primitives::{BlockEnv, OptimismFields, TxEnv}; use reth_rpc_eth_api::{ helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, FromEthApiError, IntoEthApiError, RpcNodeCore, diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 624602bba38..60af6542e28 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -11,6 +11,7 @@ pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; use std::{fmt, sync::Arc}; +use alloy_consensus::Header; use alloy_primitives::U256; use derive_more::Deref; use op_alloy_network::Optimism; @@ -18,7 +19,6 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; use reth_node_builder::EthApiBuilderCtx; -use reth_primitives::Header; use reth_provider::{ BlockNumReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, StageCheckpointReader, StateProviderFactory, diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index c90b3f7b794..8356d72dbdc 100644 --- 
a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,11 +1,12 @@ //! Loads OP pending block for a RPC response. +use alloy_consensus::Header; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{BlockNumber, B256}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_primitives::{revm_primitives::BlockEnv, Header, Receipt, SealedBlockWithSenders}; +use reth_primitives::{revm_primitives::BlockEnv, Receipt, SealedBlockWithSenders}; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, ReceiptProvider, StateProviderFactory, diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 7a536cdbcfa..8b2fef7b878 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -39,11 +39,12 @@ tracing.workspace = true reth-primitives.workspace = true alloy-primitives.workspace = true revm.workspace = true +alloy-consensus.workspace = true [features] test-utils = [ - "alloy-primitives", - "reth-chain-state/test-utils", - "reth-primitives/test-utils", - "revm/test-utils" + "alloy-primitives", + "reth-chain-state/test-utils", + "reth-primitives/test-utils", + "revm/test-utils", ] diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 57a040a4bb4..da44072c99d 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -28,9 +28,10 @@ //! use std::pin::Pin; //! use std::sync::Arc; //! use std::task::{Context, Poll}; +//! use alloy_consensus::Header; //! use alloy_primitives::U256; //! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, PayloadKind}; -//! use reth_primitives::{Block, Header}; +//! use reth_primitives::Block; //! //! 
/// The generator type that creates new jobs that builds empty blocks. //! pub struct EmptyBlockPayloadJobGenerator; diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index fa9c3324535..760abf33720 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -7,8 +7,7 @@ pub use error::HeaderError; #[cfg(any(test, feature = "test-utils", feature = "arbitrary"))] pub mod test_utils; -pub use alloy_consensus::Header; - +use alloy_consensus::Header; use alloy_primitives::{Address, BlockNumber, B256, U256}; /// Bincode-compatible header type serde implementations. diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index c5f6e86b9db..0e79f6cb462 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -1,6 +1,6 @@ //! Test utilities to generate random valid headers. -use crate::Header; +use alloy_consensus::Header; use alloy_primitives::B256; use proptest::{arbitrary::any, prop_compose}; use proptest_arbitrary_interop::arb; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index ab3985158d8..b8f0aa4c8a8 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -59,7 +59,7 @@ pub use tx_type::{FullTxType, TxType}; pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{Header, HeaderError, SealedHeader}; +pub use header::{HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. 
/// diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 3ce4947ccc2..703f9d33169 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,5 +1,6 @@ -use crate::{GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered}; +use crate::{GotExpected, SealedHeader, TransactionSigned, TransactionSignedEcRecovered}; use alloc::vec::Vec; +use alloy_consensus::Header; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; use alloy_primitives::{Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index f44e1ee6a09..534b525f086 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -41,8 +41,8 @@ pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; pub use reth_primitives_traits::{ - logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, - LogData, SealedHeader, StorageEntry, + logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, HeaderError, Log, LogData, + SealedHeader, StorageEntry, }; pub use static_file::StaticFileSegment; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 10b7bc2530b..1712112281f 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,8 +1,8 @@ //! Helper function for calculating Merkle proofs and hashes. 
-use crate::{Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned}; +use crate::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned}; use alloc::vec::Vec; -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawal}; use alloy_primitives::{keccak256, B256}; use alloy_trie::root::{ordered_trie_root, ordered_trie_root_with_encoder}; diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index b6ae86c7408..04e97f99e34 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -29,7 +29,8 @@ reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true -reth-primitives.workspace = true + +alloy-consensus.workspace = true # rpc/net jsonrpsee = { workspace = true, features = ["server"] } @@ -63,6 +64,7 @@ reth-rpc-engine-api.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-rpc-types-compat.workspace = true +reth-primitives.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 40acecfedf3..e88f6aa86bb 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,5 +1,5 @@ +use alloy_consensus::Header; use reth_evm::ConfigureEvm; -use reth_primitives::Header; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; use reth_rpc::{EthFilter, EthPubSub}; use reth_rpc_eth_api::EthApiTypes; diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 40e40962349..27eceed98cb 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -16,9 +16,9 @@ //! 
Configure only an http server with a selection of [`RethRpcModule`]s //! //! ``` +//! use alloy_consensus::Header; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::Header; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ @@ -73,10 +73,10 @@ //! //! //! ``` +//! use alloy_consensus::Header; //! use reth_engine_primitives::EngineTypes; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::Header; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -167,6 +167,7 @@ use std::{ }; use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; +use alloy_consensus::Header; use error::{ConflictingModules, RpcError, ServerKind}; use eth::DynEthApiBuilder; use http::{header::AUTHORIZATION, HeaderMap}; @@ -183,7 +184,6 @@ use reth_consensus::Consensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives::Header; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullRpcProvider, StateProviderFactory, @@ -259,7 +259,7 @@ where Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, { @@ -679,11 +679,11 @@ where /// # Example /// /// ```no_run + /// use alloy_consensus::Header; /// use reth_consensus::noop::NoopConsensus; /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; - /// use reth_primitives::Header; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; /// use reth_rpc::EthApi; /// use reth_rpc_builder::RpcModuleBuilder; diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 7125857b898..251ca225eb1 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -163,7 +163,7 @@ pub trait EthBlocks: LoadBlock { fn ommers( &self, block_id: BlockId, - ) -> Result>, Self::Error> { + ) -> Result>, Self::Error> { self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index ef29f807026..6a5506ad2ad 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -5,7 +5,7 @@ use crate::{ AsEthApiError, FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcBlock, RpcNodeCore, }; -use alloy_consensus::BlockHeader; +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::{eip1559::calc_next_block_base_fee, eip2930::AccessListResult}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use alloy_rpc_types_eth::{ @@ -22,7 +22,7 @@ use reth_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, HaltReason, ResultAndState, TransactTo, TxEnv, }, - Header, TransactionSigned, + TransactionSigned, }; use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider, StateProvider}; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; diff --git 
a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 0173485aef5..490447d6152 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -5,7 +5,7 @@ use std::time::{Duration, Instant}; use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, }; @@ -24,8 +24,7 @@ use reth_primitives::{ BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, ResultAndState, SpecId, }, - Block, BlockBody, Header, Receipt, SealedBlockWithSenders, SealedHeader, - TransactionSignedEcRecovered, + Block, BlockBody, Receipt, SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 7bc365d91c4..7ff9fa4deff 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -1,7 +1,7 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. 
-use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_consensus::{constants::KECCAK_EMPTY, Header}; use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types_eth::{Account, EIP1186AccountProofResponse}; @@ -10,7 +10,6 @@ use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::ConfigureEvmEnv; -use reth_primitives::Header; use reth_provider::{ BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 36d901fda5f..104042d17a2 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -3,12 +3,13 @@ use std::{fmt::Display, sync::Arc}; use crate::{FromEvmError, RpcNodeCore}; +use alloy_consensus::Header; use alloy_primitives::B256; use alloy_rpc_types_eth::{BlockId, TransactionInfo}; use futures::Future; use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{Header, SealedBlockWithSenders}; +use reth_primitives::SealedBlockWithSenders; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index b6b0364c477..b4a110e96af 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,5 +1,6 @@ //! 
Async caching support for eth RPC +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::{future::Either, Stream, StreamExt}; @@ -7,7 +8,7 @@ use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; use reth_evm::{provider::EvmEnvProvider, ConfigureEvm}; use reth_execution_types::Chain; -use reth_primitives::{Header, Receipt, SealedBlockWithSenders, TransactionSigned}; +use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSigned}; use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 91aaa25430e..9807010c0cf 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -275,7 +275,7 @@ pub fn build_block>( let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; - let header = reth_primitives::Header { + let header = alloy_consensus::Header { beneficiary: block_env.coinbase, difficulty: block_env.difficulty, number: block_env.number.to(), diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 9050b0cced1..7f260a7693c 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -1,7 +1,7 @@ //! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in //! 
Ethereum's Engine -use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ eip2718::{Decodable2718, Encodable2718}, eip4895::Withdrawals, @@ -15,7 +15,7 @@ use alloy_rpc_types_engine::{ }; use reth_primitives::{ proofs::{self}, - Block, BlockBody, Header, SealedBlock, TransactionSigned, + Block, BlockBody, SealedBlock, TransactionSigned, }; /// Converts [`ExecutionPayloadV1`] to [`Block`] diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index f945aa446b5..d6c8f522cda 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -405,6 +405,7 @@ impl EthApiInner Stage for HeaderStage where P: HeaderSyncGapProvider, - D: HeaderDownloader
, + D: HeaderDownloader
, Provider: DBProvider + StaticFileProviderFactory, { /// Return the id of the stage @@ -441,7 +441,7 @@ mod tests { } } - impl + 'static> StageTestRunner + impl + 'static> StageTestRunner for HeadersTestRunner { type S = HeaderStage, D>; @@ -461,7 +461,7 @@ mod tests { } } - impl + 'static> ExecuteStageTestRunner + impl + 'static> ExecuteStageTestRunner for HeadersTestRunner { type Seed = Vec; @@ -539,7 +539,7 @@ mod tests { } } - impl + 'static> UnwindStageTestRunner + impl + 'static> UnwindStageTestRunner for HeadersTestRunner { fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> { diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index f827e48c8c3..9b8589cb6aa 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -25,6 +25,7 @@ reth-trie-common.workspace = true # ethereum alloy-primitives.workspace = true alloy-genesis.workspace = true +alloy-consensus.workspace = true # codecs modular-bitfield.workspace = true @@ -57,29 +58,27 @@ proptest-arbitrary-interop.workspace = true [features] test-utils = [ - "arbitrary", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-codecs/test-utils", - "reth-db-models/test-utils", - "reth-trie-common/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils" + "arbitrary", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", + "reth-db-models/test-utils", + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", ] arbitrary = [ - "reth-primitives/arbitrary", - "reth-db-models/arbitrary", - "dep:arbitrary", - "dep:proptest", - "reth-primitives-traits/arbitrary", - "reth-trie-common/arbitrary", - "alloy-primitives/arbitrary", - "parity-scale-codec/arbitrary", - "reth-codecs/arbitrary", - "reth-prune-types/arbitrary", - "reth-stages-types/arbitrary" -] -optimism = [ - "reth-primitives/optimism", - 
"reth-codecs/optimism" + "reth-primitives/arbitrary", + "reth-db-models/arbitrary", + "dep:arbitrary", + "dep:proptest", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "parity-scale-codec/arbitrary", + "reth-codecs/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary", + "alloy-consensus/arbitrary", ] +optimism = ["reth-primitives/optimism", "reth-codecs/optimism"] diff --git a/crates/storage/db-api/src/models/blocks.rs b/crates/storage/db-api/src/models/blocks.rs index 7268d82dd3c..0145ceb52b5 100644 --- a/crates/storage/db-api/src/models/blocks.rs +++ b/crates/storage/db-api/src/models/blocks.rs @@ -1,8 +1,8 @@ //! Block related models and types. +use alloy_consensus::Header; use alloy_primitives::B256; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::Header; use serde::{Deserialize, Serialize}; /// The storage representation of a block's ommers. diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index fc3351b73b6..00787194c71 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -4,12 +4,11 @@ use crate::{ table::{Compress, Decode, Decompress, Encode}, DatabaseError, }; +use alloy_consensus::Header; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::{ - Account, Bytecode, Header, Receipt, StorageEntry, TransactionSignedNoHash, TxType, -}; +use reth_primitives::{Account, Bytecode, Receipt, StorageEntry, TransactionSignedNoHash, TxType}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 324411613fc..6042b5faa81 100644 --- a/crates/storage/db/Cargo.toml +++ 
b/crates/storage/db/Cargo.toml @@ -26,6 +26,7 @@ reth-trie-common.workspace = true # ethereum alloy-primitives.workspace = true +alloy-consensus.workspace = true # mdbx reth-libmdbx = { workspace = true, optional = true, features = [ @@ -90,31 +91,29 @@ mdbx = [ "dep:rustc-hash", ] test-utils = [ - "dep:tempfile", - "arbitrary", - "parking_lot", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-db-api/test-utils", - "reth-nippy-jar/test-utils", - "reth-trie-common/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils" + "dep:tempfile", + "arbitrary", + "parking_lot", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-db-api/test-utils", + "reth-nippy-jar/test-utils", + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", ] bench = [] arbitrary = [ - "reth-primitives/arbitrary", - "reth-db-api/arbitrary", - "reth-primitives-traits/arbitrary", - "reth-trie-common/arbitrary", - "alloy-primitives/arbitrary", - "reth-prune-types/arbitrary", - "reth-stages-types/arbitrary" -] -optimism = [ - "reth-primitives/optimism", - "reth-db-api/optimism" + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary", + "alloy-consensus/arbitrary", ] +optimism = ["reth-primitives/optimism", "reth-db-api/optimism"] disable-lock = [] [[bench]] diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 78a3f7971da..10f3b228230 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -497,6 +497,7 @@ mod tests { test_utils::*, AccountChangeSets, }; + use alloy_consensus::Header; use alloy_primitives::{Address, B256, U256}; use reth_db_api::{ cursor::{DbDupCursorRO, DbDupCursorRW, 
ReverseWalker, Walker}, @@ -504,7 +505,7 @@ mod tests { table::{Encode, Table}, }; use reth_libmdbx::Error; - use reth_primitives::{Account, Header, StorageEntry}; + use reth_primitives::{Account, StorageEntry}; use reth_primitives_traits::IntegerList; use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use std::str::FromStr; diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index ac2811a44d7..405606389ba 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -4,9 +4,9 @@ use crate::{ static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask}, HeaderTerminalDifficulties, RawValue, Receipts, Transactions, }; +use alloy_consensus::Header; use alloy_primitives::BlockHash; use reth_db_api::table::Table; -use reth_primitives::Header; // HEADER MASKS add_static_file_mask!(HeaderMask, Header, 0b001); diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index cf7d23a1272..aafdf606bb3 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -19,6 +19,7 @@ pub use raw::{RawDupSort, RawKey, RawTable, RawValue, TableRawRow}; #[cfg(feature = "mdbx")] pub(crate) mod utils; +use alloy_consensus::Header; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use reth_db_api::{ models::{ @@ -30,7 +31,7 @@ use reth_db_api::{ }, table::{Decode, DupSort, Encode, Table}, }; -use reth_primitives::{Account, Bytecode, Header, Receipt, StorageEntry, TransactionSignedNoHash}; +use reth_primitives::{Account, Bytecode, Receipt, StorageEntry, TransactionSignedNoHash}; use reth_primitives_traits::IntegerList; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 04a0bf42908..399e3e000b9 100644 --- a/crates/storage/provider/Cargo.toml 
+++ b/crates/storage/provider/Cargo.toml @@ -38,6 +38,7 @@ reth-node-types.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true revm.workspace = true # optimism @@ -65,7 +66,6 @@ strum.workspace = true # test-utils reth-ethereum-engine-primitives = { workspace = true, optional = true } -alloy-consensus = { workspace = true, optional = true } # parallel utils rayon.workspace = true @@ -88,44 +88,43 @@ alloy-consensus.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-execution-types/optimism", - "reth-optimism-primitives", - "reth-codecs/optimism", - "reth-db/optimism", - "reth-db-api/optimism", - "revm/optimism" + "reth-primitives/optimism", + "reth-execution-types/optimism", + "reth-optimism-primitives", + "reth-codecs/optimism", + "reth-db/optimism", + "reth-db-api/optimism", + "revm/optimism", ] serde = [ - "reth-execution-types/serde", - "reth-trie-db/serde", - "reth-trie/serde", - "alloy-consensus?/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "alloy-rpc-types-engine/serde", - "dashmap/serde", - "notify/serde", - "parking_lot/serde", - "rand/serde", - "revm/serde", - "reth-codecs/serde" + "reth-execution-types/serde", + "reth-trie-db/serde", + "reth-trie/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "alloy-rpc-types-engine/serde", + "dashmap/serde", + "notify/serde", + "parking_lot/serde", + "rand/serde", + "revm/serde", + "reth-codecs/serde", ] test-utils = [ - "reth-db/test-utils", - "reth-nippy-jar/test-utils", - "reth-trie/test-utils", - "reth-chain-state/test-utils", - "reth-ethereum-engine-primitives", - "alloy-consensus", - "reth-chainspec/test-utils", - "reth-evm/test-utils", - "reth-network-p2p/test-utils", - "reth-primitives/test-utils", - "reth-codecs/test-utils", - "reth-db-api/test-utils", - "reth-trie-db/test-utils", - "revm/test-utils", - 
"reth-prune-types/test-utils", - "reth-stages-types/test-utils" + "reth-db/test-utils", + "reth-nippy-jar/test-utils", + "reth-trie/test-utils", + "reth-chain-state/test-utils", + "reth-ethereum-engine-primitives", + "reth-chainspec/test-utils", + "reth-evm/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", ] diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index dbfb4f7b872..0f0693471b0 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -9,6 +9,7 @@ use crate::{ StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, @@ -26,8 +27,8 @@ use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 0eb88c1f9ef..37c00be23be 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ 
b/crates/storage/provider/src/providers/consistent.rs @@ -6,6 +6,7 @@ use crate::{ StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, @@ -18,8 +19,8 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index bb532329ee3..b4d2e5e48b8 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -7,6 +7,7 @@ use crate::{ PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, @@ -20,7 +21,7 @@ use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, 
TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 9ba20306f37..eef0ff5b668 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -15,6 +15,7 @@ use crate::{ StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, @@ -42,9 +43,9 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; use reth_node_types::NodeTypes; use reth_primitives::{ - Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, - SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, - TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, + Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index d3dde5b0d3b..d1e1822d8c9 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -7,6 +7,7 @@ use crate::{ StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, }; +use 
alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, @@ -23,8 +24,8 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 8d1dbd117cf..9c303394ed2 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -6,13 +6,14 @@ use crate::{ to_range, BlockHashReader, BlockNumReader, HeaderProvider, ReceiptProvider, TransactionsProvider, }; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; use reth_db::static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}; use reth_db_api::models::CompactU256; use reth_primitives::{ - Header, Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index cb270a6da46..a5d4537245d 100644 --- 
a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -7,6 +7,7 @@ use crate::{ ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, @@ -33,7 +34,7 @@ use reth_primitives::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_stages_types::{PipelineTarget, StageId}; diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 52eb6ed666e..dd52adf52f8 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -56,7 +56,7 @@ impl Deref for LoadedJar { mod tests { use super::*; use crate::{test_utils::create_test_provider_factory, HeaderProvider}; - use alloy_consensus::Transaction; + use alloy_consensus::{Header, Transaction}; use alloy_primitives::{BlockHash, TxNumber, B256, U256}; use rand::seq::SliceRandom; use reth_db::{ @@ -66,7 +66,7 @@ mod tests { use reth_db_api::transaction::DbTxMut; use reth_primitives::{ static_file::{find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE}, - Header, Receipt, TransactionSignedNoHash, + Receipt, TransactionSignedNoHash, }; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; use reth_testing_utils::generators::{self, random_header_range}; diff --git a/crates/storage/provider/src/providers/static_file/writer.rs 
b/crates/storage/provider/src/providers/static_file/writer.rs index 8c31c021f21..ed1a51068c3 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -2,6 +2,7 @@ use super::{ manager::StaticFileProviderInner, metrics::StaticFileProviderMetrics, StaticFileProvider, }; use crate::providers::static_file::metrics::StaticFileProviderOperation; +use alloy_consensus::Header; use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256}; use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock, RwLock}; use reth_codecs::Compact; @@ -9,7 +10,7 @@ use reth_db_api::models::CompactU256; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; use reth_primitives::{ static_file::{SegmentHeader, SegmentRangeInclusive}, - Header, Receipt, StaticFileSegment, TransactionSignedNoHash, + Receipt, StaticFileSegment, TransactionSignedNoHash, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 9afc77ef701..3259eee2bfb 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -5,14 +5,15 @@ use alloy_primitives::{ b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, TxKind, B256, U256, }; +use alloy_consensus::Header; use alloy_eips::eip4895::{Withdrawal, Withdrawals}; use alloy_primitives::PrimitiveSignature as Signature; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_node_types::NodeTypes; use reth_primitives::{ - Account, BlockBody, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - Transaction, TransactionSigned, TxType, + Account, BlockBody, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + TransactionSigned, TxType, }; use reth_trie::root::{state_root_unhashed, 
storage_root_unhashed}; use revm::{db::BundleState, primitives::AccountInfo}; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 9bc75f53d18..9661ab2057c 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -5,7 +5,7 @@ use crate::{ ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_consensus::constants::EMPTY_ROOT_HASH; +use alloy_consensus::{constants::EMPTY_ROOT_HASH, Header}; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumberOrTag, @@ -23,7 +23,7 @@ use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypes; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, + Account, Block, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 7c3848b4a53..38fab0dc311 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -4,6 +4,7 @@ use std::{ sync::Arc, }; +use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumberOrTag, @@ -21,9 +22,8 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, + Account, Block, BlockWithSenders, Bytecode, Receipt, 
SealedBlock, SealedBlockWithSenders, + SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 6ca024b0a9f..0fbec6c1b88 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -3,6 +3,7 @@ use crate::{ writer::static_file::StaticFileWriter, BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, TrieWriter, }; +use alloy_consensus::Header; use alloy_primitives::{BlockNumber, B256, U256}; use reth_chain_state::ExecutedBlock; use reth_db::{ @@ -13,7 +14,7 @@ use reth_db::{ }; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{Header, SealedBlock, StaticFileSegment, TransactionSignedNoHash}; +use reth_primitives::{SealedBlock, StaticFileSegment, TransactionSignedNoHash}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index c78ec5f8b80..929f7ecca43 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -2,11 +2,12 @@ use crate::{ BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; use reth_primitives::{ - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, }; 
use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index 7202f51ddf1..c068f7c1d29 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,6 +1,7 @@ +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, U256}; -use reth_primitives::{Header, SealedHeader}; +use reth_primitives::SealedHeader; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; diff --git a/examples/custom-evm/Cargo.toml b/examples/custom-evm/Cargo.toml index 53563ab9575..e763a932eab 100644 --- a/examples/custom-evm/Cargo.toml +++ b/examples/custom-evm/Cargo.toml @@ -16,6 +16,7 @@ reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 16aad63c093..c564c5b28b6 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -2,6 +2,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_consensus::Header; use alloy_genesis::Genesis; use alloy_primitives::{address, Address, Bytes, U256}; use reth::{ @@ -35,7 +36,7 @@ use reth_node_ethereum::{ }; use reth_primitives::{ revm_primitives::{CfgEnvWithHandlerCfg, TxEnv}, - Header, TransactionSigned, + TransactionSigned, }; use reth_tracing::{RethTracer, Tracer}; use std::{convert::Infallible, sync::Arc}; diff --git a/examples/stateful-precompile/Cargo.toml b/examples/stateful-precompile/Cargo.toml index 47a784c36e1..478886d061f 100644 --- a/examples/stateful-precompile/Cargo.toml +++ b/examples/stateful-precompile/Cargo.toml @@ -15,6 +15,7 @@ reth-node-ethereum = { workspace = true, features = 
["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true eyre.workspace = true parking_lot.workspace = true diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index 371fbf4f78b..5be45ad7674 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -2,6 +2,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_consensus::Header; use alloy_genesis::Genesis; use alloy_primitives::{Address, Bytes, U256}; use parking_lot::RwLock; @@ -26,7 +27,7 @@ use reth_node_ethereum::{ }; use reth_primitives::{ revm_primitives::{SpecId, StatefulPrecompileMut}, - Header, TransactionSigned, + TransactionSigned, }; use reth_tracing::{RethTracer, Tracer}; use schnellru::{ByLength, LruMap}; diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index de46f62675c..2fc0c751244 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -38,6 +38,7 @@ revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } alloy-rlp.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-consensus.workspace = true walkdir = "2.3.3" serde.workspace = true diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 2b6b3baa81e..292b32e8ce0 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -1,6 +1,7 @@ //! 
Shared models for use crate::{assert::assert_equal, Error}; +use alloy_consensus::Header as RethHeader; use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{keccak256, Address, Bloom, Bytes, B256, B64, U256}; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; @@ -9,9 +10,7 @@ use reth_db_api::{ cursor::DbDupCursorRO, transaction::{DbTx, DbTxMut}, }; -use reth_primitives::{ - Account as RethAccount, Bytecode, Header as RethHeader, SealedHeader, StorageEntry, -}; +use reth_primitives::{Account as RethAccount, Bytecode, SealedHeader, StorageEntry}; use serde::Deserialize; use std::{collections::BTreeMap, ops::Deref}; diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 3457eb5f203..582298feab9 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,6 +1,6 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. -use alloy_consensus::{Transaction as _, TxLegacy}; +use alloy_consensus::{Header, Transaction as _, TxLegacy}; use alloy_eips::eip4895::{Withdrawal, Withdrawals}; use alloy_primitives::{Address, BlockNumber, Bytes, TxKind, B256, U256}; pub use rand::Rng; @@ -8,7 +8,7 @@ use rand::{ distributions::uniform::SampleRange, rngs::StdRng, seq::SliceRandom, thread_rng, SeedableRng, }; use reth_primitives::{ - proofs, sign_message, Account, BlockBody, Header, Log, Receipt, SealedBlock, SealedHeader, + proofs, sign_message, Account, BlockBody, Log, Receipt, SealedBlock, SealedHeader, StorageEntry, Transaction, TransactionSigned, }; use secp256k1::{Keypair, Secp256k1}; @@ -99,7 +99,7 @@ pub fn random_header_range( /// /// The header is assumed to not be correct if validated. 
pub fn random_header(rng: &mut R, number: u64, parent: Option) -> SealedHeader { - let header = reth_primitives::Header { + let header = alloy_consensus::Header { number, nonce: rng.gen(), difficulty: U256::from(rng.gen::()), From 7a1698c504c4ee6f5d06e74ae087730dbec0a3b0 Mon Sep 17 00:00:00 2001 From: Ayodeji Akinola Date: Wed, 13 Nov 2024 18:07:59 +0100 Subject: [PATCH 460/970] chore(utils): Util function for headers request (#12501) --- .../src/headers/reverse_headers.rs | 13 +++---- crates/net/p2p/src/full_block.rs | 6 +--- crates/net/p2p/src/headers/client.rs | 35 +++++++++++++++---- 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 125eef6d3eb..3960ae6e812 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -13,7 +13,7 @@ use reth_consensus::Consensus; use reth_network_p2p::{ error::{DownloadError, DownloadResult, PeerRequestResult}, headers::{ - client::{HeadersClient, HeadersDirection, HeadersRequest}, + client::{HeadersClient, HeadersRequest}, downloader::{validate_header_download, HeaderDownloader, SyncTarget}, error::{HeadersDownloaderError, HeadersDownloaderResult}, }, @@ -60,9 +60,10 @@ impl From for ReverseHeadersDownloaderError { /// tries to fill the gap between the local head of the node and the chain tip by issuing multiple /// requests at a time but yielding them in batches on [`Stream::poll_next`]. /// -/// **Note:** This downloader downloads in reverse, see also [`HeadersDirection::Falling`], this -/// means the batches of headers that this downloader yields will start at the chain tip and move -/// towards the local head: falling block numbers. 
+/// **Note:** This downloader downloads in reverse, see also +/// [`reth_network_p2p::headers::client::HeadersDirection`], this means the batches of headers that +/// this downloader yields will start at the chain tip and move towards the local head: falling +/// block numbers. #[must_use = "Stream does nothing unless polled"] #[derive(Debug)] pub struct ReverseHeadersDownloader { @@ -567,7 +568,7 @@ where /// Returns the request for the `sync_target` header. const fn get_sync_target_request(&self, start: BlockHashOrNumber) -> HeadersRequest { - HeadersRequest { start, limit: 1, direction: HeadersDirection::Falling } + HeadersRequest::falling(start, 1) } /// Starts a request future @@ -1216,7 +1217,7 @@ fn calc_next_request( let diff = next_request_block_number - local_head; let limit = diff.min(request_limit); let start = next_request_block_number; - HeadersRequest { start: start.into(), limit, direction: HeadersDirection::Falling } + HeadersRequest::falling(start.into(), limit) } #[cfg(test)] diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 8fcacd140b0..151a5bdd2a3 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -96,11 +96,7 @@ where start_hash: hash, count, request: FullBlockRangeRequest { - headers: Some(client.get_headers(HeadersRequest { - start: hash.into(), - limit: count, - direction: HeadersDirection::Falling, - })), + headers: Some(client.get_headers(HeadersRequest::falling(hash.into(), count))), bodies: None, }, client, diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index bb879784499..3e8f9296e07 100644 --- a/crates/net/p2p/src/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -21,6 +21,34 @@ pub struct HeadersRequest { pub direction: HeadersDirection, } +impl HeadersRequest { + /// Creates a request for a single header (direction doesn't matter). 
+ /// + /// # Arguments + /// * `start` - The block hash or number to start from + pub const fn one(start: BlockHashOrNumber) -> Self { + Self { direction: HeadersDirection::Rising, limit: 1, start } + } + + /// Creates a request for headers in rising direction (ascending block numbers). + /// + /// # Arguments + /// * `start` - The block hash or number to start from + /// * `limit` - Maximum number of headers to retrieve + pub const fn rising(start: BlockHashOrNumber, limit: u64) -> Self { + Self { direction: HeadersDirection::Rising, limit, start } + } + + /// Creates a request for headers in falling direction (descending block numbers). + /// + /// # Arguments + /// * `start` - The block hash or number to start from + /// * `limit` - Maximum number of headers to retrieve + pub const fn falling(start: BlockHashOrNumber, limit: u64) -> Self { + Self { direction: HeadersDirection::Falling, limit, start } + } +} + /// The headers future type pub type HeadersFut = Pin>> + Send + Sync>>; @@ -57,12 +85,7 @@ pub trait HeadersClient: DownloadClient { start: BlockHashOrNumber, priority: Priority, ) -> SingleHeaderRequest { - let req = HeadersRequest { - start, - limit: 1, - // doesn't matter for a single header - direction: HeadersDirection::Rising, - }; + let req = HeadersRequest::one(start); let fut = self.get_headers_with_priority(req, priority); SingleHeaderRequest { fut } } From 413d65139181b36c5133b7e74be942eab9404a01 Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Thu, 14 Nov 2024 00:11:32 +0700 Subject: [PATCH 461/970] chore: simplify import path (#12523) --- crates/storage/libmdbx-rs/src/txn_manager.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/txn_manager.rs b/crates/storage/libmdbx-rs/src/txn_manager.rs index 6afd4205a60..ae4a93724c4 100644 --- a/crates/storage/libmdbx-rs/src/txn_manager.rs +++ b/crates/storage/libmdbx-rs/src/txn_manager.rs @@ -5,7 +5,10 @@ 
use crate::{ }; use std::{ ptr, - sync::mpsc::{sync_channel, Receiver, SyncSender}, + sync::{ + mpsc::{sync_channel, Receiver, SyncSender}, + Arc, + }, }; #[derive(Copy, Clone, Debug)] @@ -28,7 +31,7 @@ pub(crate) enum TxnManagerMessage { pub(crate) struct TxnManager { sender: SyncSender, #[cfg(feature = "read-tx-timeouts")] - read_transactions: Option>, + read_transactions: Option>, } impl TxnManager { From 0d850e7f05ad7b68ced7035db43eee9128a6fc84 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Nov 2024 18:13:00 +0100 Subject: [PATCH 462/970] fix: consume all payload variants (#12520) Co-authored-by: Arsenii Kulikov --- crates/payload/basic/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 6f2038ba4b4..9b36e44b1fc 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -616,7 +616,9 @@ where if let Some(fut) = Pin::new(&mut this.maybe_better).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { this.maybe_better = None; - if let Ok(BuildOutcome::Better { payload, .. }) = res { + if let Ok(Some(payload)) = res.map(|out| out.into_payload()) + .inspect_err(|err| warn!(target: "payload_builder", %err, "failed to resolve pending payload")) + { debug!(target: "payload_builder", "resolving better payload"); return Poll::Ready(Ok(payload)) } @@ -767,7 +769,7 @@ impl BuildOutcome { /// Consumes the type and returns the payload if the outcome is `Better`. pub fn into_payload(self) -> Option { match self { - Self::Better { payload, .. } => Some(payload), + Self::Better { payload, .. 
} | Self::Freeze(payload) => Some(payload), _ => None, } } From c326708ffc14f1dae63419521884b0a90b3e037d Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 13 Nov 2024 21:38:30 +0400 Subject: [PATCH 463/970] feat: add simple kurtosis test for OP stack (#12506) --- .../assets/kurtosis_op_network_params.yaml | 15 +++ .github/workflows/kurtosis-op.yml | 119 ++++++++++++++++++ 2 files changed, 134 insertions(+) create mode 100644 .github/assets/kurtosis_op_network_params.yaml create mode 100644 .github/workflows/kurtosis-op.yml diff --git a/.github/assets/kurtosis_op_network_params.yaml b/.github/assets/kurtosis_op_network_params.yaml new file mode 100644 index 00000000000..0e1516cc889 --- /dev/null +++ b/.github/assets/kurtosis_op_network_params.yaml @@ -0,0 +1,15 @@ +ethereum_package: + participants: + - el_type: reth + cl_type: lighthouse +optimism_package: + chains: + - participants: + - el_type: op-geth + cl_type: op-node + - el_type: op-reth + el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" + cl_type: op-node + batcher_params: + extra_params: + - "--throttle-interval=0" diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml new file mode 100644 index 00000000000..2652992fca9 --- /dev/null +++ b/.github/workflows/kurtosis-op.yml @@ -0,0 +1,119 @@ +# Runs simple OP stack setup in Kurtosis + +name: kurtosis-op + +on: + workflow_dispatch: + schedule: + # every day + - cron: "0 1 * * *" + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + prepare-reth: + if: github.repository == 'paradigmxyz/reth' + timeout-minutes: 45 + runs-on: + group: Reth + steps: + - uses: actions/checkout@v4 + - run: mkdir artifacts + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build reth + run: | + cargo build --features optimism,asm-keccak --profile hivetests --bin op-reth 
--manifest-path crates/optimism/bin/Cargo.toml --locked + mkdir dist && cp ./target/hivetests/op-reth ./dist/reth + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Build and export reth image + uses: docker/build-push-action@v6 + with: + context: . + file: .github/assets/hive/Dockerfile + tags: ghcr.io/paradigmxyz/op-reth:kurtosis-ci + outputs: type=docker,dest=./artifacts/reth_image.tar + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Upload reth image + uses: actions/upload-artifact@v4 + with: + name: artifacts + path: ./artifacts + + test: + timeout-minutes: 60 + strategy: + fail-fast: false + name: run kurtosis + runs-on: + group: Reth + needs: + - prepare-reth + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download reth image + uses: actions/download-artifact@v4 + with: + name: artifacts + path: /tmp + + - name: Load Docker image + run: | + docker load -i /tmp/reth_image.tar & + wait + docker image ls -a + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Run kurtosis + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install kurtosis-cli + kurtosis engine start + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params.yaml + ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') + GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') + RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') + echo "GETH_RPC=http://127.0.0.1:$GETH_PORT" >> $GITHUB_ENV + echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV + + - name: Assert that clients advance + run: | + for i in {1..100}; do + 
sleep 5 + BLOCK_GETH=$(cast bn --rpc-url $GETH_RPC) + BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC) + + if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi + echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH" + done + exit 1 + + + notify-on-error: + needs: test + if: failure() + runs-on: + group: Reth + steps: + - name: Slack Webhook Action + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_COLOR: ${{ job.status }} + SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}" + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} From d505089960c3ea2c65812db05a35f696f3f9d339 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 13 Nov 2024 22:53:00 +0100 Subject: [PATCH 464/970] tx-pool: rm useless allow deprecated (#12526) --- crates/transaction-pool/src/traits.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 185c08c109a..0b3839a0436 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1,5 +1,3 @@ -#![allow(deprecated)] - use crate::{ blobstore::BlobStoreError, error::{InvalidPoolTransactionError, PoolResult}, From 457ac5f73fd29563a43f557542abcebf446a2ba3 Mon Sep 17 00:00:00 2001 From: leopardracer <136604165+leopardracer@users.noreply.github.com> Date: Thu, 14 Nov 2024 10:46:58 +0200 Subject: [PATCH 465/970] fix: typos in documentation files (#12528) --- book/run/pruning.md | 2 +- docs/crates/stages.md | 2 +- docs/repo/labels.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/book/run/pruning.md b/book/run/pruning.md index da3bb07e2cd..25d11b4e46e 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -18,7 +18,7 @@ the steps for running Reth as a full node, what caveats to expect and how to con - Full Node – Reth node that has the latest state and historical data for only the 
last 10064 blocks available for querying in the same way as an archive node. -The node type that was chosen when first [running a node](./run-a-node.md) **can not** be changed after +The node type that was chosen when first [running a node](./run-a-node.md) **cannot** be changed after the initial sync. Turning Archive into Pruned, or Pruned into Full is not supported. ## Modes diff --git a/docs/crates/stages.md b/docs/crates/stages.md index c7815b453b4..14666c1f44f 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -43,7 +43,7 @@ pub trait Stage: Send + Sync { } ``` -To get a better idea of what is happening at each part of the pipeline, lets walk through what is going on under the hood within the `execute()` function at each stage, starting with `HeaderStage`. +To get a better idea of what is happening at each part of the pipeline, let's walk through what is going on under the hood within the `execute()` function at each stage, starting with `HeaderStage`.
diff --git a/docs/repo/labels.md b/docs/repo/labels.md index 6b3dba97ee6..6772b828ffc 100644 --- a/docs/repo/labels.md +++ b/docs/repo/labels.md @@ -30,7 +30,7 @@ For easier at-a-glance communication of the status of issues and PRs the followi - https://github.com/paradigmxyz/reth/labels/S-duplicate - https://github.com/paradigmxyz/reth/labels/S-wontfix -**Misc.** +**Miscellaneous** - https://github.com/paradigmxyz/reth/labels/S-needs-triage - https://github.com/paradigmxyz/reth/labels/S-controversial From 68a6ada460c31c74f25d3b5418b8bc878edfe082 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 14 Nov 2024 09:47:56 +0100 Subject: [PATCH 466/970] tx-pool: add `PoolUpdateKind` for `CanonicalStateUpdate` (#12525) --- crates/transaction-pool/src/maintain.rs | 4 +++- crates/transaction-pool/src/pool/mod.rs | 5 ++++- crates/transaction-pool/src/pool/txpool.rs | 9 ++++++++- crates/transaction-pool/src/traits.rs | 13 ++++++++++++- 4 files changed, 27 insertions(+), 4 deletions(-) diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 91b91fe8157..271c63a388a 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -5,7 +5,7 @@ use crate::{ error::PoolError, metrics::MaintainPoolMetrics, traits::{CanonicalStateUpdate, TransactionPool, TransactionPoolExt}, - BlockInfo, PoolTransaction, + BlockInfo, PoolTransaction, PoolUpdateKind, }; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{Address, BlockHash, BlockNumber}; @@ -352,6 +352,7 @@ pub async fn maintain_transaction_pool( changed_accounts, // all transactions mined in the new chain need to be removed from the pool mined_transactions: new_blocks.transaction_hashes().collect(), + update_kind: PoolUpdateKind::Reorg, }; pool.on_canonical_state_change(update); @@ -434,6 +435,7 @@ pub async fn maintain_transaction_pool( pending_block_blob_fee, changed_accounts, 
mined_transactions, + update_kind: PoolUpdateKind::Commit, }; pool.on_canonical_state_change(update); diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 76b2490b12f..78cc790e942 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -394,7 +394,9 @@ where trace!(target: "txpool", ?update, "updating pool on canonical state change"); let block_info = update.block_info(); - let CanonicalStateUpdate { new_tip, changed_accounts, mined_transactions, .. } = update; + let CanonicalStateUpdate { + new_tip, changed_accounts, mined_transactions, update_kind, .. + } = update; self.validator.on_new_head_block(new_tip); let changed_senders = self.changed_senders(changed_accounts.into_iter()); @@ -404,6 +406,7 @@ where block_info, mined_transactions, changed_senders, + update_kind, ); // This will discard outdated transactions based on the account's nonce diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 1d35f742ab6..3d72d6a9f15 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -15,7 +15,7 @@ use crate::{ AddedPendingTransaction, AddedTransaction, OnNewCanonicalStateOutcome, }, traits::{BestTransactionsAttributes, BlockInfo, PoolSize}, - PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, + PoolConfig, PoolResult, PoolTransaction, PoolUpdateKind, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; use alloy_consensus::constants::{ @@ -76,6 +76,8 @@ pub struct TxPool { all_transactions: AllTransactions, /// Transaction pool metrics metrics: TxPoolMetrics, + /// The last update kind that was applied to the pool. 
+ latest_update_kind: Option, } // === impl TxPool === @@ -92,6 +94,7 @@ impl TxPool { all_transactions: AllTransactions::new(&config), config, metrics: Default::default(), + latest_update_kind: None, } } @@ -479,6 +482,7 @@ impl TxPool { block_info: BlockInfo, mined_transactions: Vec, changed_senders: HashMap, + update_kind: PoolUpdateKind, ) -> OnNewCanonicalStateOutcome { // update block info let block_hash = block_info.last_seen_block_hash; @@ -497,6 +501,9 @@ impl TxPool { self.update_transaction_type_metrics(); self.metrics.performed_state_updates.increment(1); + // Update the latest update kind + self.latest_update_kind = Some(update_kind); + OnNewCanonicalStateOutcome { block_hash, mined: mined_transactions, promoted, discarded } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 0b3839a0436..9b4cccfb9d1 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -483,7 +483,7 @@ pub trait TransactionPoolExt: TransactionPool { /// /// ## Fee changes /// - /// The [CanonicalStateUpdate] includes the base and blob fee of the pending block, which + /// The [`CanonicalStateUpdate`] includes the base and blob fee of the pending block, which /// affects the dynamic fee requirement of pending transactions in the pool. /// /// ## EIP-4844 Blob transactions @@ -669,6 +669,15 @@ impl TransactionOrigin { } } +/// Represents the kind of update to the canonical state. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PoolUpdateKind { + /// The update was due to a block commit. + Commit, + /// The update was due to a reorganization. + Reorg, +} + /// Represents changes after a new canonical block or range of canonical blocks was added to the /// chain. /// @@ -693,6 +702,8 @@ pub struct CanonicalStateUpdate<'a> { pub changed_accounts: Vec, /// All mined transactions in the block range. pub mined_transactions: Vec, + /// The kind of update to the canonical state. 
+ pub update_kind: PoolUpdateKind, } impl CanonicalStateUpdate<'_> { From 7bd7c37b13960cb5c04b46ec678234596db299d6 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 14 Nov 2024 05:01:23 -0600 Subject: [PATCH 467/970] feat: display warning for op-mainnet launch without pre-Bedrock state (#11765) Co-authored-by: Matthias Seitz --- crates/node/builder/src/launch/common.rs | 25 +++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index f9106296323..7fafa9e5eac 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -40,9 +40,9 @@ use reth_node_metrics::{ use reth_primitives::Head; use reth_provider::{ providers::{BlockchainProvider, BlockchainProvider2, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, CanonStateNotificationSender, ChainSpecProvider, ProviderFactory, - ProviderResult, StageCheckpointReader, StateProviderFactory, StaticFileProviderFactory, - TreeViewer, + BlockHashReader, BlockNumReader, CanonStateNotificationSender, ChainSpecProvider, + ProviderError, ProviderFactory, ProviderResult, StageCheckpointReader, StateProviderFactory, + StaticFileProviderFactory, TreeViewer, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_api::clients::EthApiClient; @@ -814,6 +814,23 @@ where self.node_config().debug.terminate || self.node_config().debug.max_block.is_some() } + /// Ensures that the database matches chain-specific requirements. 
+ /// + /// This checks for OP-Mainnet and ensures we have all the necessary data to progress (past + /// bedrock height) + fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { + if self.chain_id() == Chain::optimism_mainnet() { + let latest = self.blockchain_db().last_block_number()?; + // bedrock height + if latest < 105235063 { + error!("Op-mainnet has been launched without importing the pre-Bedrock state. The chain can't progress without this. See also https://reth.rs/run/sync-op-mainnet.html?minimal-bootstrap-recommended"); + return Err(ProviderError::BestBlockNotFound) + } + } + + Ok(()) + } + /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less /// than the checkpoint of the first stage). /// @@ -857,6 +874,8 @@ where } } + self.ensure_chain_specific_db_checks()?; + Ok(None) } From 5c655e44f658db8662fb5c75ce4d7ccbaf5172e7 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 14 Nov 2024 06:32:29 -0600 Subject: [PATCH 468/970] introduce standalone estimate gas type (#12344) Co-authored-by: Matthias Seitz --- crates/optimism/rpc/src/eth/call.rs | 12 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 345 +---------------- .../rpc/rpc-eth-api/src/helpers/estimate.rs | 356 ++++++++++++++++++ crates/rpc/rpc-eth-api/src/helpers/mod.rs | 1 + .../rpc-eth-api/src/helpers/transaction.rs | 13 +- crates/rpc/rpc/src/eth/helpers/call.rs | 14 +- 6 files changed, 391 insertions(+), 350 deletions(-) create mode 100644 crates/rpc/rpc-eth-api/src/helpers/estimate.rs diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index a76c25916f3..9b19c488889 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -4,7 +4,7 @@ use alloy_rpc_types_eth::transaction::TransactionRequest; use reth_evm::ConfigureEvm; use reth_primitives::revm_primitives::{BlockEnv, OptimismFields, TxEnv}; use reth_rpc_eth_api::{ - 
helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, + helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, FromEthApiError, IntoEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{revm_utils::CallFees, RpcInvalidTransactionError}; @@ -13,7 +13,15 @@ use crate::{OpEthApi, OpEthApiError}; impl EthCall for OpEthApi where - Self: Call + LoadPendingBlock, + Self: EstimateCall + LoadPendingBlock, + N: RpcNodeCore, +{ +} + +impl EstimateCall for OpEthApi +where + Self: Call, + Self::Error: From, N: RpcNodeCore, { } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 6a5506ad2ad..e45590d4264 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -1,9 +1,10 @@ //! Loads a pending block from database. Helper trait for `eth_` transaction, call and trace RPC //! methods. +use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; use crate::{ - AsEthApiError, FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcBlock, - RpcNodeCore, + helpers::estimate::EstimateCall, FromEthApiError, FromEvmError, FullEthApiTypes, + IntoEthApiError, RpcBlock, RpcNodeCore, }; use alloy_consensus::{BlockHeader, Header}; use alloy_eips::{eip1559::calc_next_block_base_fee, eip2930::AccessListResult}; @@ -15,16 +16,15 @@ use alloy_rpc_types_eth::{ BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, }; use futures::Future; -use reth_chainspec::{EthChainSpec, MIN_TRANSACTION_GAS}; +use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ revm_primitives::{ - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, HaltReason, - ResultAndState, TransactTo, TxEnv, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, ResultAndState, TxEnv, }, TransactionSigned, }; -use reth_provider::{BlockIdReader, 
ChainSpecProvider, HeaderProvider, StateProvider}; +use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; use reth_rpc_eth_types::{ cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ -36,19 +36,16 @@ use reth_rpc_eth_types::{ simulate::{self, EthSimulateError}, EthApiError, RevertError, RpcInvalidTransactionError, StateCacheDb, }; -use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; use revm::{Database, DatabaseCommit, GetInspector}; use revm_inspectors::{access_list::AccessListInspector, transfer::TransferInspector}; use tracing::trace; -use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; - /// Result type for `eth_simulateV1` RPC method. pub type SimulatedBlocksResult = Result>>, E>; /// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in /// the `eth_` namespace. -pub trait EthCall: Call + LoadPendingBlock { +pub trait EthCall: EstimateCall + Call + LoadPendingBlock { /// Estimate gas needed for execution of the `request` at the [`BlockId`]. fn estimate_gas_at( &self, @@ -56,7 +53,7 @@ pub trait EthCall: Call + LoadPendingBlock { at: BlockId, state_override: Option, ) -> impl Future> + Send { - Call::estimate_gas_at(self, request, at, state_override) + EstimateCall::estimate_gas_at(self, request, at, state_override) } /// `eth_simulateV1` executes an arbitrary number of transactions on top of the requested state. @@ -683,284 +680,6 @@ pub trait Call: LoadState> + SpawnBlocking { Ok(index) } - /// Estimate gas needed for execution of the `request` at the [`BlockId`]. 
- fn estimate_gas_at( - &self, - request: TransactionRequest, - at: BlockId, - state_override: Option, - ) -> impl Future> + Send - where - Self: LoadPendingBlock, - { - async move { - let (cfg, block_env, at) = self.evm_env_at(at).await?; - - self.spawn_blocking_io(move |this| { - let state = this.state_at_block_id(at)?; - this.estimate_gas_with(cfg, block_env, request, state, state_override) - }) - .await - } - } - - /// Estimates the gas usage of the `request` with the state. - /// - /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search. - /// - /// ## EVM settings - /// - /// This modifies certain EVM settings to mirror geth's `SkipAccountChecks` when transacting requests, see also: : - /// - /// - `disable_eip3607` is set to `true` - /// - `disable_base_fee` is set to `true` - /// - `nonce` is set to `None` - fn estimate_gas_with( - &self, - mut cfg: CfgEnvWithHandlerCfg, - block: BlockEnv, - mut request: TransactionRequest, - state: S, - state_override: Option, - ) -> Result - where - S: StateProvider, - { - // Disabled because eth_estimateGas is sometimes used with eoa senders - // See - cfg.disable_eip3607 = true; - - // The basefee should be ignored for eth_estimateGas and similar - // See: - // - cfg.disable_base_fee = true; - - // set nonce to None so that the correct nonce is chosen by the EVM - request.nonce = None; - - // Keep a copy of gas related request values - let tx_request_gas_limit = request.gas; - let tx_request_gas_price = request.gas_price; - // the gas limit of the corresponding block - let block_env_gas_limit = block.gas_limit; - - // Determine the highest possible gas limit, considering both the request's specified limit - // and the block's limit. 
- let mut highest_gas_limit = tx_request_gas_limit - .map(|tx_gas_limit| U256::from(tx_gas_limit).max(block_env_gas_limit)) - .unwrap_or(block_env_gas_limit); - - // Configure the evm env - let mut env = self.build_call_evm_env(cfg, block, request)?; - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - - // Apply any state overrides if specified. - if let Some(state_override) = state_override { - apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?; - } - - // Optimize for simple transfer transactions, potentially reducing the gas estimate. - if env.tx.data.is_empty() { - if let TransactTo::Call(to) = env.tx.transact_to { - if let Ok(code) = db.db.account_code(to) { - let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true); - if no_code_callee { - // If the tx is a simple transfer (call to an account with no code) we can - // shortcircuit. But simply returning - // `MIN_TRANSACTION_GAS` is dangerous because there might be additional - // field combos that bump the price up, so we try executing the function - // with the minimum gas limit to make sure. - let mut env = env.clone(); - env.tx.gas_limit = MIN_TRANSACTION_GAS; - if let Ok((res, _)) = self.transact(&mut db, env) { - if res.result.is_success() { - return Ok(U256::from(MIN_TRANSACTION_GAS)) - } - } - } - } - } - } - - // Check funds of the sender (only useful to check if transaction gas price is more than 0). 
- // - // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` - if env.tx.gas_price > U256::ZERO { - // cap the highest gas limit by max gas caller can afford with given gas price - highest_gas_limit = highest_gas_limit - .min(caller_gas_allowance(&mut db, &env.tx).map_err(Self::Error::from_eth_err)?); - } - - // We can now normalize the highest gas limit to a u64 - let mut highest_gas_limit: u64 = highest_gas_limit - .try_into() - .unwrap_or_else(|_| self.provider().chain_spec().max_gas_limit()); - - // If the provided gas limit is less than computed cap, use that - env.tx.gas_limit = env.tx.gas_limit.min(highest_gas_limit); - - trace!(target: "rpc::eth::estimate", ?env, "Starting gas estimation"); - - // Execute the transaction with the highest possible gas limit. - let (mut res, mut env) = match self.transact(&mut db, env.clone()) { - // Handle the exceptional case where the transaction initialization uses too much gas. - // If the gas price or gas limit was specified in the request, retry the transaction - // with the block's gas limit to determine if the failure was due to - // insufficient gas. - Err(err) - if err.is_gas_too_high() && - (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => - { - return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) - } - // Propagate other results (successful or other errors). - ethres => ethres?, - }; - - let gas_refund = match res.result { - ExecutionResult::Success { gas_refunded, .. } => gas_refunded, - ExecutionResult::Halt { reason, gas_used } => { - // here we don't check for invalid opcode because already executed with highest gas - // limit - return Err(RpcInvalidTransactionError::halt(reason, gas_used).into_eth_err()) - } - ExecutionResult::Revert { output, .. 
} => { - // if price or limit was included in the request then we can execute the request - // again with the block's gas limit to check if revert is gas related or not - return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() { - Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) - } else { - // the transaction did revert - Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) - } - } - }; - - // At this point we know the call succeeded but want to find the _best_ (lowest) gas the - // transaction succeeds with. We find this by doing a binary search over the possible range. - - // we know the tx succeeded with the configured gas limit, so we can use that as the - // highest, in case we applied a gas cap due to caller allowance above - highest_gas_limit = env.tx.gas_limit; - - // NOTE: this is the gas the transaction used, which is less than the - // transaction requires to succeed. - let mut gas_used = res.result.gas_used(); - // the lowest value is capped by the gas used by the unconstrained transaction - let mut lowest_gas_limit = gas_used.saturating_sub(1); - - // As stated in Geth, there is a good chance that the transaction will pass if we set the - // gas limit to the execution gas used plus the gas refund, so we check this first - // 1 { - // An estimation error is allowed once the current gas limit range used in the binary - // search is small enough (less than 1.5% of the highest gas limit) - // { - // Decrease the highest gas limit if gas is too high - highest_gas_limit = mid_gas_limit; - } - Err(err) if err.is_gas_too_low() => { - // Increase the lowest gas limit if gas is too low - lowest_gas_limit = mid_gas_limit; - } - // Handle other cases, including successful transactions. - ethres => { - // Unpack the result and environment if the transaction was successful. - (res, env) = ethres?; - // Update the estimated gas range based on the transaction result. 
- update_estimated_gas_range( - res.result, - mid_gas_limit, - &mut highest_gas_limit, - &mut lowest_gas_limit, - )?; - } - } - - // New midpoint - mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64; - } - - Ok(U256::from(highest_gas_limit)) - } - - /// Executes the requests again after an out of gas error to check if the error is gas related - /// or not - #[inline] - fn map_out_of_gas_err( - &self, - env_gas_limit: U256, - mut env: EnvWithHandlerCfg, - db: &mut DB, - ) -> Self::Error - where - DB: Database, - EthApiError: From, - { - let req_gas_limit = env.tx.gas_limit; - env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); - let (res, _) = match self.transact(db, env) { - Ok(res) => res, - Err(err) => return err, - }; - match res.result { - ExecutionResult::Success { .. } => { - // transaction succeeded by manually increasing the gas limit to - // highest, which means the caller lacks funds to pay for the tx - RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err() - } - ExecutionResult::Revert { output, .. } => { - // reverted again after bumping the limit - RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err() - } - ExecutionResult::Halt { reason, .. } => { - RpcInvalidTransactionError::EvmHalt(reason).into_eth_err() - } - } - } - /// Configures a new [`TxEnv`] for the [`TransactionRequest`] /// /// All [`TxEnv`] fields are derived from the given [`TransactionRequest`], if fields are @@ -1125,51 +844,3 @@ pub trait Call: LoadState> + SpawnBlocking { Ok(env) } } - -/// Updates the highest and lowest gas limits for binary search based on the execution result. -/// -/// This function refines the gas limit estimates used in a binary search to find the optimal -/// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on -/// whether the execution succeeded, reverted, or halted due to specific reasons. 
-#[inline] -fn update_estimated_gas_range( - result: ExecutionResult, - tx_gas_limit: u64, - highest_gas_limit: &mut u64, - lowest_gas_limit: &mut u64, -) -> Result<(), EthApiError> { - match result { - ExecutionResult::Success { .. } => { - // Cap the highest gas limit with the succeeding gas limit. - *highest_gas_limit = tx_gas_limit; - } - ExecutionResult::Revert { .. } => { - // Increase the lowest gas limit. - *lowest_gas_limit = tx_gas_limit; - } - ExecutionResult::Halt { reason, .. } => { - match reason { - HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { - // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas - // left is too low. Treat this as an out of gas - // condition, knowing that the call succeeds with a - // higher gas limit. - // - // Common usage of invalid opcode in OpenZeppelin: - // - - // Increase the lowest gas limit. - *lowest_gas_limit = tx_gas_limit; - } - err => { - // These cases should be unreachable because we know the transaction - // succeeds, but if they occur, treat them as an - // error. - return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) - } - } - } - }; - - Ok(()) -} diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs new file mode 100644 index 00000000000..37a68577fb0 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -0,0 +1,356 @@ +//! 
Estimate gas needed implementation + +use super::{Call, LoadPendingBlock}; +use crate::{AsEthApiError, FromEthApiError, IntoEthApiError}; +use alloy_primitives::U256; +use alloy_rpc_types_eth::{state::StateOverride, transaction::TransactionRequest, BlockId}; +use futures::Future; +use reth_chainspec::{EthChainSpec, MIN_TRANSACTION_GAS}; +use reth_primitives::revm_primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, ExecutionResult, HaltReason, TransactTo, +}; +use reth_provider::{ChainSpecProvider, StateProvider}; +use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_rpc_eth_types::{ + revm_utils::{apply_state_overrides, caller_gas_allowance}, + EthApiError, RevertError, RpcInvalidTransactionError, +}; +use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; +use revm_primitives::{db::Database, EnvWithHandlerCfg}; +use tracing::trace; + +/// Gas execution estimates +pub trait EstimateCall: Call { + /// Estimates the gas usage of the `request` with the state. + /// + /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search. 
+ /// + /// ## EVM settings + /// + /// This modifies certain EVM settings to mirror geth's `SkipAccountChecks` when transacting requests, see also: : + /// + /// - `disable_eip3607` is set to `true` + /// - `disable_base_fee` is set to `true` + /// - `nonce` is set to `None` + fn estimate_gas_with( + &self, + mut cfg: CfgEnvWithHandlerCfg, + block: BlockEnv, + mut request: TransactionRequest, + state: S, + state_override: Option, + ) -> Result + where + S: StateProvider, + { + // Disabled because eth_estimateGas is sometimes used with eoa senders + // See + cfg.disable_eip3607 = true; + + // The basefee should be ignored for eth_estimateGas and similar + // See: + // + cfg.disable_base_fee = true; + + // set nonce to None so that the correct nonce is chosen by the EVM + request.nonce = None; + + // Keep a copy of gas related request values + let tx_request_gas_limit = request.gas; + let tx_request_gas_price = request.gas_price; + // the gas limit of the corresponding block + let block_env_gas_limit = block.gas_limit; + + // Determine the highest possible gas limit, considering both the request's specified limit + // and the block's limit. + let mut highest_gas_limit = tx_request_gas_limit + .map(|tx_gas_limit| U256::from(tx_gas_limit).max(block_env_gas_limit)) + .unwrap_or(block_env_gas_limit); + + // Configure the evm env + let mut env = self.build_call_evm_env(cfg, block, request)?; + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // Apply any state overrides if specified. + if let Some(state_override) = state_override { + apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?; + } + + // Optimize for simple transfer transactions, potentially reducing the gas estimate. 
+ if env.tx.data.is_empty() { + if let TransactTo::Call(to) = env.tx.transact_to { + if let Ok(code) = db.db.account_code(to) { + let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true); + if no_code_callee { + // If the tx is a simple transfer (call to an account with no code) we can + // shortcircuit. But simply returning + // `MIN_TRANSACTION_GAS` is dangerous because there might be additional + // field combos that bump the price up, so we try executing the function + // with the minimum gas limit to make sure. + let mut env = env.clone(); + env.tx.gas_limit = MIN_TRANSACTION_GAS; + if let Ok((res, _)) = self.transact(&mut db, env) { + if res.result.is_success() { + return Ok(U256::from(MIN_TRANSACTION_GAS)) + } + } + } + } + } + } + + // Check funds of the sender (only useful to check if transaction gas price is more than 0). + // + // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` + if env.tx.gas_price > U256::ZERO { + // cap the highest gas limit by max gas caller can afford with given gas price + highest_gas_limit = highest_gas_limit + .min(caller_gas_allowance(&mut db, &env.tx).map_err(Self::Error::from_eth_err)?); + } + + // We can now normalize the highest gas limit to a u64 + let mut highest_gas_limit: u64 = highest_gas_limit + .try_into() + .unwrap_or_else(|_| self.provider().chain_spec().max_gas_limit()); + + // If the provided gas limit is less than computed cap, use that + env.tx.gas_limit = env.tx.gas_limit.min(highest_gas_limit); + + trace!(target: "rpc::eth::estimate", ?env, "Starting gas estimation"); + + // Execute the transaction with the highest possible gas limit. + let (mut res, mut env) = match self.transact(&mut db, env.clone()) { + // Handle the exceptional case where the transaction initialization uses too much gas. 
+ // If the gas price or gas limit was specified in the request, retry the transaction + // with the block's gas limit to determine if the failure was due to + // insufficient gas. + Err(err) + if err.is_gas_too_high() && + (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => + { + return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) + } + // Propagate other results (successful or other errors). + ethres => ethres?, + }; + + let gas_refund = match res.result { + ExecutionResult::Success { gas_refunded, .. } => gas_refunded, + ExecutionResult::Halt { reason, gas_used } => { + // here we don't check for invalid opcode because already executed with highest gas + // limit + return Err(RpcInvalidTransactionError::halt(reason, gas_used).into_eth_err()) + } + ExecutionResult::Revert { output, .. } => { + // if price or limit was included in the request then we can execute the request + // again with the block's gas limit to check if revert is gas related or not + return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() { + Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) + } else { + // the transaction did revert + Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) + } + } + }; + + // At this point we know the call succeeded but want to find the _best_ (lowest) gas the + // transaction succeeds with. We find this by doing a binary search over the possible range. + + // we know the tx succeeded with the configured gas limit, so we can use that as the + // highest, in case we applied a gas cap due to caller allowance above + highest_gas_limit = env.tx.gas_limit; + + // NOTE: this is the gas the transaction used, which is less than the + // transaction requires to succeed. 
+ let mut gas_used = res.result.gas_used(); + // the lowest value is capped by the gas used by the unconstrained transaction + let mut lowest_gas_limit = gas_used.saturating_sub(1); + + // As stated in Geth, there is a good chance that the transaction will pass if we set the + // gas limit to the execution gas used plus the gas refund, so we check this first + // 1 { + // An estimation error is allowed once the current gas limit range used in the binary + // search is small enough (less than 1.5% of the highest gas limit) + // { + // Decrease the highest gas limit if gas is too high + highest_gas_limit = mid_gas_limit; + } + Err(err) if err.is_gas_too_low() => { + // Increase the lowest gas limit if gas is too low + lowest_gas_limit = mid_gas_limit; + } + // Handle other cases, including successful transactions. + ethres => { + // Unpack the result and environment if the transaction was successful. + (res, env) = ethres?; + // Update the estimated gas range based on the transaction result. + update_estimated_gas_range( + res.result, + mid_gas_limit, + &mut highest_gas_limit, + &mut lowest_gas_limit, + )?; + } + } + + // New midpoint + mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64; + } + + Ok(U256::from(highest_gas_limit)) + } + + /// Estimate gas needed for execution of the `request` at the [`BlockId`]. 
+ fn estimate_gas_at( + &self, + request: TransactionRequest, + at: BlockId, + state_override: Option, + ) -> impl Future> + Send + where + Self: LoadPendingBlock, + { + async move { + let (cfg, block_env, at) = self.evm_env_at(at).await?; + + self.spawn_blocking_io(move |this| { + let state = this.state_at_block_id(at)?; + EstimateCall::estimate_gas_with( + &this, + cfg, + block_env, + request, + state, + state_override, + ) + }) + .await + } + } + + /// Executes the requests again after an out of gas error to check if the error is gas related + /// or not + #[inline] + fn map_out_of_gas_err( + &self, + env_gas_limit: U256, + mut env: EnvWithHandlerCfg, + db: &mut DB, + ) -> Self::Error + where + DB: Database, + EthApiError: From, + { + let req_gas_limit = env.tx.gas_limit; + env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); + let (res, _) = match self.transact(db, env) { + Ok(res) => res, + Err(err) => return err, + }; + match res.result { + ExecutionResult::Success { .. } => { + // transaction succeeded by manually increasing the gas limit to + // highest, which means the caller lacks funds to pay for the tx + RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err() + } + ExecutionResult::Revert { output, .. } => { + // reverted again after bumping the limit + RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err() + } + ExecutionResult::Halt { reason, .. } => { + RpcInvalidTransactionError::EvmHalt(reason).into_eth_err() + } + } + } +} + +/// Updates the highest and lowest gas limits for binary search based on the execution result. +/// +/// This function refines the gas limit estimates used in a binary search to find the optimal +/// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on +/// whether the execution succeeded, reverted, or halted due to specific reasons. 
+#[inline] +pub fn update_estimated_gas_range( + result: ExecutionResult, + tx_gas_limit: u64, + highest_gas_limit: &mut u64, + lowest_gas_limit: &mut u64, +) -> Result<(), EthApiError> { + match result { + ExecutionResult::Success { .. } => { + // Cap the highest gas limit with the succeeding gas limit. + *highest_gas_limit = tx_gas_limit; + } + ExecutionResult::Revert { .. } => { + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + ExecutionResult::Halt { reason, .. } => { + match reason { + HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { + // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas + // left is too low. Treat this as an out of gas + // condition, knowing that the call succeeds with a + // higher gas limit. + // + // Common usage of invalid opcode in OpenZeppelin: + // + + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + err => { + // These cases should be unreachable because we know the transaction + // succeeds, but if they occur, treat them as an + // error. 
+ return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) + } + } + } + }; + + Ok(()) +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index a881330b045..174cb3bad04 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -17,6 +17,7 @@ pub mod block; pub mod blocking_task; pub mod call; +pub mod estimate; pub mod fee; pub mod pending_block; pub mod receipt; diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index e041b8c4605..afe1c513b69 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -18,13 +18,12 @@ use reth_rpc_types_compat::transaction::{from_recovered, from_recovered_with_blo use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use std::sync::Arc; -use crate::{ - FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, - RpcTransaction, -}; - use super::{ - Call, EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking, + EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking, +}; +use crate::{ + helpers::estimate::EstimateCall, FromEthApiError, FullEthApiTypes, IntoEthApiError, + RpcNodeCore, RpcNodeCoreExt, RpcReceipt, RpcTransaction, }; /// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in @@ -348,7 +347,7 @@ pub trait EthTransactions: LoadTransaction { mut request: TransactionRequest, ) -> impl Future> + Send where - Self: EthApiSpec + LoadBlock + LoadPendingBlock + Call, + Self: EthApiSpec + LoadBlock + LoadPendingBlock + EstimateCall, { async move { let from = match request.from { diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 1c1e35a5df8..c0594c023fa 100644 --- 
a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,13 +1,14 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. +use crate::EthApi; use alloy_consensus::Header; use reth_evm::ConfigureEvm; -use reth_rpc_eth_api::helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}; - -use crate::EthApi; +use reth_rpc_eth_api::helpers::{ + estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking, +}; impl EthCall for EthApi where - Self: Call + LoadPendingBlock + Self: EstimateCall + LoadPendingBlock { } @@ -26,3 +27,8 @@ where self.inner.max_simulate_blocks() } } + +impl EstimateCall for EthApi where + Self: Call +{ +} From ff6b78a3627a0fb03da5ab2093672618cd3b70b1 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 14 Nov 2024 17:50:32 +0400 Subject: [PATCH 469/970] feat: move body writing to `BlockWriter` trait (#12538) --- crates/net/p2p/src/bodies/response.rs | 8 +++ crates/stages/stages/src/stages/bodies.rs | 72 +++++++------------ crates/storage/db-models/src/blocks.rs | 2 +- .../src/providers/database/provider.rs | 46 +++++++++++- crates/storage/provider/src/traits/block.rs | 12 +++- 5 files changed, 89 insertions(+), 51 deletions(-) diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 153d7d39d4e..11aaab17a30 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -32,6 +32,14 @@ impl BlockResponse { Self::Empty(header) => header.difficulty, } } + + /// Return the reference to the response body + pub fn into_body(self) -> Option { + match self { + Self::Full(block) => Some(block.body), + Self::Empty(_) => None, + } + } } impl InMemorySize for BlockResponse { diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index eae61b088fd..c4676b2728c 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ 
b/crates/stages/stages/src/stages/bodies.rs @@ -7,17 +7,16 @@ use futures_util::TryStreamExt; use tracing::*; use alloy_primitives::TxNumber; -use reth_db::tables; +use reth_db::{tables, transaction::DbTx}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, - models::{StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals}, transaction::DbTxMut, }; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, + BlockReader, BlockWriter, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, }; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, @@ -72,7 +71,11 @@ impl BodyStage { impl Stage for BodyStage where - Provider: DBProvider + StaticFileProviderFactory + StatsReader + BlockReader, + Provider: DBProvider + + StaticFileProviderFactory + + StatsReader + + BlockReader + + BlockWriter, D: BodyDownloader, { /// Return the id of the stage @@ -116,15 +119,13 @@ where } let (from_block, to_block) = input.next_block_range().into_inner(); - // Cursors used to write bodies, ommers and transactions - let tx = provider.tx_ref(); - let mut block_indices_cursor = tx.cursor_write::()?; - let mut tx_block_cursor = tx.cursor_write::()?; - let mut ommers_cursor = tx.cursor_write::()?; - let mut withdrawals_cursor = tx.cursor_write::()?; - // Get id for the next tx_num of zero if there are no transactions. - let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); + let mut next_tx_num = provider + .tx_ref() + .cursor_read::()? + .last()? 
+ .map(|(id, _)| id + 1) + .unwrap_or_default(); let static_file_provider = provider.static_file_provider(); let mut static_file_producer = @@ -166,17 +167,10 @@ where let buffer = self.buffer.take().ok_or(StageError::MissingDownloadBuffer)?; trace!(target: "sync::stages::bodies", bodies_len = buffer.len(), "Writing blocks"); let mut highest_block = from_block; - for response in buffer { - // Write block - let block_number = response.block_number(); - let block_indices = StoredBlockBodyIndices { - first_tx_num: next_tx_num, - tx_count: match &response { - BlockResponse::Full(block) => block.body.transactions.len() as u64, - BlockResponse::Empty(_) => 0, - }, - }; + // Firstly, write transactions to static files + for response in &buffer { + let block_number = response.block_number(); // Increment block on static file header. if block_number > 0 { @@ -195,15 +189,10 @@ where match response { BlockResponse::Full(block) => { - // write transaction block index - if !block.body.transactions.is_empty() { - tx_block_cursor.append(block_indices.last_tx_num(), block.number)?; - } - // Write transactions - for transaction in block.body.transactions { + for transaction in &block.body.transactions { let appended_tx_number = static_file_producer - .append_transaction(next_tx_num, &transaction.into())?; + .append_transaction(next_tx_num, &transaction.clone().into())?; if appended_tx_number != next_tx_num { // This scenario indicates a critical error in the logic of adding new @@ -218,32 +207,19 @@ where // Increment transaction id for each transaction. 
next_tx_num += 1; } - - // Write ommers if any - if !block.body.ommers.is_empty() { - ommers_cursor.append( - block_number, - StoredBlockOmmers { ommers: block.body.ommers }, - )?; - } - - // Write withdrawals if any - if let Some(withdrawals) = block.body.withdrawals { - if !withdrawals.is_empty() { - withdrawals_cursor - .append(block_number, StoredBlockWithdrawals { withdrawals })?; - } - } } BlockResponse::Empty(_) => {} }; - // insert block meta - block_indices_cursor.append(block_number, block_indices)?; - highest_block = block_number; } + // Write bodies to database. This will NOT write transactions to database as we've already + // written them directly to static files. + provider.append_block_bodies( + buffer.into_iter().map(|response| (response.block_number(), response.into_body())), + )?; + // The stage is "done" if: // - We got fewer blocks than our target // - We reached our target and the target was not limited by the batch size of the stage diff --git a/crates/storage/db-models/src/blocks.rs b/crates/storage/db-models/src/blocks.rs index ed1d7fb6772..be7661c8b12 100644 --- a/crates/storage/db-models/src/blocks.rs +++ b/crates/storage/db-models/src/blocks.rs @@ -12,7 +12,7 @@ pub type NumTransactions = u64; /// /// It has the pointer to the transaction Number of the first /// transaction in the block and the total number of transactions. 
-#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize, Compact)] +#[derive(Debug, Default, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Compact)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct StoredBlockBodyIndices { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index eef0ff5b668..20d01932a15 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -3250,7 +3250,7 @@ impl + } let block_indices = StoredBlockBodyIndices { first_tx_num, tx_count }; - self.tx.put::(block_number, block_indices.clone())?; + self.tx.put::(block_number, block_indices)?; durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); if !block_indices.is_empty() { @@ -3268,6 +3268,50 @@ impl + Ok(block_indices) } + fn append_block_bodies( + &self, + bodies: impl Iterator)>, + ) -> ProviderResult<()> { + let mut block_indices_cursor = self.tx.cursor_write::()?; + let mut tx_block_cursor = self.tx.cursor_write::()?; + let mut ommers_cursor = self.tx.cursor_write::()?; + let mut withdrawals_cursor = self.tx.cursor_write::()?; + + // Get id for the next tx_num of zero if there are no transactions. 
+ let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); + + for (block_number, body) in bodies { + let tx_count = body.as_ref().map(|b| b.transactions.len() as u64).unwrap_or_default(); + let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count }; + + // insert block meta + block_indices_cursor.append(block_number, block_indices)?; + + next_tx_num += tx_count; + let Some(body) = body else { continue }; + + // write transaction block index + if !body.transactions.is_empty() { + tx_block_cursor.append(block_indices.last_tx_num(), block_number)?; + } + + // Write ommers if any + if !body.ommers.is_empty() { + ommers_cursor.append(block_number, StoredBlockOmmers { ommers: body.ommers })?; + } + + // Write withdrawals if any + if let Some(withdrawals) = body.withdrawals { + if !withdrawals.is_empty() { + withdrawals_cursor + .append(block_number, StoredBlockWithdrawals { withdrawals })?; + } + } + } + + Ok(()) + } + /// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually fn append_blocks_with_state( &self, diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 7202c405f06..5cb60c2f42a 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,7 +1,7 @@ use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_primitives::SealedBlockWithSenders; +use reth_primitives::{BlockBody, SealedBlockWithSenders}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; use std::ops::RangeInclusive; @@ -40,6 +40,16 @@ pub trait BlockWriter: Send + Sync { fn insert_block(&self, block: SealedBlockWithSenders) -> ProviderResult; + /// Appends a batch of block bodies extending the canonical chain. 
This is invoked during + /// `Bodies` stage and does not write to `TransactionHashNumbers` and `TransactionSenders` + /// tables which are populated on later stages. + /// + /// Bodies are passed as [`Option`]s, if body is `None` the corresponding block is empty. + fn append_block_bodies( + &self, + bodies: impl Iterator)>, + ) -> ProviderResult<()>; + /// Appends a batch of sealed blocks to the blockchain, including sender information, and /// updates the post-state. /// From 3154a4f66c0f31c5cb9e6e28d58130bef219eb30 Mon Sep 17 00:00:00 2001 From: Noisy <125606576+donatik27@users.noreply.github.com> Date: Thu, 14 Nov 2024 16:35:04 +0100 Subject: [PATCH 470/970] Documentation Improvements: Grammar Corrections and Clarity Enhancements (#12545) --- docs/crates/db.md | 6 +++--- docs/repo/layout.md | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/crates/db.md b/docs/crates/db.md index 79eeae5ee4f..688f7ea76cc 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -212,7 +212,7 @@ pub trait DbTxMut: Send + Sync { Let's take a look at the `DbTx` and `DbTxMut` traits in action. -Revisiting the `DatabaseProvider` struct as an exampl, the `DatabaseProvider::header_by_number()` function uses the `DbTx::get()` function to get a header from the `Headers` table. +Revisiting the `DatabaseProvider` struct as an example, the `DatabaseProvider::header_by_number()` function uses the `DbTx::get()` function to get a header from the `Headers` table. [File: crates/storage/provider/src/providers/database/provider.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/provider/src/providers/database/provider.rs#L1319-L1336) @@ -267,7 +267,7 @@ let mut headers_cursor = provider.tx_ref().cursor_read::()?; let headers_walker = headers_cursor.walk_range(block_range.clone())?; ``` -Lets look at an examples of how cursors are used. 
The code snippet below contains the `unwind` method from the `BodyStage` defined in the `stages` crate. This function is responsible for unwinding any changes to the database if there is an error when executing the body stage within the Reth pipeline. +Let's look at an examples of how cursors are used. The code snippet below contains the `unwind` method from the `BodyStage` defined in the `stages` crate. This function is responsible for unwinding any changes to the database if there is an error when executing the body stage within the Reth pipeline. [File: crates/stages/stages/src/stages/bodies.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/stages/stages/src/stages/bodies.rs#L267-L345) @@ -330,7 +330,7 @@ While this is a brief look at how cursors work in the context of database tables ## Summary -This chapter was packed with information, so lets do a quick review. The database is comprised of tables, with each table being a collection of key-value pairs representing various pieces of data in the blockchain. Any struct that implements the `Database` trait can view, update or delete entries in the various tables. The database design leverages nested traits and generic associated types to provide methods to interact with each table in the database. +This chapter was packed with information, so let's do a quick review. The database is comprised of tables, with each table being a collection of key-value pairs representing various pieces of data in the blockchain. Any struct that implements the `Database` trait can view, update or delete entries in the various tables. The database design leverages nested traits and generic associated types to provide methods to interact with each table in the database.
diff --git a/docs/repo/layout.md b/docs/repo/layout.md index f78abe96122..dcb475e020e 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -132,7 +132,7 @@ The IPC transport lives in [`rpc/ipc`](../../crates/rpc/ipc). - Supported transports: HTTP, WS, IPC - Supported namespaces: `eth_`, `engine_`, `debug_` - [`rpc/rpc-eth-api`](../../crates/rpc/rpc-eth-api/): Reth RPC 'eth' namespace API (including interface and implementation), this crate is re-exported by `rpc/rpc-api` -- [`rpc/rpc-eth-types`](../../crates/rpc/rpc-eth-types/): Types `supporting implementation` of 'eth' namespace RPC server API +- [`rpc/rpc-eth-types`](../../crates/rpc/rpc-eth-types/): Types `supporting the implementation` of 'eth' namespace RPC server API - [`rpc/rpc-server-types`](../../crates/rpc/rpc-server-types/): RPC server types and constants #### Utilities Crates @@ -159,7 +159,7 @@ These crates define primitive types or algorithms. ### Optimism -Crates related to the Optimism rollup are lives in [optimism](../../crates/optimism/). +Crates related to the Optimism rollup live in [optimism](../../crates/optimism/). 
### Misc From 77e687c28c9f4b6b9eb38e7e684622163536ea6c Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 14 Nov 2024 19:23:15 +0400 Subject: [PATCH 471/970] feat: relax `BodyStage` bounds (#12539) --- crates/primitives-traits/src/block/body.rs | 69 ++----------------- crates/primitives-traits/src/node.rs | 2 +- crates/primitives/src/block.rs | 8 +++ crates/primitives/src/transaction/mod.rs | 16 +++++ crates/stages/stages/src/stages/bodies.rs | 16 ++--- .../stages/stages/src/test_utils/test_db.rs | 2 +- .../src/providers/database/provider.rs | 2 + .../src/providers/static_file/writer.rs | 4 +- crates/storage/provider/src/traits/block.rs | 7 +- 9 files changed, 47 insertions(+), 79 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 6ec184a2154..c5f15aefea6 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,10 +1,8 @@ //! Block body abstraction. -use alloc::{fmt, vec::Vec}; +use alloc::fmt; -use alloy_consensus::{BlockHeader, Transaction, TxType}; -use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; -use alloy_primitives::{Address, B256}; +use alloy_consensus::Transaction; use crate::InMemorySize; @@ -26,67 +24,8 @@ pub trait BlockBody: { /// Ordered list of signed transactions as committed in block. // todo: requires trait for signed transaction - type SignedTransaction: Transaction; - - /// Header type (uncle blocks). - type Header: BlockHeader; - - /// Withdrawals in block. - type Withdrawals: Iterator; + type Transaction: Transaction; /// Returns reference to transactions in block. - fn transactions(&self) -> &[Self::SignedTransaction]; - - /// Returns `Withdrawals` in the block, if any. - // todo: branch out into extension trait - fn withdrawals(&self) -> Option<&Self::Withdrawals>; - - /// Returns reference to uncle block headers. - fn ommers(&self) -> &[Self::Header]; - - /// Returns [`Requests`] in block, if any. 
- fn requests(&self) -> Option<&Requests>; - - /// Calculate the transaction root for the block body. - fn calculate_tx_root(&self) -> B256; - - /// Calculate the ommers root for the block body. - fn calculate_ommers_root(&self) -> B256; - - /// Calculate the withdrawals root for the block body, if withdrawals exist. If there are no - /// withdrawals, this will return `None`. - // todo: can be default impl if `calculate_withdrawals_root` made into a method on - // `Withdrawals` and `Withdrawals` moved to alloy - fn calculate_withdrawals_root(&self) -> Option; - - /// Recover signer addresses for all transactions in the block body. - fn recover_signers(&self) -> Option>; - - /// Returns whether or not the block body contains any blob transactions. - fn has_blob_transactions(&self) -> bool { - self.transactions().iter().any(|tx| tx.ty() == TxType::Eip4844 as u8) - } - - /// Returns whether or not the block body contains any EIP-7702 transactions. - fn has_eip7702_transactions(&self) -> bool { - self.transactions().iter().any(|tx| tx.ty() == TxType::Eip7702 as u8) - } - - /// Returns an iterator over all blob transactions of the block - fn blob_transactions_iter(&self) -> impl Iterator + '_ { - self.transactions().iter().filter(|tx| tx.ty() == TxType::Eip4844 as u8) - } - - /// Returns only the blob transactions, if any, from the block body. - fn blob_transactions(&self) -> Vec<&Self::SignedTransaction> { - self.blob_transactions_iter().collect() - } - - /// Returns an iterator over all blob versioned hashes from the block body. - fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_; - - /// Returns all blob versioned hashes from the block body. 
- fn blob_versioned_hashes(&self) -> Vec<&B256> { - self.blob_versioned_hashes_iter().collect() - } + fn transactions(&self) -> &[Self::Transaction]; } diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index cebbbe202e8..9ca69274831 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -24,7 +24,7 @@ impl NodePrimitives for () { /// Helper trait that sets trait bounds on [`NodePrimitives`]. pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { /// Block primitive. - type Block: FullBlock>; + type Block: FullBlock>; /// Signed version of the transaction type. type SignedTx: FullSignedTx; /// Transaction envelope type ID. diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 703f9d33169..0f96a9d5842 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -657,6 +657,14 @@ impl InMemorySize for BlockBody { } } +impl reth_primitives_traits::BlockBody for BlockBody { + type Transaction = TransactionSigned; + + fn transactions(&self) -> &[Self::Transaction] { + &self.transactions + } +} + impl From for BlockBody { fn from(block: Block) -> Self { Self { diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index d1a95b09be2..e5e4517d9dd 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1058,6 +1058,22 @@ impl reth_codecs::Compact for TransactionSignedNoHash { } } +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for TransactionSigned { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let tx: TransactionSignedNoHash = self.clone().into(); + tx.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (tx, buf) = TransactionSignedNoHash::from_compact(buf, len); + (tx.into(), buf) + } +} + impl From for TransactionSigned 
{ fn from(tx: TransactionSignedNoHash) -> Self { tx.with_hash() diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index c4676b2728c..640bae86659 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -4,6 +4,8 @@ use std::{ }; use futures_util::TryStreamExt; +use reth_codecs::Compact; +use reth_primitives_traits::BlockBody; use tracing::*; use alloy_primitives::TxNumber; @@ -75,8 +77,8 @@ where + StaticFileProviderFactory + StatsReader + BlockReader - + BlockWriter, - D: BodyDownloader, + + BlockWriter, + D: BodyDownloader>, { /// Return the id of the stage fn id(&self) -> StageId { @@ -190,9 +192,9 @@ where match response { BlockResponse::Full(block) => { // Write transactions - for transaction in &block.body.transactions { - let appended_tx_number = static_file_producer - .append_transaction(next_tx_num, &transaction.clone().into())?; + for transaction in block.body.transactions() { + let appended_tx_number = + static_file_producer.append_transaction(next_tx_num, transaction)?; if appended_tx_number != next_tx_num { // This scenario indicates a critical error in the logic of adding new @@ -702,9 +704,7 @@ mod tests { body.tx_num_range().try_for_each(|tx_num| { let transaction = random_signed_tx(&mut rng); - static_file_producer - .append_transaction(tx_num, &transaction.into()) - .map(drop) + static_file_producer.append_transaction(tx_num, &transaction).map(drop) })?; if body.tx_count != 0 { diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 4c43d4cdcd1..52983cb6f69 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -265,7 +265,7 @@ impl TestStageDB { let res = block.body.transactions.iter().try_for_each(|body_tx| { if let Some(txs_writer) = &mut txs_writer { - txs_writer.append_transaction(next_tx_num, &body_tx.clone().into())?; + 
txs_writer.append_transaction(next_tx_num, body_tx)?; } else { tx.put::(next_tx_num, body_tx.clone().into())? } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 20d01932a15..62a44c175b0 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -3110,6 +3110,8 @@ impl + impl + 'static> BlockWriter for DatabaseProvider { + type Body = BlockBody; + /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) /// * [`Headers`](tables::Headers) diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index ed1a51068c3..2e54fb943a7 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -10,7 +10,7 @@ use reth_db_api::models::CompactU256; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; use reth_primitives::{ static_file::{SegmentHeader, SegmentRangeInclusive}, - Receipt, StaticFileSegment, TransactionSignedNoHash, + Receipt, StaticFileSegment, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ @@ -544,7 +544,7 @@ impl StaticFileProviderRW { pub fn append_transaction( &mut self, tx_num: TxNumber, - tx: &TransactionSignedNoHash, + tx: impl Compact, ) -> ProviderResult { let start = Instant::now(); self.ensure_no_queued_prune()?; diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 5cb60c2f42a..50fb032923d 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,7 +1,7 @@ use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; -use 
reth_primitives::{BlockBody, SealedBlockWithSenders}; +use reth_primitives::SealedBlockWithSenders; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; use std::ops::RangeInclusive; @@ -32,6 +32,9 @@ pub trait StateReader: Send + Sync { /// Block Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockWriter: Send + Sync { + /// The body this writer can write. + type Body: Send + Sync; + /// Insert full block and make it canonical. Parent tx num and transition id is taken from /// parent block in database. /// @@ -47,7 +50,7 @@ pub trait BlockWriter: Send + Sync { /// Bodies are passed as [`Option`]s, if body is `None` the corresponding block is empty. fn append_block_bodies( &self, - bodies: impl Iterator)>, + bodies: impl Iterator)>, ) -> ProviderResult<()>; /// Appends a batch of sealed blocks to the blockchain, including sender information, and From c5d1b813e4496244cfb673ae0e350035da4a6b4d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Nov 2024 17:03:28 +0100 Subject: [PATCH 472/970] chore: add missing debugs for pool types (#12546) --- crates/transaction-pool/src/pool/best.rs | 2 ++ crates/transaction-pool/src/validate/mod.rs | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 17165611794..21bcc668b75 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -20,6 +20,7 @@ use tracing::debug; /// This is a wrapper around [`BestTransactions`] that also enforces a specific basefee. /// /// This iterator guarantees that all transaction it returns satisfy both the base fee and blob fee! 
+#[derive(Debug)] pub(crate) struct BestTransactionsWithFees { pub(crate) best: BestTransactions, pub(crate) base_fee: u64, @@ -72,6 +73,7 @@ impl Iterator for BestTransactionsWithFees { /// be executed on the current state, but only yields transactions that are ready to be executed /// now. While it contains all gapless transactions of a sender, it _always_ only returns the /// transaction with the current on chain nonce. +#[derive(Debug)] pub(crate) struct BestTransactions { /// Contains a copy of _all_ transactions of the pending pool at the point in time this /// iterator was created. diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 6a3b0b96e97..8a5ecc9c419 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -453,9 +453,11 @@ impl Clone for ValidPoolTransaction { impl fmt::Debug for ValidPoolTransaction { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ValidPoolTransaction") + .field("id", &self.transaction_id) + .field("pragate", &self.propagate) + .field("origin", &self.origin) .field("hash", self.transaction.hash()) - .field("provides", &self.transaction_id) - .field("raw_tx", &self.transaction) + .field("tx", &self.transaction) .finish() } } From 4a0bc37cbbbcd11e08100d2f677aef703196ca7a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Nov 2024 17:08:46 +0100 Subject: [PATCH 473/970] chore: rm reth testing utils dep from reth-primitives-traits (#12542) --- Cargo.lock | 1 - crates/primitives-traits/Cargo.toml | 2 -- crates/primitives-traits/src/header/sealed.rs | 4 +--- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ca5465d37bc..33d50319339 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8536,7 +8536,6 @@ dependencies = [ "proptest-arbitrary-interop", "rand 0.8.5", "reth-codecs", - "reth-testing-utils", "revm-primitives", "roaring", "serde", diff --git 
a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 6cafe8b8b1e..30f1c43c86a 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -39,8 +39,6 @@ proptest = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] -reth-testing-utils.workspace = true - alloy-primitives = { workspace = true, features = ["arbitrary"] } alloy-consensus = { workspace = true, features = ["arbitrary"] } diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 5dd9fcf0d5f..e872eb9811d 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -219,10 +219,8 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { use super::super::{serde_bincode_compat, SealedHeader}; - use arbitrary::Arbitrary; use rand::Rng; - use reth_testing_utils::generators; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -236,7 +234,7 @@ pub(super) mod serde_bincode_compat { } let mut bytes = [0u8; 1024]; - generators::rng().fill(bytes.as_mut_slice()); + rand::thread_rng().fill(&mut bytes[..]); let data = Data { transaction: SealedHeader::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) .unwrap(), From 217d9f7c12c151722f7e3b692df7d2ef020927d8 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Nov 2024 16:58:04 +0100 Subject: [PATCH 474/970] chore(sdk): Add trait bound `Compact` on `::Type` (#12534) --- crates/primitives-traits/src/transaction/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index bb6f6a711e3..4d7ab78685f 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -8,12 +8,12 @@ use alloy_primitives::B256; use reth_codecs::Compact; use 
serde::{Deserialize, Serialize}; -use crate::{InMemorySize, MaybeArbitrary, TxType}; +use crate::{FullTxType, InMemorySize, MaybeArbitrary, TxType}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. -pub trait FullTransaction: Transaction + Compact {} +pub trait FullTransaction: Transaction + Compact {} -impl FullTransaction for T where T: Transaction + Compact {} +impl FullTransaction for T where T: Transaction + Compact {} /// Abstraction of a transaction. pub trait Transaction: From b1635fcba2fdaa2f71747b231d4aeda6bf457b0c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Nov 2024 17:10:46 +0100 Subject: [PATCH 475/970] chore(sdk): make `BlockBatchRecord` generic over receipt (#12449) --- Cargo.lock | 217 +++++++++++++++++++++++---------------- crates/revm/Cargo.toml | 5 +- crates/revm/src/batch.rs | 39 ++++--- 3 files changed, 156 insertions(+), 105 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 33d50319339..e37e52f0d94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,14 +146,14 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "alloy-dyn-abi" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85132f2698b520fab3f54beed55a44389f7006a7b557a0261e1e69439dcc1572" +checksum = "ef2364c782a245cf8725ea6dbfca5f530162702b5d685992ea03ce64529136cc" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -250,7 +250,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tracing", ] @@ -274,7 +274,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -302,7 +302,7 @@ dependencies = [ "rand 0.8.5", "serde_json", "tempfile", - "thiserror", + "thiserror 1.0.69", "tracing", "url", ] @@ -373,7 +373,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - 
"thiserror", + "thiserror 1.0.69", "tokio", "tracing", "url", @@ -494,7 +494,7 @@ dependencies = [ "alloy-rpc-types-engine", "serde", "serde_with", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -573,7 +573,7 @@ dependencies = [ "alloy-serde", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -611,7 +611,7 @@ dependencies = [ "auto_impl", "elliptic-curve", "k256", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -629,7 +629,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -714,7 +714,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tower 0.5.1", "tracing", @@ -1400,7 +1400,7 @@ dependencies = [ "static_assertions", "tap", "thin-vec", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -1625,7 +1625,7 @@ dependencies = [ "semver 1.0.23", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1803,7 +1803,7 @@ dependencies = [ "k256", "serde", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1819,7 +1819,7 @@ dependencies = [ "pbkdf2", "rand 0.8.5", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1838,7 +1838,7 @@ dependencies = [ "serde", "sha2 0.10.8", "sha3", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -2627,7 +2627,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "walkdir", ] @@ -2782,7 +2782,7 @@ dependencies = [ "reth-node-ethereum", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -2871,7 +2871,7 @@ dependencies = [ "reth-tracing", "reth-trie-db", "serde", - "thiserror", + "thiserror 1.0.69", "tokio", ] @@ -3142,7 +3142,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" dependencies = [ "libc", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3455,7 
+3455,7 @@ dependencies = [ "pin-project", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -4300,7 +4300,7 @@ dependencies = [ "combine", "jni-sys", "log", - "thiserror", + "thiserror 1.0.69", "walkdir", ] @@ -4363,7 +4363,7 @@ dependencies = [ "rustls-pki-types", "rustls-platform-verifier", "soketto", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-rustls", "tokio-util", @@ -4391,7 +4391,7 @@ dependencies = [ "rustc-hash 2.0.0", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -4416,7 +4416,7 @@ dependencies = [ "rustls-platform-verifier", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tower 0.4.13", "tracing", @@ -4455,7 +4455,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -4472,7 +4472,7 @@ dependencies = [ "http", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -4627,7 +4627,7 @@ dependencies = [ "multihash", "quick-protobuf", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", "tracing", "zeroize", ] @@ -4865,7 +4865,7 @@ dependencies = [ "metrics", "metrics-util", "quanta", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -4915,7 +4915,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -5573,7 +5573,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.69", "ucd-trie", ] @@ -5763,7 +5763,7 @@ dependencies = [ "smallvec", "symbolic-demangle", "tempfile", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -6011,9 +6011,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", "pin-project-lite", @@ -6022,26 +6022,29 @@ dependencies = [ "rustc-hash 2.0.0", "rustls", "socket2", - "thiserror", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom 0.2.15", "rand 0.8.5", "ring", "rustc-hash 2.0.0", "rustls", + "rustls-pki-types", "slab", - "thiserror", + "thiserror 2.0.3", "tinyvec", "tracing", + "web-time", ] [[package]] @@ -6227,7 +6230,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom 0.2.15", "libredox", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -6491,7 +6494,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "schnellru", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -6526,7 +6529,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "serde", - "thiserror", + "thiserror 1.0.69", "tokio", "tower 0.4.13", "tracing", @@ -6580,7 +6583,7 @@ dependencies = [ "reth-execution-errors", "reth-primitives", "reth-storage-errors", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -6735,7 +6738,7 @@ dependencies = [ "reth-fs-util", "secp256k1", "serde", - "thiserror", + "thiserror 1.0.69", "tikv-jemallocator", "tracy-client", ] @@ -6878,7 +6881,7 @@ dependencies = [ "sysinfo", "tempfile", "test-fuzz", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -6934,7 +6937,7 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tracing", ] @@ -6976,7 +6979,7 @@ dependencies = [ "schnellru", "secp256k1", 
"serde", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -7001,7 +7004,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -7027,7 +7030,7 @@ dependencies = [ "secp256k1", "serde", "serde_with", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -7066,7 +7069,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -7132,7 +7135,7 @@ dependencies = [ "secp256k1", "sha2 0.10.8", "sha3", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -7205,7 +7208,7 @@ dependencies = [ "reth-prune", "reth-stages-api", "reth-tasks", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", ] @@ -7254,7 +7257,7 @@ dependencies = [ "reth-trie", "reth-trie-parallel", "revm-primitives", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -7301,7 +7304,7 @@ dependencies = [ "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7332,7 +7335,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -7360,7 +7363,7 @@ dependencies = [ "reth-primitives", "reth-primitives-traits", "serde", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7619,7 +7622,7 @@ dependencies = [ "reth-transaction-pool", "reth-trie-db", "tempfile", - "thiserror", + "thiserror 1.0.69", "tokio", ] @@ -7645,7 +7648,7 @@ version = "1.1.1" dependencies = [ "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7687,7 +7690,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -7712,7 +7715,7 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror", + "thiserror 
1.0.69", "tracing", ] @@ -7751,7 +7754,7 @@ dependencies = [ "reqwest", "reth-tracing", "serde_with", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -7809,7 +7812,7 @@ dependencies = [ "serial_test", "smallvec", "tempfile", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -7834,7 +7837,7 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", ] @@ -7872,7 +7875,7 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror", + "thiserror 1.0.69", "tokio", "url", ] @@ -7903,7 +7906,7 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror", + "thiserror 1.0.69", "tracing", "zstd", ] @@ -8036,7 +8039,7 @@ dependencies = [ "serde", "shellexpand", "strum", - "thiserror", + "thiserror 1.0.69", "tokio", "toml", "tracing", @@ -8350,7 +8353,7 @@ dependencies = [ "reth-trie", "revm", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", "tracing", ] @@ -8401,7 +8404,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -8454,7 +8457,7 @@ dependencies = [ "reth-primitives", "reth-transaction-pool", "serde", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -8618,7 +8621,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.0.0", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -8639,7 +8642,7 @@ dependencies = [ "serde", "serde_json", "test-fuzz", - "thiserror", + "thiserror 1.0.69", "toml", ] @@ -8653,6 +8656,7 @@ dependencies = [ "reth-ethereum-forks", "reth-execution-errors", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-storage-api", "reth-storage-errors", @@ -8724,7 +8728,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tower 0.4.13", @@ -8818,7 +8822,7 @@ dependencies = [ 
"reth-transaction-pool", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-util", "tower 0.4.13", @@ -8858,7 +8862,7 @@ dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -8941,7 +8945,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -9042,7 +9046,7 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -9069,7 +9073,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -9169,7 +9173,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", "tracing-futures", @@ -9251,7 +9255,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -9362,7 +9366,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -9384,7 +9388,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "smallvec", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -9418,7 +9422,7 @@ dependencies = [ "colorchoice", "revm", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -9760,6 +9764,9 @@ name = "rustls-pki-types" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] [[package]] name = "rustls-platform-verifier" @@ -10023,7 +10030,7 @@ checksum = "c7715380eec75f029a4ef7de39a9200e0a63823176b759d055b613f5a87df6a6" dependencies = [ "percent-encoding", "serde", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -10259,7 +10266,7 @@ checksum = 
"adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -10601,7 +10608,16 @@ version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", ] [[package]] @@ -10615,6 +10631,17 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "thiserror-impl" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "thiserror-impl-no-std" version = "2.0.2" @@ -10977,7 +11004,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", - "thiserror", + "thiserror 1.0.69", "time", "tracing-subscriber", ] @@ -11128,7 +11155,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "smallvec", - "thiserror", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -11151,7 +11178,7 @@ dependencies = [ "resolv-conf", "serde", "smallvec", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", "trust-dns-proto", @@ -11179,7 +11206,7 @@ dependencies = [ "rustls", "rustls-pki-types", "sha1", - "thiserror", + "thiserror 1.0.69", "utf-8", ] @@ -11535,6 +11562,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki-roots" version = "0.26.6" @@ -11910,7 +11947,7 @@ dependencies = [ "pharos", "rustc_version 0.4.1", "send_wrapper 0.6.0", - "thiserror", + "thiserror 1.0.69", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 3ee68010108..bd2251e0333 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -19,6 +19,7 @@ reth-execution-errors.workspace = true reth-prune-types.workspace = true reth-storage-api.workspace = true reth-trie = { workspace = true, optional = true } +reth-primitives-traits.workspace = true # alloy alloy-eips.workspace = true @@ -41,13 +42,15 @@ std = [ "revm/std", "alloy-eips/std", "alloy-consensus/std", + "reth-primitives-traits/std", ] test-utils = [ "dep:reth-trie", "reth-primitives/test-utils", "reth-trie?/test-utils", "revm/test-utils", - "reth-prune-types/test-utils" + "reth-prune-types/test-utils", + "reth-primitives-traits/test-utils", ] serde = [ "revm/serde", diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index be3ef0a3782..ddb88505b8d 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,10 +1,12 @@ //! Helper for handling execution of multiple blocks. use alloc::vec::Vec; + use alloy_eips::eip7685::Requests; use alloy_primitives::{map::HashSet, Address, BlockNumber}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; -use reth_primitives::{Receipt, Receipts}; +use reth_primitives::Receipts; +use reth_primitives_traits::Receipt; use reth_prune_types::{PruneMode, PruneModes, PruneSegmentError, MINIMUM_PRUNING_DISTANCE}; use revm::db::states::bundle_state::BundleRetention; @@ -13,7 +15,7 @@ use revm::db::states::bundle_state::BundleRetention; /// - pruning receipts according to the pruning configuration. 
/// - batch range if known #[derive(Debug, Default)] -pub struct BlockBatchRecord { +pub struct BlockBatchRecord { /// Pruning configuration. prune_modes: PruneModes, /// The collection of receipts. @@ -21,7 +23,7 @@ pub struct BlockBatchRecord { /// The inner vector stores receipts ordered by transaction number. /// /// If receipt is None it means it is pruned. - receipts: Receipts, + receipts: Receipts, /// The collection of EIP-7685 requests. /// Outer vector stores requests for each block sequentially. /// The inner vector stores requests ordered by transaction number. @@ -41,9 +43,12 @@ pub struct BlockBatchRecord { tip: Option, } -impl BlockBatchRecord { +impl BlockBatchRecord { /// Create a new receipts recorder with the given pruning configuration. - pub fn new(prune_modes: PruneModes) -> Self { + pub fn new(prune_modes: PruneModes) -> Self + where + T: Default, + { Self { prune_modes, ..Default::default() } } @@ -73,12 +78,15 @@ impl BlockBatchRecord { } /// Returns the recorded receipts. - pub const fn receipts(&self) -> &Receipts { + pub const fn receipts(&self) -> &Receipts { &self.receipts } /// Returns all recorded receipts. - pub fn take_receipts(&mut self) -> Receipts { + pub fn take_receipts(&mut self) -> Receipts + where + T: Default, + { core::mem::take(&mut self.receipts) } @@ -111,7 +119,10 @@ impl BlockBatchRecord { } /// Save receipts to the executor. - pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> { + pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> + where + T: Receipt, + { let mut receipts = receipts.into_iter().map(Some).collect(); // Prune receipts if necessary. self.prune_receipts(&mut receipts).map_err(InternalBlockExecutionError::from)?; @@ -121,10 +132,10 @@ impl BlockBatchRecord { } /// Prune receipts according to the pruning configuration. 
- fn prune_receipts( - &mut self, - receipts: &mut Vec>, - ) -> Result<(), PruneSegmentError> { + fn prune_receipts(&mut self, receipts: &mut Vec>) -> Result<(), PruneSegmentError> + where + T: Receipt, + { let (Some(first_block), Some(tip)) = (self.first_block, self.tip) else { return Ok(()) }; let block_number = first_block + self.receipts.len() as u64; @@ -161,7 +172,7 @@ impl BlockBatchRecord { // If there is an address_filter, it does not contain any of the // contract addresses, then remove this receipt. let inner_receipt = receipt.as_ref().expect("receipts have not been pruned"); - if !inner_receipt.logs.iter().any(|log| filter.contains(&log.address)) { + if !inner_receipt.logs().iter().any(|log| filter.contains(&log.address)) { receipt.take(); } } @@ -186,7 +197,7 @@ mod tests { #[test] fn test_save_receipts_empty() { - let mut recorder = BlockBatchRecord::default(); + let mut recorder: BlockBatchRecord = BlockBatchRecord::default(); // Create an empty vector of receipts let receipts = vec![]; From bd29f82567134537a6052d2b479e986a641a916b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Nov 2024 18:15:27 +0100 Subject: [PATCH 476/970] chore: replace reth-chainspec dep with alloy chains (#12550) --- Cargo.lock | 2 +- crates/net/eth-wire/Cargo.toml | 9 +++++---- crates/net/eth-wire/src/errors/eth.rs | 2 +- crates/net/eth-wire/src/ethstream.rs | 2 +- crates/net/eth-wire/src/test_utils.rs | 2 +- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e37e52f0d94..216088877bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7311,6 +7311,7 @@ dependencies = [ name = "reth-eth-wire" version = "1.1.1" dependencies = [ + "alloy-chains", "alloy-eips", "alloy-primitives", "alloy-rlp", @@ -7323,7 +7324,6 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "reth-chainspec", "reth-codecs", "reth-ecies", "reth-eth-wire-types", diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml 
index 83a3e163ebc..791f05cc9ac 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-chainspec.workspace = true reth-codecs.workspace = true reth-primitives.workspace = true reth-ecies.workspace = true @@ -23,6 +22,7 @@ reth-network-peers.workspace = true # ethereum alloy-primitives.workspace = true +alloy-chains.workspace = true # metrics reth-metrics.workspace = true @@ -69,10 +69,10 @@ arbitrary = [ "reth-primitives/arbitrary", "reth-eth-wire-types/arbitrary", "dep:arbitrary", - "reth-chainspec/arbitrary", "alloy-eips/arbitrary", "alloy-primitives/arbitrary", - "reth-codecs/arbitrary" + "reth-codecs/arbitrary", + "alloy-chains/arbitrary" ] serde = [ "dep:serde", @@ -82,7 +82,8 @@ serde = [ "bytes/serde", "rand/serde", "secp256k1/serde", - "reth-codecs/serde" + "reth-codecs/serde", + "alloy-chains/serde" ] [[test]] diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index 1f8b995afda..e06d8230320 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -3,8 +3,8 @@ use crate::{ errors::P2PStreamError, message::MessageError, version::ParseVersionError, DisconnectReason, }; +use alloy_chains::Chain; use alloy_primitives::B256; -use reth_chainspec::Chain; use reth_eth_wire_types::EthVersion; use reth_primitives::{GotExpected, GotExpectedBoxed, ValidationError}; use std::io; diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index c971f6182ce..25b135d5637 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -365,9 +365,9 @@ mod tests { EthMessage, EthStream, EthVersion, HelloMessageWithProtocols, PassthroughCodec, ProtocolVersion, Status, }; + use alloy_chains::NamedChain; use alloy_primitives::{B256, U256}; use futures::{SinkExt, StreamExt}; - use reth_chainspec::NamedChain; use reth_ecies::stream::ECIESStream; use 
reth_eth_wire_types::EthNetworkPrimitives; use reth_network_peers::pk2id; diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index d7a3aa582b7..0ad83d5d944 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -6,8 +6,8 @@ use crate::{ hello::DEFAULT_TCP_PORT, EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion, Status, UnauthedP2PStream, }; +use alloy_chains::Chain; use alloy_primitives::{B256, U256}; -use reth_chainspec::Chain; use reth_network_peers::pk2id; use reth_primitives::{ForkFilter, Head}; use secp256k1::{SecretKey, SECP256K1}; From a7bb1d1fa33836533d8e3f1c07d64f4872cb1d5f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Nov 2024 18:36:31 +0100 Subject: [PATCH 477/970] chore: remove constants reexport (#12549) --- Cargo.lock | 5 +++++ bin/reth-bench/Cargo.toml | 1 + bin/reth-bench/src/bench/output.rs | 2 +- crates/consensus/consensus/Cargo.toml | 16 +++++++++++----- crates/consensus/consensus/src/lib.rs | 5 +++-- crates/ethereum/consensus/Cargo.toml | 1 + crates/ethereum/consensus/src/lib.rs | 5 ++--- crates/net/eth-wire/Cargo.toml | 4 +++- crates/net/eth-wire/src/hello.rs | 2 +- crates/payload/basic/Cargo.toml | 1 + crates/payload/basic/src/lib.rs | 3 ++- crates/primitives/src/constants/mod.rs | 3 --- crates/primitives/src/lib.rs | 2 -- crates/storage/db-common/src/init.rs | 5 +++-- 14 files changed, 34 insertions(+), 21 deletions(-) delete mode 100644 crates/primitives/src/constants/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 216088877bd..dbc1230f2c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6438,6 +6438,7 @@ dependencies = [ "reth-payload-builder", "reth-payload-primitives", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-tasks", @@ -6526,6 +6527,7 @@ dependencies = [ "reth-node-api", "reth-node-core", "reth-primitives", + "reth-primitives-traits", "reth-rpc-types-compat", "reth-tracing", "serde", @@ 
-6802,6 +6804,7 @@ dependencies = [ "auto_impl", "derive_more 1.0.0", "reth-primitives", + "reth-primitives-traits", ] [[package]] @@ -7330,6 +7333,7 @@ dependencies = [ "reth-metrics", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-tracing", "secp256k1", "serde", @@ -7388,6 +7392,7 @@ dependencies = [ "reth-consensus", "reth-consensus-common", "reth-primitives", + "reth-primitives-traits", "tracing", ] diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 03844633a92..0182076130c 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -20,6 +20,7 @@ reth-node-core.workspace = true reth-node-api.workspace = true reth-rpc-types-compat.workspace = true reth-primitives = { workspace = true, features = ["alloy-compat"] } +reth-primitives-traits.workspace = true reth-tracing.workspace = true # alloy diff --git a/bin/reth-bench/src/bench/output.rs b/bin/reth-bench/src/bench/output.rs index 8f68dac4533..56343c6af64 100644 --- a/bin/reth-bench/src/bench/output.rs +++ b/bin/reth-bench/src/bench/output.rs @@ -1,7 +1,7 @@ //! Contains various benchmark output formats, either for logging or for //! serialization to / from files. 
-use reth_primitives::constants::gas_units::GIGAGAS; +use reth_primitives_traits::constants::GIGAGAS; use serde::{ser::SerializeStruct, Serialize}; use std::time::Duration; diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index d120d268bd9..55188dd8472 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-primitives-traits.workspace = true # ethereum alloy-eips.workspace = true @@ -26,9 +27,14 @@ derive_more.workspace = true [features] default = ["std"] std = [ - "reth-primitives/std", - "alloy-primitives/std", - "alloy-eips/std", - "alloy-consensus/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "alloy-primitives/std", + "alloy-eips/std", + "alloy-consensus/std", + "reth-primitives-traits/std" +] +test-utils = [ + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils" ] -test-utils = ["reth-primitives/test-utils"] diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index a8f0a01f22b..ec296f3ed49 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -16,9 +16,10 @@ use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockBody, BlockWithSenders, GotExpected, GotExpectedBoxed, - InvalidTransactionError, Receipt, SealedBlock, SealedHeader, + BlockBody, BlockWithSenders, GotExpected, GotExpectedBoxed, InvalidTransactionError, Receipt, + SealedBlock, SealedHeader, }; +use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; /// A consensus implementation that does nothing. 
pub mod noop; diff --git a/crates/ethereum/consensus/Cargo.toml b/crates/ethereum/consensus/Cargo.toml index bace4195ca6..8e6158ff46c 100644 --- a/crates/ethereum/consensus/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -15,6 +15,7 @@ workspace = true reth-chainspec.workspace = true reth-consensus-common.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-consensus.workspace = true # alloy diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 3dc7a02af8b..7198a703672 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -18,9 +18,8 @@ use reth_consensus_common::validation::{ validate_against_parent_timestamp, validate_block_pre_execution, validate_body_against_header, validate_header_base_fee, validate_header_extradata, validate_header_gas, }; -use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockBody, BlockWithSenders, SealedBlock, SealedHeader, -}; +use reth_primitives::{BlockBody, BlockWithSenders, SealedBlock, SealedHeader}; +use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; use std::{fmt::Debug, sync::Arc, time::SystemTime}; /// The bound divisor of the gas limit, used in update calculations. 
diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 791f05cc9ac..d4989ca3b29 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-codecs.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-ecies.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } reth-eth-wire-types.workspace = true @@ -72,7 +73,8 @@ arbitrary = [ "alloy-eips/arbitrary", "alloy-primitives/arbitrary", "reth-codecs/arbitrary", - "alloy-chains/arbitrary" + "alloy-chains/arbitrary", + "reth-primitives-traits/arbitrary" ] serde = [ "dep:serde", diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index 2eb42eaeb49..5d7650b4b7b 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -2,7 +2,7 @@ use crate::{Capability, EthVersion, ProtocolVersion}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_codecs::add_arbitrary_tests; use reth_network_peers::PeerId; -use reth_primitives::constants::RETH_CLIENT_VERSION; +use reth_primitives_traits::constants::RETH_CLIENT_VERSION; /// The default tcp port for p2p. 
/// diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 74dea45d10d..5e9e524f79b 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 9b36e44b1fc..a905f854448 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -20,7 +20,8 @@ use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJo use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; -use reth_primitives::{constants::RETH_CLIENT_VERSION, proofs, SealedHeader}; +use reth_primitives::{proofs, SealedHeader}; +use reth_primitives_traits::constants::RETH_CLIENT_VERSION; use reth_provider::{BlockReaderIdExt, CanonStateNotification, StateProviderFactory}; use reth_revm::cached::CachedReads; use reth_tasks::TaskSpawner; diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs deleted file mode 100644 index 09c488cc25a..00000000000 --- a/crates/primitives/src/constants/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! 
Ethereum protocol-related constants - -pub use reth_primitives_traits::constants::*; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 534b525f086..2318b3c2455 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -26,7 +26,6 @@ mod alloy_compat; mod block; #[cfg(feature = "reth-codec")] mod compression; -pub mod constants; pub mod proofs; mod receipt; pub use reth_static_file_types as static_file; @@ -36,7 +35,6 @@ pub use block::{generate_valid_header, valid_header_strategy}; pub use block::{Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockWithSenders}; #[cfg(feature = "reth-codec")] pub use compression::*; -pub use constants::HOLESKY_GENESIS_HASH; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 8c930b22ef8..45fb4b76b31 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -584,7 +584,9 @@ struct GenesisAccountWithAddress { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::constants::{MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; + use alloy_consensus::constants::{ + HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + }; use alloy_genesis::Genesis; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; use reth_db::DatabaseEnv; @@ -595,7 +597,6 @@ mod tests { transaction::DbTx, Database, }; - use reth_primitives::HOLESKY_GENESIS_HASH; use reth_primitives_traits::IntegerList; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, From 870ffae9094e411343781f7a1dc29d503740f168 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Nov 2024 19:03:15 +0100 Subject: [PATCH 478/970] chore: use crates directly in eth-wire (#12554) --- Cargo.lock | 1 + crates/net/eth-wire/Cargo.toml | 8 +++++--- 
crates/net/eth-wire/src/errors/eth.rs | 3 ++- crates/net/eth-wire/src/errors/p2p.rs | 2 +- crates/net/eth-wire/src/ethstream.rs | 5 +++-- crates/net/eth-wire/src/multiplex.rs | 2 +- crates/net/eth-wire/src/p2pstream.rs | 2 +- crates/net/eth-wire/src/test_utils.rs | 2 +- 8 files changed, 15 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dbc1230f2c8..12e1a4a8cda 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7330,6 +7330,7 @@ dependencies = [ "reth-codecs", "reth-ecies", "reth-eth-wire-types", + "reth-ethereum-forks", "reth-metrics", "reth-network-peers", "reth-primitives", diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index d4989ca3b29..3999f658e0a 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -14,12 +14,12 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-ecies.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } reth-eth-wire-types.workspace = true reth-network-peers.workspace = true +reth-ethereum-forks.workspace = true # ethereum alloy-primitives.workspace = true @@ -45,6 +45,7 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] reth-primitives = { workspace = true, features = ["arbitrary"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-eth-wire-types = { workspace = true, features = ["arbitrary"] } reth-tracing.workspace = true @@ -67,14 +68,15 @@ alloy-eips.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", "reth-eth-wire-types/arbitrary", "dep:arbitrary", "alloy-eips/arbitrary", "alloy-primitives/arbitrary", "reth-codecs/arbitrary", "alloy-chains/arbitrary", - "reth-primitives-traits/arbitrary" + "reth-primitives-traits/arbitrary", + "reth-ethereum-forks/arbitrary", + "reth-primitives/arbitrary" ] serde = [ "dep:serde", diff --git 
a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index e06d8230320..499ff8089bf 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -6,7 +6,8 @@ use crate::{ use alloy_chains::Chain; use alloy_primitives::B256; use reth_eth_wire_types::EthVersion; -use reth_primitives::{GotExpected, GotExpectedBoxed, ValidationError}; +use reth_ethereum_forks::ValidationError; +use reth_primitives_traits::{GotExpected, GotExpectedBoxed}; use std::io; /// Errors when sending/receiving messages diff --git a/crates/net/eth-wire/src/errors/p2p.rs b/crates/net/eth-wire/src/errors/p2p.rs index 2cfef926984..f24e2cebc78 100644 --- a/crates/net/eth-wire/src/errors/p2p.rs +++ b/crates/net/eth-wire/src/errors/p2p.rs @@ -3,7 +3,7 @@ use std::io; use reth_eth_wire_types::{DisconnectReason, UnknownDisconnectReason}; -use reth_primitives::GotExpected; +use reth_primitives_traits::GotExpected; use crate::{capability::SharedCapabilityError, ProtocolVersion}; diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 25b135d5637..675ea19a5ce 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -9,7 +9,8 @@ use alloy_primitives::bytes::{Bytes, BytesMut}; use futures::{ready, Sink, SinkExt, StreamExt}; use pin_project::pin_project; use reth_eth_wire_types::NetworkPrimitives; -use reth_primitives::{ForkFilter, GotExpected}; +use reth_ethereum_forks::ForkFilter; +use reth_primitives_traits::GotExpected; use std::{ pin::Pin, task::{Context, Poll}, @@ -370,8 +371,8 @@ mod tests { use futures::{SinkExt, StreamExt}; use reth_ecies::stream::ECIESStream; use reth_eth_wire_types::EthNetworkPrimitives; + use reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::pk2id; - use reth_primitives::{ForkFilter, Head}; use secp256k1::{SecretKey, SECP256K1}; use std::time::Duration; use tokio::net::{TcpListener, TcpStream}; diff --git 
a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 6f882f40887..e46563cad48 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -25,7 +25,7 @@ use crate::{ use bytes::{Bytes, BytesMut}; use futures::{Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; use reth_eth_wire_types::NetworkPrimitives; -use reth_primitives::ForkFilter; +use reth_ethereum_forks::ForkFilter; use tokio::sync::{mpsc, mpsc::UnboundedSender}; use tokio_stream::wrappers::UnboundedReceiverStream; diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 76075838bc7..0ae546daafb 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -14,7 +14,7 @@ use futures::{Sink, SinkExt, StreamExt}; use pin_project::pin_project; use reth_codecs::add_arbitrary_tests; use reth_metrics::metrics::counter; -use reth_primitives::GotExpected; +use reth_primitives_traits::GotExpected; use std::{ collections::VecDeque, io, diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index 0ad83d5d944..56656d60e94 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -8,8 +8,8 @@ use crate::{ }; use alloy_chains::Chain; use alloy_primitives::{B256, U256}; +use reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::pk2id; -use reth_primitives::{ForkFilter, Head}; use secp256k1::{SecretKey, SECP256K1}; use std::net::SocketAddr; use tokio::net::TcpStream; From 28a5b631d15b71463d1fe26d5910d26c5dc95613 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Nov 2024 21:35:04 +0100 Subject: [PATCH 479/970] chore: move gas_spent_by_transactions to traits (#12541) --- crates/primitives-traits/src/receipt.rs | 17 +++++++++++++++-- crates/primitives/src/receipt.rs | 16 ++++------------ 2 files changed, 19 insertions(+), 14 deletions(-) diff --git 
a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 68917d62812..f3c9ef06356 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,9 +1,9 @@ //! Receipt abstraction -use core::fmt; - +use alloc::vec::Vec; use alloy_consensus::TxReceipt; use alloy_primitives::B256; +use core::fmt; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; @@ -32,3 +32,16 @@ pub trait Receipt: /// Calculates the receipts root of the given receipts. fn receipts_root(receipts: &[&Self]) -> B256; } + +/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). +pub fn gas_spent_by_transactions(receipts: I) -> Vec<(u64, u64)> +where + I: IntoIterator, + T: TxReceipt, +{ + receipts + .into_iter() + .enumerate() + .map(|(id, receipt)| (id as u64, receipt.cumulative_gas_used() as u64)) + .collect() +} diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 3258d4be6eb..41397181149 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,5 +1,5 @@ use alloc::{vec, vec::Vec}; -use core::{cmp::Ordering, ops::Deref}; +use core::cmp::Ordering; use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, @@ -16,6 +16,9 @@ use serde::{Deserialize, Serialize}; use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; use crate::TxType; +/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). +pub use reth_primitives_traits::receipt::gas_spent_by_transactions; + /// Receipt containing result of transaction execution. #[derive( Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable, Serialize, Deserialize, @@ -199,17 +202,6 @@ impl ReceiptWithBloom { } } -/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). 
-pub fn gas_spent_by_transactions>( - receipts: impl IntoIterator, -) -> Vec<(u64, u64)> { - receipts - .into_iter() - .enumerate() - .map(|(id, receipt)| (id as u64, receipt.deref().cumulative_gas_used)) - .collect() -} - #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Receipt { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { From a534db8714c71f398dfad307c1b7c88222e9a971 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 15 Nov 2024 00:57:31 +0400 Subject: [PATCH 480/970] refactor: use `DBProvider` in `HistoricalStateProvider` (#12556) --- .../src/providers/database/provider.rs | 9 +- .../src/providers/state/historical.rs | 249 +++++++----------- 2 files changed, 96 insertions(+), 162 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 62a44c175b0..53911b5d133 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -178,11 +178,7 @@ impl DatabaseProvider { let storage_history_prune_checkpoint = self.get_prune_checkpoint(PruneSegment::StorageHistory)?; - let mut state_provider = HistoricalStateProviderRef::new( - &self.tx, - block_number, - self.static_file_provider.clone(), - ); + let mut state_provider = HistoricalStateProviderRef::new(self, block_number); // If we pruned account or storage history, we can't return state on every historical block. // Instead, we should cap it at the latest prune checkpoint for corresponding prune segment. 
@@ -259,8 +255,7 @@ impl TryIntoHistoricalStateProvider for Databa let storage_history_prune_checkpoint = self.get_prune_checkpoint(PruneSegment::StorageHistory)?; - let mut state_provider = - HistoricalStateProvider::new(self.tx, block_number, self.static_file_provider); + let mut state_provider = HistoricalStateProvider::new(self, block_number); // If we pruned account or storage history, we can't return state on every historical block. // Instead, we should cap it at the latest prune checkpoint for corresponding prune segment. diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 56a1d057e70..29ba70e2049 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,6 @@ use crate::{ - providers::{state::macros::delegate_provider_impls, StaticFileProvider}, - AccountReader, BlockHashReader, ProviderError, StateProvider, StateRootProvider, + providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, + ProviderError, StateProvider, StateRootProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{ @@ -14,8 +14,8 @@ use reth_db_api::{ table::Table, transaction::DbTx, }; -use reth_primitives::{Account, Bytecode, StaticFileSegment}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_primitives::{Account, Bytecode}; +use reth_storage_api::{BlockNumReader, DBProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ proof::{Proof, StorageProof}, @@ -41,15 +41,13 @@ use std::fmt::Debug; /// - [`tables::AccountChangeSets`] /// - [`tables::StorageChangeSets`] #[derive(Debug)] -pub struct HistoricalStateProviderRef<'b, TX: DbTx> { - /// Transaction - tx: &'b TX, +pub struct HistoricalStateProviderRef<'b, Provider> { + /// Database provider + provider: &'b Provider, /// Block number is 
main index for the history state of accounts and storages. block_number: BlockNumber, /// Lowest blocks at which different parts of the state are available. lowest_available_blocks: LowestAvailableBlocks, - /// Static File provider - static_file_provider: StaticFileProvider, } #[derive(Debug, Eq, PartialEq)] @@ -60,25 +58,20 @@ pub enum HistoryInfo { MaybeInPlainState, } -impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { +impl<'b, Provider: DBProvider + BlockNumReader> HistoricalStateProviderRef<'b, Provider> { /// Create new `StateProvider` for historical block number - pub fn new( - tx: &'b TX, - block_number: BlockNumber, - static_file_provider: StaticFileProvider, - ) -> Self { - Self { tx, block_number, lowest_available_blocks: Default::default(), static_file_provider } + pub fn new(provider: &'b Provider, block_number: BlockNumber) -> Self { + Self { provider, block_number, lowest_available_blocks: Default::default() } } /// Create new `StateProvider` for historical block number and lowest block numbers at which /// account & storage histories are available. pub const fn new_with_lowest_available_blocks( - tx: &'b TX, + provider: &'b Provider, block_number: BlockNumber, lowest_available_blocks: LowestAvailableBlocks, - static_file_provider: StaticFileProvider, ) -> Self { - Self { tx, block_number, lowest_available_blocks, static_file_provider } + Self { provider, block_number, lowest_available_blocks } } /// Lookup an account in the `AccountsHistory` table @@ -117,15 +110,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { /// Checks and returns `true` if distance to historical block exceeds the provided limit. fn check_distance_against_limit(&self, limit: u64) -> ProviderResult { - let tip = self - .tx - .cursor_read::()? - .last()? 
- .map(|(tip, _)| tip) - .or_else(|| { - self.static_file_provider.get_highest_static_file_block(StaticFileSegment::Headers) - }) - .ok_or(ProviderError::BestBlockNotFound)?; + let tip = self.provider.last_block_number()?; Ok(tip.saturating_sub(self.block_number) > limit) } @@ -146,7 +131,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { ); } - Ok(HashedPostState::from_reverts(self.tx, self.block_number)?) + Ok(HashedPostState::from_reverts(self.tx(), self.block_number)?) } /// Retrieve revert hashed storage for this history provider and target address. @@ -163,7 +148,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { ); } - Ok(HashedStorage::from_reverts(self.tx, address, self.block_number)?) + Ok(HashedStorage::from_reverts(self.tx(), address, self.block_number)?) } fn history_info( @@ -175,7 +160,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { where T: Table, { - let mut cursor = self.tx.cursor_read::()?; + let mut cursor = self.tx().cursor_read::()?; // Lookup the history chunk in the history index. If they key does not appear in the // index, the first chunk for the next key will be returned so we filter out chunks that @@ -248,13 +233,21 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { } } -impl AccountReader for HistoricalStateProviderRef<'_, TX> { +impl HistoricalStateProviderRef<'_, Provider> { + fn tx(&self) -> &Provider::Tx { + self.provider.tx_ref() + } +} + +impl AccountReader + for HistoricalStateProviderRef<'_, Provider> +{ /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { match self.account_history_lookup(address)? { HistoryInfo::NotYetWritten => Ok(None), HistoryInfo::InChangeset(changeset_block_number) => Ok(self - .tx + .tx() .cursor_dup_read::()? .seek_by_key_subkey(changeset_block_number, address)? .filter(|acc| acc.address == address) @@ -264,21 +257,18 @@ impl AccountReader for HistoricalStateProviderRef<'_, TX> { })? 
.info), HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => { - Ok(self.tx.get::(address)?) + Ok(self.tx().get::(address)?) } } } } -impl BlockHashReader for HistoricalStateProviderRef<'_, TX> { +impl BlockHashReader + for HistoricalStateProviderRef<'_, Provider> +{ /// Get block hash by number. fn block_hash(&self, number: u64) -> ProviderResult> { - self.static_file_provider.get_with_static_file_or_database( - StaticFileSegment::Headers, - number, - |static_file| static_file.block_hash(number), - || Ok(self.tx.get::(number)?), - ) + self.provider.block_hash(number) } fn canonical_hashes_range( @@ -286,37 +276,23 @@ impl BlockHashReader for HistoricalStateProviderRef<'_, TX> { start: BlockNumber, end: BlockNumber, ) -> ProviderResult> { - self.static_file_provider.get_range_with_static_file_or_database( - StaticFileSegment::Headers, - start..end, - |static_file, range, _| static_file.canonical_hashes_range(range.start, range.end), - |range, _| { - self.tx - .cursor_read::() - .map(|mut cursor| { - cursor - .walk_range(range)? - .map(|result| result.map(|(_, hash)| hash).map_err(Into::into)) - .collect::>>() - })? 
- .map_err(Into::into) - }, - |_| true, - ) + self.provider.canonical_hashes_range(start, end) } } -impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { +impl StateRootProvider + for HistoricalStateProviderRef<'_, Provider> +{ fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state); - StateRoot::overlay_root(self.tx, revert_state) + StateRoot::overlay_root(self.tx(), revert_state) .map_err(|err| ProviderError::Database(err.into())) } fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult { input.prepend(self.revert_state()?); - StateRoot::overlay_root_from_nodes(self.tx, input) + StateRoot::overlay_root_from_nodes(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } @@ -326,7 +302,7 @@ impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { ) -> ProviderResult<(B256, TrieUpdates)> { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state); - StateRoot::overlay_root_with_updates(self.tx, revert_state) + StateRoot::overlay_root_with_updates(self.tx(), revert_state) .map_err(|err| ProviderError::Database(err.into())) } @@ -335,12 +311,14 @@ impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { input.prepend(self.revert_state()?); - StateRoot::overlay_root_from_nodes_with_updates(self.tx, input) + StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } } -impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { +impl StorageRootProvider + for HistoricalStateProviderRef<'_, Provider> +{ fn storage_root( &self, address: Address, @@ -348,7 +326,7 @@ impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { ) -> ProviderResult { let mut revert_storage = self.revert_storage(address)?; revert_storage.extend(&hashed_storage); - 
StorageRoot::overlay_root(self.tx, address, revert_storage) + StorageRoot::overlay_root(self.tx(), address, revert_storage) .map_err(|err| ProviderError::Database(err.into())) } @@ -360,12 +338,14 @@ impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { ) -> ProviderResult { let mut revert_storage = self.revert_storage(address)?; revert_storage.extend(&hashed_storage); - StorageProof::overlay_storage_proof(self.tx, address, slot, revert_storage) + StorageProof::overlay_storage_proof(self.tx(), address, slot, revert_storage) .map_err(Into::::into) } } -impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { +impl StateProofProvider + for HistoricalStateProviderRef<'_, Provider> +{ /// Get account and storage proofs. fn proof( &self, @@ -374,7 +354,7 @@ impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { slots: &[B256], ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_account_proof(self.tx, input, address, slots) + Proof::overlay_account_proof(self.tx(), input, address, slots) .map_err(Into::::into) } @@ -384,7 +364,7 @@ impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { targets: HashMap>, ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_multiproof(self.tx, input, targets).map_err(Into::::into) + Proof::overlay_multiproof(self.tx(), input, targets).map_err(Into::::into) } fn witness( @@ -393,11 +373,13 @@ impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { target: HashedPostState, ) -> ProviderResult> { input.prepend(self.revert_state()?); - TrieWitness::overlay_witness(self.tx, input, target).map_err(Into::::into) + TrieWitness::overlay_witness(self.tx(), input, target).map_err(Into::::into) } } -impl StateProvider for HistoricalStateProviderRef<'_, TX> { +impl StateProvider + for HistoricalStateProviderRef<'_, Provider> +{ /// Get storage. 
fn storage( &self, @@ -407,7 +389,7 @@ impl StateProvider for HistoricalStateProviderRef<'_, TX> { match self.storage_history_lookup(address, storage_key)? { HistoryInfo::NotYetWritten => Ok(None), HistoryInfo::InChangeset(changeset_block_number) => Ok(Some( - self.tx + self.tx() .cursor_dup_read::()? .seek_by_key_subkey((changeset_block_number, address).into(), storage_key)? .filter(|entry| entry.key == storage_key) @@ -419,7 +401,7 @@ impl StateProvider for HistoricalStateProviderRef<'_, TX> { .value, )), HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => Ok(self - .tx + .tx() .cursor_dup_read::()? .seek_by_key_subkey(address, storage_key)? .filter(|entry| entry.key == storage_key) @@ -430,32 +412,26 @@ impl StateProvider for HistoricalStateProviderRef<'_, TX> { /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { - self.tx.get::(code_hash).map_err(Into::into) + self.tx().get::(code_hash).map_err(Into::into) } } /// State provider for a given block number. /// For more detailed description, see [`HistoricalStateProviderRef`]. #[derive(Debug)] -pub struct HistoricalStateProvider { - /// Database transaction - tx: TX, +pub struct HistoricalStateProvider { + /// Database provider. + provider: Provider, /// State at the block number is the main indexer of the state. block_number: BlockNumber, /// Lowest blocks at which different parts of the state are available. 
lowest_available_blocks: LowestAvailableBlocks, - /// Static File provider - static_file_provider: StaticFileProvider, } -impl HistoricalStateProvider { +impl HistoricalStateProvider { /// Create new `StateProvider` for historical block number - pub fn new( - tx: TX, - block_number: BlockNumber, - static_file_provider: StaticFileProvider, - ) -> Self { - Self { tx, block_number, lowest_available_blocks: Default::default(), static_file_provider } + pub fn new(provider: Provider, block_number: BlockNumber) -> Self { + Self { provider, block_number, lowest_available_blocks: Default::default() } } /// Set the lowest block number at which the account history is available. @@ -478,18 +454,17 @@ impl HistoricalStateProvider { /// Returns a new provider that takes the `TX` as reference #[inline(always)] - fn as_ref(&self) -> HistoricalStateProviderRef<'_, TX> { + const fn as_ref(&self) -> HistoricalStateProviderRef<'_, Provider> { HistoricalStateProviderRef::new_with_lowest_available_blocks( - &self.tx, + &self.provider, self.block_number, self.lowest_available_blocks, - self.static_file_provider.clone(), ) } } // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [TX: DbTx]); +delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. 
@@ -525,7 +500,6 @@ mod tests { providers::state::historical::{HistoryInfo, LowestAvailableBlocks}, test_utils::create_test_provider_factory, AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, StateProvider, - StaticFileProviderFactory, }; use alloy_primitives::{address, b256, Address, B256, U256}; use reth_db::{tables, BlockNumberList}; @@ -534,6 +508,7 @@ mod tests { transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, StorageEntry}; + use reth_storage_api::{BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory}; use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); @@ -542,7 +517,7 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_historical_state_provider() { + const fn assert_historical_state_provider() { assert_state_provider::>(); } @@ -550,7 +525,6 @@ mod tests { fn history_provider_get_account() { let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap().into_tx(); - let static_file_provider = factory.static_file_provider(); tx.put::( ShardedKey { key: ADDRESS, highest_block_number: 7 }, @@ -610,63 +584,46 @@ mod tests { tx.put::(HIGHER_ADDRESS, higher_acc_plain).unwrap(); tx.commit().unwrap(); - let tx = factory.provider().unwrap().into_tx(); + let db = factory.provider().unwrap(); // run + assert_eq!(HistoricalStateProviderRef::new(&db, 1).basic_account(ADDRESS), Ok(None)); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) - .basic_account(ADDRESS), - Ok(None) - ); - assert_eq!( - HistoricalStateProviderRef::new(&tx, 2, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 2).basic_account(ADDRESS), Ok(Some(acc_at3)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 3, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 3).basic_account(ADDRESS), 
Ok(Some(acc_at3)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 4, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 4).basic_account(ADDRESS), Ok(Some(acc_at7)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 7, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 7).basic_account(ADDRESS), Ok(Some(acc_at7)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 9, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 9).basic_account(ADDRESS), Ok(Some(acc_at10)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 10, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 10).basic_account(ADDRESS), Ok(Some(acc_at10)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 11, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 11).basic_account(ADDRESS), Ok(Some(acc_at15)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 16, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 16).basic_account(ADDRESS), Ok(Some(acc_plain)) ); + assert_eq!(HistoricalStateProviderRef::new(&db, 1).basic_account(HIGHER_ADDRESS), Ok(None)); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) - .basic_account(HIGHER_ADDRESS), - Ok(None) - ); - assert_eq!( - HistoricalStateProviderRef::new(&tx, 1000, static_file_provider) - .basic_account(HIGHER_ADDRESS), + HistoricalStateProviderRef::new(&db, 1000).basic_account(HIGHER_ADDRESS), Ok(Some(higher_acc_plain)) ); } @@ -675,7 +632,6 @@ mod tests { fn history_provider_get_storage() { let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap().into_tx(); - let static_file_provider = factory.static_file_provider(); tx.put::( StorageShardedKey { @@ -722,57 +678,44 @@ mod tests { tx.put::(HIGHER_ADDRESS, 
higher_entry_plain).unwrap(); tx.commit().unwrap(); - let tx = factory.provider().unwrap().into_tx(); + let db = factory.provider().unwrap(); // run + assert_eq!(HistoricalStateProviderRef::new(&db, 0).storage(ADDRESS, STORAGE), Ok(None)); assert_eq!( - HistoricalStateProviderRef::new(&tx, 0, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), - Ok(None) - ); - assert_eq!( - HistoricalStateProviderRef::new(&tx, 3, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 3).storage(ADDRESS, STORAGE), Ok(Some(U256::ZERO)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 4, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 4).storage(ADDRESS, STORAGE), Ok(Some(entry_at7.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 7, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 7).storage(ADDRESS, STORAGE), Ok(Some(entry_at7.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 9, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 9).storage(ADDRESS, STORAGE), Ok(Some(entry_at10.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 10, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 10).storage(ADDRESS, STORAGE), Ok(Some(entry_at10.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 11, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 11).storage(ADDRESS, STORAGE), Ok(Some(entry_at15.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 16, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 16).storage(ADDRESS, STORAGE), Ok(Some(entry_plain.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) - .storage(HIGHER_ADDRESS, STORAGE), + 
HistoricalStateProviderRef::new(&db, 1).storage(HIGHER_ADDRESS, STORAGE), Ok(None) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1000, static_file_provider) - .storage(HIGHER_ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 1000).storage(HIGHER_ADDRESS, STORAGE), Ok(Some(higher_entry_plain.value)) ); } @@ -780,19 +723,17 @@ mod tests { #[test] fn history_provider_unavailable() { let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap().into_tx(); - let static_file_provider = factory.static_file_provider(); + let db = factory.database_provider_rw().unwrap(); // provider block_number < lowest available block number, // i.e. state at provider block is pruned let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( - &tx, + &db, 2, LowestAvailableBlocks { account_history_block_number: Some(3), storage_history_block_number: Some(3), }, - static_file_provider.clone(), ); assert_eq!( provider.account_history_lookup(ADDRESS), @@ -806,13 +747,12 @@ mod tests { // provider block_number == lowest available block number, // i.e. state at provider block is available let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( - &tx, + &db, 2, LowestAvailableBlocks { account_history_block_number: Some(2), storage_history_block_number: Some(2), }, - static_file_provider.clone(), ); assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::MaybeInPlainState)); assert_eq!( @@ -823,13 +763,12 @@ mod tests { // provider block_number == lowest available block number, // i.e. 
state at provider block is available let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( - &tx, + &db, 2, LowestAvailableBlocks { account_history_block_number: Some(1), storage_history_block_number: Some(1), }, - static_file_provider, ); assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::MaybeInPlainState)); assert_eq!( From b1729d22e40fa43382bc3dcc843674d3a319c71c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Nov 2024 23:03:09 +0100 Subject: [PATCH 481/970] feat: impl block for sealedblock (#12555) --- crates/primitives-traits/src/block/body.rs | 5 ++- crates/primitives-traits/src/block/header.rs | 8 ++--- crates/primitives/src/block.rs | 32 +++++++++++++++++--- 3 files changed, 34 insertions(+), 11 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index c5f15aefea6..bb52b89724b 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,11 +1,9 @@ //! Block body abstraction. +use crate::InMemorySize; use alloc::fmt; - use alloy_consensus::Transaction; -use crate::InMemorySize; - /// Abstraction for block's body. pub trait BlockBody: Send @@ -21,6 +19,7 @@ pub trait BlockBody: + alloy_rlp::Encodable + alloy_rlp::Decodable + InMemorySize + + 'static { /// Ordered list of signed transactions as committed in block. // todo: requires trait for signed transaction diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 7ab76f24987..0c1fc3e57f2 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -1,12 +1,10 @@ //! Block header data primitive. -use core::fmt; - +use crate::InMemorySize; use alloy_primitives::Sealable; +use core::fmt; use reth_codecs::Compact; -use crate::InMemorySize; - /// Helper trait that unifies all behaviour required by block header to support full node /// operations. 
pub trait FullBlockHeader: BlockHeader + Compact {} @@ -28,6 +26,7 @@ pub trait BlockHeader: + alloy_consensus::BlockHeader + Sealable + InMemorySize + + 'static { } @@ -47,5 +46,6 @@ impl BlockHeader for T where + alloy_consensus::BlockHeader + Sealable + InMemorySize + + 'static { } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 0f96a9d5842..d6476c29b4c 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -90,13 +90,13 @@ impl reth_primitives_traits::Block for Block { type Header = Header; type Body = BlockBody; - fn body(&self) -> &Self::Body { - &self.body - } - fn header(&self) -> &Self::Header { &self.header } + + fn body(&self) -> &Self::Body { + &self.body + } } impl InMemorySize for Block { @@ -463,6 +463,24 @@ where } } +impl reth_primitives_traits::Block for SealedBlock +where + H: reth_primitives_traits::BlockHeader, + B: reth_primitives_traits::BlockBody, + Self: Serialize + for<'a> Deserialize<'a>, +{ + type Header = H; + type Body = B; + + fn header(&self) -> &Self::Header { + self.header.header() + } + + fn body(&self) -> &Self::Body { + &self.body + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a, H, B> arbitrary::Arbitrary<'a> for SealedBlock where @@ -959,6 +977,12 @@ mod tests { use alloy_rlp::{Decodable, Encodable}; use std::str::FromStr; + const fn _traits() { + const fn assert_block() {} + assert_block::(); + assert_block::(); + } + /// Check parsing according to EIP-1898. 
#[test] fn can_parse_blockid_u64() { From d8af28bbfac5e10b4ffcdca59a57997bed1a3a9f Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 15 Nov 2024 02:05:07 +0400 Subject: [PATCH 482/970] refactor: use `DBProvider` in `LatestStateProvider` (#12557) --- .../commands/debug_cmd/in_memory_merkle.rs | 7 +- bin/reth/src/commands/debug_cmd/merkle.rs | 7 +- crates/exex/exex/src/backfill/test_utils.rs | 13 +- crates/stages/stages/src/stages/execution.rs | 19 +-- .../provider/src/providers/database/mod.rs | 2 +- .../src/providers/database/provider.rs | 9 +- .../provider/src/providers/state/latest.rs | 116 +++++++----------- 7 files changed, 67 insertions(+), 106 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index bc36578a327..d5bb8a87b22 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -24,7 +24,7 @@ use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, - StageCheckpointReader, StateWriter, StaticFileProviderFactory, StorageReader, + StageCheckpointReader, StateWriter, StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; @@ -133,10 +133,7 @@ impl> Command { ) .await?; - let db = StateProviderDatabase::new(LatestStateProviderRef::new( - provider.tx_ref(), - provider_factory.static_file_provider(), - )); + let db = StateProviderDatabase::new(LatestStateProviderRef::new(&provider)); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 3c6e38512c9..9c77c70abc7 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ 
-22,7 +22,7 @@ use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, ProviderFactory, StateWriter, StaticFileProviderFactory, + ProviderError, ProviderFactory, StateWriter, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -153,10 +153,7 @@ impl> Command { td += sealed_block.difficulty; let mut executor = executor_provider.batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new( - provider_rw.tx_ref(), - provider_rw.static_file_provider().clone(), - ), + LatestStateProviderRef::new(&provider_rw), )); executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; let execution_outcome = executor.finalize(); diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index a1e88c7f428..80af408c5c8 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -14,7 +14,7 @@ use reth_primitives::{ }; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, - ProviderFactory, StaticFileProviderFactory, + ProviderFactory, }; use reth_revm::database::StateProviderDatabase; use reth_testing_utils::generators::sign_tx_with_key_pair; @@ -63,10 +63,7 @@ where // Execute the block to produce a block execution output let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec) - .executor(StateProviderDatabase::new(LatestStateProviderRef::new( - provider.tx_ref(), - provider.static_file_provider(), - ))) + .executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider))) .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?; block_execution_output.state.reverts.sort(); @@ -191,10 +188,8 @@ where let provider = provider_factory.provider()?; - let 
executor = - EthExecutorProvider::ethereum(chain_spec).batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new(provider.tx_ref(), provider.static_file_provider()), - )); + let executor = EthExecutorProvider::ethereum(chain_spec) + .batch_executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider))); let mut execution_outcome = executor.execute_and_verify_batch(vec![ (&block1, U256::ZERO).into(), diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 630cc6df03d..1750758a26a 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -16,9 +16,9 @@ use reth_primitives_traits::format_gas_throughput; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, writer::UnifiedStorageWriter, - BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, StateChangeWriter, StateWriter, StaticFileProviderFactory, StatsReader, - TransactionVariant, + BlockHashReader, BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, StateChangeWriter, StateWriter, StaticFileProviderFactory, + StatsReader, TransactionVariant, }; use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; @@ -175,8 +175,12 @@ impl ExecutionStage { impl Stage for ExecutionStage where E: BlockExecutorProvider, - Provider: - DBProvider + BlockReader + StaticFileProviderFactory + StatsReader + StateChangeWriter, + Provider: DBProvider + + BlockReader + + StaticFileProviderFactory + + StatsReader + + StateChangeWriter + + BlockHashReader, for<'a> UnifiedStorageWriter<'a, Provider, StaticFileProviderRWRefMut<'a>>: StateWriter, { /// Return the id of the stage @@ -220,10 +224,7 @@ where None }; - let db = StateProviderDatabase(LatestStateProviderRef::new( - provider.tx_ref(), - provider.static_file_provider(), - )); + let db 
= StateProviderDatabase(LatestStateProviderRef::new(provider)); let mut executor = self.executor_provider.batch_executor(db); executor.set_tip(max_block); executor.set_prune_modes(prune_modes); diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index b4d2e5e48b8..0e193f8cdef 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -160,7 +160,7 @@ impl ProviderFactory { #[track_caller] pub fn latest(&self) -> ProviderResult { trace!(target: "providers::db", "Returning latest state provider"); - Ok(Box::new(LatestStateProvider::new(self.db.tx()?, self.static_file_provider()))) + Ok(Box::new(LatestStateProvider::new(self.database_provider_ro()?))) } /// Storage provider for state at that given block diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 53911b5d133..30a69fbfc77 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -151,7 +151,7 @@ impl DatabaseProvider { /// State provider for latest block pub fn latest<'a>(&'a self) -> ProviderResult> { trace!(target: "providers::db", "Returning latest state provider"); - Ok(Box::new(LatestStateProviderRef::new(&self.tx, self.static_file_provider.clone()))) + Ok(Box::new(LatestStateProviderRef::new(self))) } /// Storage provider for state at that given block hash @@ -164,10 +164,7 @@ impl DatabaseProvider { if block_number == self.best_block_number().unwrap_or_default() && block_number == self.last_block_number().unwrap_or_default() { - return Ok(Box::new(LatestStateProviderRef::new( - &self.tx, - self.static_file_provider.clone(), - ))) + return Ok(Box::new(LatestStateProviderRef::new(self))) } // +1 as the changeset that we want is the one that was applied after this block. 
@@ -244,7 +241,7 @@ impl TryIntoHistoricalStateProvider for Databa if block_number == self.best_block_number().unwrap_or_default() && block_number == self.last_block_number().unwrap_or_default() { - return Ok(Box::new(LatestStateProvider::new(self.tx, self.static_file_provider))) + return Ok(Box::new(LatestStateProvider::new(self))) } // +1 as the changeset that we want is the one that was applied after this block. diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index fdcbfc4937f..297217acece 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,18 +1,15 @@ use crate::{ - providers::{state::macros::delegate_provider_impls, StaticFileProvider}, - AccountReader, BlockHashReader, StateProvider, StateRootProvider, + providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, + StateProvider, StateRootProvider, }; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, }; use reth_db::tables; -use reth_db_api::{ - cursor::{DbCursorRO, DbDupCursorRO}, - transaction::DbTx, -}; -use reth_primitives::{Account, Bytecode, StaticFileSegment}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_db_api::{cursor::DbDupCursorRO, transaction::DbTx}; +use reth_primitives::{Account, Bytecode}; +use reth_storage_api::{DBProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ proof::{Proof, StorageProof}, @@ -26,37 +23,33 @@ use reth_trie_db::{ }; /// State provider over latest state that takes tx reference. +/// +/// Wraps a [`DBProvider`] to get access to database. 
#[derive(Debug)] -pub struct LatestStateProviderRef<'b, TX: DbTx> { - /// database transaction - tx: &'b TX, - /// Static File provider - static_file_provider: StaticFileProvider, -} +pub struct LatestStateProviderRef<'b, Provider>(&'b Provider); -impl<'b, TX: DbTx> LatestStateProviderRef<'b, TX> { +impl<'b, Provider: DBProvider> LatestStateProviderRef<'b, Provider> { /// Create new state provider - pub const fn new(tx: &'b TX, static_file_provider: StaticFileProvider) -> Self { - Self { tx, static_file_provider } + pub const fn new(provider: &'b Provider) -> Self { + Self(provider) + } + + fn tx(&self) -> &Provider::Tx { + self.0.tx_ref() } } -impl AccountReader for LatestStateProviderRef<'_, TX> { +impl AccountReader for LatestStateProviderRef<'_, Provider> { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { - self.tx.get::(address).map_err(Into::into) + self.tx().get::(address).map_err(Into::into) } } -impl BlockHashReader for LatestStateProviderRef<'_, TX> { +impl BlockHashReader for LatestStateProviderRef<'_, Provider> { /// Get block hash by number. fn block_hash(&self, number: u64) -> ProviderResult> { - self.static_file_provider.get_with_static_file_or_database( - StaticFileSegment::Headers, - number, - |static_file| static_file.block_hash(number), - || Ok(self.tx.get::(number)?), - ) + self.0.block_hash(number) } fn canonical_hashes_range( @@ -64,34 +57,18 @@ impl BlockHashReader for LatestStateProviderRef<'_, TX> { start: BlockNumber, end: BlockNumber, ) -> ProviderResult> { - self.static_file_provider.get_range_with_static_file_or_database( - StaticFileSegment::Headers, - start..end, - |static_file, range, _| static_file.canonical_hashes_range(range.start, range.end), - |range, _| { - self.tx - .cursor_read::() - .map(|mut cursor| { - cursor - .walk_range(range)? - .map(|result| result.map(|(_, hash)| hash).map_err(Into::into)) - .collect::>>() - })? 
- .map_err(Into::into) - }, - |_| true, - ) + self.0.canonical_hashes_range(start, end) } } -impl StateRootProvider for LatestStateProviderRef<'_, TX> { +impl StateRootProvider for LatestStateProviderRef<'_, Provider> { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { - StateRoot::overlay_root(self.tx, hashed_state) + StateRoot::overlay_root(self.tx(), hashed_state) .map_err(|err| ProviderError::Database(err.into())) } fn state_root_from_nodes(&self, input: TrieInput) -> ProviderResult { - StateRoot::overlay_root_from_nodes(self.tx, input) + StateRoot::overlay_root_from_nodes(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } @@ -99,7 +76,7 @@ impl StateRootProvider for LatestStateProviderRef<'_, TX> { &self, hashed_state: HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { - StateRoot::overlay_root_with_updates(self.tx, hashed_state) + StateRoot::overlay_root_with_updates(self.tx(), hashed_state) .map_err(|err| ProviderError::Database(err.into())) } @@ -107,18 +84,18 @@ impl StateRootProvider for LatestStateProviderRef<'_, TX> { &self, input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { - StateRoot::overlay_root_from_nodes_with_updates(self.tx, input) + StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } } -impl StorageRootProvider for LatestStateProviderRef<'_, TX> { +impl StorageRootProvider for LatestStateProviderRef<'_, Provider> { fn storage_root( &self, address: Address, hashed_storage: HashedStorage, ) -> ProviderResult { - StorageRoot::overlay_root(self.tx, address, hashed_storage) + StorageRoot::overlay_root(self.tx(), address, hashed_storage) .map_err(|err| ProviderError::Database(err.into())) } @@ -128,19 +105,19 @@ impl StorageRootProvider for LatestStateProviderRef<'_, TX> { slot: B256, hashed_storage: HashedStorage, ) -> ProviderResult { - StorageProof::overlay_storage_proof(self.tx, address, slot, hashed_storage) + 
StorageProof::overlay_storage_proof(self.tx(), address, slot, hashed_storage) .map_err(Into::::into) } } -impl StateProofProvider for LatestStateProviderRef<'_, TX> { +impl StateProofProvider for LatestStateProviderRef<'_, Provider> { fn proof( &self, input: TrieInput, address: Address, slots: &[B256], ) -> ProviderResult { - Proof::overlay_account_proof(self.tx, input, address, slots) + Proof::overlay_account_proof(self.tx(), input, address, slots) .map_err(Into::::into) } @@ -149,7 +126,7 @@ impl StateProofProvider for LatestStateProviderRef<'_, TX> { input: TrieInput, targets: HashMap>, ) -> ProviderResult { - Proof::overlay_multiproof(self.tx, input, targets).map_err(Into::::into) + Proof::overlay_multiproof(self.tx(), input, targets).map_err(Into::::into) } fn witness( @@ -157,18 +134,20 @@ impl StateProofProvider for LatestStateProviderRef<'_, TX> { input: TrieInput, target: HashedPostState, ) -> ProviderResult> { - TrieWitness::overlay_witness(self.tx, input, target).map_err(Into::::into) + TrieWitness::overlay_witness(self.tx(), input, target).map_err(Into::::into) } } -impl StateProvider for LatestStateProviderRef<'_, TX> { +impl StateProvider + for LatestStateProviderRef<'_, Provider> +{ /// Get storage. fn storage( &self, account: Address, storage_key: StorageKey, ) -> ProviderResult> { - let mut cursor = self.tx.cursor_dup_read::()?; + let mut cursor = self.tx().cursor_dup_read::()?; if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? { if entry.key == storage_key { return Ok(Some(entry.value)) @@ -179,34 +158,29 @@ impl StateProvider for LatestStateProviderRef<'_, TX> { /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { - self.tx.get::(code_hash).map_err(Into::into) + self.tx().get::(code_hash).map_err(Into::into) } } /// State provider for the latest state. 
#[derive(Debug)] -pub struct LatestStateProvider { - /// database transaction - db: TX, - /// Static File provider - static_file_provider: StaticFileProvider, -} +pub struct LatestStateProvider(Provider); -impl LatestStateProvider { +impl LatestStateProvider { /// Create new state provider - pub const fn new(db: TX, static_file_provider: StaticFileProvider) -> Self { - Self { db, static_file_provider } + pub const fn new(db: Provider) -> Self { + Self(db) } /// Returns a new provider that takes the `TX` as reference #[inline(always)] - fn as_ref(&self) -> LatestStateProviderRef<'_, TX> { - LatestStateProviderRef::new(&self.db, self.static_file_provider.clone()) + const fn as_ref(&self) -> LatestStateProviderRef<'_, Provider> { + LatestStateProviderRef::new(&self.0) } } // Delegates all provider impls to [LatestStateProviderRef] -delegate_provider_impls!(LatestStateProvider where [TX: DbTx]); +delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader]); #[cfg(test)] mod tests { @@ -214,7 +188,7 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_latest_state_provider() { + const fn assert_latest_state_provider() { assert_state_provider::>(); } } From d028c1cbb4ad0df449cedb283f50085ed9579e2d Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 15 Nov 2024 02:07:57 +0400 Subject: [PATCH 483/970] refactor: don't reference `StaticFileProvider` in `static_file::Segment` (#12558) --- .../beacon/src/engine/hooks/static_file.rs | 10 ++++++++-- crates/stages/stages/src/stages/headers.rs | 14 +++++++------- .../static-file/src/segments/headers.rs | 9 +++------ crates/static-file/static-file/src/segments/mod.rs | 8 ++++---- .../static-file/src/segments/receipts.rs | 9 +++++---- .../static-file/src/segments/transactions.rs | 9 +++++---- .../static-file/src/static_file_producer.rs | 6 ++++-- 7 files changed, 36 insertions(+), 29 deletions(-) diff --git 
a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 89231ed5582..99854209cb3 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -33,7 +33,10 @@ impl StaticFileHook where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StageCheckpointReader + BlockReader + ChainStateBlockReader, + Provider: StaticFileProviderFactory + + StageCheckpointReader + + BlockReader + + ChainStateBlockReader, > + 'static, { /// Create a new instance @@ -145,7 +148,10 @@ impl EngineHook for StaticFileHook where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StageCheckpointReader + BlockReader + ChainStateBlockReader, + Provider: StaticFileProviderFactory + + StageCheckpointReader + + BlockReader + + ChainStateBlockReader, > + 'static, { fn name(&self) -> &'static str { diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 133a0719d91..1ec55f7fd80 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -13,9 +13,8 @@ use reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDown use reth_primitives::{SealedHeader, StaticFileSegment}; use reth_primitives_traits::serde_bincode_compat; use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, - StaticFileProviderFactory, + providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, + HeaderSyncGapProvider, StaticFileProviderFactory, }; use reth_stages_api::{ BlockErrorKind, CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, @@ -90,15 +89,16 @@ where /// /// Writes to static files ( `Header | HeaderTD | HeaderHash` ) and [`tables::HeaderNumbers`] /// database table. 
- fn write_headers( + fn write_headers + StaticFileProviderFactory>( &mut self, - provider: &impl DBProvider, - static_file_provider: StaticFileProvider, + provider: &P, ) -> Result { let total_headers = self.header_collector.len(); info!(target: "sync::stages::headers", total = total_headers, "Writing headers"); + let static_file_provider = provider.static_file_provider(); + // Consistency check of expected headers in static files vs DB is done on provider::sync_gap // when poll_execute_ready is polled. let mut last_header_number = static_file_provider @@ -293,7 +293,7 @@ where // Write the headers and related tables to DB from ETL space let to_be_processed = self.hash_collector.len() as u64; - let last_header_number = self.write_headers(provider, provider.static_file_provider())?; + let last_header_number = self.write_headers(provider)?; // Clear ETL collectors self.hash_collector.clear(); diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index 54d5bee65cf..650f4998764 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -2,10 +2,7 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; -use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - DBProvider, -}; +use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; @@ -14,7 +11,7 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Headers; -impl Segment for Headers { +impl Segment for Headers { fn segment(&self) -> StaticFileSegment { StaticFileSegment::Headers } @@ -22,9 +19,9 @@ impl Segment for Headers { fn copy_to_static_files( &self, provider: Provider, - 
static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { + let static_file_provider = provider.static_file_provider(); let mut static_file_writer = static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Headers)?; diff --git a/crates/static-file/static-file/src/segments/mod.rs b/crates/static-file/static-file/src/segments/mod.rs index 3d961c7b119..fc79effdd5a 100644 --- a/crates/static-file/static-file/src/segments/mod.rs +++ b/crates/static-file/static-file/src/segments/mod.rs @@ -10,22 +10,22 @@ mod receipts; pub use receipts::Receipts; use alloy_primitives::BlockNumber; -use reth_provider::providers::StaticFileProvider; +use reth_provider::StaticFileProviderFactory; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; /// A segment represents moving some portion of the data to static files. -pub trait Segment: Send + Sync { +pub trait Segment: Send + Sync { /// Returns the [`StaticFileSegment`]. fn segment(&self) -> StaticFileSegment; - /// Move data to static files for the provided block range. [`StaticFileProvider`] will handle + /// Move data to static files for the provided block range. + /// [`StaticFileProvider`](reth_provider::providers::StaticFileProvider) will handle /// the management of and writing to files. 
fn copy_to_static_files( &self, provider: Provider, - static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()>; } diff --git a/crates/static-file/static-file/src/segments/receipts.rs b/crates/static-file/static-file/src/segments/receipts.rs index 4e2185a598a..0442c360099 100644 --- a/crates/static-file/static-file/src/segments/receipts.rs +++ b/crates/static-file/static-file/src/segments/receipts.rs @@ -3,8 +3,7 @@ use alloy_primitives::BlockNumber; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DBProvider, + providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, }; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -14,7 +13,9 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Receipts; -impl Segment for Receipts { +impl Segment + for Receipts +{ fn segment(&self) -> StaticFileSegment { StaticFileSegment::Receipts } @@ -22,9 +23,9 @@ impl Segment for Receipts { fn copy_to_static_files( &self, provider: Provider, - static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { + let static_file_provider = provider.static_file_provider(); let mut static_file_writer = static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Receipts)?; diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index 52e0ca8b575..eba1987080c 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -3,8 +3,7 @@ use alloy_primitives::BlockNumber; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, 
DBProvider, + providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, }; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -14,7 +13,9 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Transactions; -impl Segment for Transactions { +impl Segment + for Transactions +{ fn segment(&self) -> StaticFileSegment { StaticFileSegment::Transactions } @@ -24,9 +25,9 @@ impl Segment for Transactions { fn copy_to_static_files( &self, provider: Provider, - static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { + let static_file_provider = provider.static_file_provider(); let mut static_file_writer = static_file_provider .get_writer(*block_range.start(), StaticFileSegment::Transactions)?; diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 0f07ec32821..8959819e821 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -85,7 +85,9 @@ where impl StaticFileProducerInner where Provider: StaticFileProviderFactory - + DatabaseProviderFactory, + + DatabaseProviderFactory< + Provider: StaticFileProviderFactory + StageCheckpointReader + BlockReader, + >, { /// Listen for events on the `static_file_producer`. 
pub fn events(&self) -> EventStream { @@ -136,7 +138,7 @@ where // Create a new database transaction on every segment to prevent long-lived read-only // transactions let provider = self.provider.database_provider_ro()?.disable_long_read_transaction_safety(); - segment.copy_to_static_files(provider, self.provider.static_file_provider(), block_range.clone())?; + segment.copy_to_static_files(provider, block_range.clone())?; let elapsed = start.elapsed(); // TODO(alexey): track in metrics debug!(target: "static_file", segment = %segment.segment(), ?block_range, ?elapsed, "Finished StaticFileProducer segment"); From 61d32e9bfdfadbc1f8bbcaefbbc8d83bbd9bc5e6 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 14 Nov 2024 17:37:39 -0500 Subject: [PATCH 484/970] fix(rpc): remove reference to preimage bool in debug_executionWitness (#12559) --- crates/rpc/rpc-api/src/debug.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 1b857d4a11f..76316fa71f4 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -136,8 +136,7 @@ pub trait DebugApi { /// to their preimages that were required during the execution of the block, including during /// state root recomputation. /// - /// The first argument is the block number or block hash. The second argument is a boolean - /// indicating whether to include the preimages of keys in the response. + /// The first argument is the block number or block hash. 
#[method(name = "executionWitness")] async fn debug_execution_witness(&self, block: BlockNumberOrTag) -> RpcResult; From 7a7a6de2cd7db570b9e07eca58bf59cd3e5f7d9e Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 15 Nov 2024 09:35:00 +0100 Subject: [PATCH 485/970] primitive-traits: simplify `SealedHeader::default` (#12563) --- crates/primitives-traits/src/header/sealed.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index e872eb9811d..ef81891d552 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -78,9 +78,7 @@ impl InMemorySize for SealedHeader { impl Default for SealedHeader { fn default() -> Self { - let sealed = H::default().seal_slow(); - let (header, hash) = sealed.into_parts(); - Self { header, hash } + Self::seal(H::default()) } } From 28478a5144acbefa883056d9096e25837ab0cf60 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 15 Nov 2024 09:35:27 +0100 Subject: [PATCH 486/970] primitives: rm alloy `HOLESKY_GENESIS_HASH` reexport (#12562) --- crates/chainspec/src/spec.rs | 6 ++++-- crates/primitives-traits/src/constants/mod.rs | 6 ------ 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index fdaad948f26..1f8ebd45f45 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -12,7 +12,9 @@ use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use derive_more::From; use alloy_consensus::{ - constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}, + constants::{ + DEV_GENESIS_HASH, HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + }, Header, }; use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; @@ -24,7 +26,7 @@ use 
reth_network_peers::{ base_nodes, base_testnet_nodes, holesky_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, sepolia_nodes, NodeRecord, }; -use reth_primitives_traits::{constants::HOLESKY_GENESIS_HASH, SealedHeader}; +use reth_primitives_traits::SealedHeader; use reth_trie_common::root::state_root_ref_unhashed; use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec, LazyLock, OnceLock}; diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 94eaf95c269..e927ed3a7df 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,7 +1,5 @@ //! Ethereum protocol-related constants -use alloy_primitives::{b256, B256}; - /// Gas units, for example [`GIGAGAS`]. pub mod gas_units; pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; @@ -12,10 +10,6 @@ pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION" /// Minimum gas limit allowed for transactions. pub const MINIMUM_GAS_LIMIT: u64 = 5000; -/// Holesky genesis hash: `0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4` -pub const HOLESKY_GENESIS_HASH: B256 = - b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); - /// The number of blocks to unwind during a reorg that already became a part of canonical chain. /// /// In reality, the node can end up in this particular situation very rarely. 
It would happen only From 1aa316e4bcca4277f8e39847a60eca26f13f6115 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 15 Nov 2024 09:41:37 +0100 Subject: [PATCH 487/970] fmt(primitives): group pub use transaction (#12561) --- crates/primitives/src/lib.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 2318b3c2455..87bf254edab 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -44,13 +44,10 @@ pub use reth_primitives_traits::{ }; pub use static_file::StaticFileSegment; -pub use transaction::{ - BlobTransaction, PooledTransactionsElement, PooledTransactionsElementEcRecovered, -}; - pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, - InvalidTransactionError, Transaction, TransactionMeta, TransactionSigned, + BlobTransaction, InvalidTransactionError, PooledTransactionsElement, + PooledTransactionsElementEcRecovered, Transaction, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHashOrNumber, TxType, }; From 93ec6d48fe58f9352955f17131e92690e6562741 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 15 Nov 2024 09:43:21 +0100 Subject: [PATCH 488/970] net: use `BlockWithParent` in `SyncTarget::Gap` (#12514) --- crates/net/downloaders/src/headers/reverse_headers.rs | 11 ++++++++--- crates/net/p2p/src/headers/downloader.rs | 5 +++-- crates/primitives-traits/src/header/mod.rs | 2 +- crates/primitives-traits/src/header/sealed.rs | 11 ++++++++++- crates/primitives-traits/src/lib.rs | 2 +- 5 files changed, 23 insertions(+), 8 deletions(-) diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 3960ae6e812..0f8111e4395 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ 
b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -707,13 +707,13 @@ where } } SyncTarget::Gap(existing) => { - let target = existing.parent_hash; + let target = existing.parent; if Some(target) != current_tip { // there could be a sync target request in progress self.sync_target_request.take(); // If the target has changed, update the request pointers based on the new // targeted block number - let parent_block_number = existing.number.saturating_sub(1); + let parent_block_number = existing.block.number.saturating_sub(1); trace!(target: "downloaders::headers", current=?current_tip, new=?target, %parent_block_number, "Updated sync target"); @@ -1225,9 +1225,11 @@ mod tests { use super::*; use crate::headers::test_utils::child_header; use alloy_consensus::Header; + use alloy_eips::BlockNumHash; use assert_matches::assert_matches; use reth_consensus::test_utils::TestConsensus; use reth_network_p2p::test_utils::TestHeadersClient; + use reth_primitives_traits::BlockWithParent; /// Tests that `replace_number` works the same way as `Option::replace` #[test] @@ -1307,7 +1309,10 @@ mod tests { assert!(downloader.sync_target_request.is_some()); downloader.sync_target_request.take(); - let target = SyncTarget::Gap(SealedHeader::new(Default::default(), B256::random())); + let target = SyncTarget::Gap(BlockWithParent { + block: BlockNumHash::new(0, B256::random()), + parent: Default::default(), + }); downloader.update_sync_target(target); assert!(downloader.sync_target_request.is_none()); assert_matches!( diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index 59ecb58b84d..f02d9461fc1 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -6,6 +6,7 @@ use alloy_primitives::B256; use futures::Stream; use reth_consensus::Consensus; use reth_primitives::SealedHeader; +use reth_primitives_traits::BlockWithParent; /// A downloader capable of fetching and yielding block headers. 
/// /// A downloader represents a distinct strategy for submitting requests to download block headers, @@ -57,7 +58,7 @@ pub enum SyncTarget { /// /// The benefit of this variant is, that this already provides the block number of the highest /// missing block. - Gap(SealedHeader), + Gap(BlockWithParent), /// This represents a tip by block number TipNum(u64), } @@ -72,7 +73,7 @@ impl SyncTarget { pub fn tip(&self) -> BlockHashOrNumber { match self { Self::Tip(tip) => (*tip).into(), - Self::Gap(gap) => gap.parent_hash.into(), + Self::Gap(gap) => gap.parent.into(), Self::TipNum(num) => (*num).into(), } } diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index 760abf33720..ecd5725838e 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -1,5 +1,5 @@ mod sealed; -pub use sealed::SealedHeader; +pub use sealed::{BlockWithParent, SealedHeader}; mod error; pub use error::HeaderError; diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index ef81891d552..dab54977c51 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -2,7 +2,7 @@ use super::Header; use crate::InMemorySize; use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; -use alloy_primitives::{keccak256, BlockHash, Sealable}; +use alloy_primitives::{keccak256, BlockHash, Sealable, B256}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; use core::mem; @@ -10,6 +10,15 @@ use derive_more::{AsRef, Deref}; use reth_codecs::add_arbitrary_tests; use serde::{Deserialize, Serialize}; +/// A helper struct to store the block number/hash and its parent hash. +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct BlockWithParent { + /// Parent hash. + pub parent: B256, + /// Block number/hash. 
+ pub block: BlockNumHash, +} + /// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want /// to modify header. #[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index b8f0aa4c8a8..584181f2c95 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -59,7 +59,7 @@ pub use tx_type::{FullTxType, TxType}; pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{HeaderError, SealedHeader}; +pub use header::{BlockWithParent, HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. /// From 44964ac17124643b8fc745565cdcf9e457492377 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 15 Nov 2024 09:56:46 +0100 Subject: [PATCH 489/970] engine-primitives: make engine-tree independent of beacon-consensus crate (#12560) --- Cargo.lock | 8 ++++++- crates/consensus/beacon/src/engine/error.rs | 21 ------------------- crates/consensus/beacon/src/engine/event.rs | 2 +- crates/consensus/beacon/src/engine/handle.rs | 10 ++++----- crates/consensus/beacon/src/engine/mod.rs | 17 ++++++--------- .../consensus/beacon/src/engine/test_utils.rs | 4 ++-- crates/engine/local/src/miner.rs | 3 +-- crates/engine/local/src/service.rs | 3 ++- crates/engine/primitives/Cargo.toml | 7 +++++++ crates/engine/primitives/src/error.rs | 20 ++++++++++++++++++ .../primitives/src}/forkchoice.rs | 8 +++---- crates/engine/primitives/src/lib.rs | 9 ++++++++ .../primitives/src}/message.rs | 3 +-- crates/engine/service/Cargo.toml | 1 + crates/engine/service/src/service.rs | 4 +++- crates/engine/tree/src/engine.rs | 4 ++-- crates/engine/tree/src/tree/mod.rs | 21 +++++++++++-------- crates/engine/util/Cargo.toml | 8 +++---- 
crates/engine/util/src/engine_store.rs | 3 +-- crates/engine/util/src/lib.rs | 3 +-- crates/engine/util/src/reorg.rs | 6 ++++-- crates/engine/util/src/skip_fcu.rs | 3 +-- crates/engine/util/src/skip_new_payload.rs | 3 +-- crates/node/events/Cargo.toml | 1 + crates/node/events/src/node.rs | 5 ++--- crates/rpc/rpc-engine-api/src/engine_api.rs | 3 ++- crates/rpc/rpc-engine-api/src/error.rs | 3 ++- 27 files changed, 101 insertions(+), 82 deletions(-) create mode 100644 crates/engine/primitives/src/error.rs rename crates/{consensus/beacon/src/engine => engine/primitives/src}/forkchoice.rs (98%) rename crates/{consensus/beacon/src/engine => engine/primitives/src}/message.rs (98%) diff --git a/Cargo.lock b/Cargo.lock index 12e1a4a8cda..d92f66e8a58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7181,11 +7181,16 @@ name = "reth-engine-primitives" version = "1.1.1" dependencies = [ "alloy-primitives", + "alloy-rpc-types-engine", + "futures", + "reth-errors", "reth-execution-types", "reth-payload-primitives", "reth-primitives", "reth-trie", "serde", + "thiserror 1.0.69", + "tokio", ] [[package]] @@ -7197,6 +7202,7 @@ dependencies = [ "reth-beacon-consensus", "reth-chainspec", "reth-consensus", + "reth-engine-primitives", "reth-engine-tree", "reth-ethereum-engine-primitives", "reth-evm", @@ -7278,7 +7284,6 @@ dependencies = [ "futures", "itertools 0.13.0", "pin-project", - "reth-beacon-consensus", "reth-engine-primitives", "reth-errors", "reth-ethereum-forks", @@ -8109,6 +8114,7 @@ dependencies = [ "humantime", "pin-project", "reth-beacon-consensus", + "reth-engine-primitives", "reth-network-api", "reth-primitives-traits", "reth-prune", diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs index 5fc6df2b884..2092ea49f77 100644 --- a/crates/consensus/beacon/src/engine/error.rs +++ b/crates/consensus/beacon/src/engine/error.rs @@ -77,24 +77,3 @@ impl From for BeaconForkChoiceUpdateError { Self::internal(e) } } - -/// Represents all 
error cases when handling a new payload. -/// -/// This represents all possible error cases that must be returned as JSON RCP errors back to the -/// beacon node. -#[derive(Debug, thiserror::Error)] -pub enum BeaconOnNewPayloadError { - /// Thrown when the engine task is unavailable/stopped. - #[error("beacon consensus engine task stopped")] - EngineUnavailable, - /// An internal error occurred, not necessarily related to the payload. - #[error(transparent)] - Internal(Box), -} - -impl BeaconOnNewPayloadError { - /// Create a new internal error. - pub fn internal(e: E) -> Self { - Self::Internal(Box::new(e)) - } -} diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index 975085a32f3..b76b85374cd 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -1,6 +1,6 @@ -use crate::engine::forkchoice::ForkchoiceStatus; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; +use reth_engine_primitives::ForkchoiceStatus; use reth_primitives::{SealedBlock, SealedHeader}; use std::{ fmt::{Display, Formatter, Result}, diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index f8840cf78ab..339f2fb067f 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -1,14 +1,14 @@ //! 
`BeaconConsensusEngine` external API -use crate::{ - engine::message::OnForkChoiceUpdated, BeaconConsensusEngineEvent, BeaconEngineMessage, - BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, -}; +use crate::{BeaconConsensusEngineEvent, BeaconForkChoiceUpdateError}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use futures::TryFutureExt; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + OnForkChoiceUpdated, +}; use reth_errors::RethResult; use reth_tokio_util::{EventSender, EventStream}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 03f7bf08b1e..bc6bd3bc4f4 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -11,7 +11,11 @@ use reth_blockchain_tree_api::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes, PayloadTypes}; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus, OnForkChoiceUpdated, + PayloadTypes, +}; use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, @@ -42,14 +46,8 @@ use tokio::sync::{ use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; -mod message; -pub use message::{BeaconEngineMessage, OnForkChoiceUpdated}; - mod error; -pub use error::{ - BeaconConsensusEngineError, BeaconEngineResult, BeaconForkChoiceUpdateError, - BeaconOnNewPayloadError, 
-}; +pub use error::{BeaconConsensusEngineError, BeaconEngineResult, BeaconForkChoiceUpdateError}; mod invalid_headers; pub use invalid_headers::InvalidHeaderCache; @@ -60,9 +58,6 @@ pub use event::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; mod handle; pub use handle::BeaconConsensusEngineHandle; -mod forkchoice; -pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; - mod metrics; use metrics::EngineMetrics; diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 0ad4c595f1b..64daba2b453 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -2,7 +2,7 @@ use crate::{ engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensusEngine, BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, - BeaconOnNewPayloadError, EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, + EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, }; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ @@ -19,7 +19,7 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; -use reth_engine_primitives::EngineApiMessageVersion; +use reth_engine_primitives::{BeaconOnNewPayloadError, EngineApiMessageVersion}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::{either::Either, test_utils::MockExecutorProvider}; use reth_evm_ethereum::execute::EthExecutorProvider; diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 7cebd306309..2085aa81f9b 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -4,9 +4,8 @@ use alloy_primitives::{TxHash, B256}; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar, ForkchoiceState}; use eyre::OptionExt; use futures_util::{stream::Fuse, StreamExt}; -use 
reth_beacon_consensus::BeaconEngineMessage; use reth_chainspec::EthereumHardforks; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; +use reth_engine_primitives::{BeaconEngineMessage, EngineApiMessageVersion, EngineTypes}; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{ BuiltPayload, PayloadAttributesBuilder, PayloadBuilder, PayloadKind, PayloadTypes, diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 93a9cf11ecc..4e4826be31d 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -16,9 +16,10 @@ use std::{ use crate::miner::{LocalMiner, MiningMode}; use futures_util::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineNodeTypes}; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; use reth_consensus::Consensus; +use reth_engine_primitives::BeaconEngineMessage; use reth_engine_service::service::EngineMessageStream; use reth_engine_tree::{ chain::{ChainEvent, HandlerEvent}, diff --git a/crates/engine/primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml index 008af450332..de4786553d3 100644 --- a/crates/engine/primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -16,9 +16,16 @@ reth-execution-types.workspace = true reth-payload-primitives.workspace = true reth-primitives.workspace = true reth-trie.workspace = true +reth-errors.workspace = true # alloy alloy-primitives.workspace = true +alloy-rpc-types-engine.workspace = true + +# async +tokio = { workspace = true, features = ["sync"] } +futures.workspace = true # misc serde.workspace = true +thiserror.workspace = true diff --git a/crates/engine/primitives/src/error.rs b/crates/engine/primitives/src/error.rs new file mode 100644 index 00000000000..b7deb607bcf --- /dev/null +++ b/crates/engine/primitives/src/error.rs @@ -0,0 +1,20 @@ +/// Represents all error cases 
when handling a new payload. +/// +/// This represents all possible error cases that must be returned as JSON RCP errors back to the +/// beacon node. +#[derive(Debug, thiserror::Error)] +pub enum BeaconOnNewPayloadError { + /// Thrown when the engine task is unavailable/stopped. + #[error("beacon consensus engine task stopped")] + EngineUnavailable, + /// An internal error occurred, not necessarily related to the payload. + #[error(transparent)] + Internal(Box), +} + +impl BeaconOnNewPayloadError { + /// Create a new internal error. + pub fn internal(e: E) -> Self { + Self::Internal(Box::new(e)) + } +} diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/engine/primitives/src/forkchoice.rs similarity index 98% rename from crates/consensus/beacon/src/engine/forkchoice.rs rename to crates/engine/primitives/src/forkchoice.rs index a9d9301738f..3c70b78ecdd 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/engine/primitives/src/forkchoice.rs @@ -58,13 +58,13 @@ impl ForkchoiceStateTracker { /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Invalid`] #[allow(dead_code)] - pub(crate) fn is_latest_invalid(&self) -> bool { + pub fn is_latest_invalid(&self) -> bool { self.latest_status().map_or(false, |s| s.is_invalid()) } /// Returns the last valid head hash. #[allow(dead_code)] - pub(crate) fn last_valid_head(&self) -> Option { + pub fn last_valid_head(&self) -> Option { self.last_valid.as_ref().map(|s| s.head_block_hash) } @@ -188,7 +188,7 @@ pub enum ForkchoiceStateHash { impl ForkchoiceStateHash { /// Tries to find a matching hash in the given [`ForkchoiceState`]. 
- pub(crate) fn find(state: &ForkchoiceState, hash: B256) -> Option { + pub fn find(state: &ForkchoiceState, hash: B256) -> Option { if state.head_block_hash == hash { Some(Self::Head(hash)) } else if state.safe_block_hash == hash { @@ -201,7 +201,7 @@ impl ForkchoiceStateHash { } /// Returns true if this is the head hash of the [`ForkchoiceState`] - pub(crate) const fn is_head(&self) -> bool { + pub const fn is_head(&self) -> bool { matches!(self, Self::Head(_)) } } diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 949ebf0155c..3429edc2867 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -8,6 +8,15 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +mod error; +pub use error::BeaconOnNewPayloadError; + +mod forkchoice; +pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; + +mod message; +pub use message::{BeaconEngineMessage, OnForkChoiceUpdated}; + mod invalid_block_hook; pub use invalid_block_hook::InvalidBlockHook; diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/engine/primitives/src/message.rs similarity index 98% rename from crates/consensus/beacon/src/engine/message.rs rename to crates/engine/primitives/src/message.rs index fa7457c1225..11fd383a1e2 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/engine/primitives/src/message.rs @@ -1,10 +1,9 @@ -use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; +use crate::{BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, ForkchoiceStatus}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; use futures::{future::Either, FutureExt}; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use 
reth_errors::RethResult; use reth_payload_primitives::PayloadBuilderError; use std::{ diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index c6098bfe667..8359c453dcc 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -25,6 +25,7 @@ reth-stages-api.workspace = true reth-tasks.workspace = true reth-node-types.workspace = true reth-chainspec.workspace = true +reth-engine-primitives.workspace = true # async futures.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index d383af6caa6..cec9d981f1b 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -1,8 +1,9 @@ use futures::{Stream, StreamExt}; use pin_project::pin_project; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineNodeTypes}; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; use reth_consensus::Consensus; +use reth_engine_primitives::BeaconEngineMessage; use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, @@ -145,6 +146,7 @@ mod tests { use super::*; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; + use reth_engine_primitives::BeaconEngineMessage; use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm_ethereum::execute::EthExecutorProvider; diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 914121adce5..005d4e54399 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -7,9 +7,9 @@ use crate::{ }; use alloy_primitives::B256; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; +use reth_beacon_consensus::BeaconConsensusEngineEvent; use 
reth_chain_state::ExecutedBlock; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use reth_primitives::SealedBlockWithSenders; use std::{ collections::HashSet, diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index adc9230af34..67e692b5c6c 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -15,8 +15,7 @@ use alloy_rpc_types_engine::{ PayloadValidationError, }; use reth_beacon_consensus::{ - BeaconConsensusEngineEvent, BeaconEngineMessage, ForkchoiceStateTracker, InvalidHeaderCache, - OnForkChoiceUpdated, MIN_BLOCKS_FOR_PIPELINE_RUN, + BeaconConsensusEngineEvent, InvalidHeaderCache, MIN_BLOCKS_FOR_PIPELINE_RUN, }; use reth_blockchain_tree::{ error::{InsertBlockErrorKindTwo, InsertBlockErrorTwo, InsertBlockFatalError}, @@ -27,7 +26,10 @@ use reth_chain_state::{ }; use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, PostExecutionInput}; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + ForkchoiceStateTracker, OnForkChoiceUpdated, +}; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; @@ -1246,11 +1248,11 @@ where } BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { let output = self.on_new_payload(payload, sidecar); - if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { - reth_beacon_consensus::BeaconOnNewPayloadError::Internal( - Box::new(e), - ) - })) { + if let Err(err) = + tx.send(output.map(|o| o.outcome).map_err(|e| { + BeaconOnNewPayloadError::Internal(Box::new(e)) + })) + { error!(target: "engine::tree", "Failed to send event: {err:?}"); self.metrics .engine @@ -2600,9 +2602,10 @@ mod tests { use alloy_rlp::Decodable; use 
alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar}; use assert_matches::assert_matches; - use reth_beacon_consensus::{EthBeaconConsensus, ForkchoiceStatus}; + use reth_beacon_consensus::EthBeaconConsensus; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; + use reth_engine_primitives::ForkchoiceStatus; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::test_utils::MockExecutorProvider; use reth_provider::test_utils::MockEthProvider; diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 07aa40165e2..6eb22340ec1 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -17,7 +17,6 @@ reth-errors.workspace = true reth-fs-util.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true -reth-beacon-consensus.workspace = true reth-payload-validator.workspace = true reth-evm.workspace = true reth-revm.workspace = true @@ -51,8 +50,7 @@ tracing.workspace = true [features] optimism = [ - "reth-beacon-consensus/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "revm-primitives/optimism" + "reth-primitives/optimism", + "reth-provider/optimism", + "revm-primitives/optimism", ] diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs index 6b584f0c1f5..efed83159b3 100644 --- a/crates/engine/util/src/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -2,8 +2,7 @@ use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState}; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use reth_fs_util as fs; use serde::{Deserialize, Serialize}; use std::{ diff --git a/crates/engine/util/src/lib.rs b/crates/engine/util/src/lib.rs index 26dc817fc95..42746c376cf 100644 --- 
a/crates/engine/util/src/lib.rs +++ b/crates/engine/util/src/lib.rs @@ -1,8 +1,7 @@ //! Collection of various stream utilities for consensus engine. use futures::Stream; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use reth_payload_validator::ExecutionPayloadValidator; use std::path::PathBuf; use tokio_util::either::Either; diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 0a4dd8d496f..ec69bbd0024 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -7,8 +7,10 @@ use alloy_rpc_types_engine::{ }; use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use itertools::Either; -use reth_beacon_consensus::{BeaconEngineMessage, BeaconOnNewPayloadError, OnForkChoiceUpdated}; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + OnForkChoiceUpdated, +}; use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult}; use reth_ethereum_forks::EthereumHardforks; use reth_evm::{ diff --git a/crates/engine/util/src/skip_fcu.rs b/crates/engine/util/src/skip_fcu.rs index adadfb595f8..daa39ad572d 100644 --- a/crates/engine/util/src/skip_fcu.rs +++ b/crates/engine/util/src/skip_fcu.rs @@ -1,8 +1,7 @@ //! Stream wrapper that skips specified number of FCUs. 
use futures::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes, OnForkChoiceUpdated}; use std::{ pin::Pin, task::{ready, Context, Poll}, diff --git a/crates/engine/util/src/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs index 16f2e98197c..ea89bdf6d10 100644 --- a/crates/engine/util/src/skip_new_payload.rs +++ b/crates/engine/util/src/skip_new_payload.rs @@ -2,8 +2,7 @@ use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum}; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use std::{ pin::Pin, task::{ready, Context, Poll}, diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 7a5b1cf3b02..4b4d912a27b 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -19,6 +19,7 @@ reth-stages.workspace = true reth-prune.workspace = true reth-static-file-types.workspace = true reth-primitives-traits.workspace = true +reth-engine-primitives.workspace = true # ethereum alloy-primitives.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 39c6355e36e..285e28d0f2e 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -5,9 +5,8 @@ use alloy_consensus::constants::GWEI_TO_WEI; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::ForkchoiceState; use futures::Stream; -use reth_beacon_consensus::{ - BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, -}; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; +use reth_engine_primitives::ForkchoiceStatus; use reth_network_api::{NetworkEvent, PeersInfo}; use reth_primitives_traits::{format_gas, format_gas_throughput}; use 
reth_prune::PrunerEvent; diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 666154f3b22..7773b5084c9 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1023,8 +1023,9 @@ mod tests { use super::*; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use assert_matches::assert_matches; - use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; + use reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chainspec::{ChainSpec, MAINNET}; + use reth_engine_primitives::BeaconEngineMessage; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::SealedBlock; diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 677bd2fb246..82665ca35fd 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -2,7 +2,8 @@ use alloy_primitives::{B256, U256}; use jsonrpsee_types::error::{ INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE, INVALID_PARAMS_MSG, SERVER_ERROR_MSG, }; -use reth_beacon_consensus::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError}; +use reth_beacon_consensus::BeaconForkChoiceUpdateError; +use reth_engine_primitives::BeaconOnNewPayloadError; use reth_payload_primitives::{EngineObjectValidationError, PayloadBuilderError}; use thiserror::Error; From cd9da550da5be4607966ca615f0ed7d9a73824c1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 15 Nov 2024 11:31:14 +0100 Subject: [PATCH 490/970] chore: extract witness recorder helper type (#12566) --- crates/revm/Cargo.toml | 1 + crates/revm/src/lib.rs | 4 ++ crates/revm/src/witness.rs | 76 +++++++++++++++++++++++++++++++++++++ crates/rpc/rpc/Cargo.toml | 2 +- crates/rpc/rpc/src/debug.rs | 53 +++----------------------- 5 files changed, 87 insertions(+), 49 deletions(-) create mode 
100644 crates/revm/src/witness.rs diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index bd2251e0333..d1202cd8b2c 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -44,6 +44,7 @@ std = [ "alloy-consensus/std", "reth-primitives-traits/std", ] +witness = ["dep:reth-trie"] test-utils = [ "dep:reth-trie", "reth-primitives/test-utils", diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index b06ee816f8d..5f18a0fe616 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -29,3 +29,7 @@ pub use revm::{self, *}; /// Either type for flexible usage of different database types in the same context. pub mod either; + +/// Helper types for execution witness generation. +#[cfg(feature = "witness")] +pub mod witness; diff --git a/crates/revm/src/witness.rs b/crates/revm/src/witness.rs new file mode 100644 index 00000000000..c40c87d324b --- /dev/null +++ b/crates/revm/src/witness.rs @@ -0,0 +1,76 @@ +use alloy_primitives::{keccak256, map::B256HashMap, Bytes, B256}; +use reth_trie::{HashedPostState, HashedStorage}; +use revm::State; + +/// Tracks state changes during execution. +#[derive(Debug, Clone, Default)] +pub struct ExecutionWitnessRecord { + /// Records all state changes + pub hashed_state: HashedPostState, + /// Map of all contract codes (created / accessed) to their preimages that were required during + /// the execution of the block, including during state root recomputation. + /// + /// `keccak(bytecodes) => bytecodes` + pub codes: B256HashMap, + /// Map of all hashed account and storage keys (addresses and slots) to their preimages + /// (unhashed account addresses and storage slots, respectively) that were required during + /// the execution of the block. during the execution of the block. + /// + /// `keccak(address|slot) => address|slot` + pub keys: B256HashMap, +} + +impl ExecutionWitnessRecord { + /// Records the state after execution. 
+ pub fn record_executed_state(&mut self, statedb: &State) { + self.codes = statedb + .cache + .contracts + .iter() + .map(|(hash, code)| (*hash, code.original_bytes())) + .chain( + // cache state does not have all the contracts, especially when + // a contract is created within the block + // the contract only exists in bundle state, therefore we need + // to include them as well + statedb + .bundle_state + .contracts + .iter() + .map(|(hash, code)| (*hash, code.original_bytes())), + ) + .collect(); + + for (address, account) in &statedb.cache.accounts { + let hashed_address = keccak256(address); + self.hashed_state + .accounts + .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); + + let storage = self + .hashed_state + .storages + .entry(hashed_address) + .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); + + if let Some(account) = &account.account { + self.keys.insert(hashed_address, address.to_vec().into()); + + for (slot, value) in &account.storage { + let slot = B256::from(*slot); + let hashed_slot = keccak256(slot); + storage.storage.insert(hashed_slot, *value); + + self.keys.insert(hashed_slot, slot.into()); + } + } + } + } + + /// Creates the record from the state after execution. 
+ pub fn from_executed_state(state: &State) -> Self { + let mut record = Self::default(); + record.record_executed_state(state); + record + } +} diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index ac3a548f9b5..5418cd1eb3a 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -23,7 +23,7 @@ reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network-api.workspace = true reth-rpc-engine-api.workspace = true -reth-revm.workspace = true +reth-revm = { workspace = true, features = ["witness"] } reth-tasks = { workspace = true, features = ["rayon"] } reth-consensus-common.workspace = true reth-rpc-types-compat.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index a74d1b5a155..78040b48c5f 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -23,7 +23,7 @@ use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, TransactionVariant, }; -use reth_revm::database::StateProviderDatabase; +use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ helpers::{EthApiSpec, EthTransactions, TraceExt}, @@ -32,7 +32,6 @@ use reth_rpc_eth_api::{ use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_tasks::pool::BlockingTaskGuard; -use reth_trie::{HashedPostState, HashedStorage}; use revm::{ db::{CacheDB, State}, primitives::{db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg}, @@ -40,7 +39,6 @@ use revm::{ use revm_inspectors::tracing::{ FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext, }; -use revm_primitives::{keccak256, HashMap}; use std::sync::Arc; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; @@ -613,60 +611,19 @@ where let db = 
StateProviderDatabase::new(&state_provider); let block_executor = this.inner.block_executor.executor(db); - let mut hashed_state = HashedPostState::default(); - let mut keys = HashMap::default(); - let mut codes = HashMap::default(); + let mut witness_record = ExecutionWitnessRecord::default(); let _ = block_executor .execute_with_state_closure( (&(*block).clone().unseal(), block.difficulty).into(), |statedb: &State<_>| { - codes = statedb - .cache - .contracts - .iter() - .map(|(hash, code)| (*hash, code.original_bytes())) - .chain( - // cache state does not have all the contracts, especially when - // a contract is created within the block - // the contract only exists in bundle state, therefore we need - // to include them as well - statedb - .bundle_state - .contracts - .iter() - .map(|(hash, code)| (*hash, code.original_bytes())), - ) - .collect(); - - for (address, account) in &statedb.cache.accounts { - let hashed_address = keccak256(address); - hashed_state.accounts.insert( - hashed_address, - account.account.as_ref().map(|a| a.info.clone().into()), - ); - - let storage = - hashed_state.storages.entry(hashed_address).or_insert_with( - || HashedStorage::new(account.status.was_destroyed()), - ); - - if let Some(account) = &account.account { - keys.insert(hashed_address, address.to_vec().into()); - - for (slot, value) in &account.storage { - let slot = B256::from(*slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, *value); - - keys.insert(hashed_slot, slot.into()); - } - } - } + witness_record.record_executed_state(statedb); }, ) .map_err(|err| EthApiError::Internal(err.into()))?; + let ExecutionWitnessRecord { hashed_state, codes, keys } = witness_record; + let state = state_provider.witness(Default::default(), hashed_state).map_err(Into::into)?; Ok(ExecutionWitness { state: state.into_iter().collect(), codes, keys }) From 72a52d5ea59f40c07f30a03f360508ea433d1255 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 15 Nov 
2024 14:42:58 +0400 Subject: [PATCH 491/970] feat: make `StaticFileProvider` generic over `NodePrimitives` (#12565) --- Cargo.lock | 1 + crates/cli/commands/src/common.rs | 2 +- crates/cli/commands/src/db/stats.rs | 11 ++- crates/cli/commands/src/init_state/mod.rs | 1 - .../commands/src/init_state/without_evm.rs | 28 ++++--- crates/cli/commands/src/stage/drop.rs | 20 ++--- crates/cli/commands/src/stage/run.rs | 7 +- crates/engine/tree/src/persistence.rs | 4 +- crates/node/builder/src/launch/common.rs | 4 +- crates/node/metrics/Cargo.toml | 1 + crates/node/metrics/src/hooks.rs | 16 ++-- .../cli/src/commands/import_receipts.rs | 4 +- .../optimism/cli/src/commands/init_state.rs | 1 - crates/primitives-traits/src/node.rs | 4 +- crates/prune/prune/src/builder.rs | 19 +++-- crates/prune/prune/src/segments/set.rs | 10 ++- .../prune/src/segments/static_file/headers.rs | 14 ++-- .../src/segments/static_file/receipts.rs | 18 +++-- .../src/segments/static_file/transactions.rs | 18 +++-- crates/stages/api/src/pipeline/mod.rs | 14 +--- crates/stages/stages/src/stages/bodies.rs | 4 +- crates/stages/stages/src/stages/execution.rs | 19 ++--- crates/stages/stages/src/stages/mod.rs | 9 ++- .../stages/stages/src/test_utils/test_db.rs | 4 +- crates/storage/db-common/src/init.rs | 35 +++++---- .../src/providers/blockchain_provider.rs | 9 ++- .../provider/src/providers/consistent.rs | 4 +- .../provider/src/providers/database/mod.rs | 10 ++- .../src/providers/database/provider.rs | 10 ++- crates/storage/provider/src/providers/mod.rs | 4 +- .../provider/src/providers/static_file/jar.rs | 30 +++++--- .../src/providers/static_file/manager.rs | 76 ++++++++++++------- .../provider/src/providers/static_file/mod.rs | 22 +++--- .../src/providers/static_file/writer.rs | 54 ++++++++----- .../storage/provider/src/test_utils/noop.rs | 4 +- .../src/traits/static_file_provider.rs | 7 +- crates/storage/provider/src/writer/mod.rs | 45 ++++++----- .../provider/src/writer/static_file.rs | 3 +- 38 files 
changed, 324 insertions(+), 222 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d92f66e8a58..927e653a609 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8140,6 +8140,7 @@ dependencies = [ "reqwest", "reth-db-api", "reth-metrics", + "reth-primitives-traits", "reth-provider", "reth-tasks", "socket2", diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 49fee347ed4..3a9cbaa7fbb 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -109,7 +109,7 @@ impl> Environmen &self, config: &Config, db: Arc, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, ) -> eyre::Result>>> { let has_receipt_pruning = config.prune.as_ref().map_or(false, |a| a.has_receipts_pruning()); let prune_modes = diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index ac36b866b07..6865f01345e 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -9,7 +9,9 @@ use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv, TableViewer, Ta use reth_db_api::database::Database; use reth_db_common::DbTool; use reth_fs_util as fs; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_node_builder::{ + NodePrimitives, NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine, +}; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::providers::{ProviderNodeTypes, StaticFileProvider}; use reth_static_file_types::SegmentRangeInclusive; @@ -49,7 +51,7 @@ impl Command { println!("\n"); } - let static_files_stats_table = self.static_files_stats_table(data_dir)?; + let static_files_stats_table = self.static_files_stats_table::(data_dir)?; println!("{static_files_stats_table}"); println!("\n"); @@ -143,7 +145,7 @@ impl Command { Ok(table) } - fn static_files_stats_table( + fn static_files_stats_table( &self, data_dir: ChainPath, ) -> eyre::Result { @@ -173,7 +175,8 
@@ impl Command { } let static_files = iter_static_files(data_dir.static_files())?; - let static_file_provider = StaticFileProvider::read_only(data_dir.static_files(), false)?; + let static_file_provider = + StaticFileProvider::::read_only(data_dir.static_files(), false)?; let mut total_data_size = 0; let mut total_index_size = 0; diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index adaec3e8be3..adde88870fe 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -97,7 +97,6 @@ impl> InitStateC if last_block_number == 0 { without_evm::setup_without_evm( &provider_rw, - &static_file_provider, // &header, // header_hash, SealedHeader::new(header, header_hash), diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index 29fc2aec60e..c6e1f9a51dd 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -2,11 +2,13 @@ use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::Decodable; use alloy_consensus::Header; +use reth_node_builder::NodePrimitives; use reth_primitives::{ BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, }; use reth_provider::{ - providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileWriter, + providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileProviderFactory, + StaticFileWriter, }; use reth_stages::{StageCheckpoint, StageId}; @@ -27,21 +29,21 @@ pub(crate) fn read_header_from_file(path: PathBuf) -> Result( provider_rw: &Provider, - static_file_provider: &StaticFileProvider, header: SealedHeader, total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: StageCheckpointWriter + BlockWriter, + Provider: StaticFileProviderFactory + StageCheckpointWriter + BlockWriter, { info!(target: "reth::cli", "Setting up dummy EVM chain before 
importing state."); + let static_file_provider = provider_rw.static_file_provider(); // Write EVM dummy data up to `header - 1` block - append_dummy_chain(static_file_provider, header.number - 1)?; + append_dummy_chain(&static_file_provider, header.number - 1)?; info!(target: "reth::cli", "Appending first valid block."); - append_first_block(provider_rw, static_file_provider, &header, total_difficulty)?; + append_first_block(provider_rw, &header, total_difficulty)?; for stage in StageId::ALL { provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number))?; @@ -56,17 +58,21 @@ where /// /// By appending it, static file writer also verifies that all segments are at the same /// height. -fn append_first_block( - provider_rw: impl BlockWriter, - sf_provider: &StaticFileProvider, +fn append_first_block( + provider_rw: &Provider, header: &SealedHeader, total_difficulty: U256, -) -> Result<(), eyre::Error> { +) -> Result<(), eyre::Error> +where + Provider: BlockWriter + StaticFileProviderFactory, +{ provider_rw.insert_block( SealedBlockWithSenders::new(SealedBlock::new(header.clone(), BlockBody::default()), vec![]) .expect("no senders or txes"), )?; + let sf_provider = provider_rw.static_file_provider(); + sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header( header, total_difficulty, @@ -85,8 +91,8 @@ fn append_first_block( /// * Headers: It will push an empty block. /// * Transactions: It will not push any tx, only increments the end block range. /// * Receipts: It will not push any receipt, only increments the end block range. 
-fn append_dummy_chain( - sf_provider: &StaticFileProvider, +fn append_dummy_chain( + sf_provider: &StaticFileProvider, target_height: BlockNumber, ) -> Result<(), eyre::Error> { let (tx, rx) = std::sync::mpsc::channel(); diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 3a277cabd18..70b2caa8d16 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -12,7 +12,9 @@ use reth_db_common::{ }; use reth_node_builder::NodeTypesWithEngine; use reth_node_core::args::StageEnum; -use reth_provider::{writer::UnifiedStorageWriter, StaticFileProviderFactory}; +use reth_provider::{ + writer::UnifiedStorageWriter, DatabaseProviderFactory, StaticFileProviderFactory, +}; use reth_prune::PruneSegment; use reth_stages::StageId; use reth_static_file_types::StaticFileSegment; @@ -33,8 +35,6 @@ impl> Command ) -> eyre::Result<()> { let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; - let static_file_provider = provider_factory.static_file_provider(); - let tool = DbTool::new(provider_factory)?; let static_file_segment = match self.stage { @@ -60,7 +60,7 @@ impl> Command } } - let provider_rw = tool.provider_factory.provider_rw()?; + let provider_rw = tool.provider_factory.database_provider_rw()?; let tx = provider_rw.tx_ref(); match self.stage { @@ -71,7 +71,7 @@ impl> Command tx.clear::()?; reset_stage_checkpoint(tx, StageId::Headers)?; - insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; + insert_genesis_header(&provider_rw, &self.env.chain)?; } StageEnum::Bodies => { tx.clear::()?; @@ -83,7 +83,7 @@ impl> Command tx.clear::()?; reset_stage_checkpoint(tx, StageId::Bodies)?; - insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; + insert_genesis_header(&provider_rw, &self.env.chain)?; } StageEnum::Senders => { tx.clear::()?; @@ -104,7 +104,7 @@ impl> Command reset_stage_checkpoint(tx, StageId::Execution)?; let alloc 
= &self.env.chain.genesis().alloc; - insert_genesis_state(&provider_rw.0, alloc.iter())?; + insert_genesis_state(&provider_rw, alloc.iter())?; } StageEnum::AccountHashing => { tx.clear::()?; @@ -142,20 +142,20 @@ impl> Command reset_stage_checkpoint(tx, StageId::IndexAccountHistory)?; reset_stage_checkpoint(tx, StageId::IndexStorageHistory)?; - insert_genesis_history(&provider_rw.0, self.env.chain.genesis().alloc.iter())?; + insert_genesis_history(&provider_rw, self.env.chain.genesis().alloc.iter())?; } StageEnum::TxLookup => { tx.clear::()?; reset_prune_checkpoint(tx, PruneSegment::TransactionLookup)?; reset_stage_checkpoint(tx, StageId::TransactionLookup)?; - insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; + insert_genesis_header(&provider_rw, &self.env.chain)?; } } tx.put::(StageId::Finish.to_string(), Default::default())?; - UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; Ok(()) } diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 23d6f6f28ac..1ac2a12d6fa 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -329,10 +329,7 @@ impl> Command } if self.commit { - UnifiedStorageWriter::commit_unwind( - provider_rw, - provider_factory.static_file_provider(), - )?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; provider_rw = provider_factory.database_provider_rw()?; } } @@ -355,7 +352,7 @@ impl> Command provider_rw.save_stage_checkpoint(exec_stage.id(), checkpoint)?; } if self.commit { - UnifiedStorageWriter::commit(provider_rw, provider_factory.static_file_provider())?; + UnifiedStorageWriter::commit(provider_rw)?; provider_rw = provider_factory.database_provider_rw()?; } diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index f4650a047b4..e0c9e0362d0 100644 --- a/crates/engine/tree/src/persistence.rs +++ 
b/crates/engine/tree/src/persistence.rs @@ -120,7 +120,7 @@ impl PersistenceService { let new_tip_hash = provider_rw.block_hash(new_tip_num)?; UnifiedStorageWriter::from(&provider_rw, &sf_provider).remove_blocks_above(new_tip_num)?; - UnifiedStorageWriter::commit_unwind(provider_rw, sf_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; debug!(target: "engine::persistence", ?new_tip_num, ?new_tip_hash, "Removed blocks from disk"); self.metrics.remove_blocks_above_duration_seconds.record(start_time.elapsed()); @@ -142,7 +142,7 @@ impl PersistenceService { let static_file_provider = self.provider.static_file_provider(); UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(&blocks)?; - UnifiedStorageWriter::commit(provider_rw, static_file_provider)?; + UnifiedStorageWriter::commit(provider_rw)?; } self.metrics.save_blocks_duration_seconds.record(start_time.elapsed()); Ok(last_block_hash_num) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 7fafa9e5eac..7e6571135f0 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -495,7 +495,7 @@ where } /// Returns the static file provider to interact with the static files. - pub fn static_file_provider(&self) -> StaticFileProvider { + pub fn static_file_provider(&self) -> StaticFileProvider { self.right().static_file_provider() } @@ -766,7 +766,7 @@ where } /// Returns the static file provider to interact with the static files. 
- pub fn static_file_provider(&self) -> StaticFileProvider { + pub fn static_file_provider(&self) -> StaticFileProvider<::Primitives> { self.provider_factory().static_file_provider() } diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml index 9efdbd4959d..a823db9b467 100644 --- a/crates/node/metrics/Cargo.toml +++ b/crates/node/metrics/Cargo.toml @@ -9,6 +9,7 @@ repository.workspace = true [dependencies] reth-db-api.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-metrics.workspace = true reth-tasks.workspace = true diff --git a/crates/node/metrics/src/hooks.rs b/crates/node/metrics/src/hooks.rs index 18755717667..21d12614f62 100644 --- a/crates/node/metrics/src/hooks.rs +++ b/crates/node/metrics/src/hooks.rs @@ -1,7 +1,12 @@ use metrics_process::Collector; use reth_db_api::database_metrics::DatabaseMetrics; +use reth_primitives_traits::NodePrimitives; use reth_provider::providers::StaticFileProvider; -use std::{fmt, sync::Arc}; +use std::{ + fmt::{self}, + sync::Arc, +}; + pub(crate) trait Hook: Fn() + Send + Sync {} impl Hook for T {} @@ -22,10 +27,11 @@ pub struct Hooks { impl Hooks { /// Create a new set of hooks - pub fn new( - db: Metrics, - static_file_provider: StaticFileProvider, - ) -> Self { + pub fn new(db: Metrics, static_file_provider: StaticFileProvider) -> Self + where + Metrics: DatabaseMetrics + 'static + Send + Sync, + N: NodePrimitives, + { let hooks: Vec>> = vec![ Box::new(move || db.report_metrics()), Box::new(move || { diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index 838a99818e9..ca82cf73ea4 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -150,7 +150,7 @@ where } } - let provider = provider_factory.provider_rw()?; + let provider = provider_factory.database_provider_rw()?; let mut total_decoded_receipts = 0; let mut 
total_receipts = 0; let mut total_filtered_out_dup_txns = 0; @@ -247,7 +247,7 @@ where provider .save_stage_checkpoint(StageId::Execution, StageCheckpoint::new(highest_block_receipts))?; - UnifiedStorageWriter::commit(provider, static_file_provider)?; + UnifiedStorageWriter::commit(provider)?; Ok(ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns }) } diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 68f5d9a585f..6be9b73c765 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -54,7 +54,6 @@ impl> InitStateCommandOp { if last_block_number == 0 { reth_cli_commands::init_state::without_evm::setup_without_evm( &provider_rw, - &static_file_provider, SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), BEDROCK_HEADER_TTD, )?; diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 9ca69274831..ca490ac15aa 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -3,7 +3,7 @@ use core::fmt; use crate::{BlockBody, FullBlock, FullReceipt, FullSignedTx, FullTxType}; /// Configures all the primitive types of the node. -pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { +pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static { /// Block primitive. type Block: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; /// Signed version of the transaction type. @@ -22,7 +22,7 @@ impl NodePrimitives for () { } /// Helper trait that sets trait bounds on [`NodePrimitives`]. -pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { +pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static { /// Block primitive. type Block: FullBlock>; /// Signed version of the transaction type. 
diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index 71d73c41610..85697160115 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -76,8 +76,11 @@ impl PrunerBuilder { /// Builds a [Pruner] from the current configuration with the given provider factory. pub fn build_with_provider_factory(self, provider_factory: PF) -> Pruner where - PF: DatabaseProviderFactory - + StaticFileProviderFactory, + PF: DatabaseProviderFactory< + ProviderRW: PruneCheckpointWriter + BlockReader + StaticFileProviderFactory, + > + StaticFileProviderFactory< + Primitives = ::Primitives, + >, { let segments = SegmentSet::from_components(provider_factory.static_file_provider(), self.segments); @@ -93,10 +96,16 @@ impl PrunerBuilder { } /// Builds a [Pruner] from the current configuration with the given static file provider. - pub fn build(self, static_file_provider: StaticFileProvider) -> Pruner + pub fn build( + self, + static_file_provider: StaticFileProvider, + ) -> Pruner where - Provider: - DBProvider + BlockReader + PruneCheckpointWriter + TransactionsProvider, + Provider: StaticFileProviderFactory + + DBProvider + + BlockReader + + PruneCheckpointWriter + + TransactionsProvider, { let segments = SegmentSet::::from_components(static_file_provider, self.segments); diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 23d03345b09..62c252fc54b 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -5,7 +5,7 @@ use crate::segments::{ use reth_db::transaction::DbTxMut; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + StaticFileProviderFactory, TransactionsProvider, }; use reth_prune_types::PruneModes; @@ -45,12 +45,16 @@ impl SegmentSet { impl SegmentSet where - Provider: DBProvider + TransactionsProvider + PruneCheckpointWriter + BlockReader, + Provider: 
StaticFileProviderFactory + + DBProvider + + TransactionsProvider + + PruneCheckpointWriter + + BlockReader, { /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`]. pub fn from_components( - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { let PruneModes { diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index 8700a653b11..ea0264261af 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ b/crates/prune/prune/src/segments/static_file/headers.rs @@ -12,7 +12,7 @@ use reth_db::{ tables, transaction::DbTxMut, }; -use reth_provider::{providers::StaticFileProvider, DBProvider}; +use reth_provider::{providers::StaticFileProvider, DBProvider, StaticFileProviderFactory}; use reth_prune_types::{ PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, @@ -24,17 +24,19 @@ use tracing::trace; const HEADER_TABLES_TO_PRUNE: usize = 3; #[derive(Debug)] -pub struct Headers { - static_file_provider: StaticFileProvider, +pub struct Headers { + static_file_provider: StaticFileProvider, } -impl Headers { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { +impl Headers { + pub const fn new(static_file_provider: StaticFileProvider) -> Self { Self { static_file_provider } } } -impl> Segment for Headers { +impl> Segment + for Headers +{ fn segment(&self) -> PruneSegment { PruneSegment::Headers } diff --git a/crates/prune/prune/src/segments/static_file/receipts.rs b/crates/prune/prune/src/segments/static_file/receipts.rs index f766f7ea1d3..5221418674a 100644 --- a/crates/prune/prune/src/segments/static_file/receipts.rs +++ b/crates/prune/prune/src/segments/static_file/receipts.rs @@ -5,25 +5,29 @@ use crate::{ use reth_db::transaction::DbTxMut; use reth_provider::{ 
errors::provider::ProviderResult, providers::StaticFileProvider, BlockReader, DBProvider, - PruneCheckpointWriter, TransactionsProvider, + PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use reth_static_file_types::StaticFileSegment; #[derive(Debug)] -pub struct Receipts { - static_file_provider: StaticFileProvider, +pub struct Receipts { + static_file_provider: StaticFileProvider, } -impl Receipts { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { +impl Receipts { + pub const fn new(static_file_provider: StaticFileProvider) -> Self { Self { static_file_provider } } } -impl Segment for Receipts +impl Segment for Receipts where - Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: StaticFileProviderFactory + + DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader, { fn segment(&self) -> PruneSegment { PruneSegment::Receipts diff --git a/crates/prune/prune/src/segments/static_file/transactions.rs b/crates/prune/prune/src/segments/static_file/transactions.rs index 12772af5f88..7dc7a23191a 100644 --- a/crates/prune/prune/src/segments/static_file/transactions.rs +++ b/crates/prune/prune/src/segments/static_file/transactions.rs @@ -4,7 +4,10 @@ use crate::{ PrunerError, }; use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{providers::StaticFileProvider, BlockReader, DBProvider, TransactionsProvider}; +use reth_provider::{ + providers::StaticFileProvider, BlockReader, DBProvider, StaticFileProviderFactory, + TransactionsProvider, +}; use reth_prune_types::{ PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; @@ -12,19 +15,20 @@ use reth_static_file_types::StaticFileSegment; use tracing::trace; #[derive(Debug)] -pub struct Transactions { - static_file_provider: StaticFileProvider, +pub struct Transactions { 
+ static_file_provider: StaticFileProvider, } -impl Transactions { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { +impl Transactions { + pub const fn new(static_file_provider: StaticFileProvider) -> Self { Self { static_file_provider } } } -impl Segment for Transactions +impl Segment for Transactions where - Provider: DBProvider + TransactionsProvider + BlockReader, + Provider: + DBProvider + TransactionsProvider + BlockReader + StaticFileProviderFactory, { fn segment(&self) -> PruneSegment { PruneSegment::Transactions diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 399a3ffb4b7..bcf857fbac8 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -9,7 +9,7 @@ use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, ChainStateBlockReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StageCheckpointReader, - StageCheckpointWriter, StaticFileProviderFactory, + StageCheckpointWriter, }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; @@ -358,10 +358,7 @@ impl Pipeline { ))?; } - UnifiedStorageWriter::commit_unwind( - provider_rw, - self.provider_factory.static_file_provider(), - )?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; stage.post_unwind_commit()?; @@ -469,10 +466,7 @@ impl Pipeline { result: out.clone(), }); - UnifiedStorageWriter::commit( - provider_rw, - self.provider_factory.static_file_provider(), - )?; + UnifiedStorageWriter::commit(provider_rw)?; stage.post_execute_commit()?; @@ -533,7 +527,7 @@ fn on_stage_error( prev_checkpoint.unwrap_or_default(), )?; - UnifiedStorageWriter::commit(provider_rw, factory.static_file_provider())?; + UnifiedStorageWriter::commit(provider_rw)?; // We unwind because of a validation error. 
If the unwind itself // fails, we bail entirely, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 640bae86659..78aeda6feff 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -311,11 +311,11 @@ where fn missing_static_data_error( last_tx_num: TxNumber, - static_file_provider: &StaticFileProvider, + static_file_provider: &StaticFileProvider, provider: &Provider, ) -> Result where - Provider: BlockReader, + Provider: BlockReader + StaticFileProviderFactory, { let mut last_block = static_file_provider .get_highest_static_file_block(StaticFileSegment::Transactions) diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 1750758a26a..16234ad483f 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -12,7 +12,7 @@ use reth_evm::{ use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; use reth_primitives::{SealedHeader, StaticFileSegment}; -use reth_primitives_traits::format_gas_throughput; +use reth_primitives_traits::{format_gas_throughput, NodePrimitives}; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, writer::UnifiedStorageWriter, @@ -181,7 +181,8 @@ where + StatsReader + StateChangeWriter + BlockHashReader, - for<'a> UnifiedStorageWriter<'a, Provider, StaticFileProviderRWRefMut<'a>>: StateWriter, + for<'a> UnifiedStorageWriter<'a, Provider, StaticFileProviderRWRefMut<'a, Provider::Primitives>>: + StateWriter, { /// Return the id of the stage fn id(&self) -> StageId { @@ -485,8 +486,8 @@ where } } -fn execution_checkpoint( - provider: &StaticFileProvider, +fn execution_checkpoint( + provider: &StaticFileProvider, start_block: BlockNumber, max_block: BlockNumber, checkpoint: StageCheckpoint, @@ -552,8 +553,8 @@ fn execution_checkpoint( }) } 
-fn calculate_gas_used_from_headers( - provider: &StaticFileProvider, +fn calculate_gas_used_from_headers( + provider: &StaticFileProvider, range: RangeInclusive, ) -> Result { debug!(target: "sync::stages::execution", ?range, "Calculating gas used from headers"); @@ -587,11 +588,11 @@ fn calculate_gas_used_from_headers( /// (by returning [`StageError`]) until the heights in both the database and static file match. fn prepare_static_file_producer<'a, 'b, Provider>( provider: &'b Provider, - static_file_provider: &'a StaticFileProvider, + static_file_provider: &'a StaticFileProvider, start_block: u64, -) -> Result, StageError> +) -> Result, StageError> where - Provider: DBProvider + BlockReader + HeaderProvider, + Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, 'b: 'a, { // Get next expected receipt number diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 4b9f9295103..9d7cc685a7e 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -296,8 +296,8 @@ mod tests { ) { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. - let static_file_provider = - StaticFileProvider::read_write(db.factory.static_file_provider().path()).unwrap(); + let mut static_file_provider = db.factory.static_file_provider(); + static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); // Simulate corruption by removing `prune_count` rows from the data file without updating // its offset list and configuration. @@ -314,9 +314,10 @@ mod tests { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. 
+ let mut static_file_provider = db.factory.static_file_provider(); + static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); assert_eq!( - StaticFileProvider::read_write(db.factory.static_file_provider().path()) - .unwrap() + static_file_provider .check_consistency(&db.factory.database_provider_ro().unwrap(), is_full_node,), Ok(expected) ); diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 52983cb6f69..772e9cb78d0 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -24,7 +24,7 @@ use reth_provider::{ }; use reth_storage_errors::provider::ProviderResult; use reth_testing_utils::generators::ChangeSet; -use std::{collections::BTreeMap, path::Path}; +use std::{collections::BTreeMap, fmt::Debug, path::Path}; use tempfile::TempDir; /// Test database that is used for testing stage implementations. @@ -142,7 +142,7 @@ impl TestStageDB { /// Insert header to static file if `writer` exists, otherwise to DB. 
pub fn insert_header( - writer: Option<&mut StaticFileProviderRWRefMut<'_>>, + writer: Option<&mut StaticFileProviderRWRefMut<'_, ()>>, tx: &TX, header: &SealedHeader, td: U256, diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 45fb4b76b31..e14796d2686 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -10,9 +10,7 @@ use reth_db_api::{transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; use reth_primitives::{Account, Bytecode, GotExpected, Receipts, StaticFileSegment, StorageEntry}; use reth_provider::{ - errors::provider::ProviderResult, - providers::{StaticFileProvider, StaticFileWriter}, - writer::UnifiedStorageWriter, + errors::provider::ProviderResult, providers::StaticFileWriter, writer::UnifiedStorageWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointWriter, StateChangeWriter, @@ -72,7 +70,8 @@ impl From for InitDatabaseError { pub fn init_genesis(factory: &PF) -> Result where PF: DatabaseProviderFactory + StaticFileProviderFactory + ChainSpecProvider + BlockHashReader, - PF::ProviderRW: StageCheckpointWriter + PF::ProviderRW: StaticFileProviderFactory + + StageCheckpointWriter + HistoryWriter + HeaderProvider + HashingWriter @@ -114,8 +113,7 @@ where insert_genesis_history(&provider_rw, alloc.iter())?; // Insert header - let static_file_provider = factory.static_file_provider(); - insert_genesis_header(&provider_rw, &static_file_provider, &chain)?; + insert_genesis_header(&provider_rw, &chain)?; insert_genesis_state(&provider_rw, alloc.iter())?; @@ -124,6 +122,7 @@ where provider_rw.save_stage_checkpoint(stage, Default::default())?; } + let static_file_provider = provider_rw.static_file_provider(); // Static file segments start empty, so we need to initialize the genesis 
block. let segment = StaticFileSegment::Receipts; static_file_provider.latest_writer(segment)?.increment_block(0)?; @@ -133,7 +132,7 @@ where // `commit_unwind`` will first commit the DB and then the static file provider, which is // necessary on `init_genesis`. - UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; Ok(hash) } @@ -144,7 +143,11 @@ pub fn insert_genesis_state<'a, 'b, Provider>( alloc: impl Iterator, ) -> ProviderResult<()> where - Provider: DBProvider + StateChangeWriter + HeaderProvider + AsRef, + Provider: StaticFileProviderFactory + + DBProvider + + StateChangeWriter + + HeaderProvider + + AsRef, { insert_state(provider, alloc, 0) } @@ -156,7 +159,11 @@ pub fn insert_state<'a, 'b, Provider>( block: u64, ) -> ProviderResult<()> where - Provider: DBProvider + StateChangeWriter + HeaderProvider + AsRef, + Provider: StaticFileProviderFactory + + DBProvider + + StateChangeWriter + + HeaderProvider + + AsRef, { let capacity = alloc.size_hint().1.unwrap_or(0); let mut state_init: BundleStateInit = HashMap::with_capacity(capacity); @@ -296,14 +303,14 @@ where /// Inserts header for the genesis state. 
pub fn insert_genesis_header( provider: &Provider, - static_file_provider: &StaticFileProvider, chain: &Spec, ) -> ProviderResult<()> where - Provider: DBProvider, + Provider: StaticFileProviderFactory + DBProvider, Spec: EthChainSpec, { let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash()); + let static_file_provider = provider.static_file_provider(); match static_file_provider.block_hash(0) { Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { @@ -333,7 +340,8 @@ pub fn init_from_state_dump( etl_config: EtlConfig, ) -> eyre::Result where - Provider: DBProvider + Provider: StaticFileProviderFactory + + DBProvider + BlockNumReader + BlockHashReader + ChainSpecProvider @@ -457,7 +465,8 @@ fn dump_state( block: u64, ) -> Result<(), eyre::Error> where - Provider: DBProvider + Provider: StaticFileProviderFactory + + DBProvider + HeaderProvider + HashingWriter + HistoryWriter diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 0f0693471b0..083e7fb596b 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -163,7 +163,9 @@ impl DatabaseProviderFactory for BlockchainProvider2 { } impl StaticFileProviderFactory for BlockchainProvider2 { - fn static_file_provider(&self) -> StaticFileProvider { + type Primitives = N::Primitives; + + fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } } @@ -911,7 +913,7 @@ mod tests { )?; // Commit to both storages: database and static files - UnifiedStorageWriter::commit(provider_rw, factory.static_file_provider())?; + UnifiedStorageWriter::commit(provider_rw)?; let provider = BlockchainProvider2::new(factory)?; @@ -999,8 +1001,7 @@ mod tests { UnifiedStorageWriter::from(&provider_rw, &hook_provider.static_file_provider()) .save_blocks(&[lowest_memory_block]) 
.unwrap(); - UnifiedStorageWriter::commit(provider_rw, hook_provider.static_file_provider()) - .unwrap(); + UnifiedStorageWriter::commit(provider_rw).unwrap(); // Remove from memory hook_provider.canonical_in_memory_state.remove_persisted_blocks(num_hash); diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 37c00be23be..3b2599f4999 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -612,7 +612,9 @@ impl ConsistentProvider { } impl StaticFileProviderFactory for ConsistentProvider { - fn static_file_provider(&self) -> StaticFileProvider { + type Primitives = N::Primitives; + + fn static_file_provider(&self) -> StaticFileProvider { self.storage_provider.static_file_provider() } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 0e193f8cdef..94c83bbb442 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -53,7 +53,7 @@ pub struct ProviderFactory { /// Chain spec chain_spec: Arc, /// Static File Provider - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, /// Optional pruning configuration prune_modes: PruneModes, } @@ -78,7 +78,7 @@ impl ProviderFactory { pub fn new( db: N::DB, chain_spec: Arc, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, ) -> Self { Self { db, chain_spec, static_file_provider, prune_modes: PruneModes::none() } } @@ -114,7 +114,7 @@ impl>> ProviderFactory { path: P, chain_spec: Arc, args: DatabaseArguments, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, ) -> RethResult { Ok(Self { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), @@ -202,8 +202,10 @@ impl DatabaseProviderFactory for ProviderFactory { } impl 
StaticFileProviderFactory for ProviderFactory { + type Primitives = N::Primitives; + /// Returns static file provider - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 30a69fbfc77..b93112e7084 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -135,7 +135,7 @@ pub struct DatabaseProvider { /// Chain spec chain_spec: Arc, /// Static File provider - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, /// Pruning configuration prune_modes: PruneModes, } @@ -199,8 +199,10 @@ impl DatabaseProvider { } impl StaticFileProviderFactory for DatabaseProvider { + type Primitives = N::Primitives; + /// Returns a static file provider - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } @@ -220,7 +222,7 @@ impl DatabaseProvider { pub const fn new_rw( tx: TX, chain_spec: Arc, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { Self { tx, chain_spec, static_file_provider, prune_modes } @@ -363,7 +365,7 @@ impl DatabaseProvider { pub const fn new( tx: TX, chain_spec: Arc, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { Self { tx, chain_spec, static_file_provider, prune_modes } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index d1e1822d8c9..3bf3e7b247f 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -204,7 +204,9 @@ impl DatabaseProviderFactory for 
BlockchainProvider { } impl StaticFileProviderFactory for BlockchainProvider { - fn static_file_provider(&self) -> StaticFileProvider { + type Primitives = N::Primitives; + + fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 9c303394ed2..e87829b1133 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -12,39 +12,49 @@ use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, use reth_chainspec::ChainInfo; use reth_db::static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}; use reth_db_api::models::CompactU256; +use reth_node_types::NodePrimitives; use reth_primitives::{ Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ + fmt::Debug, ops::{Deref, RangeBounds}, sync::Arc, }; /// Provider over a specific `NippyJar` and range. #[derive(Debug)] -pub struct StaticFileJarProvider<'a> { +pub struct StaticFileJarProvider<'a, N> { /// Main static file segment jar: LoadedJarRef<'a>, /// Another kind of static file segment to help query data from the main one. auxiliary_jar: Option>, + /// Metrics for the static files. 
metrics: Option>, + /// Node primitives + _pd: std::marker::PhantomData, } -impl<'a> Deref for StaticFileJarProvider<'a> { +impl<'a, N: NodePrimitives> Deref for StaticFileJarProvider<'a, N> { type Target = LoadedJarRef<'a>; fn deref(&self) -> &Self::Target { &self.jar } } -impl<'a> From> for StaticFileJarProvider<'a> { +impl<'a, N: NodePrimitives> From> for StaticFileJarProvider<'a, N> { fn from(value: LoadedJarRef<'a>) -> Self { - StaticFileJarProvider { jar: value, auxiliary_jar: None, metrics: None } + StaticFileJarProvider { + jar: value, + auxiliary_jar: None, + metrics: None, + _pd: Default::default(), + } } } -impl<'a> StaticFileJarProvider<'a> { +impl<'a, N: NodePrimitives> StaticFileJarProvider<'a, N> { /// Provides a cursor for more granular data access. pub fn cursor<'b>(&'b self) -> ProviderResult> where @@ -76,7 +86,7 @@ impl<'a> StaticFileJarProvider<'a> { } } -impl HeaderProvider for StaticFileJarProvider<'_> { +impl HeaderProvider for StaticFileJarProvider<'_, N> { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? 
@@ -148,7 +158,7 @@ impl HeaderProvider for StaticFileJarProvider<'_> { } } -impl BlockHashReader for StaticFileJarProvider<'_> { +impl BlockHashReader for StaticFileJarProvider<'_, N> { fn block_hash(&self, number: u64) -> ProviderResult> { self.cursor()?.get_one::>(number.into()) } @@ -170,7 +180,7 @@ impl BlockHashReader for StaticFileJarProvider<'_> { } } -impl BlockNumReader for StaticFileJarProvider<'_> { +impl BlockNumReader for StaticFileJarProvider<'_, N> { fn chain_info(&self) -> ProviderResult { // Information on live database Err(ProviderError::UnsupportedProvider) @@ -195,7 +205,7 @@ impl BlockNumReader for StaticFileJarProvider<'_> { } } -impl TransactionsProvider for StaticFileJarProvider<'_> { +impl TransactionsProvider for StaticFileJarProvider<'_, N> { fn transaction_id(&self, hash: TxHash) -> ProviderResult> { let mut cursor = self.cursor()?; @@ -291,7 +301,7 @@ impl TransactionsProvider for StaticFileJarProvider<'_> { } } -impl ReceiptProvider for StaticFileJarProvider<'_> { +impl ReceiptProvider for StaticFileJarProvider<'_, N> { fn receipt(&self, num: TxNumber) -> ProviderResult> { self.cursor()?.get_one::>(num.into()) } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index a5d4537245d..8f6a6957502 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -29,6 +29,7 @@ use reth_db_api::{ transaction::DbTx, }; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; +use reth_node_types::NodePrimitives; use reth_primitives::{ static_file::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, @@ -42,6 +43,8 @@ use reth_storage_api::DBProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, + fmt::Debug, + marker::PhantomData, ops::{Deref, Range, 
RangeBounds, RangeInclusive}, path::{Path, PathBuf}, sync::{mpsc, Arc}, @@ -77,10 +80,16 @@ impl StaticFileAccess { } /// [`StaticFileProvider`] manages all existing [`StaticFileJarProvider`]. -#[derive(Debug, Clone)] -pub struct StaticFileProvider(pub(crate) Arc); +#[derive(Debug)] +pub struct StaticFileProvider(pub(crate) Arc>); + +impl Clone for StaticFileProvider { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} -impl StaticFileProvider { +impl StaticFileProvider { /// Creates a new [`StaticFileProvider`]. fn new(path: impl AsRef, access: StaticFileAccess) -> ProviderResult { let provider = Self(Arc::new(StaticFileProviderInner::new(path, access)?)); @@ -191,8 +200,8 @@ impl StaticFileProvider { } } -impl Deref for StaticFileProvider { - type Target = StaticFileProviderInner; +impl Deref for StaticFileProvider { + type Target = StaticFileProviderInner; fn deref(&self) -> &Self::Target { &self.0 @@ -201,7 +210,7 @@ impl Deref for StaticFileProvider { /// [`StaticFileProviderInner`] manages all existing [`StaticFileJarProvider`]. #[derive(Debug)] -pub struct StaticFileProviderInner { +pub struct StaticFileProviderInner { /// Maintains a map which allows for concurrent access to different `NippyJars`, over different /// segments and ranges. map: DashMap<(BlockNumber, StaticFileSegment), LoadedJar>, @@ -212,7 +221,8 @@ pub struct StaticFileProviderInner { /// Directory where `static_files` are located path: PathBuf, /// Maintains a writer set of [`StaticFileSegment`]. - writers: StaticFileWriters, + writers: StaticFileWriters, + /// Metrics for the static files. metrics: Option>, /// Access rights of the provider. access: StaticFileAccess, @@ -220,9 +230,11 @@ pub struct StaticFileProviderInner { blocks_per_file: u64, /// Write lock for when access is [`StaticFileAccess::RW`]. _lock_file: Option, + /// Node primitives + _pd: PhantomData, } -impl StaticFileProviderInner { +impl StaticFileProviderInner { /// Creates a new [`StaticFileProviderInner`]. 
fn new(path: impl AsRef, access: StaticFileAccess) -> ProviderResult { let _lock_file = if access.is_read_write() { @@ -241,6 +253,7 @@ impl StaticFileProviderInner { access, blocks_per_file: DEFAULT_BLOCKS_PER_STATIC_FILE, _lock_file, + _pd: Default::default(), }; Ok(provider) @@ -257,7 +270,7 @@ impl StaticFileProviderInner { } } -impl StaticFileProvider { +impl StaticFileProvider { /// Set a custom number of blocks per file. #[cfg(any(test, feature = "test-utils"))] pub fn with_custom_blocks_per_file(self, blocks_per_file: u64) -> Self { @@ -323,7 +336,7 @@ impl StaticFileProvider { segment: StaticFileSegment, block: BlockNumber, path: Option<&Path>, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_segment_provider( segment, || self.get_segment_ranges_from_block(segment, block), @@ -338,7 +351,7 @@ impl StaticFileProvider { segment: StaticFileSegment, tx: TxNumber, path: Option<&Path>, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_segment_provider( segment, || self.get_segment_ranges_from_transaction(segment, tx), @@ -355,7 +368,7 @@ impl StaticFileProvider { segment: StaticFileSegment, fn_range: impl Fn() -> Option, path: Option<&Path>, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // If we have a path, then get the block range from its name. // Otherwise, check `self.available_static_files` let block_range = match path { @@ -426,12 +439,12 @@ impl StaticFileProvider { &self, segment: StaticFileSegment, fixed_block_range: &SegmentRangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult> { let key = (fixed_block_range.end(), segment); // Avoid using `entry` directly to avoid a write lock in the common case. 
trace!(target: "provider::static_file", ?segment, ?fixed_block_range, "Getting provider"); - let mut provider: StaticFileJarProvider<'_> = if let Some(jar) = self.map.get(&key) { + let mut provider: StaticFileJarProvider<'_, N> = if let Some(jar) = self.map.get(&key) { trace!(target: "provider::static_file", ?segment, ?fixed_block_range, "Jar found in cache"); jar.into() } else { @@ -924,7 +937,7 @@ impl StaticFileProvider { pub fn find_static_file( &self, segment: StaticFileSegment, - func: impl Fn(StaticFileJarProvider<'_>) -> ProviderResult>, + func: impl Fn(StaticFileJarProvider<'_, N>) -> ProviderResult>, ) -> ProviderResult> { if let Some(highest_block) = self.get_highest_static_file_block(segment) { let mut range = self.find_fixed_range(highest_block); @@ -1167,30 +1180,35 @@ impl StaticFileProvider { /// Helper trait to manage different [`StaticFileProviderRW`] of an `Arc ProviderResult>; + ) -> ProviderResult>; /// Returns a mutable reference to a [`StaticFileProviderRW`] of the latest /// [`StaticFileSegment`]. fn latest_writer( &self, segment: StaticFileSegment, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Commits all changes of all [`StaticFileProviderRW`] of all [`StaticFileSegment`]. 
fn commit(&self) -> ProviderResult<()>; } -impl StaticFileWriter for StaticFileProvider { +impl StaticFileWriter for StaticFileProvider { + type Primitives = N; + fn get_writer( &self, block: BlockNumber, segment: StaticFileSegment, - ) -> ProviderResult> { + ) -> ProviderResult> { if self.access.is_read_only() { return Err(ProviderError::ReadOnlyStaticFileAccess) } @@ -1204,7 +1222,7 @@ impl StaticFileWriter for StaticFileProvider { fn latest_writer( &self, segment: StaticFileSegment, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_writer(self.get_highest_static_file_block(segment).unwrap_or_default(), segment) } @@ -1213,7 +1231,7 @@ impl StaticFileWriter for StaticFileProvider { } } -impl HeaderProvider for StaticFileProvider { +impl HeaderProvider for StaticFileProvider { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider @@ -1300,7 +1318,7 @@ impl HeaderProvider for StaticFileProvider { } } -impl BlockHashReader for StaticFileProvider { +impl BlockHashReader for StaticFileProvider { fn block_hash(&self, num: u64) -> ProviderResult> { self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None)?.block_hash(num) } @@ -1319,7 +1337,7 @@ impl BlockHashReader for StaticFileProvider { } } -impl ReceiptProvider for StaticFileProvider { +impl ReceiptProvider for StaticFileProvider { fn receipt(&self, num: TxNumber) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Receipts, num, None) .and_then(|provider| provider.receipt(num)) @@ -1356,7 +1374,7 @@ impl ReceiptProvider for StaticFileProvider { } } -impl TransactionsProviderExt for StaticFileProvider { +impl TransactionsProviderExt for StaticFileProvider { fn transaction_hashes_by_range( &self, tx_range: Range, @@ -1417,7 +1435,7 @@ impl TransactionsProviderExt for StaticFileProvider { } } -impl TransactionsProvider for StaticFileProvider { +impl TransactionsProvider 
for StaticFileProvider { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Transactions, |jar_provider| { let mut cursor = jar_provider.cursor()?; @@ -1529,7 +1547,7 @@ impl TransactionsProvider for StaticFileProvider { /* Cannot be successfully implemented but must exist for trait requirements */ -impl BlockNumReader for StaticFileProvider { +impl BlockNumReader for StaticFileProvider { fn chain_info(&self) -> ProviderResult { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) @@ -1551,7 +1569,7 @@ impl BlockNumReader for StaticFileProvider { } } -impl BlockReader for StaticFileProvider { +impl BlockReader for StaticFileProvider { fn find_block_by_hash( &self, _hash: B256, @@ -1629,7 +1647,7 @@ impl BlockReader for StaticFileProvider { } } -impl WithdrawalsProvider for StaticFileProvider { +impl WithdrawalsProvider for StaticFileProvider { fn withdrawals_by_block( &self, _id: BlockHashOrNumber, @@ -1645,7 +1663,7 @@ impl WithdrawalsProvider for StaticFileProvider { } } -impl StatsReader for StaticFileProvider { +impl StatsReader for StaticFileProvider { fn count_entries(&self) -> ProviderResult { match T::NAME { tables::CanonicalHeaders::NAME | diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index dd52adf52f8..30b8d0344da 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -55,7 +55,9 @@ impl Deref for LoadedJar { #[cfg(test)] mod tests { use super::*; - use crate::{test_utils::create_test_provider_factory, HeaderProvider}; + use crate::{ + test_utils::create_test_provider_factory, HeaderProvider, StaticFileProviderFactory, + }; use alloy_consensus::{Header, Transaction}; use alloy_primitives::{BlockHash, TxNumber, B256, U256}; use rand::seq::SliceRandom; @@ -116,7 +118,7 @@ mod tests { // Create StaticFile { - let 
manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); + let manager = factory.static_file_provider(); let mut writer = manager.latest_writer(StaticFileSegment::Headers).unwrap(); let mut td = U256::ZERO; @@ -131,7 +133,7 @@ mod tests { // Use providers to query Header data and compare if it matches { let db_provider = factory.provider().unwrap(); - let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); + let manager = db_provider.static_file_provider(); let jar_provider = manager .get_segment_provider_from_block(StaticFileSegment::Headers, 0, Some(&static_file)) .unwrap(); @@ -170,7 +172,7 @@ mod tests { // [ Headers Creation and Commit ] { - let sf_rw = StaticFileProvider::read_write(&static_dir) + let sf_rw = StaticFileProvider::<()>::read_write(&static_dir) .expect("Failed to create static file provider") .with_custom_blocks_per_file(blocks_per_file); @@ -189,8 +191,8 @@ mod tests { // Helper function to prune headers and validate truncation results fn prune_and_validate( - writer: &mut StaticFileProviderRWRefMut<'_>, - sf_rw: &StaticFileProvider, + writer: &mut StaticFileProviderRWRefMut<'_, ()>, + sf_rw: &StaticFileProvider<()>, static_dir: impl AsRef, prune_count: u64, expected_tip: Option, @@ -302,13 +304,13 @@ mod tests { /// * `10..=19`: no txs/receipts /// * `20..=29`: only one tx/receipt fn setup_tx_based_scenario( - sf_rw: &StaticFileProvider, + sf_rw: &StaticFileProvider<()>, segment: StaticFileSegment, blocks_per_file: u64, ) { fn setup_block_ranges( - writer: &mut StaticFileProviderRWRefMut<'_>, - sf_rw: &StaticFileProvider, + writer: &mut StaticFileProviderRWRefMut<'_, ()>, + sf_rw: &StaticFileProvider<()>, segment: StaticFileSegment, block_range: &Range, mut tx_count: u64, @@ -413,7 +415,7 @@ mod tests { #[allow(clippy::too_many_arguments)] fn prune_and_validate( - sf_rw: &StaticFileProvider, + sf_rw: &StaticFileProvider<()>, static_dir: impl AsRef, segment: StaticFileSegment, prune_count: u64, diff 
--git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 2e54fb943a7..796e16c9a13 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -8,6 +8,7 @@ use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock, RwLock}; use reth_codecs::Compact; use reth_db_api::models::CompactU256; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; +use reth_node_types::NodePrimitives; use reth_primitives::{ static_file::{SegmentHeader, SegmentRangeInclusive}, Receipt, StaticFileSegment, @@ -15,6 +16,7 @@ use reth_primitives::{ use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ borrow::Borrow, + fmt::Debug, path::{Path, PathBuf}, sync::{Arc, Weak}, time::Instant, @@ -25,19 +27,29 @@ use tracing::debug; /// /// WARNING: Trying to use more than one writer for the same segment type **will result in a /// deadlock**. -#[derive(Debug, Default)] -pub(crate) struct StaticFileWriters { - headers: RwLock>, - transactions: RwLock>, - receipts: RwLock>, +#[derive(Debug)] +pub(crate) struct StaticFileWriters { + headers: RwLock>>, + transactions: RwLock>>, + receipts: RwLock>>, +} + +impl Default for StaticFileWriters { + fn default() -> Self { + Self { + headers: Default::default(), + transactions: Default::default(), + receipts: Default::default(), + } + } } -impl StaticFileWriters { +impl StaticFileWriters { pub(crate) fn get_or_create( &self, segment: StaticFileSegment, - create_fn: impl FnOnce() -> ProviderResult, - ) -> ProviderResult> { + create_fn: impl FnOnce() -> ProviderResult>, + ) -> ProviderResult> { let mut write_guard = match segment { StaticFileSegment::Headers => self.headers.write(), StaticFileSegment::Transactions => self.transactions.write(), @@ -64,19 +76,19 @@ impl StaticFileWriters { /// Mutable reference to a [`StaticFileProviderRW`] behind a [`RwLockWriteGuard`]. 
#[derive(Debug)] -pub struct StaticFileProviderRWRefMut<'a>( - pub(crate) RwLockWriteGuard<'a, RawRwLock, Option>, +pub struct StaticFileProviderRWRefMut<'a, N>( + pub(crate) RwLockWriteGuard<'a, RawRwLock, Option>>, ); -impl std::ops::DerefMut for StaticFileProviderRWRefMut<'_> { +impl std::ops::DerefMut for StaticFileProviderRWRefMut<'_, N> { fn deref_mut(&mut self) -> &mut Self::Target { // This is always created by [`StaticFileWriters::get_or_create`] self.0.as_mut().expect("static file writer provider should be init") } } -impl std::ops::Deref for StaticFileProviderRWRefMut<'_> { - type Target = StaticFileProviderRW; +impl std::ops::Deref for StaticFileProviderRWRefMut<'_, N> { + type Target = StaticFileProviderRW; fn deref(&self) -> &Self::Target { // This is always created by [`StaticFileWriters::get_or_create`] @@ -86,11 +98,11 @@ impl std::ops::Deref for StaticFileProviderRWRefMut<'_> { #[derive(Debug)] /// Extends `StaticFileProvider` with writing capabilities -pub struct StaticFileProviderRW { +pub struct StaticFileProviderRW { /// Reference back to the provider. We need [Weak] here because [`StaticFileProviderRW`] is /// stored in a [`dashmap::DashMap`] inside the parent [`StaticFileProvider`].which is an /// [Arc]. If we were to use an [Arc] here, we would create a reference cycle. - reader: Weak, + reader: Weak>, /// A [`NippyJarWriter`] instance. writer: NippyJarWriter, /// Path to opened file. @@ -104,7 +116,7 @@ pub struct StaticFileProviderRW { prune_on_commit: Option<(u64, Option)>, } -impl StaticFileProviderRW { +impl StaticFileProviderRW { /// Creates a new [`StaticFileProviderRW`] for a [`StaticFileSegment`]. 
/// /// Before use, transaction based segments should ensure the block end range is the expected @@ -112,7 +124,7 @@ impl StaticFileProviderRW { pub fn new( segment: StaticFileSegment, block: BlockNumber, - reader: Weak, + reader: Weak>, metrics: Option>, ) -> ProviderResult { let (writer, data_path) = Self::open(segment, block, reader.clone(), metrics.clone())?; @@ -133,7 +145,7 @@ impl StaticFileProviderRW { fn open( segment: StaticFileSegment, block: u64, - reader: Weak, + reader: Weak>, metrics: Option>, ) -> ProviderResult<(NippyJarWriter, PathBuf)> { let start = Instant::now(); @@ -751,7 +763,7 @@ impl StaticFileProviderRW { Ok(()) } - fn reader(&self) -> StaticFileProvider { + fn reader(&self) -> StaticFileProvider { Self::upgrade_provider_to_strong_reference(&self.reader) } @@ -764,8 +776,8 @@ impl StaticFileProviderRW { /// active. In reality, it's impossible to detach the [`StaticFileProviderRW`] from the /// [`StaticFileProvider`]. fn upgrade_provider_to_strong_reference( - provider: &Weak, - ) -> StaticFileProvider { + provider: &Weak>, + ) -> StaticFileProvider { provider.upgrade().map(StaticFileProvider).expect("StaticFileProvider is dropped") } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 38fab0dc311..d12539a2c27 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -556,7 +556,9 @@ impl PruneCheckpointReader for NoopProvider { } impl StaticFileProviderFactory for NoopProvider { - fn static_file_provider(&self) -> StaticFileProvider { + type Primitives = (); + + fn static_file_provider(&self) -> StaticFileProvider { StaticFileProvider::read_only(PathBuf::default(), false).unwrap() } } diff --git a/crates/storage/provider/src/traits/static_file_provider.rs b/crates/storage/provider/src/traits/static_file_provider.rs index 24d69569205..d465121fb46 100644 --- a/crates/storage/provider/src/traits/static_file_provider.rs +++ 
b/crates/storage/provider/src/traits/static_file_provider.rs @@ -1,7 +1,12 @@ +use reth_node_types::NodePrimitives; + use crate::providers::StaticFileProvider; /// Static file provider factory. pub trait StaticFileProviderFactory { + /// The network primitives type [`StaticFileProvider`] is using. + type Primitives: NodePrimitives; + /// Create new instance of static file provider. - fn static_file_provider(&self) -> StaticFileProvider; + fn static_file_provider(&self) -> StaticFileProvider; } diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 0fbec6c1b88..1c3894e9cfd 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -1,7 +1,8 @@ use crate::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter as SfWriter}, writer::static_file::StaticFileWriter, - BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, TrieWriter, + BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, + StaticFileProviderFactory, TrieWriter, }; use alloy_consensus::Header; use alloy_primitives::{BlockNumber, B256, U256}; @@ -115,15 +116,13 @@ impl UnifiedStorageWriter<'_, (), ()> { /// start-up. /// /// NOTE: If unwinding data from storage, use `commit_unwind` instead! - pub fn commit

( - database: impl Into

+ AsRef

, - static_file: StaticFileProvider, - ) -> ProviderResult<()> + pub fn commit

(provider: P) -> ProviderResult<()> where - P: DBProvider, + P: DBProvider + StaticFileProviderFactory, { + let static_file = provider.static_file_provider(); static_file.commit()?; - database.into().into_tx().commit()?; + provider.commit()?; Ok(()) } @@ -135,20 +134,18 @@ impl UnifiedStorageWriter<'_, (), ()> { /// checkpoints on the next start-up. /// /// NOTE: Should only be used after unwinding data from storage! - pub fn commit_unwind

( - database: impl Into

+ AsRef

, - static_file: StaticFileProvider, - ) -> ProviderResult<()> + pub fn commit_unwind

(provider: P) -> ProviderResult<()> where - P: DBProvider, + P: DBProvider + StaticFileProviderFactory, { - database.into().into_tx().commit()?; + let static_file = provider.static_file_provider(); + provider.commit()?; static_file.commit()?; Ok(()) } } -impl UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider> +impl UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider> where ProviderDB: DBProvider + BlockWriter @@ -158,7 +155,8 @@ where + HistoryWriter + StageCheckpointWriter + BlockExecutionWriter - + AsRef, + + AsRef + + StaticFileProviderFactory, { /// Writes executed blocks and receipts to storage. pub fn save_blocks(&self, blocks: &[ExecutedBlock]) -> ProviderResult<()> { @@ -319,9 +317,10 @@ where } } -impl UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>> +impl + UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_, ProviderDB::Primitives>> where - ProviderDB: DBProvider + HeaderProvider, + ProviderDB: DBProvider + HeaderProvider + StaticFileProviderFactory, { /// Ensures that the static file writer is set and of the right [`StaticFileSegment`] variant. /// @@ -430,9 +429,10 @@ where } } -impl UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>> +impl + UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_, ProviderDB::Primitives>> where - ProviderDB: DBProvider + HeaderProvider, + ProviderDB: DBProvider + HeaderProvider + StaticFileProviderFactory, { /// Appends receipts block by block. /// @@ -512,9 +512,12 @@ where } impl StateWriter - for UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>> + for UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_, ProviderDB::Primitives>> where - ProviderDB: DBProvider + StateChangeWriter + HeaderProvider, + ProviderDB: DBProvider + + StateChangeWriter + + HeaderProvider + + StaticFileProviderFactory, { /// Write the data and receipts to the database or static files if `static_file_producer` is /// `Some`. 
It should be `None` if there is any kind of pruning/filtering over the receipts. diff --git a/crates/storage/provider/src/writer/static_file.rs b/crates/storage/provider/src/writer/static_file.rs index 5514e211e58..f7227d21ef3 100644 --- a/crates/storage/provider/src/writer/static_file.rs +++ b/crates/storage/provider/src/writer/static_file.rs @@ -1,12 +1,13 @@ use crate::providers::StaticFileProviderRWRefMut; use alloy_primitives::{BlockNumber, TxNumber}; use reth_errors::ProviderResult; +use reth_node_types::NodePrimitives; use reth_primitives::Receipt; use reth_storage_api::ReceiptWriter; pub(crate) struct StaticFileWriter<'a, W>(pub(crate) &'a mut W); -impl ReceiptWriter for StaticFileWriter<'_, StaticFileProviderRWRefMut<'_>> { +impl ReceiptWriter for StaticFileWriter<'_, StaticFileProviderRWRefMut<'_, N>> { fn append_block_receipts( &mut self, first_tx_index: TxNumber, From 305a1cee0bf6660bea22b5d83189c84d78398b70 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 15 Nov 2024 11:54:02 +0100 Subject: [PATCH 492/970] feat: introduce debug witness api (#12567) Co-authored-by: Oliver --- crates/rpc/rpc-api/src/debug.rs | 23 +++++++++++++++++++++++ crates/rpc/rpc-api/src/lib.rs | 4 ++-- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 76316fa71f4..52b63fe3021 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -385,3 +385,26 @@ pub trait DebugApi { #[method(name = "writeMutexProfile")] async fn debug_write_mutex_profile(&self, file: String) -> RpcResult<()>; } + +/// An extension to the `debug_` namespace that provides additional methods for retrieving +/// witnesses. +/// +/// This is separate from the regular `debug_` api, because this depends on the network specific +/// params. 
For optimism this will expect the optimism specific payload attributes +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "debug"))] +pub trait DebugExecutionWitnessApi { + /// The `debug_executePayload` method allows for re-execution of a group of transactions with + /// the purpose of generating an execution witness. The witness comprises of a map of all + /// hashed trie nodes to their preimages that were required during the execution of the block, + /// including during state root recomputation. + /// + /// The first argument is the block number or block hash. The second argument is the payload + /// attributes for the new block. The third argument is a list of transactions to be included. + #[method(name = "executePayload")] + async fn execute_payload( + &self, + parent_block_hash: B256, + attributes: Attributes, + ) -> RpcResult; +} diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 73775112dcf..ac39b4802a8 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -37,7 +37,7 @@ pub use servers::*; pub mod servers { pub use crate::{ admin::AdminApiServer, - debug::DebugApiServer, + debug::{DebugApiServer, DebugExecutionWitnessApiServer}, engine::{EngineApiServer, EngineEthApiServer}, mev::{MevFullApiServer, MevSimApiServer}, net::NetApiServer, @@ -65,7 +65,7 @@ pub mod clients { pub use crate::{ admin::AdminApiClient, anvil::AnvilApiClient, - debug::DebugApiClient, + debug::{DebugApiClient, DebugExecutionWitnessApiClient}, engine::{EngineApiClient, EngineEthApiClient}, ganache::GanacheApiClient, hardhat::HardhatApiClient, From efa350d28df77630867762c4fbd6872193a0c388 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 15 Nov 2024 12:44:16 +0100 Subject: [PATCH 493/970] ci: exclude more crates for op tests (#12568) --- .github/workflows/unit.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/.github/workflows/unit.yml b/.github/workflows/unit.yml index defd9a6f535..e89ad903d80 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -35,11 +35,11 @@ jobs: partition: 2 total_partitions: 2 - type: optimism - args: --features "asm-keccak optimism" --locked + args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" partition: 1 total_partitions: 2 - type: optimism - args: --features "asm-keccak optimism" --locked + args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" partition: 2 total_partitions: 2 - type: book From 56826cbdbbf84ccbea41ff6ac049b27b024d5a48 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 15 Nov 2024 16:35:35 +0400 Subject: [PATCH 494/970] fix: remove redundant check in bodies stage (#12569) --- crates/stages/api/src/error.rs | 13 +------------ crates/stages/stages/src/stages/bodies.rs | 12 +----------- .../static-file/src/segments/headers.rs | 4 +--- .../static-file/src/segments/receipts.rs | 3 +-- .../static-file/src/segments/transactions.rs | 3 +-- .../provider/src/providers/static_file/writer.rs | 15 ++++++--------- 6 files changed, 11 insertions(+), 39 deletions(-) diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs index 8562b10b6a5..b12f5186f3b 100644 --- a/crates/stages/api/src/error.rs +++ b/crates/stages/api/src/error.rs @@ -1,5 +1,5 @@ use crate::PipelineEvent; -use alloy_primitives::{BlockNumber, TxNumber}; +use alloy_primitives::TxNumber; use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, DatabaseError, RethError}; use reth_network_p2p::error::DownloadError; @@ -112,16 +112,6 @@ pub enum StageError { /// Expected static file transaction number. static_file: TxNumber, }, - /// Unrecoverable inconsistency error related to a block number in a static file segment. - #[error("inconsistent block number for {segment}. 
db: {database}, static_file: {static_file}")] - InconsistentBlockNumber { - /// Static File segment where this error was encountered. - segment: StaticFileSegment, - /// Expected database block number. - database: BlockNumber, - /// Expected static file block number. - static_file: BlockNumber, - }, /// The prune checkpoint for the given segment is missing. #[error("missing prune checkpoint for {0}")] MissingPruneCheckpoint(PruneSegment), @@ -156,7 +146,6 @@ impl StageError { Self::MissingDownloadBuffer | Self::MissingSyncGap | Self::ChannelClosed | - Self::InconsistentBlockNumber { .. } | Self::InconsistentTxNumber { .. } | Self::Internal(_) | Self::Fatal(_) diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 78aeda6feff..48bc679f5bd 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -176,17 +176,7 @@ where // Increment block on static file header. if block_number > 0 { - let appended_block_number = static_file_producer.increment_block(block_number)?; - - if appended_block_number != block_number { - // This scenario indicates a critical error in the logic of adding new - // items. It should be treated as an `expect()` failure. 
- return Err(StageError::InconsistentBlockNumber { - segment: StaticFileSegment::Transactions, - database: block_number, - static_file: appended_block_number, - }) - } + static_file_producer.increment_block(block_number)?; } match response { diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index 650f4998764..e06e1f09a17 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -46,9 +46,7 @@ impl Segment for Hea debug_assert_eq!(header_block, header_td_block); debug_assert_eq!(header_td_block, canonical_header_block); - let _static_file_block = - static_file_writer.append_header(&header, header_td.0, &canonical_header)?; - debug_assert_eq!(_static_file_block, header_block); + static_file_writer.append_header(&header, header_td.0, &canonical_header)?; } Ok(()) diff --git a/crates/static-file/static-file/src/segments/receipts.rs b/crates/static-file/static-file/src/segments/receipts.rs index 0442c360099..bd808b4d839 100644 --- a/crates/static-file/static-file/src/segments/receipts.rs +++ b/crates/static-file/static-file/src/segments/receipts.rs @@ -30,8 +30,7 @@ impl Segment Segment StaticFileProviderRW { /// and create the next one if we are past the end range. /// /// Returns the current [`BlockNumber`] as seen in the static file. 
- pub fn increment_block( - &mut self, - expected_block_number: BlockNumber, - ) -> ProviderResult { + pub fn increment_block(&mut self, expected_block_number: BlockNumber) -> ProviderResult<()> { let segment = self.writer.user_header().segment(); self.check_next_block_number(expected_block_number)?; @@ -350,7 +347,7 @@ impl StaticFileProviderRW { } } - let block = self.writer.user_header_mut().increment_block(); + self.writer.user_header_mut().increment_block(); if let Some(metrics) = &self.metrics { metrics.record_segment_operation( segment, @@ -359,7 +356,7 @@ impl StaticFileProviderRW { ); } - Ok(block) + Ok(()) } /// Verifies if the incoming block number matches the next expected block number @@ -524,13 +521,13 @@ impl StaticFileProviderRW { header: &Header, total_difficulty: U256, hash: &BlockHash, - ) -> ProviderResult { + ) -> ProviderResult<()> { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Headers); - let block_number = self.increment_block(header.number)?; + self.increment_block(header.number)?; self.append_column(header)?; self.append_column(CompactU256::from(total_difficulty))?; @@ -544,7 +541,7 @@ impl StaticFileProviderRW { ); } - Ok(block_number) + Ok(()) } /// Appends transaction to static file. 
From 5f66fa448ef5f19d1e9d122d140b61370d330a1c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Nov 2024 14:31:35 +0100 Subject: [PATCH 495/970] chore(sdk): improve usability `TxType` trait (#12548) --- Cargo.lock | 1 + crates/optimism/primitives/Cargo.toml | 1 + crates/optimism/primitives/src/lib.rs | 2 +- .../src/{op_tx_type.rs => tx_type.rs} | 33 ++++++++++++++++-- crates/primitives-traits/src/tx_type.rs | 34 +++++++------------ crates/primitives/src/transaction/tx_type.rs | 27 +++++++++++++++ 6 files changed, 74 insertions(+), 24 deletions(-) rename crates/optimism/primitives/src/{op_tx_type.rs => tx_type.rs} (90%) diff --git a/Cargo.lock b/Cargo.lock index 927e653a609..6ccf3cdabb1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8381,6 +8381,7 @@ dependencies = [ "bytes", "derive_more 1.0.0", "op-alloy-consensus", + "reth-primitives-traits", ] [[package]] diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index a6f36732672..48f5877182f 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -19,3 +19,4 @@ alloy-eips.workspace = true alloy-rlp.workspace = true derive_more.workspace = true bytes.workspace = true +reth-primitives-traits.workspace = true \ No newline at end of file diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index f8d8e511498..a0745e7ac7d 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -8,4 +8,4 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod bedrock; -pub mod op_tx_type; +pub mod tx_type; diff --git a/crates/optimism/primitives/src/op_tx_type.rs b/crates/optimism/primitives/src/tx_type.rs similarity index 90% rename from crates/optimism/primitives/src/op_tx_type.rs rename to crates/optimism/primitives/src/tx_type.rs index b317bb05c9c..8536d352547 100644 --- a/crates/optimism/primitives/src/op_tx_type.rs +++ b/crates/optimism/primitives/src/tx_type.rs 
@@ -2,22 +2,51 @@ //! `OpTxType` implements `reth_primitives_traits::TxType`. //! This type is required because a `Compact` impl is needed on the deposit tx type. +use core::fmt::Debug; +use std::convert::TryFrom; + use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable, Error}; use bytes::BufMut; -use core::fmt::Debug; use derive_more::{ derive::{From, Into}, Display, }; use op_alloy_consensus::OpTxType as AlloyOpTxType; -use std::convert::TryFrom; +use reth_primitives_traits::TxType; /// Wrapper type for `AlloyOpTxType` to implement `TxType` trait. #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] #[into(u8)] pub struct OpTxType(AlloyOpTxType); +impl TxType for OpTxType { + #[inline] + fn is_legacy(&self) -> bool { + matches!(self.0, AlloyOpTxType::Legacy) + } + + #[inline] + fn is_eip2930(&self) -> bool { + matches!(self.0, AlloyOpTxType::Eip2930) + } + + #[inline] + fn is_eip1559(&self) -> bool { + matches!(self.0, AlloyOpTxType::Eip1559) + } + + #[inline] + fn is_eip4844(&self) -> bool { + false + } + + #[inline] + fn is_eip7702(&self) -> bool { + matches!(self.0, AlloyOpTxType::Eip7702) + } +} + impl From for U8 { fn from(tx_type: OpTxType) -> Self { Self::from(u8::from(tx_type)) diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs index 078d8ac947b..b1828ad57d9 100644 --- a/crates/primitives-traits/src/tx_type.rs +++ b/crates/primitives-traits/src/tx_type.rs @@ -30,26 +30,18 @@ pub trait TxType: + alloy_rlp::Encodable + alloy_rlp::Decodable { -} + /// Returns `true` if this is a legacy transaction. + fn is_legacy(&self) -> bool; -impl TxType for T where - T: Send - + Sync - + Unpin - + Clone - + Copy - + Default - + fmt::Debug - + fmt::Display - + PartialEq - + Eq - + PartialEq - + Into - + Into - + TryFrom - + TryFrom - + TryFrom - + alloy_rlp::Encodable - + alloy_rlp::Decodable -{ + /// Returns `true` if this is an eip-2930 transaction. 
+ fn is_eip2930(&self) -> bool; + + /// Returns `true` if this is an eip-1559 transaction. + fn is_eip1559(&self) -> bool; + + /// Returns `true` if this is an eip-4844 transaction. + fn is_eip4844(&self) -> bool; + + /// Returns `true` if this is an eip-7702 transaction. + fn is_eip7702(&self) -> bool; } diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 46e37086113..66fb7df5d64 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -91,6 +91,33 @@ impl TxType { } } +impl reth_primitives_traits::TxType for TxType { + #[inline] + fn is_legacy(&self) -> bool { + matches!(self, Self::Legacy) + } + + #[inline] + fn is_eip2930(&self) -> bool { + matches!(self, Self::Eip2930) + } + + #[inline] + fn is_eip1559(&self) -> bool { + matches!(self, Self::Eip1559) + } + + #[inline] + fn is_eip4844(&self) -> bool { + matches!(self, Self::Eip4844) + } + + #[inline] + fn is_eip7702(&self) -> bool { + matches!(self, Self::Eip7702) + } +} + impl From for u8 { fn from(value: TxType) -> Self { match value { From ac5976ff51251d59d726517278e7555c57a7c7d6 Mon Sep 17 00:00:00 2001 From: Krishang Shah <93703995+kamuik16@users.noreply.github.com> Date: Fri, 15 Nov 2024 21:35:19 +0530 Subject: [PATCH 496/970] feat: implement Compact for OpTxType (#12537) --- Cargo.lock | 3 + crates/optimism/primitives/Cargo.toml | 12 ++- crates/optimism/primitives/src/tx_type.rs | 93 ++++++++++++++++++++ crates/primitives/src/transaction/mod.rs | 2 +- crates/primitives/src/transaction/tx_type.rs | 8 +- 5 files changed, 112 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6ccf3cdabb1..ace45858d20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8381,7 +8381,10 @@ dependencies = [ "bytes", "derive_more 1.0.0", "op-alloy-consensus", + "reth-codecs", + "reth-primitives", "reth-primitives-traits", + "rstest", ] [[package]] diff --git a/crates/optimism/primitives/Cargo.toml 
b/crates/optimism/primitives/Cargo.toml index 48f5877182f..216e559a201 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -19,4 +19,14 @@ alloy-eips.workspace = true alloy-rlp.workspace = true derive_more.workspace = true bytes.workspace = true -reth-primitives-traits.workspace = true \ No newline at end of file +reth-primitives-traits.workspace = true +reth-codecs = { workspace = true, optional = true } +reth-primitives = { workspace = true, features = ["reth-codec"], optional = true } + +[features] +default = ["reth-codec"] +reth-codec = ["dep:reth-codecs", "dep:reth-primitives"] + +[dev-dependencies] +reth-codecs = { workspace = true, features = ["test-utils"] } +rstest.workspace = true \ No newline at end of file diff --git a/crates/optimism/primitives/src/tx_type.rs b/crates/optimism/primitives/src/tx_type.rs index 8536d352547..1b505920120 100644 --- a/crates/optimism/primitives/src/tx_type.rs +++ b/crates/optimism/primitives/src/tx_type.rs @@ -15,6 +15,16 @@ use derive_more::{ use op_alloy_consensus::OpTxType as AlloyOpTxType; use reth_primitives_traits::TxType; +#[cfg(feature = "reth-codec")] +use alloy_consensus::constants::EIP7702_TX_TYPE_ID; +#[cfg(feature = "reth-codec")] +use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; +#[cfg(feature = "reth-codec")] +use reth_primitives::transaction::{ + COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, + COMPACT_IDENTIFIER_LEGACY, +}; + /// Wrapper type for `AlloyOpTxType` to implement `TxType` trait. 
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] #[into(u8)] @@ -123,10 +133,55 @@ impl Decodable for OpTxType { } } +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for OpTxType { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + match self.0 { + AlloyOpTxType::Legacy => COMPACT_IDENTIFIER_LEGACY, + AlloyOpTxType::Eip2930 => COMPACT_IDENTIFIER_EIP2930, + AlloyOpTxType::Eip1559 => COMPACT_IDENTIFIER_EIP1559, + AlloyOpTxType::Eip7702 => { + buf.put_u8(EIP7702_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + AlloyOpTxType::Deposit => { + buf.put_u8(DEPOSIT_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + } + } + + fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + ( + match identifier { + COMPACT_IDENTIFIER_LEGACY => Self(AlloyOpTxType::Legacy), + COMPACT_IDENTIFIER_EIP2930 => Self(AlloyOpTxType::Eip2930), + COMPACT_IDENTIFIER_EIP1559 => Self(AlloyOpTxType::Eip1559), + COMPACT_EXTENDED_IDENTIFIER_FLAG => { + let extended_identifier = buf.get_u8(); + match extended_identifier { + EIP7702_TX_TYPE_ID => Self(AlloyOpTxType::Eip7702), + DEPOSIT_TX_TYPE_ID => Self(AlloyOpTxType::Deposit), + _ => panic!("Unsupported OpTxType identifier: {extended_identifier}"), + } + } + _ => panic!("Unknown identifier for OpTxType: {identifier}"), + }, + buf, + ) + } +} + #[cfg(test)] mod tests { use super::*; use bytes::BytesMut; + use reth_codecs::Compact; + use rstest::rstest; #[test] fn test_from_alloy_op_tx_type() { @@ -215,4 +270,42 @@ mod tests { let result = OpTxType::decode(&mut buf); assert!(result.is_err()); } + + #[rstest] + #[case(OpTxType(AlloyOpTxType::Legacy), COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip2930), COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip1559), COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip7702), 
COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[case(OpTxType(AlloyOpTxType::Deposit), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] + fn test_txtype_to_compact( + #[case] tx_type: OpTxType, + #[case] expected_identifier: usize, + #[case] expected_buf: Vec, + ) { + let mut buf = vec![]; + let identifier = tx_type.to_compact(&mut buf); + + assert_eq!( + identifier, expected_identifier, + "Unexpected identifier for OpTxType {tx_type:?}", + ); + assert_eq!(buf, expected_buf, "Unexpected buffer for OpTxType {tx_type:?}",); + } + + #[rstest] + #[case(OpTxType(AlloyOpTxType::Legacy), COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip2930), COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip1559), COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip7702), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[case(OpTxType(AlloyOpTxType::Deposit), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] + fn test_txtype_from_compact( + #[case] expected_type: OpTxType, + #[case] identifier: usize, + #[case] buf: Vec, + ) { + let (actual_type, remaining_buf) = OpTxType::from_compact(&buf, identifier); + + assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}"); + assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}"); + } } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index e5e4517d9dd..f325b72776f 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -62,7 +62,7 @@ use op_alloy_consensus::TxDeposit; #[cfg(feature = "optimism")] pub use tx_type::DEPOSIT_TX_TYPE_ID; #[cfg(any(test, feature = "reth-codec"))] -use tx_type::{ +pub use tx_type::{ COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY, }; diff --git 
a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 66fb7df5d64..3445cb184c1 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -9,21 +9,21 @@ use serde::{Deserialize, Serialize}; /// Identifier parameter for legacy transaction #[cfg(any(test, feature = "reth-codec"))] -pub(crate) const COMPACT_IDENTIFIER_LEGACY: usize = 0; +pub const COMPACT_IDENTIFIER_LEGACY: usize = 0; /// Identifier parameter for EIP-2930 transaction #[cfg(any(test, feature = "reth-codec"))] -pub(crate) const COMPACT_IDENTIFIER_EIP2930: usize = 1; +pub const COMPACT_IDENTIFIER_EIP2930: usize = 1; /// Identifier parameter for EIP-1559 transaction #[cfg(any(test, feature = "reth-codec"))] -pub(crate) const COMPACT_IDENTIFIER_EIP1559: usize = 2; +pub const COMPACT_IDENTIFIER_EIP1559: usize = 2; /// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier /// parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type is /// read from the buffer as a single byte. #[cfg(any(test, feature = "reth-codec"))] -pub(crate) const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; +pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; /// Identifier for [`TxDeposit`](op_alloy_consensus::TxDeposit) transaction. 
#[cfg(feature = "optimism")] From 841267d1b28e4f315012cdf8373212a7cde690ca Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 15 Nov 2024 17:16:55 +0100 Subject: [PATCH 497/970] feat: add helpers to obtain the execution witness for a payload (#12573) --- Cargo.lock | 1 + crates/optimism/payload/Cargo.toml | 3 +- crates/optimism/payload/src/builder.rs | 96 ++++++++++++++++++++------ 3 files changed, 76 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ace45858d20..650dc4607d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8344,6 +8344,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types-debug", "alloy-rpc-types-engine", "op-alloy-consensus", "op-alloy-rpc-types-engine", diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 839355b2158..646df040e19 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true -reth-revm.workspace = true +reth-revm = { workspace = true, features = ["witness"] } reth-transaction-pool.workspace = true reth-provider.workspace = true reth-rpc-types-compat.workspace = true @@ -41,6 +41,7 @@ alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true op-alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true +alloy-rpc-types-debug.workspace = true alloy-consensus.workspace = true # misc diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index beb9a5c4ae2..2542ff527df 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -5,6 +5,7 @@ use std::{fmt::Display, sync::Arc}; use alloy_consensus::{Header, Transaction, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::merge::BEACON_NONCE; use alloy_primitives::{Address, Bytes, U256}; +use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; 
use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; @@ -20,7 +21,7 @@ use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, Block, BlockBody, Receipt, SealedHeader, TransactionSigned, TxType, }; -use reth_provider::{ProviderError, StateProviderFactory, StateRootProvider}; +use reth_provider::{ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ noop::NoopTransactionPool, BestTransactionsAttributes, PayloadTransactions, TransactionPool, @@ -38,6 +39,7 @@ use crate::{ payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, }; use op_alloy_consensus::DepositTransaction; +use reth_revm::witness::ExecutionWitnessRecord; use reth_transaction_pool::pool::BestPayloadTransactions; /// Optimism's payload builder @@ -234,36 +236,33 @@ where Pool: TransactionPool, Txs: OpPayloadTransactions, { - /// Builds the payload on top of the state. - pub fn build( + /// Executes the payload and returns the outcome. + pub fn execute( self, - mut db: State, - ctx: OpPayloadBuilderCtx, - ) -> Result, PayloadBuilderError> + state: &mut State, + ctx: &OpPayloadBuilderCtx, + ) -> Result, PayloadBuilderError> where EvmConfig: ConfigureEvm

, - DB: Database + AsRef

, - P: StateRootProvider, + DB: Database, { let Self { pool, best } = self; debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); // 1. apply eip-4788 pre block contract call - ctx.apply_pre_beacon_root_contract_call(&mut db)?; + ctx.apply_pre_beacon_root_contract_call(state)?; // 2. ensure create2deployer is force deployed - ctx.ensure_create2_deployer(&mut db)?; + ctx.ensure_create2_deployer(state)?; // 3. execute sequencer transactions - let mut info = ctx.execute_sequencer_transactions(&mut db)?; + let mut info = ctx.execute_sequencer_transactions(state)?; // 4. if mem pool transactions are requested we execute them if !ctx.attributes().no_tx_pool { let best_txs = best.best_transactions(pool, ctx.best_transaction_attributes()); - if let Some(cancelled) = - ctx.execute_best_transactions::<_, Pool>(&mut info, &mut db, best_txs)? - { - return Ok(cancelled) + if ctx.execute_best_transactions::<_, Pool>(&mut info, state, best_txs)?.is_some() { + return Ok(BuildOutcomeKind::Cancelled) } // check if the new payload is even more valuable @@ -273,16 +272,38 @@ where } } - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - ctx.commit_withdrawals(&mut db)?; + let withdrawals_outcome = ctx.commit_withdrawals(state)?; // merge all transitions into bundle state, this would apply the withdrawal balance changes // and 4788 contract call - db.merge_transitions(BundleRetention::Reverts); + state.merge_transitions(BundleRetention::Reverts); + + Ok(BuildOutcomeKind::Better { payload: ExecutedPayload { info, withdrawals_outcome } }) + } + + /// Builds the payload on top of the state. + pub fn build( + self, + mut state: State, + ctx: OpPayloadBuilderCtx, + ) -> Result, PayloadBuilderError> + where + EvmConfig: ConfigureEvm

, + DB: Database + AsRef

, + P: StateRootProvider, + { + let ExecutedPayload { + info, + withdrawals_outcome: WithdrawalsOutcome { withdrawals, withdrawals_root }, + } = match self.execute(&mut state, &ctx)? { + BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, + BuildOutcomeKind::Cancelled => return Ok(BuildOutcomeKind::Cancelled), + BuildOutcomeKind::Aborted { fees } => return Ok(BuildOutcomeKind::Aborted { fees }), + }; let block_number = ctx.block_number(); let execution_outcome = ExecutionOutcome::new( - db.take_bundle(), + state.take_bundle(), vec![info.receipts.clone()].into(), block_number, Vec::new(), @@ -302,7 +323,7 @@ where // // calculate the state root let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); let (state_root, trie_output) = { - db.database.as_ref().state_root_with_updates(hashed_state.clone()).inspect_err( + state.database.as_ref().state_root_with_updates(hashed_state.clone()).inspect_err( |err| { warn!(target: "payload_builder", parent_header=%ctx.parent().hash(), @@ -388,6 +409,24 @@ where Ok(BuildOutcomeKind::Better { payload }) } } + + /// Builds the payload and returns its [`ExecutionWitness`] based on the state after execution. + pub fn witness( + self, + state: &mut State, + ctx: &OpPayloadBuilderCtx, + ) -> Result + where + EvmConfig: ConfigureEvm

, + DB: Database + AsRef

, + P: StateProofProvider, + { + let _ = self.execute(state, ctx)?; + let ExecutionWitnessRecord { hashed_state, codes, keys } = + ExecutionWitnessRecord::from_executed_state(state); + let state = state.database.as_ref().witness(Default::default(), hashed_state)?; + Ok(ExecutionWitness { state: state.into_iter().collect(), codes, keys }) + } } /// A type that returns a the [`PayloadTransactions`] that should be included in the pool. @@ -411,6 +450,15 @@ impl OpPayloadTransactions for () { } } +/// Holds the state after execution +#[derive(Debug)] +pub struct ExecutedPayload { + /// Tracked execution info + pub info: ExecutionInfo, + /// Outcome after committing withdrawals. + pub withdrawals_outcome: WithdrawalsOutcome, +} + /// This acts as the container for executed transactions and its byproducts (receipts, gas used) #[derive(Default, Debug)] pub struct ExecutionInfo { @@ -725,13 +773,15 @@ where Ok(info) } - /// Executes the given best transactions and updates the execution info + /// Executes the given best transactions and updates the execution info. + /// + /// Returns `Ok(Some(())` if the job was cancelled. pub fn execute_best_transactions( &self, info: &mut ExecutionInfo, db: &mut State, mut best_txs: impl PayloadTransactions, - ) -> Result>, PayloadBuilderError> + ) -> Result, PayloadBuilderError> where DB: Database, Pool: TransactionPool, @@ -764,7 +814,7 @@ where // check if the job was cancelled, if so we can exit early if self.cancel.is_cancelled() { - return Ok(Some(BuildOutcomeKind::Cancelled)) + return Ok(Some(())) } // Configure the environment for the tx. 
From 6e00e5842669c98063d55931650c81d7a0ce3da1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 15 Nov 2024 18:33:37 +0100 Subject: [PATCH 498/970] feat: add payload witness fn (#12579) --- crates/optimism/payload/src/builder.rs | 77 ++++++++++++++++++++------ 1 file changed, 61 insertions(+), 16 deletions(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 2542ff527df..8fb569f3dac 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -39,6 +39,7 @@ use crate::{ payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, }; use op_alloy_consensus::DepositTransaction; +use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_revm::witness::ExecutionWitnessRecord; use reth_transaction_pool::pool::BestPayloadTransactions; @@ -94,21 +95,6 @@ where EvmConfig: ConfigureEvm

, Txs: OpPayloadTransactions, { - /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload - /// (that has the `parent` as its parent). - pub fn cfg_and_block_env( - &self, - config: &PayloadConfig, - parent: &Header, - ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { - let next_attributes = NextBlockEnvAttributes { - timestamp: config.attributes.timestamp(), - suggested_fee_recipient: config.attributes.suggested_fee_recipient(), - prev_randao: config.attributes.prev_randao(), - }; - self.evm_config.next_cfg_and_block_env(parent, next_attributes) - } - /// Constructs an Optimism payload from the transactions sent via the /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in /// the payload attributes, the transaction pool will be ignored and the only transactions @@ -126,7 +112,7 @@ where Pool: TransactionPool, { let (initialized_cfg, initialized_block_env) = self - .cfg_and_block_env(&args.config, &args.config.parent_header) + .cfg_and_block_env(&args.config.attributes, &args.config.parent_header) .map_err(PayloadBuilderError::other)?; let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; @@ -161,6 +147,65 @@ where } } +impl OpPayloadBuilder +where + EvmConfig: ConfigureEvm
, +{ + /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload + /// (that has the `parent` as its parent). + pub fn cfg_and_block_env( + &self, + attributes: &OpPayloadBuilderAttributes, + parent: &Header, + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { + let next_attributes = NextBlockEnvAttributes { + timestamp: attributes.timestamp(), + suggested_fee_recipient: attributes.suggested_fee_recipient(), + prev_randao: attributes.prev_randao(), + }; + self.evm_config.next_cfg_and_block_env(parent, next_attributes) + } + + /// Computes the witness for the payload. + pub fn payload_witness( + &self, + client: &Client, + parent: SealedHeader, + attributes: OpPayloadAttributes, + ) -> Result + where + Client: StateProviderFactory + ChainSpecProvider, + { + let attributes = OpPayloadBuilderAttributes::try_new(parent.hash(), attributes, 3) + .map_err(PayloadBuilderError::other)?; + + let (initialized_cfg, initialized_block_env) = + self.cfg_and_block_env(&attributes, &parent).map_err(PayloadBuilderError::other)?; + + let config = PayloadConfig { + parent_header: Arc::new(parent), + attributes, + extra_data: Default::default(), + }; + let ctx = OpPayloadBuilderCtx { + evm_config: self.evm_config.clone(), + chain_spec: client.chain_spec(), + config, + initialized_cfg, + initialized_block_env, + cancel: Default::default(), + best_payload: Default::default(), + }; + + let state_provider = client.state_by_block_hash(ctx.parent().hash())?; + let state = StateProviderDatabase::new(state_provider); + let mut state = State::builder().with_database(state).with_bundle_update().build(); + + let builder = OpBuilder { pool: NoopTransactionPool::default(), best: () }; + builder.witness(&mut state, &ctx) + } +} + /// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. 
impl PayloadBuilder for OpPayloadBuilder where From f0b8b9b221ad53d52f809daec5b76c6d11ea7ec3 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 15 Nov 2024 23:04:35 +0400 Subject: [PATCH 499/970] feat: add tx_number consistency check to StaticFileProviderRW (#12570) Co-authored-by: Matthias Seitz --- crates/stages/api/src/error.rs | 14 ------- crates/stages/stages/src/stages/bodies.rs | 13 +----- crates/storage/errors/src/provider.rs | 3 ++ .../src/providers/static_file/writer.rs | 41 ++++++++++--------- 4 files changed, 25 insertions(+), 46 deletions(-) diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs index b12f5186f3b..9a4ef35aaf2 100644 --- a/crates/stages/api/src/error.rs +++ b/crates/stages/api/src/error.rs @@ -1,5 +1,4 @@ use crate::PipelineEvent; -use alloy_primitives::TxNumber; use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, DatabaseError, RethError}; use reth_network_p2p::error::DownloadError; @@ -100,18 +99,6 @@ pub enum StageError { /// Static File segment segment: StaticFileSegment, }, - /// Unrecoverable inconsistency error related to a transaction number in a static file segment. - #[error( - "inconsistent transaction number for {segment}. db: {database}, static_file: {static_file}" - )] - InconsistentTxNumber { - /// Static File segment where this error was encountered. - segment: StaticFileSegment, - /// Expected database transaction number. - database: TxNumber, - /// Expected static file transaction number. - static_file: TxNumber, - }, /// The prune checkpoint for the given segment is missing. #[error("missing prune checkpoint for {0}")] MissingPruneCheckpoint(PruneSegment), @@ -146,7 +133,6 @@ impl StageError { Self::MissingDownloadBuffer | Self::MissingSyncGap | Self::ChannelClosed | - Self::InconsistentTxNumber { .. 
} | Self::Internal(_) | Self::Fatal(_) ) diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 48bc679f5bd..07b97574972 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -183,18 +183,7 @@ where BlockResponse::Full(block) => { // Write transactions for transaction in block.body.transactions() { - let appended_tx_number = - static_file_producer.append_transaction(next_tx_num, transaction)?; - - if appended_tx_number != next_tx_num { - // This scenario indicates a critical error in the logic of adding new - // items. It should be treated as an `expect()` failure. - return Err(StageError::InconsistentTxNumber { - segment: StaticFileSegment::Transactions, - database: next_tx_num, - static_file: appended_tx_number, - }) - } + static_file_producer.append_transaction(next_tx_num, transaction)?; // Increment transaction id for each transaction. next_tx_num += 1; diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index d60a2adb92b..b6fcee545d5 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -133,6 +133,9 @@ pub enum ProviderError { /// Trying to insert data from an unexpected block number. #[display("trying to append data to {_0} as block #{_1} but expected block #{_2}")] UnexpectedStaticFileBlockNumber(StaticFileSegment, BlockNumber, BlockNumber), + /// Trying to insert data from an unexpected block number. + #[display("trying to append row to {_0} at index #{_1} but expected index #{_2}")] + UnexpectedStaticFileTxNumber(StaticFileSegment, TxNumber, TxNumber), /// Static File Provider was initialized as read-only. 
#[display("cannot get a writer on a read-only environment.")] ReadOnlyStaticFileAccess, diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index ef01bd773c8..5951dbb751f 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -498,16 +498,24 @@ impl StaticFileProviderRW { &mut self, tx_num: TxNumber, value: V, - ) -> ProviderResult { - if self.writer.user_header().tx_range().is_none() { - self.writer.user_header_mut().set_tx_range(tx_num, tx_num); - } else { + ) -> ProviderResult<()> { + if let Some(range) = self.writer.user_header().tx_range() { + let next_tx = range.end() + 1; + if next_tx != tx_num { + return Err(ProviderError::UnexpectedStaticFileTxNumber( + self.writer.user_header().segment(), + tx_num, + next_tx, + )) + } self.writer.user_header_mut().increment_tx(); + } else { + self.writer.user_header_mut().set_tx_range(tx_num, tx_num); } self.append_column(value)?; - Ok(self.writer.user_header().tx_end().expect("qed")) + Ok(()) } /// Appends header to static file. @@ -550,16 +558,12 @@ impl StaticFileProviderRW { /// empty blocks and this function wouldn't be called. /// /// Returns the current [`TxNumber`] as seen in the static file. - pub fn append_transaction( - &mut self, - tx_num: TxNumber, - tx: impl Compact, - ) -> ProviderResult { + pub fn append_transaction(&mut self, tx_num: TxNumber, tx: impl Compact) -> ProviderResult<()> { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Transactions); - let result = self.append_with_tx_number(tx_num, tx)?; + self.append_with_tx_number(tx_num, tx)?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -569,7 +573,7 @@ impl StaticFileProviderRW { ); } - Ok(result) + Ok(()) } /// Appends receipt to static file. 
@@ -578,16 +582,12 @@ impl StaticFileProviderRW { /// empty blocks and this function wouldn't be called. /// /// Returns the current [`TxNumber`] as seen in the static file. - pub fn append_receipt( - &mut self, - tx_num: TxNumber, - receipt: &Receipt, - ) -> ProviderResult { + pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &Receipt) -> ProviderResult<()> { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Receipts); - let result = self.append_with_tx_number(tx_num, receipt)?; + self.append_with_tx_number(tx_num, receipt)?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -597,7 +597,7 @@ impl StaticFileProviderRW { ); } - Ok(result) + Ok(()) } /// Appends multiple receipts to the static file. @@ -625,7 +625,8 @@ impl StaticFileProviderRW { for receipt_result in receipts_iter { let (tx_num, receipt) = receipt_result?; - tx_number = self.append_with_tx_number(tx_num, receipt.borrow())?; + self.append_with_tx_number(tx_num, receipt.borrow())?; + tx_number = tx_num; count += 1; } From b31b1ea288363de9010fa3608be130382625ed14 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 15 Nov 2024 20:44:53 +0100 Subject: [PATCH 500/970] feat: add op debug witness api (#12583) Co-authored-by: Federico Gimenez --- Cargo.lock | 5 ++ crates/optimism/rpc/Cargo.toml | 8 ++- crates/optimism/rpc/src/lib.rs | 1 + crates/optimism/rpc/src/witness.rs | 81 ++++++++++++++++++++++++++++++ crates/rpc/rpc-api/src/debug.rs | 8 +-- 5 files changed, 98 insertions(+), 5 deletions(-) create mode 100644 crates/optimism/rpc/src/witness.rs diff --git a/Cargo.lock b/Cargo.lock index 650dc4607d9..ac3dd4fa988 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8395,12 +8395,15 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", + "alloy-rpc-types-debug", "alloy-rpc-types-eth", "derive_more 1.0.0", + "jsonrpsee-core", "jsonrpsee-types", "op-alloy-consensus", 
"op-alloy-network", "op-alloy-rpc-types", + "op-alloy-rpc-types-engine", "parking_lot", "reqwest", "reth-chainspec", @@ -8412,9 +8415,11 @@ dependencies = [ "reth-optimism-consensus", "reth-optimism-evm", "reth-optimism-forks", + "reth-optimism-payload-builder", "reth-primitives", "reth-provider", "reth-rpc", + "reth-rpc-api", "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-rpc-server-types", diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 37b64b774a1..17fafef7096 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -22,6 +22,7 @@ reth-rpc-server-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-rpc.workspace = true +reth-rpc-api.workspace = true reth-node-api.workspace = true reth-network-api.workspace = true reth-node-builder.workspace = true @@ -31,15 +32,18 @@ reth-chainspec.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-evm.workspace = true +reth-optimism-payload-builder.workspace = true reth-optimism-forks.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true +alloy-rpc-types-debug.workspace = true alloy-consensus.workspace = true op-alloy-network.workspace = true op-alloy-rpc-types.workspace = true +op-alloy-rpc-types-engine.workspace = true op-alloy-consensus.workspace = true revm.workspace = true @@ -49,6 +53,7 @@ tokio.workspace = true reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } # rpc +jsonrpsee-core.workspace = true jsonrpsee-types.workspace = true serde_json.workspace = true @@ -66,5 +71,6 @@ optimism = [ "reth-primitives/optimism", "reth-provider/optimism", "revm/optimism", - "reth-optimism-consensus/optimism" + "reth-optimism-consensus/optimism", + "reth-optimism-payload-builder/optimism" ] diff --git a/crates/optimism/rpc/src/lib.rs 
b/crates/optimism/rpc/src/lib.rs index 44d0fa35389..0fa0debdf33 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -13,6 +13,7 @@ pub mod error; pub mod eth; pub mod sequencer; +pub mod witness; pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; pub use eth::{OpEthApi, OpReceiptBuilder}; diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs new file mode 100644 index 00000000000..0521fa9025d --- /dev/null +++ b/crates/optimism/rpc/src/witness.rs @@ -0,0 +1,81 @@ +//! Support for optimism specific witness RPCs. + +use alloy_consensus::Header; +use alloy_primitives::B256; +use alloy_rpc_types_debug::ExecutionWitness; +use jsonrpsee_core::RpcResult; +use op_alloy_rpc_types_engine::OpPayloadAttributes; +use reth_chainspec::ChainSpecProvider; +use reth_evm::ConfigureEvm; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_payload_builder::OpPayloadBuilder; +use reth_primitives::SealedHeader; +use reth_provider::{BlockReaderIdExt, ProviderError, ProviderResult, StateProviderFactory}; +use reth_rpc_api::DebugExecutionWitnessApiServer; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use std::{fmt::Debug, sync::Arc}; + +/// An extension to the `debug_` namespace of the RPC API. +pub struct OpDebugWitnessApi { + inner: Arc>, +} + +impl OpDebugWitnessApi { + /// Creates a new instance of the `OpDebugWitnessApi`. + pub fn new(provider: Provider, evm_config: EvmConfig) -> Self { + let builder = OpPayloadBuilder::new(evm_config); + let inner = OpDebugWitnessApiInner { provider, builder }; + Self { inner: Arc::new(inner) } + } +} + +impl OpDebugWitnessApi +where + Provider: BlockReaderIdExt, +{ + /// Fetches the parent header by hash. + fn parent_header(&self, parent_block_hash: B256) -> ProviderResult { + self.inner + .provider + .sealed_header_by_hash(parent_block_hash)? 
+ .ok_or_else(|| ProviderError::HeaderNotFound(parent_block_hash.into())) + } +} + +impl DebugExecutionWitnessApiServer + for OpDebugWitnessApi +where + Provider: BlockReaderIdExt + + StateProviderFactory + + ChainSpecProvider + + 'static, + EvmConfig: ConfigureEvm
+ 'static, +{ + fn execute_payload( + &self, + parent_block_hash: B256, + attributes: OpPayloadAttributes, + ) -> RpcResult { + let parent_header = self.parent_header(parent_block_hash).to_rpc_result()?; + self.inner + .builder + .payload_witness(&self.inner.provider, parent_header, attributes) + .map_err(|err| internal_rpc_err(err.to_string())) + } +} + +impl Clone for OpDebugWitnessApi { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} +impl Debug for OpDebugWitnessApi { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OpDebugWitnessApi").finish_non_exhaustive() + } +} + +struct OpDebugWitnessApiInner { + provider: Provider, + builder: OpPayloadBuilder, +} diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 52b63fe3021..28ed9af5c13 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -399,10 +399,10 @@ pub trait DebugExecutionWitnessApi { /// hashed trie nodes to their preimages that were required during the execution of the block, /// including during state root recomputation. /// - /// The first argument is the block number or block hash. The second argument is the payload - /// attributes for the new block. The third argument is a list of transactions to be included. - #[method(name = "executePayload")] - async fn execute_payload( + /// The first argument is the parent block hash. The second argument is the payload + /// attributes for the new block. 
+ #[method(name = "executePayload", blocking)] + fn execute_payload( &self, parent_block_hash: B256, attributes: Attributes, From 02237bfa711ea6d1757a2569ac59639a6ed36e5a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 16 Nov 2024 05:19:40 +0100 Subject: [PATCH 501/970] feat: add contains fns to transport rpc modules (#12593) --- crates/rpc/rpc-builder/src/lib.rs | 20 ++++++++++++++++++++ crates/rpc/rpc-server-types/src/module.rs | 9 +++++++++ 2 files changed, 29 insertions(+) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 27eceed98cb..8af60bda187 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1934,6 +1934,26 @@ impl TransportRpcModuleConfig { self.config.as_ref() } + /// Returns true if the given module is configured for any transport. + pub fn contains_any(&self, module: &RethRpcModule) -> bool { + self.contains_http(module) || self.contains_ws(module) || self.contains_ipc(module) + } + + /// Returns true if the given module is configured for the http transport. + pub fn contains_http(&self, module: &RethRpcModule) -> bool { + self.http.as_ref().map_or(false, |http| http.contains(module)) + } + + /// Returns true if the given module is configured for the ws transport. + pub fn contains_ws(&self, module: &RethRpcModule) -> bool { + self.ws.as_ref().map_or(false, |ws| ws.contains(module)) + } + + /// Returns true if the given module is configured for the ipc transport. + pub fn contains_ipc(&self, module: &RethRpcModule) -> bool { + self.ipc.as_ref().map_or(false, |ipc| ipc.contains(module)) + } + /// Ensures that both http and ws are configured and that they are configured to use the same /// port. 
fn ensure_ws_http_identical(&self) -> Result<(), WsHttpSamePortError> { diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index 9f96ff0cef3..43e4a937436 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -140,6 +140,15 @@ impl RpcModuleSelection { (None, None) => true, } } + + /// Returns true if the selection contains the given module. + pub fn contains(&self, module: &RethRpcModule) -> bool { + match self { + Self::All => true, + Self::Standard => Self::STANDARD_MODULES.contains(module), + Self::Selection(s) => s.contains(module), + } + } } impl From<&HashSet> for RpcModuleSelection { From 5276093e71ff48bfcc8c1cab00ae7c946eb4418f Mon Sep 17 00:00:00 2001 From: Ayodeji Akinola Date: Sat, 16 Nov 2024 05:39:34 +0100 Subject: [PATCH 502/970] chore(util): Add reth payload util (#12590) --- Cargo.lock | 12 ++ Cargo.toml | 2 + crates/optimism/node/Cargo.toml | 1 + crates/optimism/node/tests/it/priority.rs | 6 +- crates/optimism/payload/Cargo.toml | 1 + crates/optimism/payload/src/builder.rs | 3 +- crates/payload/util/Cargo.toml | 20 ++++ crates/payload/util/src/lib.rs | 15 +++ crates/payload/util/src/traits.rs | 20 ++++ crates/payload/util/src/transaction.rs | 128 +++++++++++++++++++++ crates/transaction-pool/Cargo.toml | 1 + crates/transaction-pool/src/pool/best.rs | 131 +--------------------- crates/transaction-pool/src/pool/mod.rs | 1 - crates/transaction-pool/src/traits.rs | 18 --- 14 files changed, 207 insertions(+), 152 deletions(-) create mode 100644 crates/payload/util/Cargo.toml create mode 100644 crates/payload/util/src/lib.rs create mode 100644 crates/payload/util/src/traits.rs create mode 100644 crates/payload/util/src/transaction.rs diff --git a/Cargo.lock b/Cargo.lock index ac3dd4fa988..9514d361fbf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8324,6 +8324,7 @@ dependencies = [ "reth-optimism-payload-builder", "reth-optimism-rpc", "reth-payload-builder", 
+ "reth-payload-util", "reth-primitives", "reth-provider", "reth-revm", @@ -8359,6 +8360,7 @@ dependencies = [ "reth-optimism-forks", "reth-payload-builder", "reth-payload-primitives", + "reth-payload-util", "reth-primitives", "reth-provider", "reth-revm", @@ -8486,6 +8488,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-payload-util" +version = "1.1.1" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "reth-primitives", +] + [[package]] name = "reth-payload-validator" version = "1.1.1" @@ -9265,6 +9276,7 @@ dependencies = [ "reth-execution-types", "reth-fs-util", "reth-metrics", + "reth-payload-util", "reth-primitives", "reth-primitives-traits", "reth-provider", diff --git a/Cargo.toml b/Cargo.toml index 3f65bceb4bf..398be3e5faf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,6 +82,7 @@ members = [ "crates/payload/builder/", "crates/payload/primitives/", "crates/payload/validator/", + "crates/payload/util/", "crates/primitives-traits/", "crates/primitives/", "crates/prune/prune", @@ -381,6 +382,7 @@ reth-optimism-storage = { path = "crates/optimism/storage" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-primitives = { path = "crates/payload/primitives" } reth-payload-validator = { path = "crates/payload/validator" } +reth-payload-util = { path = "crates/payload/util" } reth-primitives = { path = "crates/primitives", default-features = false, features = [ "std", ] } diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index fb8cc27787e..9a80c83deec 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -16,6 +16,7 @@ reth-chainspec.workspace = true reth-engine-local.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true +reth-payload-util.workspace = true reth-basic-payload-builder.workspace = true reth-consensus.workspace = true reth-node-api.workspace = true diff --git a/crates/optimism/node/tests/it/priority.rs 
b/crates/optimism/node/tests/it/priority.rs index 52e3bef3d91..f1260d2da01 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -25,12 +25,10 @@ use reth_optimism_node::{ OpEngineTypes, OpNode, }; use reth_optimism_payload_builder::builder::OpPayloadTransactions; +use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; use reth_primitives::{SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered}; use reth_provider::providers::BlockchainProvider2; -use reth_transaction_pool::{ - pool::{BestPayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}, - PayloadTransactions, -}; +use reth_transaction_pool::pool::BestPayloadTransactions; use std::sync::Arc; use tokio::sync::Mutex; diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 646df040e19..19a47be4951 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -22,6 +22,7 @@ reth-rpc-types-compat.workspace = true reth-evm.workspace = true reth-execution-types.workspace = true reth-payload-builder.workspace = true +reth-payload-util.workspace = true reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true reth-trie.workspace = true diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 8fb569f3dac..d0eb464ae02 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -16,6 +16,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; use reth_optimism_forks::OpHardforks; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_util::PayloadTransactions; use reth_primitives::{ proofs, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, @@ -24,7 +25,7 @@ use reth_primitives::{ use 
reth_provider::{ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, PayloadTransactions, TransactionPool, + noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, }; use reth_trie::HashedPostState; use revm::{ diff --git a/crates/payload/util/Cargo.toml b/crates/payload/util/Cargo.toml new file mode 100644 index 00000000000..2da8dc66028 --- /dev/null +++ b/crates/payload/util/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "reth-payload-util" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "reth payload utilities" + +[lints] +workspace = true + +[dependencies] +# reth +reth-primitives.workspace = true + +# alloy +alloy-primitives.workspace = true +alloy-consensus.workspace = true \ No newline at end of file diff --git a/crates/payload/util/src/lib.rs b/crates/payload/util/src/lib.rs new file mode 100644 index 00000000000..5ad0e83507b --- /dev/null +++ b/crates/payload/util/src/lib.rs @@ -0,0 +1,15 @@ +//! payload utils. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod traits; +mod transaction; + +pub use traits::PayloadTransactions; +pub use transaction::{PayloadTransactionsChain, PayloadTransactionsFixed}; diff --git a/crates/payload/util/src/traits.rs b/crates/payload/util/src/traits.rs new file mode 100644 index 00000000000..52dad511169 --- /dev/null +++ b/crates/payload/util/src/traits.rs @@ -0,0 +1,20 @@ +use alloy_primitives::Address; +use reth_primitives::TransactionSignedEcRecovered; + +/// Iterator that returns transactions for the block building process in the order they should be +/// included in the block. +/// +/// Can include transactions from the pool and other sources (alternative pools, +/// sequencer-originated transactions, etc.). +pub trait PayloadTransactions { + /// Returns the next transaction to include in the block. + fn next( + &mut self, + // In the future, `ctx` can include access to state for block building purposes. + ctx: (), + ) -> Option; + + /// Exclude descendants of the transaction with given sender and nonce from the iterator, + /// because this transaction won't be included in the block. 
+ fn mark_invalid(&mut self, sender: Address, nonce: u64); +} diff --git a/crates/payload/util/src/transaction.rs b/crates/payload/util/src/transaction.rs new file mode 100644 index 00000000000..a45e177d4d3 --- /dev/null +++ b/crates/payload/util/src/transaction.rs @@ -0,0 +1,128 @@ +use crate::PayloadTransactions; +use alloy_consensus::Transaction; +use alloy_primitives::Address; +use reth_primitives::TransactionSignedEcRecovered; + +/// An implementation of [`crate::traits::PayloadTransactions`] that yields +/// a pre-defined set of transactions. +/// +/// This is useful to put a sequencer-specified set of transactions into the block +/// and compose it with the rest of the transactions. +#[derive(Debug)] +pub struct PayloadTransactionsFixed { + transactions: Vec, + index: usize, +} + +impl PayloadTransactionsFixed { + /// Constructs a new [`PayloadTransactionsFixed`]. + pub fn new(transactions: Vec) -> Self { + Self { transactions, index: Default::default() } + } + + /// Constructs a new [`PayloadTransactionsFixed`] with a single transaction. + pub fn single(transaction: T) -> Self { + Self { transactions: vec![transaction], index: Default::default() } + } +} + +impl PayloadTransactions for PayloadTransactionsFixed { + fn next(&mut self, _ctx: ()) -> Option { + (self.index < self.transactions.len()).then(|| { + let tx = self.transactions[self.index].clone(); + self.index += 1; + tx + }) + } + + fn mark_invalid(&mut self, _sender: Address, _nonce: u64) {} +} + +/// Wrapper over [`crate::traits::PayloadTransactions`] that combines transactions from multiple +/// `PayloadTransactions` iterators and keeps track of the gas for both of iterators. +/// +/// We can't use [`Iterator::chain`], because: +/// (a) we need to propagate the `mark_invalid` and `no_updates` +/// (b) we need to keep track of the gas +/// +/// Notes that [`PayloadTransactionsChain`] fully drains the first iterator +/// before moving to the second one. 
+/// +/// If the `before` iterator has transactions that are not fitting into the block, +/// the after iterator will get propagated a `mark_invalid` call for each of them. +#[derive(Debug)] +pub struct PayloadTransactionsChain { + /// Iterator that will be used first + before: B, + /// Allowed gas for the transactions from `before` iterator. If `None`, no gas limit is + /// enforced. + before_max_gas: Option, + /// Gas used by the transactions from `before` iterator + before_gas: u64, + /// Iterator that will be used after `before` iterator + after: A, + /// Allowed gas for the transactions from `after` iterator. If `None`, no gas limit is + /// enforced. + after_max_gas: Option, + /// Gas used by the transactions from `after` iterator + after_gas: u64, +} + +impl PayloadTransactionsChain { + /// Constructs a new [`PayloadTransactionsChain`]. + pub fn new( + before: B, + before_max_gas: Option, + after: A, + after_max_gas: Option, + ) -> Self { + Self { + before, + before_max_gas, + before_gas: Default::default(), + after, + after_max_gas, + after_gas: Default::default(), + } + } +} + +impl PayloadTransactions for PayloadTransactionsChain +where + B: PayloadTransactions, + A: PayloadTransactions, +{ + fn next(&mut self, ctx: ()) -> Option { + while let Some(tx) = self.before.next(ctx) { + if let Some(before_max_gas) = self.before_max_gas { + if self.before_gas + tx.transaction.gas_limit() <= before_max_gas { + self.before_gas += tx.transaction.gas_limit(); + return Some(tx); + } + self.before.mark_invalid(tx.signer(), tx.transaction.nonce()); + self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + } else { + return Some(tx); + } + } + + while let Some(tx) = self.after.next(ctx) { + if let Some(after_max_gas) = self.after_max_gas { + if self.after_gas + tx.transaction.gas_limit() <= after_max_gas { + self.after_gas += tx.transaction.gas_limit(); + return Some(tx); + } + self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + } else { + return 
Some(tx); + } + } + + None + } + + fn mark_invalid(&mut self, sender: Address, nonce: u64) { + self.before.mark_invalid(sender, nonce); + self.after.mark_invalid(sender, nonce); + } +} diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 7c760c81c54..22df8253682 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -18,6 +18,7 @@ reth-chainspec.workspace = true reth-eth-wire-types.workspace = true reth-primitives = { workspace = true, features = ["c-kzg", "secp256k1"] } reth-primitives-traits.workspace = true +reth-payload-util.workspace = true reth-execution-types.workspace = true reth-fs-util.workspace = true reth-storage-api.workspace = true diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 21bcc668b75..7c2e5a025b7 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,11 +1,11 @@ use crate::{ identifier::{SenderId, TransactionId}, pool::pending::PendingTransaction, - PayloadTransactions, PoolTransaction, TransactionOrdering, ValidPoolTransaction, + PoolTransaction, TransactionOrdering, ValidPoolTransaction, }; -use alloy_consensus::Transaction; use alloy_primitives::Address; use core::fmt; +use reth_payload_util::PayloadTransactions; use reth_primitives::TransactionSignedEcRecovered; use std::{ collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, @@ -20,7 +20,6 @@ use tracing::debug; /// This is a wrapper around [`BestTransactions`] that also enforces a specific basefee. /// /// This iterator guarantees that all transaction it returns satisfy both the base fee and blob fee! -#[derive(Debug)] pub(crate) struct BestTransactionsWithFees { pub(crate) best: BestTransactions, pub(crate) base_fee: u64, @@ -73,7 +72,6 @@ impl Iterator for BestTransactionsWithFees { /// be executed on the current state, but only yields transactions that are ready to be executed /// now. 
While it contains all gapless transactions of a sender, it _always_ only returns the /// transaction with the current on chain nonce. -#[derive(Debug)] pub(crate) struct BestTransactions { /// Contains a copy of _all_ transactions of the pending pool at the point in time this /// iterator was created. @@ -397,130 +395,6 @@ where } } -/// An implementation of [`crate::traits::PayloadTransactions`] that yields -/// a pre-defined set of transactions. -/// -/// This is useful to put a sequencer-specified set of transactions into the block -/// and compose it with the rest of the transactions. -#[derive(Debug)] -pub struct PayloadTransactionsFixed { - transactions: Vec, - index: usize, -} - -impl PayloadTransactionsFixed { - /// Constructs a new [`PayloadTransactionsFixed`]. - pub fn new(transactions: Vec) -> Self { - Self { transactions, index: Default::default() } - } - - /// Constructs a new [`PayloadTransactionsFixed`] with a single transaction. - pub fn single(transaction: T) -> Self { - Self { transactions: vec![transaction], index: Default::default() } - } -} - -impl PayloadTransactions for PayloadTransactionsFixed { - fn next(&mut self, _ctx: ()) -> Option { - (self.index < self.transactions.len()).then(|| { - let tx = self.transactions[self.index].clone(); - self.index += 1; - tx - }) - } - - fn mark_invalid(&mut self, _sender: Address, _nonce: u64) {} -} - -/// Wrapper over [`crate::traits::PayloadTransactions`] that combines transactions from multiple -/// `PayloadTransactions` iterators and keeps track of the gas for both of iterators. -/// -/// We can't use [`Iterator::chain`], because: -/// (a) we need to propagate the `mark_invalid` and `no_updates` -/// (b) we need to keep track of the gas -/// -/// Notes that [`PayloadTransactionsChain`] fully drains the first iterator -/// before moving to the second one. 
-/// -/// If the `before` iterator has transactions that are not fitting into the block, -/// the after iterator will get propagated a `mark_invalid` call for each of them. -#[derive(Debug)] -pub struct PayloadTransactionsChain { - /// Iterator that will be used first - before: B, - /// Allowed gas for the transactions from `before` iterator. If `None`, no gas limit is - /// enforced. - before_max_gas: Option, - /// Gas used by the transactions from `before` iterator - before_gas: u64, - /// Iterator that will be used after `before` iterator - after: A, - /// Allowed gas for the transactions from `after` iterator. If `None`, no gas limit is - /// enforced. - after_max_gas: Option, - /// Gas used by the transactions from `after` iterator - after_gas: u64, -} - -impl PayloadTransactionsChain { - /// Constructs a new [`PayloadTransactionsChain`]. - pub fn new( - before: B, - before_max_gas: Option, - after: A, - after_max_gas: Option, - ) -> Self { - Self { - before, - before_max_gas, - before_gas: Default::default(), - after, - after_max_gas, - after_gas: Default::default(), - } - } -} - -impl PayloadTransactions for PayloadTransactionsChain -where - B: PayloadTransactions, - A: PayloadTransactions, -{ - fn next(&mut self, ctx: ()) -> Option { - while let Some(tx) = self.before.next(ctx) { - if let Some(before_max_gas) = self.before_max_gas { - if self.before_gas + tx.transaction.gas_limit() <= before_max_gas { - self.before_gas += tx.transaction.gas_limit(); - return Some(tx); - } - self.before.mark_invalid(tx.signer(), tx.transaction.nonce()); - self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); - } else { - return Some(tx); - } - } - - while let Some(tx) = self.after.next(ctx) { - if let Some(after_max_gas) = self.after_max_gas { - if self.after_gas + tx.transaction.gas_limit() <= after_max_gas { - self.after_gas += tx.transaction.gas_limit(); - return Some(tx); - } - self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); - } else { - return 
Some(tx); - } - } - - None - } - - fn mark_invalid(&mut self, sender: Address, nonce: u64) { - self.before.mark_invalid(sender, nonce); - self.after.mark_invalid(sender, nonce); - } -} - #[cfg(test)] mod tests { use super::*; @@ -530,6 +404,7 @@ mod tests { Priority, }; use alloy_primitives::U256; + use reth_payload_util::{PayloadTransactionsChain, PayloadTransactionsFixed}; #[test] fn test_best_iter() { diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 78cc790e942..6441ed687f2 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -108,7 +108,6 @@ use crate::{ }; pub use best::{ BestPayloadTransactions, BestTransactionFilter, BestTransactionsWithPrioritizedSenders, - PayloadTransactionsChain, PayloadTransactionsFixed, }; pub use blob::{blob_tx_priority, fee_delta}; pub use events::{FullTransactionEvent, TransactionEvent}; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 9b4cccfb9d1..6c247a84cdb 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1510,24 +1510,6 @@ impl Stream for NewSubpoolTransactionStream { } } -/// Iterator that returns transactions for the block building process in the order they should be -/// included in the block. -/// -/// Can include transactions from the pool and other sources (alternative pools, -/// sequencer-originated transactions, etc.). -pub trait PayloadTransactions { - /// Returns the next transaction to include in the block. - fn next( - &mut self, - // In the future, `ctx` can include access to state for block building purposes. - ctx: (), - ) -> Option; - - /// Exclude descendants of the transaction with given sender and nonce from the iterator, - /// because this transaction won't be included in the block. 
- fn mark_invalid(&mut self, sender: Address, nonce: u64); -} - #[cfg(test)] mod tests { use super::*; From c160005531347c5745cb34b81f295e8b64a4d25a Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 16 Nov 2024 05:39:49 +0100 Subject: [PATCH 503/970] rm generics when useless (#12581) --- crates/cli/commands/src/db/checksum.rs | 8 ++++---- crates/cli/commands/src/db/get.rs | 4 ++-- crates/cli/commands/src/db/tui.rs | 2 +- crates/cli/commands/src/import.rs | 2 +- crates/cli/util/src/load_secret_key.rs | 5 +---- crates/cli/util/src/parsers.rs | 8 ++++---- 6 files changed, 13 insertions(+), 16 deletions(-) diff --git a/crates/cli/commands/src/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs index 9aa48e0e865..4c986dc0332 100644 --- a/crates/cli/commands/src/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -79,17 +79,17 @@ impl TableViewer<(u64, Duration)> for ChecksumViewer<'_, N let mut cursor = tx.cursor_read::>()?; let walker = match (self.start_key.as_deref(), self.end_key.as_deref()) { (Some(start), Some(end)) => { - let start_key = table_key::(start).map(RawKey::::new)?; - let end_key = table_key::(end).map(RawKey::::new)?; + let start_key = table_key::(start).map(RawKey::new)?; + let end_key = table_key::(end).map(RawKey::new)?; cursor.walk_range(start_key..=end_key)? } (None, Some(end)) => { - let end_key = table_key::(end).map(RawKey::::new)?; + let end_key = table_key::(end).map(RawKey::new)?; cursor.walk_range(..=end_key)? } (Some(start), None) => { - let start_key = table_key::(start).map(RawKey::::new)?; + let start_key = table_key::(start).map(RawKey::new)?; cursor.walk_range(start_key..)? 
} (None, None) => cursor.walk_range(..)?, diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 94c0f63dd30..e9fc034519f 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -128,12 +128,12 @@ impl Command { /// Get an instance of key for given table pub(crate) fn table_key(key: &str) -> Result { - serde_json::from_str::(key).map_err(|e| eyre::eyre!(e)) + serde_json::from_str(key).map_err(|e| eyre::eyre!(e)) } /// Get an instance of subkey for given dupsort table fn table_subkey(subkey: Option<&str>) -> Result { - serde_json::from_str::(subkey.unwrap_or_default()).map_err(|e| eyre::eyre!(e)) + serde_json::from_str(subkey.unwrap_or_default()).map_err(|e| eyre::eyre!(e)) } struct GetValueViewer<'a, N: NodeTypesWithDB> { diff --git a/crates/cli/commands/src/db/tui.rs b/crates/cli/commands/src/db/tui.rs index 240ca376970..1a9fae7f891 100644 --- a/crates/cli/commands/src/db/tui.rs +++ b/crates/cli/commands/src/db/tui.rs @@ -365,7 +365,7 @@ where .map(|(i, k)| { ListItem::new(format!("[{:0>width$}]: {k:?}", i + app.skip, width = key_length)) }) - .collect::>>(); + .collect::>(); let key_list = List::new(formatted_keys) .block(Block::default().borders(Borders::ALL).title(format!( diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index a7c81e53052..ebda2deafa6 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -203,7 +203,7 @@ where let max_block = file_client.max_block().unwrap_or(0); - let pipeline = Pipeline::::builder() + let pipeline = Pipeline::builder() .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty .with_max_block(max_block) diff --git a/crates/cli/util/src/load_secret_key.rs b/crates/cli/util/src/load_secret_key.rs index 25da0e06676..8b3bee09c8c 100644 --- a/crates/cli/util/src/load_secret_key.rs +++ b/crates/cli/util/src/load_secret_key.rs @@ -41,10 +41,7 @@ pub fn 
get_secret_key(secret_key_path: &Path) -> Result { let contents = fs::read_to_string(secret_key_path)?; - Ok(contents - .as_str() - .parse::() - .map_err(SecretKeyError::SecretKeyDecodeError)?) + Ok(contents.as_str().parse().map_err(SecretKeyError::SecretKeyDecodeError)?) } Ok(false) => { if let Some(dir) = secret_key_path.parent() { diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs index 9bb803bcca8..fb27e1420c0 100644 --- a/crates/cli/util/src/parsers.rs +++ b/crates/cli/util/src/parsers.rs @@ -23,11 +23,11 @@ pub fn parse_duration_from_secs_or_ms( arg: &str, ) -> eyre::Result { if arg.ends_with("ms") { - arg.trim_end_matches("ms").parse::().map(Duration::from_millis) + arg.trim_end_matches("ms").parse().map(Duration::from_millis) } else if arg.ends_with('s') { - arg.trim_end_matches('s').parse::().map(Duration::from_secs) + arg.trim_end_matches('s').parse().map(Duration::from_secs) } else { - arg.parse::().map(Duration::from_secs) + arg.parse().map(Duration::from_secs) } } @@ -75,7 +75,7 @@ pub fn parse_socket_address(value: &str) -> eyre::Result() { + if let Ok(port) = value.parse() { return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port)) } value From 7745046b0b30cbce916606222aadd6de72beb9fa Mon Sep 17 00:00:00 2001 From: Hopium <135053852+Hopium21@users.noreply.github.com> Date: Sat, 16 Nov 2024 05:44:37 +0100 Subject: [PATCH 504/970] Fix grammar in MDBX documentation (#12580) Co-authored-by: Matthias Seitz --- crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 index 7b182325b31..0934fea1c16 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 @@ -27,7 +27,7 @@ mdbx_chk \- MDBX checking tool .SH DESCRIPTION The .B mdbx_chk -utility intended to 
check an MDBX database file. +utility is intended to check an MDBX database file. .SH OPTIONS .TP .BR \-V @@ -55,7 +55,7 @@ check, including full check of all meta-pages and actual size of database file. .BR \-w Open environment in read-write mode and lock for writing while checking. This could be impossible if environment already used by another process(s) -in an incompatible read-write mode. This allow rollback to last steady commit +in an incompatible read-write mode. This allows rollback to last steady commit (in case environment was not closed properly) and then check transaction IDs of meta-pages. Otherwise, without \fB\-w\fP option environment will be opened in read-only mode. @@ -90,7 +90,7 @@ then forcibly loads ones by sequential access and tries to lock database pages i .TP .BR \-n Open MDBX environment(s) which do not use subdirectories. -This is legacy option. For now MDBX handles this automatically. +This is a legacy option. For now MDBX handles this automatically. .SH DIAGNOSTICS Exit status is zero if no errors occur. 
Errors result in a non-zero exit status From 2dc9a063219e7c6f1a61edcba4b0e2b9d97c4aef Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Fri, 15 Nov 2024 23:59:53 -0500 Subject: [PATCH 505/970] chore(sdk): add NetworkPrimitives for NetworkManager (#12530) Co-authored-by: dkathiriya --- Cargo.lock | 1 + crates/net/network/Cargo.toml | 4 +- crates/net/network/src/builder.rs | 5 +- crates/net/network/src/config.rs | 31 ++++++----- crates/net/network/src/eth_requests.rs | 22 ++++---- crates/net/network/src/lib.rs | 7 ++- crates/net/network/src/manager.rs | 51 +++++++++--------- crates/net/network/src/network.rs | 57 ++++++++++---------- crates/net/network/src/transactions/mod.rs | 13 ++--- crates/net/network/tests/it/connect.rs | 9 ++-- crates/net/network/tests/it/startup.rs | 15 +++--- examples/bsc-p2p/src/main.rs | 6 ++- examples/custom-rlpx-subprotocol/src/main.rs | 6 +-- examples/network/src/main.rs | 5 +- examples/polygon-p2p/src/main.rs | 5 +- 15 files changed, 130 insertions(+), 107 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9514d361fbf..973ad471872 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7800,6 +7800,7 @@ dependencies = [ "reth-dns-discovery", "reth-ecies", "reth-eth-wire", + "reth-eth-wire-types", "reth-fs-util", "reth-metrics", "reth-net-banlist", diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 09f81e63e54..dde0b4a0b23 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -24,6 +24,7 @@ reth-discv4.workspace = true reth-discv5.workspace = true reth-dns-discovery.workspace = true reth-eth-wire.workspace = true +reth-eth-wire-types.workspace = true reth-ecies.workspace = true reth-tasks.workspace = true reth-transaction-pool.workspace = true @@ -111,6 +112,7 @@ serde = [ "reth-dns-discovery/serde", "reth-eth-wire/serde", "reth-provider?/serde", + "reth-eth-wire-types/serde", "alloy-consensus/serde", "alloy-eips/serde", 
"alloy-primitives/serde", @@ -118,7 +120,7 @@ serde = [ "parking_lot/serde", "rand/serde", "smallvec/serde", - "url/serde" + "url/serde", ] test-utils = [ "dep:reth-provider", diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index e6a5d956641..31038906b25 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -1,5 +1,6 @@ //! Builder support for configuring the entire setup. +use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; use reth_network_api::test_utils::PeersHandleProvider; use reth_transaction_pool::TransactionPool; use tokio::sync::mpsc; @@ -16,8 +17,8 @@ pub(crate) const ETH_REQUEST_CHANNEL_CAPACITY: usize = 256; /// A builder that can configure all components of the network. #[allow(missing_debug_implementations)] -pub struct NetworkBuilder { - pub(crate) network: NetworkManager, +pub struct NetworkBuilder { + pub(crate) network: NetworkManager, pub(crate) transactions: Tx, pub(crate) request_handler: Eth, } diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 96aef249d9f..db7b384c2b3 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -6,7 +6,9 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::NetworkStackId; use reth_dns_discovery::DnsDiscoveryConfig; -use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; +use reth_eth_wire::{ + EthNetworkPrimitives, HelloMessage, HelloMessageWithProtocols, NetworkPrimitives, Status, +}; use reth_network_peers::{mainnet_nodes, pk2id, sepolia_nodes, PeerId, TrustedPeer}; use reth_network_types::{PeersConfig, SessionsConfig}; use reth_primitives::{ForkFilter, Head}; @@ -32,7 +34,7 @@ pub fn rng_secret_key() -> SecretKey { /// All network related initialization settings. 
#[derive(Debug)] -pub struct NetworkConfig { +pub struct NetworkConfig { /// The client type that can interact with the chain. /// /// This type is used to fetch the block number after we established a session and received the @@ -66,7 +68,7 @@ pub struct NetworkConfig { /// first hardfork, `Frontier` for mainnet. pub fork_filter: ForkFilter, /// The block importer type. - pub block_import: Box, + pub block_import: Box>, /// The default mode of the network. pub network_mode: NetworkMode, /// The executor to use for spawning tasks. @@ -87,9 +89,9 @@ pub struct NetworkConfig { // === impl NetworkConfig === -impl NetworkConfig<()> { +impl NetworkConfig<(), N> { /// Convenience method for creating the corresponding builder type - pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder { + pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder { NetworkConfigBuilder::new(secret_key) } @@ -99,7 +101,7 @@ impl NetworkConfig<()> { } } -impl NetworkConfig { +impl NetworkConfig { /// Create a new instance with all mandatory fields set, rest is field with defaults. pub fn new(client: C, secret_key: SecretKey) -> Self where @@ -134,12 +136,13 @@ impl NetworkConfig { } } -impl NetworkConfig +impl NetworkConfig where C: BlockNumReader + 'static, + N: NetworkPrimitives, { /// Convenience method for calling [`NetworkManager::new`]. - pub async fn manager(self) -> Result { + pub async fn manager(self) -> Result, NetworkError> { NetworkManager::new(self).await } } @@ -164,7 +167,7 @@ where /// Builder for [`NetworkConfig`](struct.NetworkConfig.html). #[derive(Debug)] -pub struct NetworkConfigBuilder { +pub struct NetworkConfigBuilder { /// The node's secret key, from which the node's identity is derived. secret_key: SecretKey, /// How to configure discovery over DNS. 
@@ -196,7 +199,7 @@ pub struct NetworkConfigBuilder { /// Whether tx gossip is disabled tx_gossip_disabled: bool, /// The block importer type - block_import: Option>, + block_import: Option>>, /// How to instantiate transactions manager. transactions_manager_config: TransactionsManagerConfig, /// The NAT resolver for external IP @@ -206,7 +209,7 @@ pub struct NetworkConfigBuilder { // === impl NetworkConfigBuilder === #[allow(missing_docs)] -impl NetworkConfigBuilder { +impl NetworkConfigBuilder { /// Create a new builder instance with a random secret key. pub fn with_rng_secret_key() -> Self { Self::new(rng_secret_key()) @@ -480,7 +483,7 @@ impl NetworkConfigBuilder { } /// Sets the block import type. - pub fn block_import(mut self, block_import: Box) -> Self { + pub fn block_import(mut self, block_import: Box>) -> Self { self.block_import = Some(block_import); self } @@ -490,7 +493,7 @@ impl NetworkConfigBuilder { pub fn build_with_noop_provider( self, chain_spec: Arc, - ) -> NetworkConfig> + ) -> NetworkConfig, N> where ChainSpec: EthChainSpec + Hardforks + 'static, { @@ -509,7 +512,7 @@ impl NetworkConfigBuilder { /// The given client is to be used for interacting with the chain, for example fetching the /// corresponding block for a given block hash we receive from a peer in the status message when /// establishing a connection. 
- pub fn build(self, client: C) -> NetworkConfig + pub fn build(self, client: C) -> NetworkConfig where C: ChainSpecProvider, { diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 1f20be53967..8121b9675ed 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -12,8 +12,8 @@ use alloy_eips::BlockHashOrNumber; use alloy_rlp::Encodable; use futures::StreamExt; use reth_eth_wire::{ - BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, GetNodeData, GetReceipts, - HeadersDirection, NodeData, Receipts, + BlockBodies, BlockHeaders, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, GetNodeData, + GetReceipts, HeadersDirection, NetworkPrimitives, NodeData, Receipts, }; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::error::RequestResult; @@ -54,7 +54,7 @@ const SOFT_RESPONSE_LIMIT: usize = 2 * 1024 * 1024; /// This can be spawned to another task and is supposed to be run as background service. #[derive(Debug)] #[must_use = "Manager does nothing unless polled."] -pub struct EthRequestHandler { +pub struct EthRequestHandler { /// The client type that can interact with the chain. client: C, /// Used for reporting peers. @@ -62,15 +62,15 @@ pub struct EthRequestHandler { #[allow(dead_code)] peers: PeersHandle, /// Incoming request from the [`NetworkManager`](crate::NetworkManager). - incoming_requests: ReceiverStream, + incoming_requests: ReceiverStream>, /// Metrics for the eth request handler. 
metrics: EthRequestHandlerMetrics, } // === impl EthRequestHandler === -impl EthRequestHandler { +impl EthRequestHandler { /// Create a new instance - pub fn new(client: C, peers: PeersHandle, incoming: Receiver) -> Self { + pub fn new(client: C, peers: PeersHandle, incoming: Receiver>) -> Self { Self { client, peers, @@ -148,7 +148,7 @@ where &self, _peer_id: PeerId, request: GetBlockHeaders, - response: oneshot::Sender>, + response: oneshot::Sender>>, ) { self.metrics.eth_headers_requests_received_total.increment(1); let headers = self.get_headers_response(request); @@ -159,7 +159,7 @@ where &self, _peer_id: PeerId, request: GetBlockBodies, - response: oneshot::Sender>, + response: oneshot::Sender>>, ) { self.metrics.eth_bodies_requests_received_total.increment(1); let mut bodies = Vec::new(); @@ -272,7 +272,7 @@ where /// All `eth` request related to blocks delegated by the network. #[derive(Debug)] -pub enum IncomingEthRequest { +pub enum IncomingEthRequest { /// Request Block headers from the peer. /// /// The response should be sent through the channel. @@ -282,7 +282,7 @@ pub enum IncomingEthRequest { /// The specific block headers requested. request: GetBlockHeaders, /// The channel sender for the response containing block headers. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Request Block bodies from the peer. /// @@ -293,7 +293,7 @@ pub enum IncomingEthRequest { /// The specific block bodies requested. request: GetBlockBodies, /// The channel sender for the response containing block bodies. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Request Node Data from the peer. /// diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 0e433a38862..0eae99e7c50 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -46,7 +46,9 @@ //! //! ``` //! # async fn launch() { -//! use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; +//! 
use reth_network::{ +//! config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager, +//! }; //! use reth_network_peers::mainnet_nodes; //! use reth_provider::test_utils::NoopProvider; //! @@ -59,7 +61,7 @@ //! let config = NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client); //! //! // create the network instance -//! let network = NetworkManager::new(config).await.unwrap(); +//! let network = NetworkManager::::new(config).await.unwrap(); //! //! // keep a handle to the network and spawn it //! let handle = network.handle().clone(); @@ -138,6 +140,7 @@ mod state; mod swarm; pub use reth_eth_wire::{DisconnectReason, HelloMessageWithProtocols}; +pub use reth_eth_wire_types::{EthNetworkPrimitives, NetworkPrimitives}; pub use reth_network_api::{ BlockDownloaderProvider, DiscoveredEvent, DiscoveryEvent, NetworkEvent, NetworkEventListenerProvider, NetworkInfo, PeerRequest, PeerRequestSender, Peers, PeersInfo, diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 3a7f94985fc..0738be1bcac 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -29,7 +29,10 @@ use std::{ use futures::{Future, StreamExt}; use parking_lot::Mutex; -use reth_eth_wire::{capability::CapabilityMessage, Capabilities, DisconnectReason}; +use reth_eth_wire::{ + capability::CapabilityMessage, Capabilities, DisconnectReason, EthNetworkPrimitives, + NetworkPrimitives, +}; use reth_fs_util::{self as fs, FsPathError}; use reth_metrics::common::mpsc::UnboundedMeteredSender; use reth_network_api::{ @@ -76,17 +79,17 @@ use crate::{ /// include_mmd!("docs/mermaid/network-manager.mmd") #[derive(Debug)] #[must_use = "The NetworkManager does nothing unless polled"] -pub struct NetworkManager { +pub struct NetworkManager { /// The type that manages the actual network part, which includes connections. - swarm: Swarm, + swarm: Swarm, /// Underlying network handle that can be shared. 
- handle: NetworkHandle, + handle: NetworkHandle, /// Receiver half of the command channel set up between this type and the [`NetworkHandle`] - from_handle_rx: UnboundedReceiverStream, + from_handle_rx: UnboundedReceiverStream>, /// Handles block imports according to the `eth` protocol. - block_import: Box, + block_import: Box>, /// Sender for high level network events. - event_sender: EventSender, + event_sender: EventSender>>, /// Sender half to send events to the /// [`TransactionsManager`](crate::transactions::TransactionsManager) task, if configured. to_transactions_manager: Option>, @@ -103,7 +106,7 @@ pub struct NetworkManager { /// Thus, we use a bounded channel here to avoid unbounded build up if the node is flooded with /// requests. This channel size is set at /// [`ETH_REQUEST_CHANNEL_CAPACITY`](crate::builder::ETH_REQUEST_CHANNEL_CAPACITY) - to_eth_request_handler: Option>, + to_eth_request_handler: Option>>, /// Tracks the number of active session (connected peers). /// /// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`] @@ -116,7 +119,7 @@ pub struct NetworkManager { } // === impl NetworkManager === -impl NetworkManager { +impl NetworkManager { /// Sets the dedicated channel for events indented for the /// [`TransactionsManager`](crate::transactions::TransactionsManager). pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender) { @@ -126,7 +129,7 @@ impl NetworkManager { /// Sets the dedicated channel for events indented for the /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler). - pub fn set_eth_request_handler(&mut self, tx: mpsc::Sender) { + pub fn set_eth_request_handler(&mut self, tx: mpsc::Sender>) { self.to_eth_request_handler = Some(tx); } @@ -138,7 +141,7 @@ impl NetworkManager { /// Returns the [`NetworkHandle`] that can be cloned and shared. 
/// /// The [`NetworkHandle`] can be used to interact with this [`NetworkManager`] - pub const fn handle(&self) -> &NetworkHandle { + pub const fn handle(&self) -> &NetworkHandle { &self.handle } @@ -165,7 +168,7 @@ impl NetworkManager { /// The [`NetworkManager`] is an endless future that needs to be polled in order to advance the /// state of the entire network. pub async fn new( - config: NetworkConfig, + config: NetworkConfig, ) -> Result { let NetworkConfig { client, @@ -253,7 +256,7 @@ impl NetworkManager { let (to_manager_tx, from_handle_rx) = mpsc::unbounded_channel(); - let event_sender: EventSender = Default::default(); + let event_sender: EventSender>> = Default::default(); let handle = NetworkHandle::new( Arc::clone(&num_active_peers), @@ -314,14 +317,14 @@ impl NetworkManager { /// } /// ``` pub async fn builder( - config: NetworkConfig, - ) -> Result, NetworkError> { + config: NetworkConfig, + ) -> Result, NetworkError> { let network = Self::new(config).await?; Ok(network.into_builder()) } /// Create a [`NetworkBuilder`] to configure all components of the network - pub const fn into_builder(self) -> NetworkBuilder<(), ()> { + pub const fn into_builder(self) -> NetworkBuilder<(), (), N> { NetworkBuilder { network: self, transactions: (), request_handler: () } } @@ -369,7 +372,7 @@ impl NetworkManager { /// Returns a new [`FetchClient`] that can be cloned and shared. /// /// The [`FetchClient`] is the entrypoint for sending requests to the network. - pub fn fetch_client(&self) -> FetchClient { + pub fn fetch_client(&self) -> FetchClient { self.swarm.state().fetch_client() } @@ -416,7 +419,7 @@ impl NetworkManager { /// Sends an event to the [`EthRequestManager`](crate::eth_requests::EthRequestHandler) if /// configured. 
- fn delegate_eth_request(&self, event: IncomingEthRequest) { + fn delegate_eth_request(&self, event: IncomingEthRequest) { if let Some(ref reqs) = self.to_eth_request_handler { let _ = reqs.try_send(event).map_err(|e| { if let TrySendError::Full(_) = e { @@ -428,7 +431,7 @@ impl NetworkManager { } /// Handle an incoming request from the peer - fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest) { + fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest) { match req { PeerRequest::GetBlockHeaders { request, response } => { self.delegate_eth_request(IncomingEthRequest::GetBlockHeaders { @@ -469,7 +472,7 @@ impl NetworkManager { } /// Invoked after a `NewBlock` message from the peer was validated - fn on_block_import_result(&mut self, outcome: BlockImportOutcome) { + fn on_block_import_result(&mut self, outcome: BlockImportOutcome) { let BlockImportOutcome { peer, result } = outcome; match result { Ok(validated_block) => match validated_block { @@ -511,7 +514,7 @@ impl NetworkManager { } /// Handles a received Message from the peer's session. 
- fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage) { + fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage) { match msg { PeerMessage::NewBlockHashes(hashes) => { self.within_pow_or_disconnect(peer_id, |this| { @@ -551,7 +554,7 @@ impl NetworkManager { } /// Handler for received messages from a handle - fn on_handle_message(&mut self, msg: NetworkHandleMessage) { + fn on_handle_message(&mut self, msg: NetworkHandleMessage) { match msg { NetworkHandleMessage::DiscoveryListener(tx) => { self.swarm.state_mut().discovery_mut().add_listener(tx); @@ -646,7 +649,7 @@ impl NetworkManager { } } - fn on_swarm_event(&mut self, event: SwarmEvent) { + fn on_swarm_event(&mut self, event: SwarmEvent) { // handle event match event { SwarmEvent::ValidMessage { peer_id, message } => self.on_peer_message(peer_id, message), @@ -981,7 +984,7 @@ impl NetworkManager { } } -impl Future for NetworkManager { +impl Future for NetworkManager { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 4175757e0cf..1715fa63e2f 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -11,7 +11,10 @@ use enr::Enr; use parking_lot::Mutex; use reth_discv4::{Discv4, NatResolver}; use reth_discv5::Discv5; -use reth_eth_wire::{DisconnectReason, NewBlock, NewPooledTransactionHashes, SharedTransactions}; +use reth_eth_wire::{ + DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, NewBlock, + NewPooledTransactionHashes, SharedTransactions, +}; use reth_network_api::{ test_utils::{PeersHandle, PeersHandleProvider}, BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, @@ -39,20 +42,20 @@ use crate::{ /// /// See also [`NetworkManager`](crate::NetworkManager). #[derive(Clone, Debug)] -pub struct NetworkHandle { +pub struct NetworkHandle { /// The Arc'ed delegate that contains the state. 
- inner: Arc, + inner: Arc>, } // === impl NetworkHandle === -impl NetworkHandle { +impl NetworkHandle { /// Creates a single new instance. #[allow(clippy::too_many_arguments)] pub(crate) fn new( num_active_peers: Arc, listener_address: Arc>, - to_manager_tx: UnboundedSender, + to_manager_tx: UnboundedSender>, secret_key: SecretKey, local_peer_id: PeerId, peers: PeersHandle, @@ -61,7 +64,7 @@ impl NetworkHandle { tx_gossip_disabled: bool, discv4: Option, discv5: Option, - event_sender: EventSender, + event_sender: EventSender>>, nat: Option, ) -> Self { let inner = NetworkInner { @@ -89,7 +92,7 @@ impl NetworkHandle { &self.inner.local_peer_id } - fn manager(&self) -> &UnboundedSender { + fn manager(&self) -> &UnboundedSender> { &self.inner.to_manager_tx } @@ -99,7 +102,7 @@ impl NetworkHandle { } /// Sends a [`NetworkHandleMessage`] to the manager - pub(crate) fn send_message(&self, msg: NetworkHandleMessage) { + pub(crate) fn send_message(&self, msg: NetworkHandleMessage) { let _ = self.inner.to_manager_tx.send(msg); } @@ -113,12 +116,12 @@ impl NetworkHandle { /// Caution: in `PoS` this is a noop because new blocks are no longer announced over devp2p. /// Instead they are sent to the node by CL and can be requested over devp2p. /// Broadcasting new blocks is considered a protocol violation. - pub fn announce_block(&self, block: NewBlock, hash: B256) { + pub fn announce_block(&self, block: NewBlock, hash: B256) { self.send_message(NetworkHandleMessage::AnnounceBlock(block, hash)) } /// Sends a [`PeerRequest`] to the given peer's session. 
- pub fn send_request(&self, peer_id: PeerId, request: PeerRequest) { + pub fn send_request(&self, peer_id: PeerId, request: PeerRequest) { self.send_message(NetworkHandleMessage::EthRequest { peer_id, request }) } @@ -186,8 +189,8 @@ impl NetworkHandle { // === API Implementations === -impl NetworkEventListenerProvider for NetworkHandle { - fn event_listener(&self) -> EventStream { +impl NetworkEventListenerProvider for NetworkHandle { + fn event_listener(&self) -> EventStream>> { self.inner.event_sender.new_listener() } @@ -198,13 +201,13 @@ impl NetworkEventListenerProvider for NetworkHandle { } } -impl NetworkProtocols for NetworkHandle { +impl NetworkProtocols for NetworkHandle { fn add_rlpx_sub_protocol(&self, protocol: RlpxSubProtocol) { self.send_message(NetworkHandleMessage::AddRlpxSubProtocol(protocol)) } } -impl PeersInfo for NetworkHandle { +impl PeersInfo for NetworkHandle { fn num_connected_peers(&self) -> usize { self.inner.num_active_peers.load(Ordering::Relaxed) } @@ -337,13 +340,13 @@ impl Peers for NetworkHandle { } } -impl PeersHandleProvider for NetworkHandle { +impl PeersHandleProvider for NetworkHandle { fn peers_handle(&self) -> &PeersHandle { &self.inner.peers } } -impl NetworkInfo for NetworkHandle { +impl NetworkInfo for NetworkHandle { fn local_addr(&self) -> SocketAddr { *self.inner.listener_address.lock() } @@ -367,7 +370,7 @@ impl NetworkInfo for NetworkHandle { } } -impl SyncStateProvider for NetworkHandle { +impl SyncStateProvider for NetworkHandle { fn is_syncing(&self) -> bool { self.inner.is_syncing.load(Ordering::Relaxed) } @@ -380,7 +383,7 @@ impl SyncStateProvider for NetworkHandle { } } -impl NetworkSyncUpdater for NetworkHandle { +impl NetworkSyncUpdater for NetworkHandle { fn update_sync_state(&self, state: SyncState) { let future_state = state.is_syncing(); let prev_state = self.inner.is_syncing.swap(future_state, Ordering::Relaxed); @@ -396,8 +399,8 @@ impl NetworkSyncUpdater for NetworkHandle { } } -impl 
BlockDownloaderProvider for NetworkHandle { - type Client = FetchClient; +impl BlockDownloaderProvider for NetworkHandle { + type Client = FetchClient; async fn fetch_client(&self) -> Result { let (tx, rx) = oneshot::channel(); @@ -407,11 +410,11 @@ impl BlockDownloaderProvider for NetworkHandle { } #[derive(Debug)] -struct NetworkInner { +struct NetworkInner { /// Number of active peer sessions the node's currently handling. num_active_peers: Arc, /// Sender half of the message channel to the [`crate::NetworkManager`]. - to_manager_tx: UnboundedSender, + to_manager_tx: UnboundedSender>, /// The local address that accepts incoming connections. listener_address: Arc>, /// The secret key used for authenticating sessions. @@ -435,7 +438,7 @@ struct NetworkInner { /// The instance of the discv5 service discv5: Option, /// Sender for high level network events. - event_sender: EventSender, + event_sender: EventSender>>, /// The NAT resolver nat: Option, } @@ -448,7 +451,7 @@ pub trait NetworkProtocols: Send + Sync { /// Internal messages that can be passed to the [`NetworkManager`](crate::NetworkManager). #[derive(Debug)] -pub(crate) enum NetworkHandleMessage { +pub(crate) enum NetworkHandleMessage { /// Marks a peer as trusted. AddTrustedPeerId(PeerId), /// Adds an address for a peer, including its ID, kind, and socket address. @@ -458,7 +461,7 @@ pub(crate) enum NetworkHandleMessage { /// Disconnects a connection to a peer if it exists, optionally providing a disconnect reason. DisconnectPeer(PeerId, Option), /// Broadcasts an event to announce a new block to all nodes. - AnnounceBlock(NewBlock, B256), + AnnounceBlock(NewBlock, B256), /// Sends a list of transactions to the given peer. SendTransaction { /// The ID of the peer to which the transactions are sent. @@ -478,12 +481,12 @@ pub(crate) enum NetworkHandleMessage { /// The peer to send the request to. peer_id: PeerId, /// The request to send to the peer's sessions. 
- request: PeerRequest, + request: PeerRequest, }, /// Applies a reputation change to the given peer. ReputationChange(PeerId, ReputationChangeKind), /// Returns the client that can be used to interact with the network. - FetchClient(oneshot::Sender), + FetchClient(oneshot::Sender>), /// Applies a status update. StatusUpdate { /// The head status to apply. diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 4e23c8527b4..48ae61e0dd0 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -34,9 +34,10 @@ use std::{ use alloy_primitives::{TxHash, B256}; use futures::{stream::FuturesUnordered, Future, StreamExt}; use reth_eth_wire::{ - DedupPayload, EthVersion, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, - NewPooledTransactionHashes, NewPooledTransactionHashes66, NewPooledTransactionHashes68, - PooledTransactions, RequestTxHashes, Transactions, + DedupPayload, EthNetworkPrimitives, EthVersion, GetPooledTransactions, HandleMempoolData, + HandleVersionedMempoolData, NetworkPrimitives, NewPooledTransactionHashes, + NewPooledTransactionHashes66, NewPooledTransactionHashes68, PooledTransactions, + RequestTxHashes, Transactions, }; use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{ @@ -200,15 +201,15 @@ impl TransactionsHandle { /// propagate new transactions over the network. #[derive(Debug)] #[must_use = "Manager does nothing unless polled."] -pub struct TransactionsManager { +pub struct TransactionsManager { /// Access to the transaction pool. pool: Pool, /// Network access. - network: NetworkHandle, + network: NetworkHandle, /// Subscriptions to all network related events. /// /// From which we get all new incoming transaction related messages. - network_events: EventStream, + network_events: EventStream>>, /// Transaction fetcher to handle inflight and missing transaction requests. 
transaction_fetcher: TransactionFetcher, /// All currently pending transactions grouped by peers. diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index ec891e5b39a..0a17cbd563e 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -8,7 +8,7 @@ use alloy_provider::{ext::AdminApi, ProviderBuilder}; use futures::StreamExt; use reth_chainspec::MAINNET; use reth_discv4::Discv4Config; -use reth_eth_wire::{DisconnectReason, HeadersDirection}; +use reth_eth_wire::{DisconnectReason, EthNetworkPrimitives, HeadersDirection}; use reth_net_banlist::BanList; use reth_network::{ test_utils::{enr_to_peer_id, NetworkEventStream, PeerConfig, Testnet, GETH_TIMEOUT}, @@ -204,8 +204,9 @@ async fn test_connect_with_boot_nodes() { let mut discv4 = Discv4Config::builder(); discv4.add_boot_nodes(mainnet_nodes()); - let config = - NetworkConfigBuilder::new(secret_key).discovery(discv4).build(NoopProvider::default()); + let config = NetworkConfigBuilder::::new(secret_key) + .discovery(discv4) + .build(NoopProvider::default()); let network = NetworkManager::new(config).await.unwrap(); let handle = network.handle().clone(); @@ -572,7 +573,7 @@ async fn test_disconnect_incoming_when_exceeded_incoming_connections() { let secret_key = SecretKey::new(&mut rand::thread_rng()); let peers_config = PeersConfig::default().with_max_inbound(0); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .disable_discovery() .peer_config(peers_config) diff --git a/crates/net/network/tests/it/startup.rs b/crates/net/network/tests/it/startup.rs index d84ff492e5e..862281ab1ff 100644 --- a/crates/net/network/tests/it/startup.rs +++ b/crates/net/network/tests/it/startup.rs @@ -5,6 +5,7 @@ use std::{ use reth_chainspec::MAINNET; use reth_discv4::{Discv4Config, NatResolver}; +use reth_eth_wire::EthNetworkPrimitives; use reth_network::{ error::{NetworkError, 
ServiceKind}, Discovery, NetworkConfigBuilder, NetworkManager, @@ -26,7 +27,7 @@ fn is_addr_in_use_kind(err: &NetworkError, kind: ServiceKind) -> bool { #[tokio::test(flavor = "multi_thread")] async fn test_is_default_syncing() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .disable_discovery() .listener_port(0) .build(NoopProvider::default()); @@ -37,13 +38,13 @@ async fn test_is_default_syncing() { #[tokio::test(flavor = "multi_thread")] async fn test_listener_addr_in_use() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .disable_discovery() .listener_port(0) .build(NoopProvider::default()); let network = NetworkManager::new(config).await.unwrap(); let listener_port = network.local_addr().port(); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(listener_port) .disable_discovery() .build(NoopProvider::default()); @@ -72,7 +73,7 @@ async fn test_discovery_addr_in_use() { #[tokio::test(flavor = "multi_thread")] async fn test_tcp_port_node_record_no_discovery() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .disable_discovery() .build_with_noop_provider(MAINNET.clone()); @@ -90,7 +91,7 @@ async fn test_tcp_port_node_record_no_discovery() { #[tokio::test(flavor = "multi_thread")] async fn test_tcp_port_node_record_discovery() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .discovery_port(0) .disable_dns_discovery() @@ -109,7 +110,7 @@ async fn test_tcp_port_node_record_discovery() { 
#[tokio::test(flavor = "multi_thread")] async fn test_node_record_address_with_nat() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discv4_discovery() .disable_dns_discovery() @@ -125,7 +126,7 @@ async fn test_node_record_address_with_nat() { #[tokio::test(flavor = "multi_thread")] async fn test_node_record_address_with_nat_disable_discovery() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discovery() .listener_port(0) diff --git a/examples/bsc-p2p/src/main.rs b/examples/bsc-p2p/src/main.rs index e46ea4bec35..9e83f34e92f 100644 --- a/examples/bsc-p2p/src/main.rs +++ b/examples/bsc-p2p/src/main.rs @@ -14,7 +14,9 @@ use chainspec::{boot_nodes, bsc_chain_spec}; use reth_discv4::Discv4ConfigBuilder; -use reth_network::{NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager}; +use reth_network::{ + EthNetworkPrimitives, NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager, +}; use reth_network_api::PeersInfo; use reth_primitives::{ForkHash, ForkId}; use reth_tracing::{ @@ -62,7 +64,7 @@ async fn main() { // latest BSC forkId, we need to override this to allow connections from BSC nodes let fork_id = ForkId { hash: ForkHash([0x07, 0xb5, 0x43, 0x28]), next: 0 }; net_cfg.fork_filter.set_current_fork_id(fork_id); - let net_manager = NetworkManager::new(net_cfg).await.unwrap(); + let net_manager = NetworkManager::::new(net_cfg).await.unwrap(); // The network handle is our entrypoint into the network. 
let net_handle = net_manager.handle().clone(); diff --git a/examples/custom-rlpx-subprotocol/src/main.rs b/examples/custom-rlpx-subprotocol/src/main.rs index e16f71071c8..702d0e8cf5e 100644 --- a/examples/custom-rlpx-subprotocol/src/main.rs +++ b/examples/custom-rlpx-subprotocol/src/main.rs @@ -14,8 +14,8 @@ use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use reth::builder::NodeHandle; use reth_network::{ - config::SecretKey, protocol::IntoRlpxSubProtocol, NetworkConfig, NetworkManager, - NetworkProtocols, + config::SecretKey, protocol::IntoRlpxSubProtocol, EthNetworkPrimitives, NetworkConfig, + NetworkManager, NetworkProtocols, }; use reth_network_api::{test_utils::PeersHandleProvider, NetworkInfo}; use reth_node_ethereum::EthereumNode; @@ -53,7 +53,7 @@ fn main() -> eyre::Result<()> { .build_with_noop_provider(node.chain_spec()); // spawn the second network instance - let subnetwork = NetworkManager::new(net_cfg).await?; + let subnetwork = NetworkManager::::new(net_cfg).await?; let subnetwork_peer_id = *subnetwork.peer_id(); let subnetwork_peer_addr = subnetwork.local_addr(); let subnetwork_handle = subnetwork.peers_handle(); diff --git a/examples/network/src/main.rs b/examples/network/src/main.rs index 1d8f436f318..bd4f232a754 100644 --- a/examples/network/src/main.rs +++ b/examples/network/src/main.rs @@ -8,7 +8,8 @@ use futures::StreamExt; use reth_network::{ - config::rng_secret_key, NetworkConfig, NetworkEventListenerProvider, NetworkManager, + config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkEventListenerProvider, + NetworkManager, }; use reth_provider::test_utils::NoopProvider; @@ -24,7 +25,7 @@ async fn main() -> eyre::Result<()> { let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client); // create the network instance - let network = NetworkManager::new(config).await?; + let network = NetworkManager::::new(config).await?; // get a handle to the network to interact with it let handle = 
network.handle().clone(); diff --git a/examples/polygon-p2p/src/main.rs b/examples/polygon-p2p/src/main.rs index 6078ae14cb8..bcc17a24f8d 100644 --- a/examples/polygon-p2p/src/main.rs +++ b/examples/polygon-p2p/src/main.rs @@ -12,7 +12,8 @@ use chain_cfg::{boot_nodes, head, polygon_chain_spec}; use reth_discv4::Discv4ConfigBuilder; use reth_network::{ - config::NetworkMode, NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager, + config::NetworkMode, EthNetworkPrimitives, NetworkConfig, NetworkEvent, + NetworkEventListenerProvider, NetworkManager, }; use reth_tracing::{ tracing::info, tracing_subscriber::filter::LevelFilter, LayerInfo, LogFormat, RethTracer, @@ -57,7 +58,7 @@ async fn main() { discv4_cfg.add_boot_nodes(boot_nodes()).lookup_interval(interval); let net_cfg = net_cfg.set_discovery_v4(discv4_cfg.build()); - let net_manager = NetworkManager::new(net_cfg).await.unwrap(); + let net_manager = NetworkManager::::new(net_cfg).await.unwrap(); // The network handle is our entrypoint into the network. 
let net_handle = net_manager.handle(); From fc97a0cbaf8f4a2a1a864dc61f1b247c08c0dea4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 16 Nov 2024 06:04:39 +0100 Subject: [PATCH 506/970] chore: make clippy happy (#12594) --- crates/cli/commands/src/common.rs | 2 +- crates/engine/primitives/src/forkchoice.rs | 6 +++--- .../net/eth-wire/tests/pooled_transactions.rs | 5 +---- crates/node/builder/src/launch/common.rs | 2 +- crates/optimism/rpc/src/eth/call.rs | 2 +- .../prune/src/segments/static_file/headers.rs | 2 +- crates/prune/types/src/limiter.rs | 4 ++-- crates/revm/src/batch.rs | 6 +++--- crates/rpc/rpc-builder/src/lib.rs | 6 +++--- crates/rpc/rpc-eth-api/src/helpers/call.rs | 2 +- crates/stages/api/src/pipeline/mod.rs | 4 ++-- crates/storage/db/src/metrics.rs | 2 +- crates/storage/db/src/static_file/mod.rs | 2 +- .../src/providers/static_file/manager.rs | 2 +- crates/transaction-pool/src/pool/txpool.rs | 4 ++-- crates/trie/sparse/src/state.rs | 2 +- crates/trie/sparse/src/trie.rs | 6 +++--- crates/trie/trie/src/forward_cursor.rs | 2 +- .../trie/trie/src/hashed_cursor/post_state.rs | 16 +++++++------- crates/trie/trie/src/node_iter.rs | 2 +- crates/trie/trie/src/trie_cursor/in_memory.rs | 21 ++++++++++--------- crates/trie/trie/src/trie_cursor/subnode.rs | 2 +- crates/trie/trie/src/walker.rs | 4 ++-- crates/trie/trie/src/witness.rs | 4 ++-- 24 files changed, 54 insertions(+), 56 deletions(-) diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 3a9cbaa7fbb..21d24a7ff7a 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -111,7 +111,7 @@ impl> Environmen db: Arc, static_file_provider: StaticFileProvider, ) -> eyre::Result>>> { - let has_receipt_pruning = config.prune.as_ref().map_or(false, |a| a.has_receipts_pruning()); + let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); let prune_modes = config.prune.as_ref().map(|prune| 
prune.segments.clone()).unwrap_or_default(); let factory = ProviderFactory::>>::new( diff --git a/crates/engine/primitives/src/forkchoice.rs b/crates/engine/primitives/src/forkchoice.rs index 3c70b78ecdd..9d680d5a124 100644 --- a/crates/engine/primitives/src/forkchoice.rs +++ b/crates/engine/primitives/src/forkchoice.rs @@ -47,19 +47,19 @@ impl ForkchoiceStateTracker { /// Returns whether the latest received FCU is valid: [`ForkchoiceStatus::Valid`] #[allow(dead_code)] pub(crate) fn is_latest_valid(&self) -> bool { - self.latest_status().map_or(false, |s| s.is_valid()) + self.latest_status().is_some_and(|s| s.is_valid()) } /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Syncing`] #[allow(dead_code)] pub(crate) fn is_latest_syncing(&self) -> bool { - self.latest_status().map_or(false, |s| s.is_syncing()) + self.latest_status().is_some_and(|s| s.is_syncing()) } /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Invalid`] #[allow(dead_code)] pub fn is_latest_invalid(&self) -> bool { - self.latest_status().map_or(false, |s| s.is_invalid()) + self.latest_status().is_some_and(|s| s.is_invalid()) } /// Returns the last valid head hash. 
diff --git a/crates/net/eth-wire/tests/pooled_transactions.rs b/crates/net/eth-wire/tests/pooled_transactions.rs index 22c5fcc3329..3b17d04cba5 100644 --- a/crates/net/eth-wire/tests/pooled_transactions.rs +++ b/crates/net/eth-wire/tests/pooled_transactions.rs @@ -12,10 +12,7 @@ use test_fuzz::test_fuzz; #[test_fuzz] fn roundtrip_pooled_transactions(hex_data: Vec) -> Result<(), alloy_rlp::Error> { let input_rlp = &mut &hex_data[..]; - let txs: PooledTransactions = match PooledTransactions::decode(input_rlp) { - Ok(txs) => txs, - Err(e) => return Err(e), - }; + let txs: PooledTransactions = PooledTransactions::decode(input_rlp)?; // get the amount of bytes decoded in `decode` by subtracting the length of the original buf, // from the length of the remaining bytes diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 7e6571135f0..972fdc640df 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -416,7 +416,7 @@ where .with_static_files_metrics(); let has_receipt_pruning = - self.toml_config().prune.as_ref().map_or(false, |a| a.has_receipts_pruning()); + self.toml_config().prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); // Check for consistency between database and static files. If it fails, it unwinds to // the first block that's consistent between database and static files. 
diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 9b19c488889..ea0086aedfd 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -48,7 +48,7 @@ where request: TransactionRequest, ) -> Result { // Ensure that if versioned hashes are set, they're not empty - if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) { + if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) { return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err()) } diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index ea0264261af..5cd6f62643a 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ b/crates/prune/prune/src/segments/static_file/headers.rs @@ -91,7 +91,7 @@ impl> Segment bool { - self.deleted_entries_limit.as_ref().map_or(false, |limit| limit.is_limit_reached()) + self.deleted_entries_limit.as_ref().is_some_and(|limit| limit.is_limit_reached()) } /// Increments the number of deleted entries by the given number. @@ -112,7 +112,7 @@ impl PruneLimiter { /// Returns `true` if time limit is reached. pub fn is_time_limit_reached(&self) -> bool { - self.time_limit.as_ref().map_or(false, |limit| limit.is_limit_reached()) + self.time_limit.as_ref().is_some_and(|limit| limit.is_limit_reached()) } /// Returns `true` if any limit is reached. 
diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index ddb88505b8d..15ba049250f 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -106,11 +106,11 @@ impl BlockBatchRecord { !self .prune_modes .account_history - .map_or(false, |mode| mode.should_prune(block_number, tip)) && + .is_some_and(|mode| mode.should_prune(block_number, tip)) && !self .prune_modes .storage_history - .map_or(false, |mode| mode.should_prune(block_number, tip)) + .is_some_and(|mode| mode.should_prune(block_number, tip)) }) { BundleRetention::Reverts } else { @@ -143,7 +143,7 @@ impl BlockBatchRecord { // Block receipts should not be retained if self.prune_modes.receipts == Some(PruneMode::Full) || // [`PruneSegment::Receipts`] takes priority over [`PruneSegment::ContractLogs`] - self.prune_modes.receipts.map_or(false, |mode| mode.should_prune(block_number, tip)) + self.prune_modes.receipts.is_some_and(|mode| mode.should_prune(block_number, tip)) { receipts.clear(); return Ok(()) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 8af60bda187..ab68d3c88e4 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1941,17 +1941,17 @@ impl TransportRpcModuleConfig { /// Returns true if the given module is configured for the http transport. pub fn contains_http(&self, module: &RethRpcModule) -> bool { - self.http.as_ref().map_or(false, |http| http.contains(module)) + self.http.as_ref().is_some_and(|http| http.contains(module)) } /// Returns true if the given module is configured for the ws transport. pub fn contains_ws(&self, module: &RethRpcModule) -> bool { - self.ws.as_ref().map_or(false, |ws| ws.contains(module)) + self.ws.as_ref().is_some_and(|ws| ws.contains(module)) } /// Returns true if the given module is configured for the ipc transport. 
pub fn contains_ipc(&self, module: &RethRpcModule) -> bool { - self.ipc.as_ref().map_or(false, |ipc| ipc.contains(module)) + self.ipc.as_ref().is_some_and(|ipc| ipc.contains(module)) } /// Ensures that both http and ws are configured and that they are configured to use the same diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index e45590d4264..d614018a407 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -690,7 +690,7 @@ pub trait Call: LoadState> + SpawnBlocking { request: TransactionRequest, ) -> Result { // Ensure that if versioned hashes are set, they're not empty - if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) { + if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) { return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err()) } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index bcf857fbac8..39d26cd8808 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -177,7 +177,7 @@ impl Pipeline { self.progress .minimum_block_number .zip(self.max_block) - .map_or(false, |(progress, target)| progress >= target) + .is_some_and(|(progress, target)| progress >= target) { trace!( target: "sync::pipeline", @@ -393,7 +393,7 @@ impl Pipeline { let stage_reached_max_block = prev_checkpoint .zip(self.max_block) - .map_or(false, |(prev_progress, target)| prev_progress.block_number >= target); + .is_some_and(|(prev_progress, target)| prev_progress.block_number >= target); if stage_reached_max_block { warn!( target: "sync::pipeline", diff --git a/crates/storage/db/src/metrics.rs b/crates/storage/db/src/metrics.rs index fecd691ee5d..2d908c68156 100644 --- a/crates/storage/db/src/metrics.rs +++ b/crates/storage/db/src/metrics.rs @@ -347,7 +347,7 @@ impl OperationMetrics { // Record duration 
only for large values to prevent the performance hit of clock syscall // on small operations - if value_size.map_or(false, |size| size > LARGE_VALUE_THRESHOLD_BYTES) { + if value_size.is_some_and(|size| size > LARGE_VALUE_THRESHOLD_BYTES) { let start = Instant::now(); let result = f(); self.large_value_duration_seconds.record(start.elapsed()); diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index f27a574f640..071835f566b 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -38,7 +38,7 @@ pub fn iter_static_files(path: impl AsRef) -> Result>(); for entry in entries { - if entry.metadata().map_or(false, |metadata| metadata.is_file()) { + if entry.metadata().is_ok_and(|metadata| metadata.is_file()) { if let Some((segment, _)) = StaticFileSegment::parse_filename(&entry.file_name().to_string_lossy()) { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 8f6a6957502..bee42fdac83 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1107,7 +1107,7 @@ impl StaticFileProvider { }; if static_file_upper_bound - .map_or(false, |static_file_upper_bound| static_file_upper_bound >= number) + .is_some_and(|static_file_upper_bound| static_file_upper_bound >= number) { return fetch_from_static_file(self) } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 3d72d6a9f15..040deb15fcb 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -318,7 +318,7 @@ impl TxPool { // blob pool that are valid with the lower blob fee if best_transactions_attributes .blob_fee - .map_or(false, |fee| fee < self.all_transactions.pending_fees.blob_fee as u64) + .is_some_and(|fee| fee < 
self.all_transactions.pending_fees.blob_fee as u64) { let unlocked_by_blob_fee = self.blob_pool.satisfy_attributes(best_transactions_attributes); @@ -1446,7 +1446,7 @@ impl AllTransactions { fn contains_conflicting_transaction(&self, tx: &ValidPoolTransaction) -> bool { self.txs_iter(tx.transaction_id.sender) .next() - .map_or(false, |(_, existing)| tx.tx_type_conflicts_with(&existing.transaction)) + .is_some_and(|(_, existing)| tx.tx_type_conflicts_with(&existing.transaction)) } /// Additional checks for a new transaction. diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 126e05e8582..d7557a7a365 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -32,7 +32,7 @@ impl SparseStateTrie { /// Returns `true` if storage slot for account was already revealed. pub fn is_storage_slot_revealed(&self, account: &B256, slot: &B256) -> bool { - self.revealed.get(account).map_or(false, |slots| slots.contains(slot)) + self.revealed.get(account).is_some_and(|slots| slots.contains(slot)) } /// Reveal unknown trie paths from provided leaf path and its proof for the account. diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 9db1dff5313..696934d3edb 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -529,7 +529,7 @@ impl RevealedSparseTrie { let unset_branch_nibble = self .nodes .get(&child_path) - .map_or(false, move |node| match node { + .is_some_and(move |node| match node { SparseNode::Leaf { key, .. 
} => { // Get full path of the leaf node child_path.extend_from_slice_unchecked(key); @@ -665,7 +665,7 @@ impl RevealedSparseTrie { child_path.extend_from_slice_unchecked(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { RlpNode::word_rlp(&hash) - } else if buffers.rlp_node_stack.last().map_or(false, |e| e.0 == child_path) { + } else if buffers.rlp_node_stack.last().is_some_and(|e| e.0 == child_path) { let (_, child) = buffers.rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); @@ -699,7 +699,7 @@ impl RevealedSparseTrie { .resize(buffers.branch_child_buf.len(), Default::default()); let mut added_children = false; for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { - if buffers.rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) { + if buffers.rlp_node_stack.last().is_some_and(|e| &e.0 == child_path) { let (_, child) = buffers.rlp_node_stack.pop().unwrap(); // Insert children in the resulting buffer in a normal order, because // initially we iterated in reverse. diff --git a/crates/trie/trie/src/forward_cursor.rs b/crates/trie/trie/src/forward_cursor.rs index 6db214bb51a..745fc351b90 100644 --- a/crates/trie/trie/src/forward_cursor.rs +++ b/crates/trie/trie/src/forward_cursor.rs @@ -30,7 +30,7 @@ where /// exhausted. Returns the first entry for which `comparator` returns `false` or `None`. 
fn advance_while_false(&mut self, comparator: impl Fn(&K) -> bool) -> Option<(K, V)> { let mut entry = self.entries.get(self.index); - while entry.map_or(false, |entry| comparator(&entry.0)) { + while entry.is_some_and(|entry| comparator(&entry.0)) { self.index += 1; entry = self.entries.get(self.index); } diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index 67891419152..7521bb1b2bc 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -82,14 +82,14 @@ where // It's an exact match, return the account from post state without looking up in the // database. - if post_state_entry.map_or(false, |entry| entry.0 == key) { + if post_state_entry.is_some_and(|entry| entry.0 == key) { return Ok(post_state_entry) } // It's not an exact match, reposition to the first greater or equal account that wasn't // cleared. let mut db_entry = self.cursor.seek(key)?; - while db_entry.as_ref().map_or(false, |(address, _)| self.is_account_cleared(address)) { + while db_entry.as_ref().is_some_and(|(address, _)| self.is_account_cleared(address)) { db_entry = self.cursor.next()?; } @@ -103,7 +103,7 @@ where // If post state was given precedence or account was cleared, move the cursor forward. 
let mut db_entry = self.cursor.seek(last_account)?; - while db_entry.as_ref().map_or(false, |(address, _)| { + while db_entry.as_ref().is_some_and(|(address, _)| { address <= &last_account || self.is_account_cleared(address) }) { db_entry = self.cursor.next()?; @@ -200,14 +200,14 @@ where let post_state_cursor = post_state_storage.map(|s| ForwardInMemoryCursor::new(&s.non_zero_valued_slots)); let cleared_slots = post_state_storage.map(|s| &s.zero_valued_slots); - let storage_wiped = post_state_storage.map_or(false, |s| s.wiped); + let storage_wiped = post_state_storage.is_some_and(|s| s.wiped); Self { cursor, post_state_cursor, cleared_slots, storage_wiped, last_slot: None } } /// Check if the slot was zeroed out in the post state. /// The database is not checked since it already has no zero-valued slots. fn is_slot_zero_valued(&self, slot: &B256) -> bool { - self.cleared_slots.map_or(false, |s| s.contains(slot)) + self.cleared_slots.is_some_and(|s| s.contains(slot)) } /// Find the storage entry in post state or database that's greater or equal to provided subkey. @@ -217,14 +217,14 @@ where // If database storage was wiped or it's an exact match, // return the storage slot from post state without looking up in the database. - if self.storage_wiped || post_state_entry.map_or(false, |entry| entry.0 == subkey) { + if self.storage_wiped || post_state_entry.is_some_and(|entry| entry.0 == subkey) { return Ok(post_state_entry) } // It's not an exact match and storage was not wiped, // reposition to the first greater or equal account. 
let mut db_entry = self.cursor.seek(subkey)?; - while db_entry.as_ref().map_or(false, |entry| self.is_slot_zero_valued(&entry.0)) { + while db_entry.as_ref().is_some_and(|entry| self.is_slot_zero_valued(&entry.0)) { db_entry = self.cursor.next()?; } @@ -248,7 +248,7 @@ where let mut db_entry = self.cursor.seek(last_slot)?; while db_entry .as_ref() - .map_or(false, |entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) + .is_some_and(|entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) { db_entry = self.cursor.next()?; } diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs index feebe36e16e..60219eedd7c 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -106,7 +106,7 @@ where if let Some((hashed_key, value)) = self.current_hashed_entry.take() { // If the walker's key is less than the unpacked hashed key, // reset the checked status and continue - if self.walker.key().map_or(false, |key| key < &Nibbles::unpack(hashed_key)) { + if self.walker.key().is_some_and(|key| key < &Nibbles::unpack(hashed_key)) { self.current_walker_key_checked = false; continue } diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 851670f4267..4a34fd31ad1 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -79,13 +79,13 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> { exact: bool, ) -> Result, DatabaseError> { let in_memory = self.in_memory_cursor.seek(&key); - if exact && in_memory.as_ref().map_or(false, |entry| entry.0 == key) { + if exact && in_memory.as_ref().is_some_and(|entry| entry.0 == key) { return Ok(in_memory) } // Reposition the cursor to the first greater or equal node that wasn't removed. 
let mut db_entry = self.cursor.seek(key.clone())?; - while db_entry.as_ref().map_or(false, |entry| self.removed_nodes.contains(&entry.0)) { + while db_entry.as_ref().is_some_and(|entry| self.removed_nodes.contains(&entry.0)) { db_entry = self.cursor.next()?; } @@ -105,7 +105,7 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> { let mut db_entry = self.cursor.seek(last.clone())?; while db_entry .as_ref() - .map_or(false, |entry| entry.0 < last || self.removed_nodes.contains(&entry.0)) + .is_some_and(|entry| entry.0 < last || self.removed_nodes.contains(&entry.0)) { db_entry = self.cursor.next()?; } @@ -184,7 +184,7 @@ impl<'a, C> InMemoryStorageTrieCursor<'a, C> { ) -> Self { let in_memory_cursor = updates.map(|u| ForwardInMemoryCursor::new(&u.storage_nodes)); let removed_nodes = updates.map(|u| &u.removed_nodes); - let storage_trie_cleared = updates.map_or(false, |u| u.is_deleted); + let storage_trie_cleared = updates.is_some_and(|u| u.is_deleted); Self { hashed_address, cursor, @@ -204,16 +204,17 @@ impl InMemoryStorageTrieCursor<'_, C> { ) -> Result, DatabaseError> { let in_memory = self.in_memory_cursor.as_mut().and_then(|c| c.seek(&key)); if self.storage_trie_cleared || - (exact && in_memory.as_ref().map_or(false, |entry| entry.0 == key)) + (exact && in_memory.as_ref().is_some_and(|entry| entry.0 == key)) { return Ok(in_memory.filter(|(nibbles, _)| !exact || nibbles == &key)) } // Reposition the cursor to the first greater or equal node that wasn't removed. let mut db_entry = self.cursor.seek(key.clone())?; - while db_entry.as_ref().map_or(false, |entry| { - self.removed_nodes.as_ref().map_or(false, |r| r.contains(&entry.0)) - }) { + while db_entry + .as_ref() + .is_some_and(|entry| self.removed_nodes.as_ref().is_some_and(|r| r.contains(&entry.0))) + { db_entry = self.cursor.next()?; } @@ -234,8 +235,8 @@ impl InMemoryStorageTrieCursor<'_, C> { // Reposition the cursor to the first greater or equal node that wasn't removed. 
let mut db_entry = self.cursor.seek(last.clone())?; - while db_entry.as_ref().map_or(false, |entry| { - entry.0 < last || self.removed_nodes.as_ref().map_or(false, |r| r.contains(&entry.0)) + while db_entry.as_ref().is_some_and(|entry| { + entry.0 < last || self.removed_nodes.as_ref().is_some_and(|r| r.contains(&entry.0)) }) { db_entry = self.cursor.next()?; } diff --git a/crates/trie/trie/src/trie_cursor/subnode.rs b/crates/trie/trie/src/trie_cursor/subnode.rs index 9d5a2770b26..c928028eb15 100644 --- a/crates/trie/trie/src/trie_cursor/subnode.rs +++ b/crates/trie/trie/src/trie_cursor/subnode.rs @@ -89,7 +89,7 @@ impl CursorSubNode { /// Returns `true` if the current nibble has a root hash. pub fn hash_flag(&self) -> bool { - self.node.as_ref().map_or(false, |node| match self.nibble { + self.node.as_ref().is_some_and(|node| match self.nibble { // This guy has it -1 => node.root_hash.is_some(), // Or get it from the children diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index aaff293b379..774fa64a0ef 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -88,7 +88,7 @@ impl TrieWalker { /// Indicates whether the children of the current node are present in the trie. pub fn children_are_in_trie(&self) -> bool { - self.stack.last().map_or(false, |n| n.tree_flag()) + self.stack.last().is_some_and(|n| n.tree_flag()) } /// Returns the next unprocessed key in the trie. 
@@ -112,7 +112,7 @@ impl TrieWalker { self.can_skip_current_node = self .stack .last() - .map_or(false, |node| !self.changes.contains(node.full_key()) && node.hash_flag()); + .is_some_and(|node| !self.changes.contains(node.full_key()) && node.hash_flag()); } } diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 39d82a7bda7..8290f158062 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -292,7 +292,7 @@ where let mut keys = trie_nodes.keys().peekable(); let mut ignored = HashSet::::default(); while let Some(key) = keys.next() { - if keys.peek().map_or(false, |next| next.starts_with(key)) { + if keys.peek().is_some_and(|next| next.starts_with(key)) { ignored.insert(key.clone()); } } @@ -306,7 +306,7 @@ where if hash_builder.key.starts_with(&parent_branch_path) || trie_nodes .peek() - .map_or(false, |next| next.0.starts_with(&parent_branch_path)) + .is_some_and(|next| next.0.starts_with(&parent_branch_path)) { hash_builder.add_branch(path, branch_hash, false); } else { From e182df71a1150231dc4e2ecf89395a68b644c748 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 16 Nov 2024 06:25:02 +0100 Subject: [PATCH 507/970] chore(sdk): Remove duplicate trait `BlockHeader` (#12584) --- crates/primitives-traits/src/header/mod.rs | 64 ------------------- crates/primitives-traits/src/header/sealed.rs | 9 +-- 2 files changed, 5 insertions(+), 68 deletions(-) diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index ecd5725838e..b36a74471ff 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -7,72 +7,8 @@ pub use error::HeaderError; #[cfg(any(test, feature = "test-utils", feature = "arbitrary"))] pub mod test_utils; -use alloy_consensus::Header; -use alloy_primitives::{Address, BlockNumber, B256, U256}; - /// Bincode-compatible header type serde implementations. 
#[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { pub use super::sealed::serde_bincode_compat::SealedHeader; } - -/// Trait for extracting specific Ethereum block data from a header -pub trait BlockHeader { - /// Retrieves the beneficiary (miner) of the block - fn beneficiary(&self) -> Address; - - /// Retrieves the difficulty of the block - fn difficulty(&self) -> U256; - - /// Retrieves the block number - fn number(&self) -> BlockNumber; - - /// Retrieves the gas limit of the block - fn gas_limit(&self) -> u64; - - /// Retrieves the timestamp of the block - fn timestamp(&self) -> u64; - - /// Retrieves the mix hash of the block - fn mix_hash(&self) -> B256; - - /// Retrieves the base fee per gas of the block, if available - fn base_fee_per_gas(&self) -> Option; - - /// Retrieves the excess blob gas of the block, if available - fn excess_blob_gas(&self) -> Option; -} - -impl BlockHeader for Header { - fn beneficiary(&self) -> Address { - self.beneficiary - } - - fn difficulty(&self) -> U256 { - self.difficulty - } - - fn number(&self) -> BlockNumber { - self.number - } - - fn gas_limit(&self) -> u64 { - self.gas_limit - } - - fn timestamp(&self) -> u64 { - self.timestamp - } - - fn mix_hash(&self) -> B256 { - self.mix_hash - } - - fn base_fee_per_gas(&self) -> Option { - self.base_fee_per_gas - } - - fn excess_blob_gas(&self) -> Option { - self.excess_blob_gas - } -} diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index dab54977c51..f4a365e1512 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,15 +1,16 @@ -use super::Header; -use crate::InMemorySize; -use alloy_consensus::Sealed; +use core::mem; + +use alloy_consensus::{Header, Sealed}; use alloy_eips::BlockNumHash; use alloy_primitives::{keccak256, BlockHash, Sealable, B256}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; -use core::mem; use 
derive_more::{AsRef, Deref}; use reth_codecs::add_arbitrary_tests; use serde::{Deserialize, Serialize}; +use crate::InMemorySize; + /// A helper struct to store the block number/hash and its parent hash. #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct BlockWithParent { From 3614a37ff6e7cf08c5015f0af866d68618d05649 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 16 Nov 2024 10:08:25 +0100 Subject: [PATCH 508/970] test(tx-pool): add more unit tests for parked pool (#12591) --- crates/transaction-pool/src/pool/parked.rs | 142 +++++++++++++++++++++ 1 file changed, 142 insertions(+) diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 407f04fd5be..29216af47d0 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -916,4 +916,146 @@ mod tests { SenderTransactionCount { count: 1, last_submission_id: 3 } ); } + + #[test] + fn test_pool_size() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Create a transaction with a specific size and add it to the pool + let tx = f.validated_arc(MockTransaction::eip1559().set_size(1024).clone()); + pool.add_transaction(tx); + + // Assert that the reported size of the pool is correct + assert_eq!(pool.size(), 1024); + } + + #[test] + fn test_pool_len() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Initially, the pool should have zero transactions + assert_eq!(pool.len(), 0); + + // Add a transaction to the pool and check the length + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx); + assert_eq!(pool.len(), 1); + } + + #[test] + fn test_pool_contains() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Create a transaction and get its ID + let tx = 
f.validated_arc(MockTransaction::eip1559()); + let tx_id = *tx.id(); + + // Before adding, the transaction should not be in the pool + assert!(!pool.contains(&tx_id)); + + // After adding, the transaction should be present in the pool + pool.add_transaction(tx); + assert!(pool.contains(&tx_id)); + } + + #[test] + fn test_get_transaction() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add a transaction to the pool and get its ID + let tx = f.validated_arc(MockTransaction::eip1559()); + let tx_id = *tx.id(); + pool.add_transaction(tx.clone()); + + // Retrieve the transaction using `get()` and assert it matches the added transaction + let retrieved = pool.get(&tx_id).expect("Transaction should exist in the pool"); + assert_eq!(retrieved.transaction.id(), tx.id()); + } + + #[test] + fn test_all_transactions() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add two transactions to the pool + let tx1 = f.validated_arc(MockTransaction::eip1559()); + let tx2 = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx1.clone()); + pool.add_transaction(tx2.clone()); + + // Collect all transaction IDs from the pool + let all_txs: Vec<_> = pool.all().map(|tx| *tx.id()).collect(); + assert_eq!(all_txs.len(), 2); + + // Check that the IDs of both transactions are present + assert!(all_txs.contains(tx1.id())); + assert!(all_txs.contains(tx2.id())); + } + + #[test] + fn test_truncate_pool_edge_case() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add two transactions to the pool + let tx1 = f.validated_arc(MockTransaction::eip1559()); + let tx2 = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx1); + pool.add_transaction(tx2); + + // Set a limit that matches the current number of transactions + let limit = SubPoolLimit { max_txs: 2, max_size: usize::MAX }; + let removed = 
pool.truncate_pool(limit); + + // No transactions should be removed + assert!(removed.is_empty()); + + // Set a stricter limit that requires truncating one transaction + let limit = SubPoolLimit { max_txs: 1, max_size: usize::MAX }; + let removed = pool.truncate_pool(limit); + + // One transaction should be removed, and the pool should have one left + assert_eq!(removed.len(), 1); + assert_eq!(pool.len(), 1); + } + + #[test] + fn test_satisfy_base_fee_transactions() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add two transactions with different max fees + let tx1 = f.validated_arc(MockTransaction::eip1559().set_max_fee(100).clone()); + let tx2 = f.validated_arc(MockTransaction::eip1559().set_max_fee(200).clone()); + pool.add_transaction(tx1); + pool.add_transaction(tx2.clone()); + + // Check that only the second transaction satisfies the base fee requirement + let satisfied = pool.satisfy_base_fee_transactions(150); + assert_eq!(satisfied.len(), 1); + assert_eq!(satisfied[0].id(), tx2.id()) + } + + #[test] + fn test_remove_transaction() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add a transaction to the pool and get its ID + let tx = f.validated_arc(MockTransaction::eip1559()); + let tx_id = *tx.id(); + pool.add_transaction(tx); + + // Ensure the transaction is in the pool before removal + assert!(pool.contains(&tx_id)); + + // Remove the transaction and check that it is no longer in the pool + let removed = pool.remove_transaction(&tx_id); + assert!(removed.is_some()); + assert!(!pool.contains(&tx_id)); + } } From 1945cd8b425c1a90f6be6cdeabdc0a1cfa250203 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 16 Nov 2024 10:37:14 +0100 Subject: [PATCH 509/970] chore: move hashing writer trait (#12597) --- crates/storage/provider/src/traits/mod.rs | 3 --- .../{provider/src/traits => storage-api/src}/hashing.rs | 0 crates/storage/storage-api/src/lib.rs 
| 2 ++ 3 files changed, 2 insertions(+), 3 deletions(-) rename crates/storage/{provider/src/traits => storage-api/src}/hashing.rs (100%) diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 5542ea168ab..dce9dd77bc2 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -17,9 +17,6 @@ pub use state::{StateChangeWriter, StateWriter}; pub use reth_chainspec::ChainSpecProvider; -mod hashing; -pub use hashing::HashingWriter; - mod trie; pub use trie::{StorageTrieWriter, TrieWriter}; diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/storage-api/src/hashing.rs similarity index 100% rename from crates/storage/provider/src/traits/hashing.rs rename to crates/storage/storage-api/src/hashing.rs diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 13a44b482a6..7b7ad761476 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -60,5 +60,7 @@ pub mod noop; mod history; pub use history::*; +mod hashing; +pub use hashing::*; mod stats; pub use stats::*; From d52c7194d122e5940692e6ff63dd01448bc70b0a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 16 Nov 2024 11:32:56 +0100 Subject: [PATCH 510/970] chore: move triewriter trait (#12598) --- crates/storage/provider/src/traits/mod.rs | 3 -- crates/storage/provider/src/traits/trie.rs | 36 ---------------------- crates/storage/storage-api/src/trie.rs | 34 ++++++++++++++++++-- 3 files changed, 32 insertions(+), 41 deletions(-) delete mode 100644 crates/storage/provider/src/traits/trie.rs diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index dce9dd77bc2..69f053936bb 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -17,9 +17,6 @@ pub use state::{StateChangeWriter, StateWriter}; pub use 
reth_chainspec::ChainSpecProvider; -mod trie; -pub use trie::{StorageTrieWriter, TrieWriter}; - mod static_file_provider; pub use static_file_provider::StaticFileProviderFactory; diff --git a/crates/storage/provider/src/traits/trie.rs b/crates/storage/provider/src/traits/trie.rs deleted file mode 100644 index 2edb4e072dd..00000000000 --- a/crates/storage/provider/src/traits/trie.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::collections::HashMap; - -use alloy_primitives::B256; -use auto_impl::auto_impl; -use reth_storage_errors::provider::ProviderResult; -use reth_trie::updates::{StorageTrieUpdates, TrieUpdates}; - -/// Trie Writer -#[auto_impl(&, Arc, Box)] -pub trait TrieWriter: Send + Sync { - /// Writes trie updates to the database. - /// - /// Returns the number of entries modified. - fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult; -} - -/// Storage Trie Writer -#[auto_impl(&, Arc, Box)] -pub trait StorageTrieWriter: Send + Sync { - /// Writes storage trie updates from the given storage trie map. - /// - /// First sorts the storage trie updates by the hashed address key, writing in sorted order. - /// - /// Returns the number of entries modified. - fn write_storage_trie_updates( - &self, - storage_tries: &HashMap, - ) -> ProviderResult; - - /// Writes storage trie updates for the given hashed address. 
- fn write_individual_storage_trie_updates( - &self, - hashed_address: B256, - updates: &StorageTrieUpdates, - ) -> ProviderResult; -} diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index f7d41066d06..c8f12da0716 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -4,8 +4,8 @@ use alloy_primitives::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, - TrieInput, + updates::{StorageTrieUpdates, TrieUpdates}, + AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, TrieInput, }; /// A type that can compute the state root of a given post state. @@ -85,3 +85,33 @@ pub trait StateProofProvider: Send + Sync { target: HashedPostState, ) -> ProviderResult>; } + +/// Trie Writer +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait TrieWriter: Send + Sync { + /// Writes trie updates to the database. + /// + /// Returns the number of entries modified. + fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult; +} + +/// Storage Trie Writer +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait StorageTrieWriter: Send + Sync { + /// Writes storage trie updates from the given storage trie map. + /// + /// First sorts the storage trie updates by the hashed address key, writing in sorted order. + /// + /// Returns the number of entries modified. + fn write_storage_trie_updates( + &self, + storage_tries: &std::collections::HashMap, + ) -> ProviderResult; + + /// Writes storage trie updates for the given hashed address. 
+ fn write_individual_storage_trie_updates( + &self, + hashed_address: B256, + updates: &StorageTrieUpdates, + ) -> ProviderResult; +} From d9ed07a367f593605b76e9360c7e8711c6f7c041 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 16 Nov 2024 11:33:09 +0100 Subject: [PATCH 511/970] chore: remove revm-primitives re-export (#12599) --- Cargo.lock | 3 ++- .../src/commands/debug_cmd/build_block.rs | 10 +++++--- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/ethereum/evm/src/lib.rs | 2 +- crates/ethereum/payload/Cargo.toml | 1 - crates/ethereum/payload/src/lib.rs | 7 +++--- crates/node/builder/Cargo.toml | 1 + crates/node/builder/src/builder/mod.rs | 15 ++++++------ crates/optimism/evm/src/lib.rs | 16 ++++++------- crates/optimism/payload/src/builder.rs | 11 ++++----- crates/optimism/rpc/src/error.rs | 2 +- crates/optimism/rpc/src/eth/call.rs | 5 ++-- crates/optimism/rpc/src/eth/pending_block.rs | 6 ++--- crates/payload/primitives/Cargo.toml | 2 ++ crates/payload/primitives/src/error.rs | 2 +- crates/primitives/src/lib.rs | 1 - crates/primitives/src/receipt.rs | 3 +-- crates/revm/src/cached.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 12 ++++++---- .../rpc/rpc-eth-api/src/helpers/estimate.rs | 9 ++++---- .../rpc-eth-api/src/helpers/pending_block.rs | 23 +++++++++---------- crates/rpc/rpc-eth-types/src/error/mod.rs | 2 +- crates/rpc/rpc/src/eth/bundle.rs | 23 ++++++++----------- crates/rpc/rpc/src/eth/sim_bundle.rs | 6 ++--- crates/trie/db/src/state.rs | 3 +-- .../custom-beacon-withdrawals/src/main.rs | 12 ++++------ examples/custom-evm/src/main.rs | 8 ++----- examples/stateful-precompile/src/main.rs | 10 ++++---- 28 files changed, 94 insertions(+), 105 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 973ad471872..2b6ea4f3431 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7463,7 +7463,6 @@ dependencies = [ "reth-transaction-pool", "reth-trie", "revm", - "revm-primitives", "tracing", ] @@ -8001,6 +8000,7 @@ dependencies = [ 
"reth-tokio-util", "reth-tracing", "reth-transaction-pool", + "revm-primitives", "secp256k1", "tempfile", "tokio", @@ -8482,6 +8482,7 @@ dependencies = [ "reth-errors", "reth-primitives", "reth-transaction-pool", + "revm-primitives", "serde", "thiserror 1.0.69", "tokio", diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 30af4c61c53..89eca6b776f 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -27,14 +27,18 @@ use reth_node_api::{ }; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use reth_primitives::{ - revm_primitives::KzgSettings, BlobTransaction, PooledTransactionsElement, SealedBlock, - SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, + BlobTransaction, PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, SealedHeader, + Transaction, TransactionSigned, }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::{cached::CachedReads, database::StateProviderDatabase, primitives::EnvKzgSettings}; +use reth_revm::{ + cached::CachedReads, + database::StateProviderDatabase, + primitives::{EnvKzgSettings, KzgSettings}, +}; use reth_stages::StageId; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index c48e1548434..1a8a390e99d 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1388,7 +1388,6 @@ mod tests { use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, - revm_primitives::AccountInfo, Account, BlockBody, Transaction, 
TransactionSigned, TransactionSignedEcRecovered, }; use reth_provider::{ @@ -1398,6 +1397,7 @@ mod tests { }, ProviderFactory, }; + use reth_revm::primitives::AccountInfo; use reth_stages_api::StageCheckpoint; use reth_trie::{root::state_root_unhashed, StateRoot}; use std::collections::HashMap; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 11e4acd4bfc..c8ed58df03b 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -201,10 +201,10 @@ mod tests { use alloy_primitives::{B256, U256}; use reth_chainspec::{Chain, ChainSpec, MAINNET}; use reth_evm::execute::ProviderError; - use reth_primitives::revm_primitives::{BlockEnv, CfgEnv, SpecId}; use reth_revm::{ db::{CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, + primitives::{BlockEnv, CfgEnv, SpecId}, JournaledState, }; use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index 443e837b2ed..a29cc473362 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -30,7 +30,6 @@ reth-chainspec.workspace = true # ethereum revm.workspace = true -revm-primitives.workspace = true # alloy alloy-eips.workspace = true diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index c94cd8bb728..7d795c510e2 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -26,7 +26,6 @@ use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ proofs::{self}, - revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, Block, BlockBody, EthereumHardforks, Receipt, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; @@ -38,10 +37,12 @@ use reth_transaction_pool::{ use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, 
State}, - primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, + primitives::{ + calc_excess_blob_gas, BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, + InvalidTransaction, ResultAndState, TxEnv, + }, DatabaseCommit, }; -use revm_primitives::{calc_excess_blob_gas, TxEnv}; use std::sync::Arc; use tracing::{debug, trace, warn}; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 09bdd8b2269..b0b62d1b2ed 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -63,6 +63,7 @@ reth-transaction-pool.workspace = true alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } alloy-consensus.workspace = true +revm-primitives.workspace = true ## async futures.workspace = true diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 2e00b08f8a5..89892ed5985 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -2,13 +2,6 @@ #![allow(clippy::type_complexity, missing_debug_implementations)] -pub mod add_ons; -mod states; - -pub use states::*; - -use std::sync::Arc; - use crate::{ common::WithConfigs, components::NodeComponentsBuilder, @@ -38,13 +31,19 @@ use reth_node_core::{ node_config::NodeConfig, primitives::Head, }; -use reth_primitives::revm_primitives::EnvKzgSettings; use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, FullProvider}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; +use revm_primitives::EnvKzgSettings; use secp256k1::SecretKey; +use std::sync::Arc; use tracing::{info, trace, warn}; +pub mod add_ons; + +mod states; +pub use states::*; + /// The adapter type for a reth node with the builtin provider type // Note: we need to hardcode this because custom components might depend on it in associated types. 
pub type RethFullAdapter = FullNodeTypesAdapter< diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index dafb1676ebd..9569c1cb8b5 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -17,12 +17,12 @@ use alloy_consensus::Header; use alloy_primitives::{Address, U256}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; use reth_optimism_chainspec::{DecodeError, OpChainSpec}; -use reth_primitives::{ - revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - transaction::FillTxEnv, - Head, TransactionSigned, +use reth_primitives::{transaction::FillTxEnv, Head, TransactionSigned}; +use reth_revm::{ + inspector_handle_register, + primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, + Database, Evm, EvmBuilder, GetInspector, }; -use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_bedrock}; @@ -211,14 +211,12 @@ mod tests { AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit, }; use reth_optimism_chainspec::BASE_MAINNET; - use reth_primitives::{ - revm_primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, - Account, Log, Receipt, Receipts, SealedBlockWithSenders, TxType, - }; + use reth_primitives::{Account, Log, Receipt, Receipts, SealedBlockWithSenders, TxType}; use reth_revm::{ db::{BundleState, CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, + primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, JournaledState, }; use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index d0eb464ae02..7047c587b0c 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -17,11 +17,7 @@ use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; use reth_optimism_forks::OpHardforks; use 
reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_payload_util::PayloadTransactions; -use reth_primitives::{ - proofs, - revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Receipt, SealedHeader, TransactionSigned, TxType, -}; +use reth_primitives::{proofs, Block, BlockBody, Receipt, SealedHeader, TransactionSigned, TxType}; use reth_provider::{ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ @@ -30,7 +26,10 @@ use reth_transaction_pool::{ use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, - primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState, TxEnv}, + primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, InvalidTransaction, + ResultAndState, TxEnv, + }, Database, DatabaseCommit, }; use tracing::{debug, trace, warn}; diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 1dd7a639eac..caafe798c81 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -3,10 +3,10 @@ use alloy_rpc_types_eth::{error::EthRpcErrorCode, BlockError}; use jsonrpsee_types::error::INTERNAL_ERROR_CODE; use reth_optimism_evm::OpBlockExecutionError; -use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; use reth_rpc_eth_api::AsEthApiError; use reth_rpc_eth_types::EthApiError; use reth_rpc_server_types::result::{internal_rpc_err, rpc_err}; +use revm::primitives::{InvalidTransaction, OptimismInvalidTransaction}; /// Optimism specific errors, that extend [`EthApiError`]. 
#[derive(Debug, thiserror::Error)] diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index ea0086aedfd..9495a359e32 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,15 +1,14 @@ +use crate::{OpEthApi, OpEthApiError}; use alloy_consensus::Header; use alloy_primitives::{Bytes, TxKind, U256}; use alloy_rpc_types_eth::transaction::TransactionRequest; use reth_evm::ConfigureEvm; -use reth_primitives::revm_primitives::{BlockEnv, OptimismFields, TxEnv}; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, FromEthApiError, IntoEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{revm_utils::CallFees, RpcInvalidTransactionError}; - -use crate::{OpEthApi, OpEthApiError}; +use revm::primitives::{BlockEnv, OptimismFields, TxEnv}; impl EthCall for OpEthApi where diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 8356d72dbdc..782f78dd4aa 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,12 +1,13 @@ //! Loads OP pending block for a RPC response. 
+use crate::OpEthApi; use alloy_consensus::Header; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{BlockNumber, B256}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_primitives::{revm_primitives::BlockEnv, Receipt, SealedBlockWithSenders}; +use reth_primitives::{Receipt, SealedBlockWithSenders}; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, ReceiptProvider, StateProviderFactory, @@ -17,8 +18,7 @@ use reth_rpc_eth_api::{ }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_transaction_pool::TransactionPool; - -use crate::OpEthApi; +use revm::primitives::BlockEnv; impl LoadPendingBlock for OpEthApi where diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index 951108e7da3..b1a115f12c8 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -25,6 +25,8 @@ alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["serde"] } op-alloy-rpc-types-engine = { workspace = true, optional = true } +revm-primitives.workspace = true + # async async-trait.workspace = true tokio = { workspace = true, features = ["sync"] } diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index ab222f5f6ef..82891919feb 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -2,8 +2,8 @@ use alloy_primitives::B256; use reth_errors::{ProviderError, RethError}; -use reth_primitives::revm_primitives::EVMError; use reth_transaction_pool::BlobStoreError; +use revm_primitives::EVMError; use tokio::sync::oneshot; /// Possible error variants during payload building. 
diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 87bf254edab..45067d60079 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -53,7 +53,6 @@ pub use transaction::{ // Re-exports pub use reth_ethereum_forks::*; -pub use revm_primitives::{self, JumpTable}; #[cfg(any(test, feature = "arbitrary"))] pub use arbitrary; diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 41397181149..b7138183d11 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -545,8 +545,7 @@ impl Encodable for ReceiptWithBloomEncoder<'_> { #[cfg(test)] mod tests { use super::*; - use crate::revm_primitives::Bytes; - use alloy_primitives::{address, b256, bytes, hex_literal::hex}; + use alloy_primitives::{address, b256, bytes, hex_literal::hex, Bytes}; use reth_codecs::Compact; #[test] diff --git a/crates/revm/src/cached.rs b/crates/revm/src/cached.rs index 88a41e1d895..5d5262adc5b 100644 --- a/crates/revm/src/cached.rs +++ b/crates/revm/src/cached.rs @@ -4,7 +4,7 @@ use alloy_primitives::{ Address, B256, U256, }; use core::cell::RefCell; -use reth_primitives::revm_primitives::{ +use revm::primitives::{ db::{Database, DatabaseRef}, AccountInfo, Bytecode, }; diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index d614018a407..1eade554fc1 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -18,14 +18,16 @@ use alloy_rpc_types_eth::{ use futures::Future; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm_primitives::{ +use reth_primitives::TransactionSigned; +use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; +use reth_revm::{ + database::StateProviderDatabase, + db::CacheDB, + primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, ResultAndState, TxEnv, }, - 
TransactionSigned, + DatabaseRef, }; -use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; -use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; use reth_rpc_eth_types::{ cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, error::ensure_success, diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index 37a68577fb0..465c33ada38 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -6,11 +6,12 @@ use alloy_primitives::U256; use alloy_rpc_types_eth::{state::StateOverride, transaction::TransactionRequest, BlockId}; use futures::Future; use reth_chainspec::{EthChainSpec, MIN_TRANSACTION_GAS}; -use reth_primitives::revm_primitives::{ - BlockEnv, CfgEnvWithHandlerCfg, ExecutionResult, HaltReason, TransactTo, -}; use reth_provider::{ChainSpecProvider, StateProvider}; -use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_revm::{ + database::StateProviderDatabase, + db::CacheDB, + primitives::{BlockEnv, CfgEnvWithHandlerCfg, ExecutionResult, HaltReason, TransactTo}, +}; use reth_rpc_eth_types::{ revm_utils::{apply_state_overrides, caller_gas_allowance}, EthApiError, RevertError, RpcInvalidTransactionError, diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 490447d6152..548f9101023 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -1,10 +1,8 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. 
-use std::time::{Duration, Instant}; - +use super::SpawnBlocking; use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; - use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, @@ -19,27 +17,28 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - proofs::calculate_transaction_root, - revm_primitives::{ - BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, - ResultAndState, SpecId, - }, - Block, BlockBody, Receipt, SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, + proofs::calculate_transaction_root, Block, BlockBody, Receipt, SealedBlockWithSenders, + SealedHeader, TransactionSignedEcRecovered, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, ReceiptProvider, StateProviderFactory, }; -use reth_revm::database::StateProviderDatabase; +use reth_revm::{ + database::StateProviderDatabase, + primitives::{ + BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, + ResultAndState, SpecId, + }, +}; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; use reth_trie::HashedPostState; use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; +use std::time::{Duration, Instant}; use tokio::sync::Mutex; use tracing::debug; -use super::SpawnBlocking; - /// Loads a pending block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. 
diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 99c41daea37..893bbdd6b9c 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -10,7 +10,6 @@ use alloy_primitives::{Address, Bytes, U256}; use alloy_rpc_types_eth::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; use alloy_sol_types::decode_revert_reason; use reth_errors::RethError; -use reth_primitives::revm_primitives::InvalidHeader; use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, }; @@ -20,6 +19,7 @@ use reth_transaction_pool::error::{ }; use revm::primitives::{EVMError, ExecutionResult, HaltReason, InvalidTransaction, OutOfGasError}; use revm_inspectors::tracing::MuxError; +use revm_primitives::InvalidHeader; use tracing::error; /// A trait to convert an error to an RPC error. diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index a2e0be30437..f92bd075a3b 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -1,31 +1,26 @@ //! `Eth` bundle implementation and helpers. 
-use std::sync::Arc; - use alloy_primitives::{Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm_primitives::db::{DatabaseCommit, DatabaseRef}, - PooledTransactionsElement, -}; +use reth_primitives::PooledTransactionsElement; +use reth_provider::{ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; -use reth_rpc_eth_api::{FromEthApiError, FromEvmError, RpcNodeCore}; +use reth_rpc_eth_api::{ + helpers::{Call, EthTransactions, LoadPendingBlock}, + EthCallBundleApiServer, FromEthApiError, FromEvmError, RpcNodeCore, +}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ - db::CacheDB, + db::{CacheDB, DatabaseCommit, DatabaseRef}, primitives::{ResultAndState, TxEnv}, }; use revm_primitives::{EnvKzgSettings, EnvWithHandlerCfg, SpecId, MAX_BLOB_GAS_PER_BLOCK}; +use std::sync::Arc; -use reth_provider::{ChainSpecProvider, HeaderProvider}; -use reth_rpc_eth_api::{ - helpers::{Call, EthTransactions, LoadPendingBlock}, - EthCallBundleApiServer, -}; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError}; /// `Eth` bundle implementation. pub struct EthBundle { /// All nested fields bundled together. 
diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 40d951f755f..f77b7e79da0 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -10,10 +10,7 @@ use alloy_rpc_types_mev::{ use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm_primitives::db::{DatabaseCommit, DatabaseRef}, - TransactionSigned, -}; +use reth_primitives::TransactionSigned; use reth_provider::{ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::MevSimApiServer; @@ -26,6 +23,7 @@ use reth_tasks::pool::BlockingTaskGuard; use revm::{ db::CacheDB, primitives::{Address, EnvWithHandlerCfg, ResultAndState, SpecId, TxEnv}, + DatabaseCommit, DatabaseRef, }; use std::{sync::Arc, time::Duration}; use tracing::info; diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 0d2171604d5..6e2cea5051d 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -265,8 +265,7 @@ mod tests { use alloy_primitives::{hex, map::HashMap, Address, U256}; use reth_db::test_utils::create_test_rw_db; use reth_db_api::database::Database; - use reth_primitives::revm_primitives::AccountInfo; - use revm::db::BundleState; + use revm::{db::BundleState, primitives::AccountInfo}; #[test] fn from_bundle_state_with_rayon() { diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 43e5f7428f6..47adc64c004 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -15,7 +15,10 @@ use reth::{ providers::ProviderError, revm::{ interpreter::Host, - primitives::{Env, TransactTo, TxEnv}, + primitives::{ + address, Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, + TransactTo, TxEnv, U256, + }, Database, DatabaseCommit, Evm, State, }, }; @@ -26,12 
+29,7 @@ use reth_evm::execute::{ }; use reth_evm_ethereum::EthEvmConfig; use reth_node_ethereum::{node::EthereumAddOns, BasicBlockExecutorProvider, EthereumNode}; -use reth_primitives::{ - revm_primitives::{ - address, Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, U256, - }, - BlockWithSenders, Receipt, -}; +use reth_primitives::{BlockWithSenders, Receipt}; use std::{fmt::Display, sync::Arc}; pub const SYSTEM_ADDRESS: Address = address!("fffffffffffffffffffffffffffffffffffffffe"); diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index c564c5b28b6..7a5278061f2 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -11,12 +11,11 @@ use reth::{ BuilderContext, NodeBuilder, }, payload::{EthBuiltPayload, EthPayloadBuilderAttributes}, - primitives::revm_primitives::{Env, PrecompileResult}, revm::{ handler::register::EvmHandler, inspector_handle_register, precompile::{Precompile, PrecompileOutput, PrecompileSpecId}, - primitives::BlockEnv, + primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, ContextPrecompiles, Database, Evm, EvmBuilder, GetInspector, }, rpc::types::engine::PayloadAttributes, @@ -34,10 +33,7 @@ use reth_node_ethereum::{ node::{EthereumAddOns, EthereumPayloadBuilder}, BasicBlockExecutorProvider, EthExecutionStrategyFactory, EthereumNode, }; -use reth_primitives::{ - revm_primitives::{CfgEnvWithHandlerCfg, TxEnv}, - TransactionSigned, -}; +use reth_primitives::TransactionSigned; use reth_tracing::{RethTracer, Tracer}; use std::{convert::Infallible, sync::Arc}; diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index 5be45ad7674..f683af4e430 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -9,11 +9,14 @@ use parking_lot::RwLock; use reth::{ api::NextBlockEnvAttributes, builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, - 
primitives::revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, revm::{ handler::register::EvmHandler, inspector_handle_register, precompile::{Precompile, PrecompileSpecId}, + primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, SpecId, StatefulPrecompileMut, + TxEnv, + }, ContextPrecompile, ContextPrecompiles, Database, Evm, EvmBuilder, GetInspector, }, tasks::TaskManager, @@ -25,10 +28,7 @@ use reth_node_ethereum::{ node::EthereumAddOns, BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, EthereumNode, }; -use reth_primitives::{ - revm_primitives::{SpecId, StatefulPrecompileMut}, - TransactionSigned, -}; +use reth_primitives::TransactionSigned; use reth_tracing::{RethTracer, Tracer}; use schnellru::{ByLength, LruMap}; use std::{collections::HashMap, convert::Infallible, sync::Arc}; From dda19065885c3b0f32594e737e76dac5713b1bdf Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 16 Nov 2024 17:06:57 +0100 Subject: [PATCH 512/970] rm more generics when useless (#12595) --- crates/exex/exex/src/wal/mod.rs | 14 +++---- crates/net/eth-wire-types/src/blocks.rs | 16 +++---- crates/net/eth-wire-types/src/broadcast.rs | 2 +- crates/net/eth-wire-types/src/message.rs | 42 +++++-------------- crates/net/eth-wire-types/src/receipts.rs | 8 ++-- crates/net/eth-wire-types/src/state.rs | 8 ++-- crates/net/eth-wire-types/src/status.rs | 2 +- crates/net/eth-wire-types/src/transactions.rs | 22 ++++------ crates/net/nat/src/lib.rs | 2 +- .../net/network/src/transactions/fetcher.rs | 22 ++++------ crates/net/network/src/transactions/mod.rs | 2 +- crates/net/p2p/src/full_block.rs | 2 +- crates/rpc/rpc-eth-types/src/simulate.rs | 4 +- .../stages/src/stages/sender_recovery.rs | 6 +-- crates/stages/stages/src/stages/tx_lookup.rs | 4 +- 15 files changed, 60 insertions(+), 96 deletions(-) diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 
00b0ea919ef..a2e8ee8e6c6 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -231,13 +231,13 @@ mod tests { use crate::wal::{cache::CachedBlock, Wal}; fn read_notifications(wal: &Wal) -> eyre::Result> { - let Some(files_range) = wal.inner.storage.files_range()? else { return Ok(Vec::new()) }; - - wal.inner - .storage - .iter_notifications(files_range) - .map(|entry| Ok(entry?.2)) - .collect::>() + wal.inner.storage.files_range()?.map_or(Ok(Vec::new()), |range| { + wal.inner + .storage + .iter_notifications(range) + .map(|entry| entry.map(|(_, _, n)| n)) + .collect() + }) } fn sort_committed_blocks( diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 1eb71082d81..06549e769e6 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -198,7 +198,7 @@ mod tests { fn encode_get_block_header_number() { let expected = hex!("ca820457c682270f050580"); let mut data = vec![]; - RequestPair:: { + RequestPair { request_id: 1111, message: GetBlockHeaders { start_block: BlockHashOrNumber::Number(9999), @@ -215,7 +215,7 @@ mod tests { #[test] fn decode_get_block_header_number() { let data = hex!("ca820457c682270f050580"); - let expected = RequestPair:: { + let expected = RequestPair { request_id: 1111, message: GetBlockHeaders { start_block: BlockHashOrNumber::Number(9999), @@ -234,7 +234,7 @@ mod tests { // [ (f90202) 0x0457 = 1111, [ (f901fc) [ (f901f9) header ] ] ] let expected = 
hex!("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); let mut data = vec![]; - RequestPair::> { + RequestPair { request_id: 1111, message: BlockHeaders(vec![ Header { @@ -269,7 +269,7 @@ mod tests { #[test] fn decode_block_header() { let data = 
hex!("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); - let expected = RequestPair::> { + let expected = RequestPair { request_id: 1111, message: BlockHeaders(vec![ Header { @@ -306,7 +306,7 @@ mod tests { fn encode_get_block_bodies() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - RequestPair:: { + RequestPair { request_id: 1111, message: GetBlockBodies(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -321,7 +321,7 @@ mod tests { #[test] fn decode_get_block_bodies() { let data = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); - let expected = RequestPair:: { + let expected = RequestPair { request_id: 1111, message: GetBlockBodies(vec![ 
hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -337,7 +337,7 @@ mod tests { fn encode_block_bodies() { let expected = hex!("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); let mut data = vec![]; - let request = RequestPair::> { + let request = RequestPair { request_id: 1111, message: BlockBodies(vec![ BlockBody { @@ -408,7 +408,7 @@ mod tests { #[test] fn decode_block_bodies() { let data = 
hex!("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); - let expected = RequestPair::> { + let expected = RequestPair { request_id: 1111, message: BlockBodies(vec![ BlockBody { diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 03222706992..f37c5e74a04 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -733,7 +733,7 @@ impl RequestTxHashes { impl FromIterator<(TxHash, Eth68TxMetadata)> for RequestTxHashes { fn from_iter>(iter: I) -> Self { - 
Self::new(iter.into_iter().map(|(hash, _)| hash).collect::>()) + Self::new(iter.into_iter().map(|(hash, _)| hash).collect()) } } diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 2a6d973ffc3..f83e21124e3 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -77,52 +77,30 @@ impl ProtocolMessage { )?) } } - EthMessageID::GetBlockHeaders => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetBlockHeaders(request_pair) - } - EthMessageID::BlockHeaders => { - let request_pair = RequestPair::>::decode(buf)?; - EthMessage::BlockHeaders(request_pair) - } - EthMessageID::GetBlockBodies => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetBlockBodies(request_pair) - } - EthMessageID::BlockBodies => { - let request_pair = RequestPair::>::decode(buf)?; - EthMessage::BlockBodies(request_pair) - } + EthMessageID::GetBlockHeaders => EthMessage::GetBlockHeaders(RequestPair::decode(buf)?), + EthMessageID::BlockHeaders => EthMessage::BlockHeaders(RequestPair::decode(buf)?), + EthMessageID::GetBlockBodies => EthMessage::GetBlockBodies(RequestPair::decode(buf)?), + EthMessageID::BlockBodies => EthMessage::BlockBodies(RequestPair::decode(buf)?), EthMessageID::GetPooledTransactions => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetPooledTransactions(request_pair) + EthMessage::GetPooledTransactions(RequestPair::decode(buf)?) } EthMessageID::PooledTransactions => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::PooledTransactions(request_pair) + EthMessage::PooledTransactions(RequestPair::decode(buf)?) } EthMessageID::GetNodeData => { if version >= EthVersion::Eth67 { return Err(MessageError::Invalid(version, EthMessageID::GetNodeData)) } - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetNodeData(request_pair) + EthMessage::GetNodeData(RequestPair::decode(buf)?) 
} EthMessageID::NodeData => { if version >= EthVersion::Eth67 { return Err(MessageError::Invalid(version, EthMessageID::GetNodeData)) } - let request_pair = RequestPair::::decode(buf)?; - EthMessage::NodeData(request_pair) - } - EthMessageID::GetReceipts => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetReceipts(request_pair) - } - EthMessageID::Receipts => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::Receipts(request_pair) + EthMessage::NodeData(RequestPair::decode(buf)?) } + EthMessageID::GetReceipts => EthMessage::GetReceipts(RequestPair::decode(buf)?), + EthMessageID::Receipts => EthMessage::Receipts(RequestPair::decode(buf)?), }; Ok(Self { message_type, message }) } diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index db9d6f871e4..ca5e85a146f 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -54,7 +54,7 @@ mod tests { fn encode_get_receipts() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: GetReceipts(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -72,7 +72,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: GetReceipts(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -88,7 +88,7 @@ mod tests { fn encode_receipts() { let expected = 
hex!("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: Receipts(vec![vec![ ReceiptWithBloom { @@ -124,7 +124,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: Receipts(vec![ vec![ diff --git a/crates/net/eth-wire-types/src/state.rs b/crates/net/eth-wire-types/src/state.rs index 16a2959b338..57273adc6b1 100644 --- a/crates/net/eth-wire-types/src/state.rs +++ b/crates/net/eth-wire-types/src/state.rs @@ -36,7 +36,7 @@ mod tests { fn encode_get_node_data() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: GetNodeData(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -54,7 +54,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: GetNodeData(vec![ 
hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -69,7 +69,7 @@ mod tests { fn encode_node_data() { let expected = hex!("ce820457ca84deadc0de84feedbeef"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: NodeData(vec![ hex!("deadc0de").as_slice().into(), @@ -87,7 +87,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: NodeData(vec![ hex!("deadc0de").as_slice().into(), diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index d9e8d4319b5..fa73d0907fe 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -338,7 +338,7 @@ mod tests { let total_difficulty = U256::from(rng.gen::()); // create a genesis that has a random part, so we can check that the hash is preserved - let genesis = Genesis { nonce: rng.gen::(), ..Default::default() }; + let genesis = Genesis { nonce: rng.gen(), ..Default::default() }; // build head let head = Head { diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index dfedcb6f83e..8db96c10042 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -96,7 +96,7 @@ mod tests { fn encode_get_pooled_transactions() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: GetPooledTransactions(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -114,7 +114,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { 
request_id: 1111, message: GetPooledTransactions(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -182,7 +182,7 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: PooledTransactions(message), /* Assuming PooledTransactions wraps a * Vec */ @@ -248,10 +248,7 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let expected = RequestPair:: { - request_id: 1111, - message: PooledTransactions(message), - }; + let expected = RequestPair { request_id: 1111, message: PooledTransactions(message) }; let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!(request, expected); @@ -383,10 +380,8 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let expected_transactions = RequestPair:: { - request_id: 0, - message: PooledTransactions(message), - }; + let expected_transactions = + RequestPair { request_id: 0, message: PooledTransactions(message) }; // checking tx by tx for easier debugging if there are any regressions for (decoded, expected) in @@ -522,10 +517,7 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let transactions = RequestPair:: { - request_id: 0, - message: PooledTransactions(message), - }; + let transactions = RequestPair { request_id: 0, message: PooledTransactions(message) }; let mut encoded = vec![]; transactions.encode(&mut encoded); diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs index 600ba97cd2d..962f1e49efd 100644 --- a/crates/net/nat/src/lib.rs +++ b/crates/net/nat/src/lib.rs @@ -111,7 +111,7 @@ impl FromStr for NatResolver { "Unknown Nat Resolver: {s}" ))) }; - Self::ExternalIp(ip.parse::()?) + Self::ExternalIp(ip.parse()?) 
} }; Ok(r) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 00a9158233b..4c4119c85c0 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -289,7 +289,7 @@ impl TransactionFetcher { // tx is really big, pack request with single tx if size >= self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request { - return hashes_from_announcement_iter.collect::() + return hashes_from_announcement_iter.collect() } acc_size_response = size; } @@ -688,10 +688,8 @@ impl TransactionFetcher { } let (response, rx) = oneshot::channel(); - let req: PeerRequest = PeerRequest::GetPooledTransactions { - request: GetPooledTransactions( - new_announced_hashes.iter().copied().collect::>(), - ), + let req = PeerRequest::GetPooledTransactions { + request: GetPooledTransactions(new_announced_hashes.iter().copied().collect()), response, }; @@ -1012,8 +1010,7 @@ impl TransactionFetcher { // self.try_buffer_hashes_for_retry(requested_hashes, &peer_id); - let transactions = - valid_payload.into_data().into_values().collect::(); + let transactions = valid_payload.into_data().into_values().collect(); FetchEvent::TransactionsFetched { peer_id, transactions } } @@ -1202,13 +1199,10 @@ impl DedupPayload for VerifiedPooledTransactions { } fn dedup(self) -> PartiallyValidData { - let Self { txns } = self; - let unique_fetched = txns - .into_iter() - .map(|tx| (*tx.hash(), tx)) - .collect::>(); - - PartiallyValidData::from_raw_data(unique_fetched, None) + PartiallyValidData::from_raw_data( + self.txns.into_iter().map(|tx| (*tx.hash(), tx)).collect(), + None, + ) } } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 48ae61e0dd0..36abcd3d617 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -879,7 +879,7 @@ where .into_iter() 
.map(PooledTransactionsElement::try_from_broadcast) .filter_map(Result::ok) - .collect::(); + .collect(); self.import_transactions(peer_id, non_blob_txs, TransactionSource::Broadcast); diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 151a5bdd2a3..a966c01c933 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -351,7 +351,7 @@ where { /// Returns the block hashes for the given range, if they are available. pub fn range_block_hashes(&self) -> Option> { - self.headers.as_ref().map(|h| h.iter().map(|h| h.hash()).collect::>()) + self.headers.as_ref().map(|h| h.iter().map(|h| h.hash()).collect()) } /// Returns whether or not the bodies map is fully populated with requested headers and bodies. diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 9807010c0cf..b24ec9e86bc 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -282,7 +282,7 @@ pub fn build_block>( timestamp: block_env.timestamp.to(), base_fee_per_gas: Some(block_env.basefee.to()), gas_limit: block_env.gas_limit.to(), - gas_used: calls.iter().map(|c| c.gas_used).sum::(), + gas_used: calls.iter().map(|c| c.gas_used).sum(), blob_gas_used: Some(0), parent_hash, receipts_root: calculate_receipt_root(&receipts), @@ -306,6 +306,6 @@ pub fn build_block>( let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; - let block = from_block::(block, total_difficulty, txs_kind, None, tx_resp_builder)?; + let block = from_block(block, total_difficulty, txs_kind, None, tx_resp_builder)?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index a4eda6394c0..178fceb262b 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -552,7 
+552,7 @@ mod tests { blocks[..=max_pruned_block as usize] .iter() .map(|block| block.body.transactions.len() as u64) - .sum::(), + .sum(), ), prune_mode: PruneMode::Full, }, @@ -567,8 +567,8 @@ mod tests { processed: blocks[..=max_processed_block] .iter() .map(|block| block.body.transactions.len() as u64) - .sum::(), - total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum::() + .sum(), + total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum() } ); } diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 60c958abf86..3fdcbd0da64 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -416,8 +416,8 @@ mod tests { processed: blocks[..=max_processed_block] .iter() .map(|block| block.body.transactions.len() as u64) - .sum::(), - total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum::() + .sum(), + total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum() } ); } From 735eb4b97cf3937886a2972647e0afb90c916a71 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 16 Nov 2024 21:22:17 +0400 Subject: [PATCH 513/970] chore(cli): unify trait bounds (#12604) Co-authored-by: Matthias Seitz --- bin/reth/src/commands/debug_cmd/build_block.rs | 15 +++++++-------- bin/reth/src/commands/debug_cmd/execution.rs | 13 +++++++------ .../src/commands/debug_cmd/in_memory_merkle.rs | 13 ++++++------- bin/reth/src/commands/debug_cmd/merkle.rs | 13 ++++++------- bin/reth/src/commands/debug_cmd/mod.rs | 6 ++---- bin/reth/src/commands/debug_cmd/replay_engine.rs | 15 ++++++--------- crates/cli/commands/src/common.rs | 9 +++++++-- crates/cli/commands/src/db/checksum.rs | 9 ++++++--- crates/cli/commands/src/db/mod.rs | 7 ++----- crates/cli/commands/src/db/stats.rs | 8 +++----- crates/cli/commands/src/import.rs | 5 ++--- crates/cli/commands/src/init_cmd.rs | 7 ++----- 
crates/cli/commands/src/init_state/mod.rs | 7 ++----- crates/cli/commands/src/prune.rs | 7 ++----- crates/cli/commands/src/recover/mod.rs | 4 ++-- crates/cli/commands/src/recover/storage_tries.rs | 5 ++--- crates/cli/commands/src/stage/drop.rs | 7 ++----- crates/cli/commands/src/stage/dump/execution.rs | 6 +++--- .../commands/src/stage/dump/hashing_account.rs | 5 ++--- .../commands/src/stage/dump/hashing_storage.rs | 5 ++--- crates/cli/commands/src/stage/dump/merkle.rs | 5 ++--- crates/cli/commands/src/stage/dump/mod.rs | 6 +++--- crates/cli/commands/src/stage/mod.rs | 4 ++-- crates/cli/commands/src/stage/run.rs | 5 ++--- crates/cli/commands/src/stage/unwind.rs | 9 +++------ .../optimism/cli/src/commands/build_pipeline.rs | 8 +++++--- crates/optimism/cli/src/commands/import.rs | 7 ++----- .../optimism/cli/src/commands/import_receipts.rs | 9 +++------ crates/optimism/cli/src/commands/init_state.rs | 7 ++----- 29 files changed, 97 insertions(+), 129 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 89eca6b776f..adb2c83b1b2 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -15,24 +15,23 @@ use reth_blockchain_tree::{ }; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_consensus::Consensus; use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; -use reth_node_api::{ - EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes, -}; +use reth_node_api::{EngineApiMessageVersion, PayloadBuilderAttributes}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use 
reth_primitives::{ BlobTransaction, PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, }; use reth_provider::{ - providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, - ProviderFactory, StageCheckpointReader, StateProviderFactory, + providers::{BlockchainProvider, ProviderNodeTypes}, + BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, + StageCheckpointReader, StateProviderFactory, }; use reth_revm::{ cached::CachedReads, @@ -88,7 +87,7 @@ impl> Command { /// Fetches the best block block from the database. /// /// If the database is empty, returns the genesis block. - fn lookup_best_block>( + fn lookup_best_block>( &self, factory: ProviderFactory, ) -> RethResult> { @@ -123,7 +122,7 @@ impl> Command { } /// Execute `debug in-memory-merkle` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 9056d3424c7..da928645b9f 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -8,7 +8,7 @@ use futures::{stream::select as stream_select, StreamExt}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; @@ -22,10 +22,11 @@ use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, 
NodeTypesWithEngine}; +use reth_node_api::NodeTypesWithDBAdapter; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ - BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, + providers::ProviderNodeTypes, BlockExecutionWriter, ChainSpecProvider, ProviderFactory, + StageCheckpointReader, }; use reth_prune::PruneModes; use reth_stages::{ @@ -58,7 +59,7 @@ pub struct Command { } impl> Command { - fn build_pipeline, Client>( + fn build_pipeline, Client>( &self, config: &Config, client: Client, @@ -116,7 +117,7 @@ impl> Command { Ok(pipeline) } - async fn build_network>( + async fn build_network>( &self, config: &Config, task_executor: TaskExecutor, @@ -160,7 +161,7 @@ impl> Command { } /// Execute `execution-debug` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index d5bb8a87b22..ce5f318632e 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -10,7 +10,7 @@ use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; @@ -19,12 +19,11 @@ use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ - writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, - 
HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, - StageCheckpointReader, StateWriter, StorageReader, + providers::ProviderNodeTypes, writer::UnifiedStorageWriter, AccountExtReader, + ChainSpecProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, + ProviderFactory, StageCheckpointReader, StateWriter, StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; @@ -56,7 +55,7 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network>( &self, config: &Config, task_executor: TaskExecutor, @@ -78,7 +77,7 @@ impl> Command { } /// Execute `debug in-memory-merkle` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 9c77c70abc7..db4cd952e8d 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -6,7 +6,7 @@ use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; @@ -17,12 +17,11 @@ use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ - writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, - DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, 
ProviderFactory, StateWriter, + providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, + ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -56,7 +55,7 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network>( &self, config: &Config, task_executor: TaskExecutor, @@ -78,7 +77,7 @@ impl> Command { } /// Execute `merkle-debug` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/bin/reth/src/commands/debug_cmd/mod.rs b/bin/reth/src/commands/debug_cmd/mod.rs index 51681e8c59e..65329f41400 100644 --- a/bin/reth/src/commands/debug_cmd/mod.rs +++ b/bin/reth/src/commands/debug_cmd/mod.rs @@ -3,8 +3,8 @@ use clap::{Parser, Subcommand}; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::CliNodeTypes; use reth_cli_runner::CliContext; -use reth_node_api::NodeTypesWithEngine; use reth_node_ethereum::EthEngineTypes; mod build_block; @@ -37,9 +37,7 @@ pub enum Subcommands { impl> Command { /// Execute `debug` command - pub async fn execute< - N: NodeTypesWithEngine, - >( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 9314a439265..7daead83a84 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -8,7 +8,7 @@ use reth_blockchain_tree::{ }; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use 
reth_cli_util::get_secret_key; use reth_config::Config; @@ -18,13 +18,12 @@ use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage} use reth_fs_util as fs; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{ - EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine, -}; +use reth_node_api::{EngineApiMessageVersion, NodeTypesWithDBAdapter}; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ - providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, + providers::{BlockchainProvider, ProviderNodeTypes}, + CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, }; use reth_prune::PruneModes; use reth_stages::Pipeline; @@ -56,7 +55,7 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network>( &self, config: &Config, task_executor: TaskExecutor, @@ -78,9 +77,7 @@ impl> Command { } /// Execute `debug replay-engine` command - pub async fn execute< - N: NodeTypesWithEngine, - >( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 21d24a7ff7a..0e4eb2723c3 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -53,7 +53,7 @@ pub struct EnvironmentArgs { impl> EnvironmentArgs { /// Initializes environment according to [`AccessRights`] and returns an instance of /// [`Environment`]. - pub fn init>( + pub fn init>( &self, access: AccessRights, ) -> eyre::Result> { @@ -105,7 +105,7 @@ impl> Environmen /// If it's a read-write environment and an issue is found, it will attempt to heal (including a /// pipeline unwind). Otherwise, it will print out an warning, advising the user to restart the /// node to heal. 
- fn create_provider_factory>( + fn create_provider_factory>( &self, config: &Config, db: Arc, @@ -188,3 +188,8 @@ impl AccessRights { matches!(self, Self::RW) } } + +/// Helper trait with a common set of requirements for the +/// [`NodeTypes`](reth_node_builder::NodeTypes) in CLI. +pub trait CliNodeTypes: NodeTypesWithEngine {} +impl CliNodeTypes for N where N: NodeTypesWithEngine {} diff --git a/crates/cli/commands/src/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs index 4c986dc0332..76d92962f72 100644 --- a/crates/cli/commands/src/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -1,11 +1,14 @@ -use crate::db::get::{maybe_json_value_parser, table_key}; +use crate::{ + common::CliNodeTypes, + db::get::{maybe_json_value_parser, table_key}, +}; use ahash::RandomState; use clap::Parser; use reth_chainspec::EthereumHardforks; use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables}; use reth_db_api::{cursor::DbCursorRO, table::Table, transaction::DbTx}; use reth_db_common::DbTool; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_provider::{providers::ProviderNodeTypes, DBProvider}; use std::{ hash::{BuildHasher, Hasher}, @@ -36,7 +39,7 @@ pub struct Command { impl Command { /// Execute `db checksum` command - pub fn execute>( + pub fn execute>( self, tool: &DbTool>>, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/db/mod.rs b/crates/cli/commands/src/db/mod.rs index e1a9a90bacc..e80b51160e2 100644 --- a/crates/cli/commands/src/db/mod.rs +++ b/crates/cli/commands/src/db/mod.rs @@ -1,10 +1,9 @@ -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db::version::{get_db_version, 
DatabaseVersionError, DB_VERSION}; use reth_db_common::DbTool; -use reth_node_builder::NodeTypesWithEngine; use std::io::{self, Write}; mod checksum; @@ -65,9 +64,7 @@ macro_rules! db_ro_exec { impl> Command { /// Execute `db` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let data_dir = self.env.datadir.clone().resolve_datadir(self.env.chain.chain()); let db_path = data_dir.db(); let static_files_path = data_dir.static_files(); diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index 6865f01345e..71ea995800f 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -1,4 +1,4 @@ -use crate::db::checksum::ChecksumViewer; +use crate::{common::CliNodeTypes, db::checksum::ChecksumViewer}; use clap::Parser; use comfy_table::{Cell, Row, Table as ComfyTable}; use eyre::WrapErr; @@ -9,9 +9,7 @@ use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv, TableViewer, Ta use reth_db_api::database::Database; use reth_db_common::DbTool; use reth_fs_util as fs; -use reth_node_builder::{ - NodePrimitives, NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine, -}; +use reth_node_builder::{NodePrimitives, NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::providers::{ProviderNodeTypes, StaticFileProvider}; use reth_static_file_types::SegmentRangeInclusive; @@ -40,7 +38,7 @@ pub struct Command { impl Command { /// Execute `db stats` command - pub fn execute>( + pub fn execute>( self, data_dir: ChainPath, tool: &DbTool>>, diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index ebda2deafa6..539211a22f7 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -1,5 +1,5 @@ //! Command that initializes the node by importing a chain from a file. 
-use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_primitives::B256; use clap::Parser; use futures::{Stream, StreamExt}; @@ -20,7 +20,6 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::version::SHORT_VERSION; use reth_node_events::node::NodeEvent; use reth_provider::{ @@ -60,7 +59,7 @@ impl> ImportComm /// Execute `import` command pub async fn execute(self, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, + N: CliNodeTypes, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { diff --git a/crates/cli/commands/src/init_cmd.rs b/crates/cli/commands/src/init_cmd.rs index 5fde9ac0d0b..83f471d629d 100644 --- a/crates/cli/commands/src/init_cmd.rs +++ b/crates/cli/commands/src/init_cmd.rs @@ -1,10 +1,9 @@ //! Command that initializes the node from a genesis file. -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_node_builder::NodeTypesWithEngine; use reth_provider::BlockHashReader; use tracing::info; @@ -17,9 +16,7 @@ pub struct InitCommand { impl> InitCommand { /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth init starting"); let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index adde88870fe..2aa2483fdda 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -1,12 +1,11 @@ //! 
Command that initializes the node from a genesis file. -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_primitives::{B256, U256}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db_common::init::init_from_state_dump; -use reth_node_builder::NodeTypesWithEngine; use reth_primitives::SealedHeader; use reth_provider::{ BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, @@ -68,9 +67,7 @@ pub struct InitStateCommand { impl> InitStateCommand { /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "Reth init-state starting"); let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?; diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs index 7dbb66fc2fa..37f0637b0a5 100644 --- a/crates/cli/commands/src/prune.rs +++ b/crates/cli/commands/src/prune.rs @@ -1,9 +1,8 @@ //! Command that runs pruning without any limits. -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_node_builder::NodeTypesWithEngine; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; use tracing::info; @@ -17,9 +16,7 @@ pub struct PruneCommand { impl> PruneCommand { /// Execute the `prune` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let Environment { config, provider_factory, .. 
} = self.env.init::(AccessRights::RW)?; let prune_config = config.prune.unwrap_or_default(); diff --git a/crates/cli/commands/src/recover/mod.rs b/crates/cli/commands/src/recover/mod.rs index 3216449e49b..a2d94360227 100644 --- a/crates/cli/commands/src/recover/mod.rs +++ b/crates/cli/commands/src/recover/mod.rs @@ -1,10 +1,10 @@ //! `reth recover` command. +use crate::common::CliNodeTypes; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; -use reth_node_builder::NodeTypesWithEngine; mod storage_tries; @@ -24,7 +24,7 @@ pub enum Subcommands { impl> Command { /// Execute `recover` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/recover/storage_tries.rs b/crates/cli/commands/src/recover/storage_tries.rs index 794058fac1d..f879c393c6b 100644 --- a/crates/cli/commands/src/recover/storage_tries.rs +++ b/crates/cli/commands/src/recover/storage_tries.rs @@ -1,4 +1,4 @@ -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -8,7 +8,6 @@ use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRW}, transaction::DbTx, }; -use reth_node_builder::NodeTypesWithEngine; use reth_provider::{BlockNumReader, HeaderProvider, ProviderError}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -23,7 +22,7 @@ pub struct Command { impl> Command { /// Execute `storage-tries` recovery command - pub async fn execute>( + pub async fn execute>( self, _ctx: CliContext, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 70b2caa8d16..49bbc55ec24 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ 
b/crates/cli/commands/src/stage/drop.rs @@ -1,5 +1,5 @@ //! Database debugging tool -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use itertools::Itertools; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -10,7 +10,6 @@ use reth_db_common::{ init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}, DbTool, }; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::args::StageEnum; use reth_provider::{ writer::UnifiedStorageWriter, DatabaseProviderFactory, StaticFileProviderFactory, @@ -30,9 +29,7 @@ pub struct Command { impl> Command { /// Execute `db` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; let tool = DbTool::new(provider_factory)?; diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 709fc59190d..19704cb1c2f 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -7,7 +7,7 @@ use reth_db_api::{ }; use reth_db_common::DbTool; use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider}; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_node_builder::NodeTypesWithDB; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -25,7 +25,7 @@ pub(crate) async fn dump_execution_stage( executor: E, ) -> eyre::Result<()> where - N: ProviderNodeTypes, + N: ProviderNodeTypes>, E: BlockExecutorProvider, { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; @@ -36,7 +36,7 @@ where if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), 
StaticFileProvider::read_write(output_datadir.static_files())?, diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 738dcabafa7..97452cee892 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -6,7 +6,6 @@ use eyre::Result; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; -use reth_node_builder::NodeTypesWithDBAdapter; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -15,7 +14,7 @@ use reth_provider::{ use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn dump_hashing_account_stage( +pub(crate) async fn dump_hashing_account_stage>>( db_tool: &DbTool, from: BlockNumber, to: BlockNumber, @@ -37,7 +36,7 @@ pub(crate) async fn dump_hashing_account_stage( if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, diff --git a/crates/cli/commands/src/stage/dump/hashing_storage.rs b/crates/cli/commands/src/stage/dump/hashing_storage.rs index 204c087a234..06b064bc02f 100644 --- a/crates/cli/commands/src/stage/dump/hashing_storage.rs +++ b/crates/cli/commands/src/stage/dump/hashing_storage.rs @@ -5,7 +5,6 @@ use eyre::Result; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; -use reth_node_builder::NodeTypesWithDBAdapter; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -14,7 +13,7 @@ use reth_provider::{ use reth_stages::{stages::StorageHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn 
dump_hashing_storage_stage( +pub(crate) async fn dump_hashing_storage_stage>>( db_tool: &DbTool, from: u64, to: u64, @@ -27,7 +26,7 @@ pub(crate) async fn dump_hashing_storage_stage( if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index f7e9e2fc1af..f2688c365e1 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -9,7 +9,6 @@ use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; -use reth_node_builder::NodeTypesWithDBAdapter; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -25,7 +24,7 @@ use reth_stages::{ }; use tracing::info; -pub(crate) async fn dump_merkle_stage( +pub(crate) async fn dump_merkle_stage>>( db_tool: &DbTool, from: BlockNumber, to: BlockNumber, @@ -54,7 +53,7 @@ pub(crate) async fn dump_merkle_stage( if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, diff --git a/crates/cli/commands/src/stage/dump/mod.rs b/crates/cli/commands/src/stage/dump/mod.rs index 6fd2f23aa0e..36b8fb12258 100644 --- a/crates/cli/commands/src/stage/dump/mod.rs +++ b/crates/cli/commands/src/stage/dump/mod.rs @@ -1,5 +1,5 @@ //! 
Database debugging tool -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -10,7 +10,7 @@ use reth_db_api::{ }; use reth_db_common::DbTool; use reth_evm::execute::BlockExecutorProvider; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_builder::NodeTypesWithDB; use reth_node_core::{ args::DatadirArgs, dirs::{DataDirPath, PlatformPath}, @@ -92,7 +92,7 @@ impl> Command /// Execute `dump-stage` command pub async fn execute(self, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, + N: CliNodeTypes, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index 562bd73a28d..b9e0725428a 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -2,12 +2,12 @@ use std::sync::Arc; +use crate::common::CliNodeTypes; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_evm::execute::BlockExecutorProvider; -use reth_node_builder::NodeTypesWithEngine; pub mod drop; pub mod dump; @@ -43,7 +43,7 @@ impl> Command /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, + N: CliNodeTypes, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 1ac2a12d6fa..f3c3bbef965 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -2,7 +2,7 @@ //! //! 
Stage debugging tool -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; @@ -19,7 +19,6 @@ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network::BlockDownloaderProvider; use reth_network_p2p::HeadersClient; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::{ args::{NetworkArgs, StageEnum}, version::{ @@ -106,7 +105,7 @@ impl> Command /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, + N: CliNodeTypes, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index a5c9956c95b..e71861a988d 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -1,6 +1,6 @@ //! 
Unwinding a certain block range -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use clap::{Parser, Subcommand}; @@ -13,7 +13,6 @@ use reth_db::DatabaseEnv; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::args::NetworkArgs; use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainSpecProvider, @@ -50,9 +49,7 @@ pub struct Command { impl> Command { /// Execute `db stage unwind` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let Environment { provider_factory, config, .. } = self.env.init::(AccessRights::RW)?; let range = self.command.unwind_range(provider_factory.clone())?; @@ -116,7 +113,7 @@ impl> Command Ok(()) } - fn build_pipeline>( + fn build_pipeline>( self, config: Config, provider_factory: ProviderFactory, diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs index a197f93a8b4..88dc0989717 100644 --- a/crates/optimism/cli/src/commands/build_pipeline.rs +++ b/crates/optimism/cli/src/commands/build_pipeline.rs @@ -11,11 +11,13 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; -use reth_node_builder::NodeTypesWithDB; use reth_node_events::node::NodeEvent; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::OpExecutorProvider; -use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderFactory}; +use reth_provider::{ + providers::ProviderNodeTypes, BlockNumReader, ChainSpecProvider, HeaderProvider, + 
ProviderFactory, +}; use reth_prune::PruneModes; use reth_stages::{sets::DefaultStages, Pipeline, StageSet}; use reth_stages_types::StageId; @@ -36,7 +38,7 @@ pub(crate) async fn build_import_pipeline( disable_exec: bool, ) -> eyre::Result<(Pipeline, impl Stream)> where - N: NodeTypesWithDB, + N: ProviderNodeTypes, C: Consensus + 'static, { if !file_client.has_canonical_blocks() { diff --git a/crates/optimism/cli/src/commands/import.rs b/crates/optimism/cli/src/commands/import.rs index e5f037c3d5c..5e3de5a8671 100644 --- a/crates/optimism/cli/src/commands/import.rs +++ b/crates/optimism/cli/src/commands/import.rs @@ -2,14 +2,13 @@ //! file. use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_consensus::noop::NoopConsensus; use reth_db::tables; use reth_db_api::transaction::DbTx; use reth_downloaders::file_client::{ ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, }; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::version::SHORT_VERSION; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::bedrock::is_dup_tx; @@ -42,9 +41,7 @@ pub struct ImportOpCommand { impl> ImportOpCommand { /// Execute `import` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); info!(target: "reth::cli", diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index ca82cf73ea4..049e160ae23 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -5,14 +5,13 @@ use std::path::{Path, PathBuf}; use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, 
Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_db::tables; use reth_downloaders::{ file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, receipt_file_client::ReceiptFileClient, }; use reth_execution_types::ExecutionOutcome; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::version::SHORT_VERSION; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::bedrock::is_dup_tx; @@ -48,9 +47,7 @@ pub struct ImportReceiptsOpCommand { impl> ImportReceiptsOpCommand { /// Execute `import` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); debug!(target: "reth::cli", @@ -88,7 +85,7 @@ pub async fn import_receipts_from_file( filter: F, ) -> eyre::Result<()> where - N: NodeTypesWithDB, + N: ProviderNodeTypes, P: AsRef, F: FnMut(u64, &mut Receipts) -> usize, { diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 6be9b73c765..6a36f492c50 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -2,9 +2,8 @@ use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment}; use reth_db_common::init::init_from_state_dump; -use reth_node_builder::NodeTypesWithEngine; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}; use reth_primitives::SealedHeader; @@ -36,9 +35,7 @@ pub struct InitStateCommandOp { impl> InitStateCommandOp { /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { 
info!(target: "reth::cli", "Reth init-state starting"); let Environment { config, provider_factory, .. } = From bf92a5fb59861ccadc664c4b18f228a787f0e606 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sat, 16 Nov 2024 18:36:57 +0100 Subject: [PATCH 514/970] feat: split reth-payload-primitives (#12600) --- .github/assets/check_wasm.sh | 1 + Cargo.lock | 37 ++++-- Cargo.toml | 2 + crates/consensus/beacon/Cargo.toml | 1 + crates/consensus/beacon/src/engine/mod.rs | 3 +- crates/e2e-test-utils/Cargo.toml | 1 + crates/e2e-test-utils/src/payload.rs | 5 +- crates/engine/local/Cargo.toml | 1 + crates/engine/local/src/miner.rs | 5 +- crates/engine/primitives/Cargo.toml | 1 + crates/engine/primitives/src/message.rs | 2 +- crates/engine/tree/Cargo.toml | 1 + crates/engine/tree/src/tree/mod.rs | 3 +- crates/ethereum/payload/Cargo.toml | 1 + crates/ethereum/payload/src/lib.rs | 3 +- crates/node/api/Cargo.toml | 1 + crates/node/api/src/lib.rs | 4 + crates/node/api/src/node.rs | 2 +- crates/node/builder/Cargo.toml | 1 - crates/node/builder/src/launch/engine.rs | 4 +- crates/node/builder/src/rpc.rs | 2 +- crates/optimism/payload/Cargo.toml | 1 + crates/optimism/payload/src/builder.rs | 3 +- crates/payload/basic/Cargo.toml | 1 + crates/payload/basic/src/lib.rs | 5 +- crates/payload/builder-primitives/Cargo.toml | 33 ++++++ .../payload/builder-primitives/src/error.rs | 58 +++++++++ .../src/events.rs | 2 +- crates/payload/builder-primitives/src/lib.rs | 19 +++ .../payload/builder-primitives/src/traits.rs | 111 ++++++++++++++++++ crates/payload/builder/Cargo.toml | 1 + crates/payload/builder/src/lib.rs | 3 +- crates/payload/builder/src/service.rs | 6 +- crates/payload/builder/src/test_utils.rs | 3 +- crates/payload/builder/src/traits.rs | 5 +- crates/payload/primitives/Cargo.toml | 11 -- crates/payload/primitives/src/error.rs | 57 --------- crates/payload/primitives/src/lib.rs | 9 +- crates/payload/primitives/src/traits.rs | 109 ----------------- 
crates/rpc/rpc-engine-api/Cargo.toml | 1 + crates/rpc/rpc-engine-api/src/error.rs | 3 +- examples/custom-engine-types/Cargo.toml | 1 - 42 files changed, 301 insertions(+), 222 deletions(-) create mode 100644 crates/payload/builder-primitives/Cargo.toml create mode 100644 crates/payload/builder-primitives/src/error.rs rename crates/payload/{primitives => builder-primitives}/src/events.rs (98%) create mode 100644 crates/payload/builder-primitives/src/lib.rs create mode 100644 crates/payload/builder-primitives/src/traits.rs diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index c34f82d2e31..0d9c9b34a03 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -52,6 +52,7 @@ exclude_crates=( reth-optimism-payload-builder reth-optimism-rpc reth-payload-builder + reth-payload-builder-primitives reth-payload-primitives reth-rpc reth-rpc-api diff --git a/Cargo.lock b/Cargo.lock index 2b6ea4f3431..fefa6fb5c1d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2867,7 +2867,6 @@ dependencies = [ "reth-node-core", "reth-node-ethereum", "reth-payload-builder", - "reth-primitives", "reth-tracing", "reth-trie-db", "serde", @@ -6436,6 +6435,7 @@ dependencies = [ "reth-evm", "reth-metrics", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-primitives-traits", @@ -6480,6 +6480,7 @@ dependencies = [ "reth-network-p2p", "reth-node-types", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-payload-validator", "reth-primitives", @@ -7103,6 +7104,7 @@ dependencies = [ "reth-network-peers", "reth-node-builder", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-provider", "reth-rpc-layer", @@ -7164,6 +7166,7 @@ dependencies = [ "reth-ethereum-engine-primitives", "reth-evm", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-payload-validator", "reth-provider", 
@@ -7185,6 +7188,7 @@ dependencies = [ "futures", "reth-errors", "reth-execution-types", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-trie", @@ -7250,6 +7254,7 @@ dependencies = [ "reth-metrics", "reth-network-p2p", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-payload-validator", "reth-primitives", @@ -7456,6 +7461,7 @@ dependencies = [ "reth-evm-ethereum", "reth-execution-types", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-provider", @@ -7936,6 +7942,7 @@ dependencies = [ "reth-network-api", "reth-node-core", "reth-node-types", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-provider", "reth-tasks", @@ -7983,7 +7990,6 @@ dependencies = [ "reth-node-events", "reth-node-metrics", "reth-payload-builder", - "reth-payload-primitives", "reth-payload-validator", "reth-primitives", "reth-provider", @@ -8360,6 +8366,7 @@ dependencies = [ "reth-optimism-evm", "reth-optimism-forks", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-payload-util", "reth-primitives", @@ -8459,6 +8466,7 @@ dependencies = [ "reth-chain-state", "reth-ethereum-engine-primitives", "reth-metrics", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "revm", @@ -8468,28 +8476,38 @@ dependencies = [ ] [[package]] -name = "reth-payload-primitives" +name = "reth-payload-builder-primitives" version = "1.1.1" dependencies = [ - "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "async-trait", - "op-alloy-rpc-types-engine", "pin-project", - "reth-chain-state", - "reth-chainspec", "reth-errors", - "reth-primitives", + "reth-payload-primitives", "reth-transaction-pool", "revm-primitives", - "serde", "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", ] +[[package]] +name = "reth-payload-primitives" +version = "1.1.1" +dependencies = [ + 
"alloy-eips", + "alloy-primitives", + "alloy-rpc-types-engine", + "op-alloy-rpc-types-engine", + "reth-chain-state", + "reth-chainspec", + "reth-primitives", + "serde", + "thiserror 1.0.69", +] + [[package]] name = "reth-payload-util" version = "1.1.1" @@ -8887,6 +8905,7 @@ dependencies = [ "reth-evm", "reth-metrics", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-provider", diff --git a/Cargo.toml b/Cargo.toml index 398be3e5faf..2f2f9aa884a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,6 +80,7 @@ members = [ "crates/optimism/storage", "crates/payload/basic/", "crates/payload/builder/", + "crates/payload/builder-primitives/", "crates/payload/primitives/", "crates/payload/validator/", "crates/payload/util/", @@ -380,6 +381,7 @@ reth-optimism-primitives = { path = "crates/optimism/primitives" } reth-optimism-rpc = { path = "crates/optimism/rpc" } reth-optimism-storage = { path = "crates/optimism/storage" } reth-payload-builder = { path = "crates/payload/builder" } +reth-payload-builder-primitives = { path = "crates/payload/builder-primitives" } reth-payload-primitives = { path = "crates/payload/primitives" } reth-payload-validator = { path = "crates/payload/validator" } reth-payload-util = { path = "crates/payload/util" } diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index d926fc09c35..245ebe8541e 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -20,6 +20,7 @@ reth-errors.workspace = true reth-provider.workspace = true reth-tasks.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-prune.workspace = true diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index bc6bd3bc4f4..0b93ae0f29a 100644 --- 
a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -23,7 +23,8 @@ use reth_network_p2p::{ }; use reth_node_types::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; +use reth_payload_builder_primitives::PayloadBuilder; +use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{Head, SealedBlock, SealedHeader}; use reth_provider::{ diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index e56449551bb..c4c74ebcdf1 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -17,6 +17,7 @@ reth-tracing.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-rpc-layer.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-provider.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 29aa11895b7..7828f61c2af 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -1,7 +1,8 @@ use futures_util::StreamExt; -use reth::api::{BuiltPayload, PayloadBuilderAttributes}; +use reth::api::BuiltPayload; use reth_payload_builder::{PayloadBuilderHandle, PayloadId}; -use reth_payload_primitives::{Events, PayloadBuilder, PayloadTypes}; +use reth_payload_builder_primitives::{Events, PayloadBuilder}; +use reth_payload_primitives::{PayloadBuilderAttributes, PayloadTypes}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index 2ab448e3bbf..a1b74d13fee 100644 --- 
a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -19,6 +19,7 @@ reth-engine-tree.workspace = true reth-evm.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-provider.workspace = true diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 2085aa81f9b..3a0f5a2f192 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -7,9 +7,8 @@ use futures_util::{stream::Fuse, StreamExt}; use reth_chainspec::EthereumHardforks; use reth_engine_primitives::{BeaconEngineMessage, EngineApiMessageVersion, EngineTypes}; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{ - BuiltPayload, PayloadAttributesBuilder, PayloadBuilder, PayloadKind, PayloadTypes, -}; +use reth_payload_builder_primitives::PayloadBuilder; +use reth_payload_primitives::{BuiltPayload, PayloadAttributesBuilder, PayloadKind, PayloadTypes}; use reth_provider::{BlockReader, ChainSpecProvider}; use reth_rpc_types_compat::engine::payload::block_to_payload; use reth_transaction_pool::TransactionPool; diff --git a/crates/engine/primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml index de4786553d3..42cbd932d45 100644 --- a/crates/engine/primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -14,6 +14,7 @@ workspace = true # reth reth-execution-types.workspace = true reth-payload-primitives.workspace = true +reth-payload-builder-primitives.workspace = true reth-primitives.workspace = true reth-trie.workspace = true reth-errors.workspace = true diff --git a/crates/engine/primitives/src/message.rs b/crates/engine/primitives/src/message.rs index 11fd383a1e2..d8a4c1322ad 100644 --- a/crates/engine/primitives/src/message.rs +++ b/crates/engine/primitives/src/message.rs @@ -5,7 +5,7 @@ use 
alloy_rpc_types_engine::{ }; use futures::{future::Either, FutureExt}; use reth_errors::RethResult; -use reth_payload_primitives::PayloadBuilderError; +use reth_payload_builder_primitives::PayloadBuilderError; use std::{ fmt::Display, future::Future, diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index fb259d08560..278457145e7 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -23,6 +23,7 @@ reth-errors.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-primitives.workspace = true diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 67e692b5c6c..39843377684 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -33,7 +33,8 @@ use reth_engine_primitives::{ use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; +use reth_payload_builder_primitives::PayloadBuilder; +use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{Block, GotExpected, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index a29cc473362..4e0880d1d15 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -18,6 +18,7 @@ reth-revm.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true 
reth-execution-types.workspace = true reth-basic-payload-builder.workspace = true diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 7d795c510e2..2b55ea87dd0 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -23,7 +23,8 @@ use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes} use reth_evm_ethereum::{eip6110::parse_deposits_from_receipts, EthEvmConfig}; use reth_execution_types::ExecutionOutcome; use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; -use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ proofs::{self}, Block, BlockBody, EthereumHardforks, Receipt, diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index a4cc0eb7eb6..ab4595d3362 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -18,6 +18,7 @@ reth-evm.workspace = true reth-provider.workspace = true reth-engine-primitives.workspace = true reth-transaction-pool.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-network-api.workspace = true diff --git a/crates/node/api/src/lib.rs b/crates/node/api/src/lib.rs index 099cf82b5fe..105cac47d94 100644 --- a/crates/node/api/src/lib.rs +++ b/crates/node/api/src/lib.rs @@ -16,6 +16,10 @@ pub use reth_engine_primitives::*; pub use reth_payload_primitives as payload; pub use reth_payload_primitives::*; +/// Traits and helper types used to abstract over payload builder types. +pub use reth_payload_builder_primitives as payload_builder; +pub use reth_payload_builder_primitives::*; + /// Traits and helper types used to abstract over EVM methods and types. 
pub use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 90b9e2999bf..5d25d8d592c 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -9,7 +9,7 @@ use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; -use reth_payload_primitives::PayloadBuilder; +use reth_payload_builder_primitives::PayloadBuilder; use reth_provider::FullProvider; use reth_tasks::TaskExecutor; use reth_transaction_pool::TransactionPool; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index b0b62d1b2ed..781112d93c8 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -41,7 +41,6 @@ reth-node-core.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true reth-payload-builder.workspace = true -reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-primitives.workspace = true reth-provider.workspace = true diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 65433176ba9..86ab0b9a3d7 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -19,7 +19,8 @@ use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; use reth_network_api::{BlockDownloaderProvider, NetworkEventListenerProvider}; use reth_node_api::{ - BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, + BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadBuilder, + PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -27,7 +28,6 @@ use reth_node_core::{ primitives::Head, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use 
reth_payload_primitives::PayloadBuilder; use reth_primitives::EthereumHardforks; use reth_provider::providers::{BlockchainProvider2, ProviderNodeTypes}; use reth_tasks::TaskExecutor; diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 9680c221d7c..adee942748c 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -11,13 +11,13 @@ use alloy_rpc_types::engine::ClientVersionV1; use futures::TryFutureExt; use reth_node_api::{ AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, + PayloadBuilder, }; use reth_node_core::{ node_config::NodeConfig, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::PayloadStore; -use reth_payload_primitives::PayloadBuilder; use reth_provider::providers::ProviderNodeTypes; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 19a47be4951..7f47da7e236 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -22,6 +22,7 @@ reth-rpc-types-compat.workspace = true reth-evm.workspace = true reth-execution-types.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-util.workspace = true reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 7047c587b0c..3644d8f71a5 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -15,7 +15,8 @@ use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; use reth_optimism_forks::OpHardforks; -use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; +use 
reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::PayloadBuilderAttributes; use reth_payload_util::PayloadTransactions; use reth_primitives::{proofs, Block, BlockBody, Receipt, SealedHeader, TransactionSigned, TxType}; use reth_provider::{ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider}; diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 5e9e524f79b..0315f73cae4 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -19,6 +19,7 @@ reth-primitives-traits.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-evm.workspace = true diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index a905f854448..e3193ec6deb 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -17,9 +17,8 @@ use futures_util::FutureExt; use reth_chainspec::EthereumHardforks; use reth_evm::state_change::post_block_withdrawals_balance_increments; use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator}; -use reth_payload_primitives::{ - BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, -}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind}; use reth_primitives::{proofs, SealedHeader}; use reth_primitives_traits::constants::RETH_CLIENT_VERSION; use reth_provider::{BlockReaderIdExt, CanonStateNotification, StateProviderFactory}; diff --git a/crates/payload/builder-primitives/Cargo.toml b/crates/payload/builder-primitives/Cargo.toml new file mode 100644 index 00000000000..c3665dbc58e --- /dev/null +++ b/crates/payload/builder-primitives/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name 
= "reth-payload-builder-primitives" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-errors.workspace = true +reth-payload-primitives.workspace = true +reth-transaction-pool.workspace = true +revm-primitives.workspace = true + +# alloy +alloy-primitives.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["serde"] } + +# async +async-trait.workspace = true +pin-project.workspace = true +tokio = { workspace = true, features = ["sync"] } +tokio-stream.workspace = true + +# misc +thiserror.workspace = true +tracing.workspace = true diff --git a/crates/payload/builder-primitives/src/error.rs b/crates/payload/builder-primitives/src/error.rs new file mode 100644 index 00000000000..0d988c829e4 --- /dev/null +++ b/crates/payload/builder-primitives/src/error.rs @@ -0,0 +1,58 @@ +//! Error types emitted by types or implementations of this crate. + +use alloy_primitives::B256; +use reth_errors::{ProviderError, RethError}; +use reth_transaction_pool::BlobStoreError; +use revm_primitives::EVMError; +use tokio::sync::oneshot; + +/// Possible error variants during payload building. +#[derive(Debug, thiserror::Error)] +pub enum PayloadBuilderError { + /// Thrown when the parent header cannot be found + #[error("missing parent header: {0}")] + MissingParentHeader(B256), + /// Thrown when the parent block is missing. + #[error("missing parent block {0}")] + MissingParentBlock(B256), + /// An oneshot channels has been closed. + #[error("sender has been dropped")] + ChannelClosed, + /// If there's no payload to resolve. + #[error("missing payload")] + MissingPayload, + /// Error occurring in the blob store. 
+ #[error(transparent)] + BlobStore(#[from] BlobStoreError), + /// Other internal error + #[error(transparent)] + Internal(#[from] RethError), + /// Unrecoverable error during evm execution. + #[error("evm execution error: {0}")] + EvmExecutionError(EVMError), + /// Any other payload building errors. + #[error(transparent)] + Other(Box), +} + +impl PayloadBuilderError { + /// Create a new error from a boxed error. + pub fn other(error: E) -> Self + where + E: core::error::Error + Send + Sync + 'static, + { + Self::Other(Box::new(error)) + } +} + +impl From for PayloadBuilderError { + fn from(error: ProviderError) -> Self { + Self::Internal(RethError::Provider(error)) + } +} + +impl From for PayloadBuilderError { + fn from(_: oneshot::error::RecvError) -> Self { + Self::ChannelClosed + } +} diff --git a/crates/payload/primitives/src/events.rs b/crates/payload/builder-primitives/src/events.rs similarity index 98% rename from crates/payload/primitives/src/events.rs rename to crates/payload/builder-primitives/src/events.rs index 3fb3813adb1..d51f13f7c4c 100644 --- a/crates/payload/primitives/src/events.rs +++ b/crates/payload/builder-primitives/src/events.rs @@ -1,4 +1,4 @@ -use crate::PayloadTypes; +use reth_payload_primitives::PayloadTypes; use std::{ pin::Pin, task::{ready, Context, Poll}, diff --git a/crates/payload/builder-primitives/src/lib.rs b/crates/payload/builder-primitives/src/lib.rs new file mode 100644 index 00000000000..003a385c6c0 --- /dev/null +++ b/crates/payload/builder-primitives/src/lib.rs @@ -0,0 +1,19 @@ +//! 
This crate defines abstractions to create and update payloads (blocks) + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod error; +pub use error::PayloadBuilderError; + +mod events; +pub use crate::events::{Events, PayloadEvents}; + +/// Contains the payload builder trait to abstract over payload attributes. +mod traits; +pub use traits::{PayloadBuilder, PayloadStoreExt}; diff --git a/crates/payload/builder-primitives/src/traits.rs b/crates/payload/builder-primitives/src/traits.rs new file mode 100644 index 00000000000..b5e8910b6c2 --- /dev/null +++ b/crates/payload/builder-primitives/src/traits.rs @@ -0,0 +1,111 @@ +use crate::{PayloadBuilderError, PayloadEvents}; +use alloy_rpc_types_engine::PayloadId; +use reth_payload_primitives::{PayloadKind, PayloadTypes}; +use std::fmt::Debug; +use tokio::sync::oneshot; + +/// A helper trait for internal usage to retrieve and resolve payloads. +#[async_trait::async_trait] +pub trait PayloadStoreExt: Debug + Send + Sync + Unpin { + /// Resolves the payload job and returns the best payload that has been built so far. + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option>; + + /// Resolves the payload job as fast and possible and returns the best payload that has been + /// built so far. + async fn resolve(&self, id: PayloadId) -> Option> { + self.resolve_kind(id, PayloadKind::Earliest).await + } + + /// Returns the best payload for the given identifier. + async fn best_payload( + &self, + id: PayloadId, + ) -> Option>; + + /// Returns the payload attributes associated with the given identifier. 
+ async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option>; +} + +#[async_trait::async_trait] +impl PayloadStoreExt for P +where + P: PayloadBuilder, +{ + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option> { + Some(PayloadBuilder::resolve_kind(self, id, kind).await?.map_err(Into::into)) + } + + async fn best_payload( + &self, + id: PayloadId, + ) -> Option> { + Some(PayloadBuilder::best_payload(self, id).await?.map_err(Into::into)) + } + + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option> { + Some(PayloadBuilder::payload_attributes(self, id).await?.map_err(Into::into)) + } +} + +/// A type that can request, subscribe to and resolve payloads. +#[async_trait::async_trait] +pub trait PayloadBuilder: Debug + Send + Sync + Unpin { + /// The Payload type for the builder. + type PayloadType: PayloadTypes; + /// The error type returned by the builder. + type Error: Into; + + /// Sends a message to the service to start building a new payload for the given payload. + /// + /// Returns a receiver that will receive the payload id. + fn send_new_payload( + &self, + attr: ::PayloadBuilderAttributes, + ) -> oneshot::Receiver>; + + /// Returns the best payload for the given identifier. + async fn best_payload( + &self, + id: PayloadId, + ) -> Option::BuiltPayload, Self::Error>>; + + /// Resolves the payload job and returns the best payload that has been built so far. + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option::BuiltPayload, Self::Error>>; + + /// Resolves the payload job as fast and possible and returns the best payload that has been + /// built so far. + async fn resolve( + &self, + id: PayloadId, + ) -> Option::BuiltPayload, Self::Error>> { + self.resolve_kind(id, PayloadKind::Earliest).await + } + + /// Sends a message to the service to subscribe to payload events. + /// Returns a receiver that will receive them. 
+ async fn subscribe(&self) -> Result, Self::Error>; + + /// Returns the payload attributes associated with the given identifier. + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option::PayloadBuilderAttributes, Self::Error>>; +} diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 8b2fef7b878..78814da5066 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-primitives = { workspace = true, optional = true } reth-chain-state.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-ethereum-engine-primitives.workspace = true diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index da44072c99d..0887a5ca74a 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -113,7 +113,8 @@ pub mod noop; pub mod test_utils; pub use alloy_rpc_types::engine::PayloadId; -pub use reth_payload_primitives::{PayloadBuilderError, PayloadKind}; +pub use reth_payload_builder_primitives::PayloadBuilderError; +pub use reth_payload_primitives::PayloadKind; pub use service::{ PayloadBuilderHandle, PayloadBuilderService, PayloadServiceCommand, PayloadStore, }; diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 267a1e355b0..af11ba75ce6 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -10,10 +10,10 @@ use crate::{ use alloy_rpc_types::engine::PayloadId; use futures_util::{future::FutureExt, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; -use reth_payload_primitives::{ - BuiltPayload, Events, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, - PayloadEvents, PayloadKind, PayloadStoreExt, PayloadTypes, +use reth_payload_builder_primitives::{ + Events, PayloadBuilder, PayloadBuilderError, PayloadEvents, PayloadStoreExt, }; 
+use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind, PayloadTypes}; use std::{ fmt, future::Future, diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 780df5c8463..5025a12ed71 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -7,7 +7,8 @@ use crate::{ use alloy_primitives::U256; use reth_chain_state::{CanonStateNotification, ExecutedBlock}; -use reth_payload_primitives::{PayloadBuilderError, PayloadKind, PayloadTypes}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::{PayloadKind, PayloadTypes}; use reth_primitives::Block; use std::{ future::Future, diff --git a/crates/payload/builder/src/traits.rs b/crates/payload/builder/src/traits.rs index ba8486b6907..d9d54ccd0e4 100644 --- a/crates/payload/builder/src/traits.rs +++ b/crates/payload/builder/src/traits.rs @@ -1,9 +1,8 @@ //! Trait abstractions used by the payload crate. use reth_chain_state::CanonStateNotification; -use reth_payload_primitives::{ - BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, -}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind}; use std::future::Future; /// A type that can build a payload. 
diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index b1a115f12c8..332964de96b 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -14,9 +14,7 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-errors.workspace = true reth-primitives.workspace = true -reth-transaction-pool.workspace = true reth-chain-state.workspace = true # alloy @@ -25,18 +23,9 @@ alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["serde"] } op-alloy-rpc-types-engine = { workspace = true, optional = true } -revm-primitives.workspace = true - -# async -async-trait.workspace = true -tokio = { workspace = true, features = ["sync"] } -tokio-stream.workspace = true -pin-project.workspace = true - # misc serde.workspace = true thiserror.workspace = true -tracing.workspace = true [features] op = ["dep:op-alloy-rpc-types-engine"] \ No newline at end of file diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index 82891919feb..67b6dbe4b93 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -1,62 +1,5 @@ //! Error types emitted by types or implementations of this crate. -use alloy_primitives::B256; -use reth_errors::{ProviderError, RethError}; -use reth_transaction_pool::BlobStoreError; -use revm_primitives::EVMError; -use tokio::sync::oneshot; - -/// Possible error variants during payload building. -#[derive(Debug, thiserror::Error)] -pub enum PayloadBuilderError { - /// Thrown when the parent header cannot be found - #[error("missing parent header: {0}")] - MissingParentHeader(B256), - /// Thrown when the parent block is missing. - #[error("missing parent block {0}")] - MissingParentBlock(B256), - /// An oneshot channels has been closed. - #[error("sender has been dropped")] - ChannelClosed, - /// If there's no payload to resolve. 
- #[error("missing payload")] - MissingPayload, - /// Error occurring in the blob store. - #[error(transparent)] - BlobStore(#[from] BlobStoreError), - /// Other internal error - #[error(transparent)] - Internal(#[from] RethError), - /// Unrecoverable error during evm execution. - #[error("evm execution error: {0}")] - EvmExecutionError(EVMError), - /// Any other payload building errors. - #[error(transparent)] - Other(Box), -} - -impl PayloadBuilderError { - /// Create a new error from a boxed error. - pub fn other(error: E) -> Self - where - E: core::error::Error + Send + Sync + 'static, - { - Self::Other(Box::new(error)) - } -} - -impl From for PayloadBuilderError { - fn from(error: ProviderError) -> Self { - Self::Internal(RethError::Provider(error)) - } -} - -impl From for PayloadBuilderError { - fn from(_: oneshot::error::RecvError) -> Self { - Self::ChannelClosed - } -} - /// Thrown when the payload or attributes are known to be invalid before processing. /// /// This is used mainly for diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 3604ff5d8d8..a2bdb58bc51 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -9,18 +9,13 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod error; - -pub use error::{EngineObjectValidationError, PayloadBuilderError, VersionSpecificValidationError}; - -mod events; -pub use crate::events::{Events, PayloadEvents}; +pub use error::{EngineObjectValidationError, VersionSpecificValidationError}; /// Contains traits to abstract over payload attributes types and default implementations of the /// [`PayloadAttributes`] trait for ethereum mainnet and optimism types. 
mod traits; pub use traits::{ - BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilder, - PayloadBuilderAttributes, PayloadStoreExt, + BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilderAttributes, }; mod payload; diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 197a7fe3af9..8d5c429e6c6 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,4 +1,3 @@ -use crate::{PayloadBuilderError, PayloadEvents, PayloadKind, PayloadTypes}; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, eip7685::Requests, @@ -7,114 +6,6 @@ use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use reth_chain_state::ExecutedBlock; use reth_primitives::SealedBlock; -use std::fmt::Debug; -use tokio::sync::oneshot; - -/// A type that can request, subscribe to and resolve payloads. -#[async_trait::async_trait] -pub trait PayloadBuilder: Debug + Send + Sync + Unpin { - /// The Payload type for the builder. - type PayloadType: PayloadTypes; - /// The error type returned by the builder. - type Error: Into; - - /// Sends a message to the service to start building a new payload for the given payload. - /// - /// Returns a receiver that will receive the payload id. - fn send_new_payload( - &self, - attr: ::PayloadBuilderAttributes, - ) -> oneshot::Receiver>; - - /// Returns the best payload for the given identifier. - async fn best_payload( - &self, - id: PayloadId, - ) -> Option::BuiltPayload, Self::Error>>; - - /// Resolves the payload job and returns the best payload that has been built so far. - async fn resolve_kind( - &self, - id: PayloadId, - kind: PayloadKind, - ) -> Option::BuiltPayload, Self::Error>>; - - /// Resolves the payload job as fast and possible and returns the best payload that has been - /// built so far. 
- async fn resolve( - &self, - id: PayloadId, - ) -> Option::BuiltPayload, Self::Error>> { - self.resolve_kind(id, PayloadKind::Earliest).await - } - - /// Sends a message to the service to subscribe to payload events. - /// Returns a receiver that will receive them. - async fn subscribe(&self) -> Result, Self::Error>; - - /// Returns the payload attributes associated with the given identifier. - async fn payload_attributes( - &self, - id: PayloadId, - ) -> Option::PayloadBuilderAttributes, Self::Error>>; -} - -/// A helper trait for internal usage to retrieve and resolve payloads. -#[async_trait::async_trait] -pub trait PayloadStoreExt: Debug + Send + Sync + Unpin { - /// Resolves the payload job and returns the best payload that has been built so far. - async fn resolve_kind( - &self, - id: PayloadId, - kind: PayloadKind, - ) -> Option>; - - /// Resolves the payload job as fast and possible and returns the best payload that has been - /// built so far. - async fn resolve(&self, id: PayloadId) -> Option> { - self.resolve_kind(id, PayloadKind::Earliest).await - } - - /// Returns the best payload for the given identifier. - async fn best_payload( - &self, - id: PayloadId, - ) -> Option>; - - /// Returns the payload attributes associated with the given identifier. 
- async fn payload_attributes( - &self, - id: PayloadId, - ) -> Option>; -} - -#[async_trait::async_trait] -impl PayloadStoreExt for P -where - P: PayloadBuilder, -{ - async fn resolve_kind( - &self, - id: PayloadId, - kind: PayloadKind, - ) -> Option> { - Some(PayloadBuilder::resolve_kind(self, id, kind).await?.map_err(Into::into)) - } - - async fn best_payload( - &self, - id: PayloadId, - ) -> Option> { - Some(PayloadBuilder::best_payload(self, id).await?.map_err(Into::into)) - } - - async fn payload_attributes( - &self, - id: PayloadId, - ) -> Option> { - Some(PayloadBuilder::payload_attributes(self, id).await?.map_err(Into::into)) - } -} /// Represents a built payload type that contains a built [`SealedBlock`] and can be converted into /// engine API execution payloads. diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 62d1eea3225..4854ac44dc5 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -19,6 +19,7 @@ reth-rpc-api.workspace = true reth-storage-api.workspace = true reth-beacon-consensus.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-rpc-types-compat.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 82665ca35fd..4210d415bfe 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -4,7 +4,8 @@ use jsonrpsee_types::error::{ }; use reth_beacon_consensus::BeaconForkChoiceUpdateError; use reth_engine_primitives::BeaconOnNewPayloadError; -use reth_payload_primitives::{EngineObjectValidationError, PayloadBuilderError}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::EngineObjectValidationError; use thiserror::Error; /// The Engine API result type diff --git a/examples/custom-engine-types/Cargo.toml 
b/examples/custom-engine-types/Cargo.toml index 9afd16bea16..d6642a8edfe 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -10,7 +10,6 @@ reth.workspace = true reth-chainspec.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-primitives.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true From 2f3fde8fb5f55b32efacb1824df1c88dd3021444 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sun, 17 Nov 2024 11:26:15 +0100 Subject: [PATCH 515/970] feat: fix WASM build for crates dependent on reth-payload-primitives (#12610) --- .github/assets/check_wasm.sh | 7 --- Cargo.lock | 8 +-- crates/chain-state/Cargo.toml | 2 +- crates/ethereum/payload/src/lib.rs | 8 ++- crates/payload/builder-primitives/Cargo.toml | 5 -- .../payload/builder-primitives/src/error.rs | 58 ------------------- crates/payload/builder-primitives/src/lib.rs | 5 +- crates/payload/primitives/Cargo.toml | 4 ++ crates/payload/primitives/src/error.rs | 53 +++++++++++++++++ crates/payload/primitives/src/lib.rs | 2 +- 10 files changed, 69 insertions(+), 83 deletions(-) delete mode 100644 crates/payload/builder-primitives/src/error.rs diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 0d9c9b34a03..0e704857edb 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -15,7 +15,6 @@ exclude_crates=( reth-beacon-consensus reth-bench reth-blockchain-tree - reth-chain-state reth-cli reth-cli-commands reth-cli-runner @@ -26,13 +25,11 @@ exclude_crates=( reth-dns-discovery reth-downloaders reth-e2e-test-utils - reth-engine-primitives reth-engine-service reth-engine-tree reth-engine-util reth-eth-wire reth-ethereum-cli - reth-ethereum-engine-primitives reth-ethereum-payload-builder reth-etl reth-exex @@ -41,7 +38,6 @@ exclude_crates=( reth-net-nat reth-network reth-node-api - reth-node-types 
reth-node-builder reth-node-core reth-node-ethereum @@ -51,9 +47,6 @@ exclude_crates=( reth-optimism-node reth-optimism-payload-builder reth-optimism-rpc - reth-payload-builder - reth-payload-builder-primitives - reth-payload-primitives reth-rpc reth-rpc-api reth-rpc-api-testing-util diff --git a/Cargo.lock b/Cargo.lock index fefa6fb5c1d..7ff1c650bda 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8479,15 +8479,10 @@ dependencies = [ name = "reth-payload-builder-primitives" version = "1.1.1" dependencies = [ - "alloy-primitives", "alloy-rpc-types-engine", "async-trait", "pin-project", - "reth-errors", "reth-payload-primitives", - "reth-transaction-pool", - "revm-primitives", - "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -8503,9 +8498,12 @@ dependencies = [ "op-alloy-rpc-types-engine", "reth-chain-state", "reth-chainspec", + "reth-errors", "reth-primitives", + "revm-primitives", "serde", "thiserror 1.0.69", + "tokio", ] [[package]] diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 0a2f53715ff..ff62b76e5df 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -27,7 +27,7 @@ alloy-primitives.workspace = true alloy-consensus.workspace = true # async -tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } +tokio = { workspace = true, default-features = false, features = ["sync", "macros"] } tokio-stream = { workspace = true, features = ["sync"] } # tracing diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 2b55ea87dd0..4ec1e212c8d 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -397,9 +397,11 @@ where // only determine cancun fields when active if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp) { // grab the blob sidecars from the executed txs - blob_sidecars = pool.get_all_blobs_exact( - executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), - )?; + 
blob_sidecars = pool + .get_all_blobs_exact( + executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), + ) + .map_err(PayloadBuilderError::other)?; excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_header.timestamp) { let parent_excess_blob_gas = parent_header.excess_blob_gas.unwrap_or_default(); diff --git a/crates/payload/builder-primitives/Cargo.toml b/crates/payload/builder-primitives/Cargo.toml index c3665dbc58e..6d89ea89d03 100644 --- a/crates/payload/builder-primitives/Cargo.toml +++ b/crates/payload/builder-primitives/Cargo.toml @@ -13,13 +13,9 @@ workspace = true [dependencies] # reth -reth-errors.workspace = true reth-payload-primitives.workspace = true -reth-transaction-pool.workspace = true -revm-primitives.workspace = true # alloy -alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["serde"] } # async @@ -29,5 +25,4 @@ tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true # misc -thiserror.workspace = true tracing.workspace = true diff --git a/crates/payload/builder-primitives/src/error.rs b/crates/payload/builder-primitives/src/error.rs deleted file mode 100644 index 0d988c829e4..00000000000 --- a/crates/payload/builder-primitives/src/error.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Error types emitted by types or implementations of this crate. - -use alloy_primitives::B256; -use reth_errors::{ProviderError, RethError}; -use reth_transaction_pool::BlobStoreError; -use revm_primitives::EVMError; -use tokio::sync::oneshot; - -/// Possible error variants during payload building. -#[derive(Debug, thiserror::Error)] -pub enum PayloadBuilderError { - /// Thrown when the parent header cannot be found - #[error("missing parent header: {0}")] - MissingParentHeader(B256), - /// Thrown when the parent block is missing. - #[error("missing parent block {0}")] - MissingParentBlock(B256), - /// An oneshot channels has been closed. 
- #[error("sender has been dropped")] - ChannelClosed, - /// If there's no payload to resolve. - #[error("missing payload")] - MissingPayload, - /// Error occurring in the blob store. - #[error(transparent)] - BlobStore(#[from] BlobStoreError), - /// Other internal error - #[error(transparent)] - Internal(#[from] RethError), - /// Unrecoverable error during evm execution. - #[error("evm execution error: {0}")] - EvmExecutionError(EVMError), - /// Any other payload building errors. - #[error(transparent)] - Other(Box), -} - -impl PayloadBuilderError { - /// Create a new error from a boxed error. - pub fn other(error: E) -> Self - where - E: core::error::Error + Send + Sync + 'static, - { - Self::Other(Box::new(error)) - } -} - -impl From for PayloadBuilderError { - fn from(error: ProviderError) -> Self { - Self::Internal(RethError::Provider(error)) - } -} - -impl From for PayloadBuilderError { - fn from(_: oneshot::error::RecvError) -> Self { - Self::ChannelClosed - } -} diff --git a/crates/payload/builder-primitives/src/lib.rs b/crates/payload/builder-primitives/src/lib.rs index 003a385c6c0..af7ad736d44 100644 --- a/crates/payload/builder-primitives/src/lib.rs +++ b/crates/payload/builder-primitives/src/lib.rs @@ -8,12 +8,11 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod error; -pub use error::PayloadBuilderError; - mod events; pub use crate::events::{Events, PayloadEvents}; /// Contains the payload builder trait to abstract over payload attributes. 
mod traits; pub use traits::{PayloadBuilder, PayloadStoreExt}; + +pub use reth_payload_primitives::PayloadBuilderError; diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index 332964de96b..d4070b4688e 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -14,9 +14,12 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-errors.workspace = true reth-primitives.workspace = true reth-chain-state.workspace = true +revm-primitives.workspace = true + # alloy alloy-eips.workspace = true alloy-primitives.workspace = true @@ -26,6 +29,7 @@ op-alloy-rpc-types-engine = { workspace = true, optional = true } # misc serde.workspace = true thiserror.workspace = true +tokio = { workspace = true, default-features = false, features = ["sync"] } [features] op = ["dep:op-alloy-rpc-types-engine"] \ No newline at end of file diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index 67b6dbe4b93..d2e57da5791 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -1,5 +1,58 @@ //! Error types emitted by types or implementations of this crate. +use alloy_primitives::B256; +use reth_errors::{ProviderError, RethError}; +use revm_primitives::EVMError; +use tokio::sync::oneshot; + +/// Possible error variants during payload building. +#[derive(Debug, thiserror::Error)] +pub enum PayloadBuilderError { + /// Thrown when the parent header cannot be found + #[error("missing parent header: {0}")] + MissingParentHeader(B256), + /// Thrown when the parent block is missing. + #[error("missing parent block {0}")] + MissingParentBlock(B256), + /// An oneshot channels has been closed. + #[error("sender has been dropped")] + ChannelClosed, + /// If there's no payload to resolve. 
+ #[error("missing payload")] + MissingPayload, + /// Other internal error + #[error(transparent)] + Internal(#[from] RethError), + /// Unrecoverable error during evm execution. + #[error("evm execution error: {0}")] + EvmExecutionError(EVMError), + /// Any other payload building errors. + #[error(transparent)] + Other(Box), +} + +impl PayloadBuilderError { + /// Create a new error from a boxed error. + pub fn other(error: E) -> Self + where + E: core::error::Error + Send + Sync + 'static, + { + Self::Other(Box::new(error)) + } +} + +impl From for PayloadBuilderError { + fn from(error: ProviderError) -> Self { + Self::Internal(RethError::Provider(error)) + } +} + +impl From for PayloadBuilderError { + fn from(_: oneshot::error::RecvError) -> Self { + Self::ChannelClosed + } +} + /// Thrown when the payload or attributes are known to be invalid before processing. /// /// This is used mainly for diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index a2bdb58bc51..0ff4810b864 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -9,7 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod error; -pub use error::{EngineObjectValidationError, VersionSpecificValidationError}; +pub use error::{EngineObjectValidationError, PayloadBuilderError, VersionSpecificValidationError}; /// Contains traits to abstract over payload attributes types and default implementations of the /// [`PayloadAttributes`] trait for ethereum mainnet and optimism types. 
From 7ae8ce1d0096a32211cda406c1f1176cfc217b43 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 17 Nov 2024 17:48:27 +0100 Subject: [PATCH 516/970] chore(sdk): Add blanket impls for refs to prim traits (#12613) --- Cargo.lock | 1 + .../execution-types/src/execution_outcome.rs | 58 +++++++++---------- crates/primitives-traits/Cargo.toml | 1 + crates/primitives-traits/src/block/body.rs | 2 +- crates/primitives-traits/src/block/header.rs | 2 - crates/primitives-traits/src/block/mod.rs | 3 +- crates/primitives-traits/src/receipt.rs | 6 +- crates/primitives-traits/src/size.rs | 1 + .../primitives-traits/src/transaction/mod.rs | 1 + .../src/transaction/signed.rs | 29 +++++----- crates/primitives/src/block.rs | 4 +- crates/primitives/src/receipt.rs | 3 + crates/primitives/src/transaction/mod.rs | 6 -- 13 files changed, 57 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ff1c650bda..23503d90756 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8582,6 +8582,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", + "auto_impl", "bincode", "byteorder", "bytes", diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index c1d9c701650..412269ace9c 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use alloy_eips::eip7685::Requests; use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; use reth_primitives::{logs_bloom, Account, Bytecode, Receipts, StorageEntry}; -use reth_primitives_traits::Receipt; +use reth_primitives_traits::{receipt::ReceiptExt, Receipt}; use reth_trie::HashedPostState; use revm::{ db::{states::BundleState, BundleAccount}, @@ -182,36 +182,6 @@ impl ExecutionOutcome { Some(index as usize) } - /// Returns an iterator over all block logs. 
- pub fn logs(&self, block_number: BlockNumber) -> Option> - where - T: Receipt, - { - let index = self.block_number_to_index(block_number)?; - Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs().iter())).flatten()) - } - - /// Return blocks logs bloom - pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option - where - T: Receipt, - { - Some(logs_bloom(self.logs(block_number)?)) - } - - /// Returns the receipt root for all recorded receipts. - /// Note: this function calculated Bloom filters for every receipt and created merkle trees - /// of receipt. This is a expensive operation. - pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option - where - T: Receipt, - { - #[cfg(feature = "optimism")] - panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); - #[cfg(not(feature = "optimism"))] - self.receipts.root_slow(self.block_number_to_index(_block_number)?, T::receipts_root) - } - /// Returns the receipt root for all recorded receipts. /// Note: this function calculated Bloom filters for every receipt and created merkle trees /// of receipt. This is a expensive operation. @@ -364,6 +334,32 @@ impl ExecutionOutcome { } } +impl ExecutionOutcome { + /// Returns an iterator over all block logs. + pub fn logs(&self, block_number: BlockNumber) -> Option> { + let index = self.block_number_to_index(block_number)?; + Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs().iter())).flatten()) + } + + /// Return blocks logs bloom + pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option { + Some(logs_bloom(self.logs(block_number)?)) + } + + /// Returns the receipt root for all recorded receipts. + /// Note: this function calculated Bloom filters for every receipt and created merkle trees + /// of receipt. This is a expensive operation. 
+ pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option + where + T: ReceiptExt, + { + #[cfg(feature = "optimism")] + panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); + #[cfg(not(feature = "optimism"))] + self.receipts.root_slow(self.block_number_to_index(_block_number)?, T::receipts_root) + } +} + impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { fn from(value: (BlockExecutionOutput, BlockNumber)) -> Self { Self { diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 30f1c43c86a..651583f8e4d 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -27,6 +27,7 @@ byteorder = "1" derive_more.workspace = true roaring = "0.10.2" serde_with = { workspace = true, optional = true } +auto_impl.workspace = true # required by reth-codecs bytes.workspace = true diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index bb52b89724b..e9aadf40957 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -5,6 +5,7 @@ use alloc::fmt; use alloy_consensus::Transaction; /// Abstraction for block's body. +#[auto_impl::auto_impl(&, Arc)] pub trait BlockBody: Send + Sync @@ -19,7 +20,6 @@ pub trait BlockBody: + alloy_rlp::Encodable + alloy_rlp::Decodable + InMemorySize - + 'static { /// Ordered list of signed transactions as committed in block. 
// todo: requires trait for signed transaction diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 0c1fc3e57f2..779df442538 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -26,7 +26,6 @@ pub trait BlockHeader: + alloy_consensus::BlockHeader + Sealable + InMemorySize - + 'static { } @@ -46,6 +45,5 @@ impl BlockHeader for T where + alloy_consensus::BlockHeader + Sealable + InMemorySize - + 'static { } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 33008c4381d..6bef9ea167f 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -18,6 +18,7 @@ impl FullBlock for T where T: Block + Compact {} // todo: make sealable super-trait, depends on // todo: make with senders extension trait, so block can be impl by block type already containing // senders +#[auto_impl::auto_impl(&, Arc)] pub trait Block: Send + Sync @@ -32,7 +33,7 @@ pub trait Block: + InMemorySize { /// Header part of the block. - type Header: BlockHeader; + type Header: BlockHeader + 'static; /// The block's body contains the transactions in the block. type Body: Send + Sync + Unpin + 'static; diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index f3c9ef06356..31bded015d4 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -10,9 +10,10 @@ use serde::{Deserialize, Serialize}; /// Helper trait that unifies all behaviour required by receipt to support full node operations. pub trait FullReceipt: Receipt + Compact {} -impl FullReceipt for T where T: Receipt + Compact {} +impl FullReceipt for T where T: ReceiptExt + Compact {} /// Abstraction of a receipt. +#[auto_impl::auto_impl(&, Arc)] pub trait Receipt: Send + Sync @@ -28,7 +29,10 @@ pub trait Receipt: { /// Returns transaction type. 
fn tx_type(&self) -> u8; +} +/// Extension if [`Receipt`] used in block execution. +pub trait ReceiptExt: Receipt { /// Calculates the receipts root of the given receipts. fn receipts_root(receipts: &[&Self]) -> B256; } diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs index 0c250688e05..7d83a8af8c4 100644 --- a/crates/primitives-traits/src/size.rs +++ b/crates/primitives-traits/src/size.rs @@ -1,4 +1,5 @@ /// Trait for calculating a heuristic for the in-memory size of a struct. +#[auto_impl::auto_impl(&, Arc, Box)] pub trait InMemorySize { /// Returns a heuristic for the in-memory size of a struct. fn size(&self) -> usize; diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 4d7ab78685f..33ee36090ac 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -53,6 +53,7 @@ impl Transaction for T where } /// Extension trait of [`alloy_consensus::Transaction`]. +#[auto_impl::auto_impl(&, Arc)] pub trait TransactionExt: alloy_consensus::Transaction { /// Transaction envelope type ID. type Type: TxType; diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 455a9886eb8..958d5cd6c77 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -8,7 +8,7 @@ use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; use reth_codecs::Compact; use revm_primitives::TxEnv; -use crate::{transaction::TransactionExt, FullTransaction, MaybeArbitrary, Transaction}; +use crate::{FullTransaction, MaybeArbitrary, Transaction}; /// Helper trait that unifies all behaviour required by block to support full node operations. 
pub trait FullSignedTx: SignedTransaction + Compact {} @@ -16,6 +16,7 @@ pub trait FullSignedTx: SignedTransaction + Compac impl FullSignedTx for T where T: SignedTransaction + Compact {} /// A signed transaction. +#[auto_impl::auto_impl(&, Arc)] pub trait SignedTransaction: Send + Sync @@ -32,7 +33,7 @@ pub trait SignedTransaction: + alloy_rlp::Decodable + Encodable2718 + Decodable2718 - + TransactionExt + + alloy_consensus::Transaction + MaybeArbitrary { /// Transaction type that is signed. @@ -65,14 +66,6 @@ pub trait SignedTransaction: /// `reth_primitives::transaction::recover_signer_unchecked`. fn recover_signer_unchecked(&self) -> Option
; - /// Create a new signed transaction from a transaction and its signature. - /// - /// This will also calculate the transaction hash using its encoding. - fn from_transaction_and_signature( - transaction: Self::Transaction, - signature: PrimitiveSignature, - ) -> Self; - /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with /// tx type. fn recalculate_hash(&self) -> B256 { @@ -83,10 +76,14 @@ pub trait SignedTransaction: fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); } -impl TransactionExt for T { - type Type = ::Type; - - fn signature_hash(&self) -> B256 { - self.transaction().signature_hash() - } +/// Helper trait used in testing. +#[cfg(feature = "test-utils")] +pub trait SignedTransactionTesting: SignedTransaction { + /// Create a new signed transaction from a transaction and its signature. + /// + /// This will also calculate the transaction hash using its encoding. + fn from_transaction_and_signature( + transaction: Self::Transaction, + signature: PrimitiveSignature, + ) -> Self; } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index d6476c29b4c..94dd578493c 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -465,8 +465,8 @@ where impl reth_primitives_traits::Block for SealedBlock where - H: reth_primitives_traits::BlockHeader, - B: reth_primitives_traits::BlockBody, + H: reth_primitives_traits::BlockHeader + 'static, + B: reth_primitives_traits::BlockBody + 'static, Self: Serialize + for<'a> Deserialize<'a>, { type Header = H; diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index b7138183d11..b61ee7c14d2 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -10,6 +10,7 @@ use alloy_primitives::{Bloom, Log, B256}; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; use derive_more::{DerefMut, From, IntoIterator}; +use 
reth_primitives_traits::receipt::ReceiptExt; use serde::{Deserialize, Serialize}; #[cfg(feature = "reth-codec")] @@ -97,7 +98,9 @@ impl reth_primitives_traits::Receipt for Receipt { fn tx_type(&self) -> u8 { self.tx_type as u8 } +} +impl ReceiptExt for Receipt { fn receipts_root(_receipts: &[&Self]) -> B256 { #[cfg(feature = "optimism")] panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f325b72776f..015621cdcce 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1367,12 +1367,6 @@ impl SignedTransaction for TransactionSigned { recover_signer_unchecked(&self.signature, signature_hash) } - fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { - let mut initial_tx = Self { transaction, hash: Default::default(), signature }; - initial_tx.hash = initial_tx.recalculate_hash(); - initial_tx - } - fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { tx_env.caller = sender; match self.as_ref() { From cfd452353744bbacdd073b9196c32bb905b8f829 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 18 Nov 2024 10:17:55 +0100 Subject: [PATCH 517/970] feat: add merge_if_module_configured (#12608) --- crates/rpc/rpc-builder/src/lib.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ab68d3c88e4..0d86c838d51 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1999,6 +1999,29 @@ impl TransportRpcModules { &self.config } + /// Merge the given [`Methods`] in all configured transport modules if the given + /// [`RethRpcModule`] is configured for the transport. + /// + /// Fails if any of the methods in other is present already. 
+ pub fn merge_if_module_configured( + &mut self, + module: RethRpcModule, + other: impl Into, + ) -> Result<(), RegisterMethodError> { + let other = other.into(); + if self.module_config().contains_http(&module) { + self.merge_http(other.clone())?; + } + if self.module_config().contains_ws(&module) { + self.merge_ws(other.clone())?; + } + if self.module_config().contains_ipc(&module) { + self.merge_ipc(other)?; + } + + Ok(()) + } + /// Merge the given [Methods] in the configured http methods. /// /// Fails if any of the methods in other is present already. From 5056a081123d40e6f4bcdc1bf7338e01b3c5ee9c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 18 Nov 2024 10:03:54 +0100 Subject: [PATCH 518/970] fix(deps): Fix dev-deps for `reth-primitives` (#12612) --- .github/assets/check_wasm.sh | 1 + crates/primitives/Cargo.toml | 4 ++-- testing/testing-utils/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 0e704857edb..35f4bdda5b8 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -68,6 +68,7 @@ exclude_crates=( reth-static-file # tokio reth-transaction-pool # c-kzg reth-trie-parallel # tokio + reth-testing-utils ) # Array to hold the results diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 34d04c94edc..c9043a2bd11 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -62,11 +62,11 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] # eth -reth-chainspec.workspace = true +reth-chainspec = { workspace = true, features = ["arbitrary"] } reth-codecs = { workspace = true, features = ["test-utils"] } reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true -reth-trie-common.workspace = true +reth-trie-common = { workspace = true, features = ["arbitrary"] } revm-primitives = { workspace = true, 
features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index 3e0f58a7bd0..d0de37bf77f 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -12,7 +12,7 @@ repository.workspace = true workspace = true [dependencies] -reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives = { workspace = true, features = ["secp256k1", "arbitrary"] } alloy-genesis.workspace = true alloy-primitives.workspace = true From 4b4f9cf40626c8714f0a0caa152a970fa6e5dfa0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 18 Nov 2024 11:53:12 +0100 Subject: [PATCH 519/970] feat: install op debug exeuction witness (#12622) --- Cargo.lock | 1 + crates/node/builder/src/rpc.rs | 32 ++++++++++++++++++++++++---- crates/optimism/node/Cargo.toml | 1 + crates/optimism/node/src/node.rs | 34 ++++++++++++++++++++---------- crates/optimism/rpc/src/witness.rs | 2 +- 5 files changed, 54 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 23503d90756..ded071c5dc2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8335,6 +8335,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-revm", + "reth-rpc-server-types", "reth-tracing", "reth-transaction-pool", "reth-trie-db", diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index adee942748c..fda8b66f8d7 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -399,7 +399,7 @@ where } } -impl NodeAddOns for RpcAddOns +impl RpcAddOns where N: FullNodeComponents< Types: ProviderNodeTypes, @@ -408,9 +408,16 @@ where EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, { - type Handle = RpcHandle; - - async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { + /// Launches the RPC servers with the given context and an additional hook for extending + /// 
modules. + pub async fn launch_add_ons_with( + self, + ctx: AddOnsContext<'_, N>, + ext: F, + ) -> eyre::Result> + where + F: FnOnce(&mut TransportRpcModules) -> eyre::Result<()>, + { let Self { eth_api_builder, engine_validator_builder, hooks, _pd: _ } = self; let engine_validator = engine_validator_builder.build(&ctx).await?; @@ -467,6 +474,7 @@ where let RpcHooks { on_rpc_started, extend_rpc_modules } = hooks; + ext(ctx.modules)?; extend_rpc_modules.extend_rpc_modules(ctx)?; let server_config = config.rpc.rpc_server_config(); @@ -513,6 +521,22 @@ where } } +impl NodeAddOns for RpcAddOns +where + N: FullNodeComponents< + Types: ProviderNodeTypes, + PayloadBuilder: PayloadBuilder::Engine>, + >, + EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, + EV: EngineValidatorBuilder, +{ + type Handle = RpcHandle; + + async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { + self.launch_add_ons_with(ctx, |_| Ok(())).await + } +} + /// Helper trait implemented for add-ons producing [`RpcHandle`]. Used by common node launcher /// implementations. pub trait RethRpcAddOns: diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 9a80c83deec..03ea75a26cd 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -29,6 +29,7 @@ reth-evm.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-beacon-consensus.workspace = true reth-trie-db.workspace = true +reth-rpc-server-types.workspace = true # op-reth reth-optimism-payload-builder.workspace = true diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 0c2186c7268..238953c9d57 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -1,7 +1,11 @@ //! Optimism Node types config. 
-use std::sync::Arc; - +use crate::{ + args::RollupArgs, + engine::OpEngineValidator, + txpool::{OpTransactionPool, OpTransactionValidator}, + OpEngineTypes, +}; use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::{EthChainSpec, Hardforks}; @@ -23,23 +27,21 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; use reth_optimism_payload_builder::builder::OpPayloadTransactions; -use reth_optimism_rpc::OpEthApi; +use reth_optimism_rpc::{ + witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, + OpEthApi, +}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{Block, Receipt, TransactionSigned, TxType}; use reth_provider::CanonStateSubscriptions; +use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, TransactionPool, TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; - -use crate::{ - args::RollupArgs, - engine::OpEngineValidator, - txpool::{OpTransactionPool, OpTransactionValidator}, - OpEngineTypes, -}; +use std::sync::Arc; /// Optimism primitive types. 
#[derive(Debug, Default, Clone)] @@ -163,7 +165,17 @@ where self, ctx: reth_node_api::AddOnsContext<'_, N>, ) -> eyre::Result { - self.0.launch_add_ons(ctx).await + // install additional OP specific rpc methods + let debug_ext = + OpDebugWitnessApi::new(ctx.node.provider().clone(), ctx.node.evm_config().clone()); + + self.0 + .launch_add_ons_with(ctx, move |modules| { + debug!(target: "reth::cli", "Installing debug payload witness rpc endpoint"); + modules.merge_if_module_configured(RethRpcModule::Debug, debug_ext.into_rpc())?; + Ok(()) + }) + .await } } diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs index 0521fa9025d..ed9d77e73e8 100644 --- a/crates/optimism/rpc/src/witness.rs +++ b/crates/optimism/rpc/src/witness.rs @@ -11,7 +11,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_payload_builder::OpPayloadBuilder; use reth_primitives::SealedHeader; use reth_provider::{BlockReaderIdExt, ProviderError, ProviderResult, StateProviderFactory}; -use reth_rpc_api::DebugExecutionWitnessApiServer; +pub use reth_rpc_api::DebugExecutionWitnessApiServer; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use std::{fmt::Debug, sync::Arc}; From 1acdf9b2d43958eec0bd7b2dd449978d8b4d7862 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 18 Nov 2024 12:33:38 +0100 Subject: [PATCH 520/970] fix: add additional op checks for chain specific check (#12623) --- crates/node/builder/src/launch/common.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 972fdc640df..41fbf93e05d 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -819,7 +819,10 @@ where /// This checks for OP-Mainnet and ensures we have all the necessary data to progress (past /// bedrock height) fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { - if self.chain_id() == 
Chain::optimism_mainnet() { + if self.chain_spec().is_optimism() && + !self.is_dev() && + self.chain_id() == Chain::optimism_mainnet() + { let latest = self.blockchain_db().last_block_number()?; // bedrock height if latest < 105235063 { From 8aaac6d7c264912917b834f651874a9db04ce982 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 18 Nov 2024 12:47:30 +0100 Subject: [PATCH 521/970] feat: add TokioTaskExecutor::boxed (#12619) --- crates/tasks/src/lib.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 28b5eaba9ff..340e925ec56 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -111,6 +111,13 @@ dyn_clone::clone_trait_object!(TaskSpawner); #[non_exhaustive] pub struct TokioTaskExecutor; +impl TokioTaskExecutor { + /// Converts the instance to a boxed [`TaskSpawner`]. + pub fn boxed(self) -> Box { + Box::new(self) + } +} + impl TaskSpawner for TokioTaskExecutor { fn spawn(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { tokio::task::spawn(fut) From 4daec16272d9f4b2c641b92ca2f42c1fbcb9b9db Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 18 Nov 2024 12:52:36 +0100 Subject: [PATCH 522/970] feat: add EthereumEthApiTypes (#12618) --- crates/rpc/rpc/src/eth/helpers/types.rs | 18 +++++++++++++++++- crates/rpc/rpc/src/eth/mod.rs | 5 ++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index d1ce84bc0b7..8f135a9103b 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -4,11 +4,27 @@ use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; use alloy_network::{Ethereum, Network}; use alloy_rpc_types_eth::{Transaction, TransactionInfo}; use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_rpc_eth_api::EthApiTypes; use reth_rpc_eth_types::EthApiError; use 
reth_rpc_types_compat::TransactionCompat; +/// A standalone [`EthApiTypes`] implementation for Ethereum. +#[derive(Debug, Clone, Copy, Default)] +pub struct EthereumEthApiTypes(EthTxBuilder); + +impl EthApiTypes for EthereumEthApiTypes { + type Error = EthApiError; + type NetworkTypes = Ethereum; + type TransactionCompat = EthTxBuilder; + + fn tx_resp_builder(&self) -> &Self::TransactionCompat { + &self.0 + } +} + /// Builds RPC transaction response for l1. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Default)] +#[non_exhaustive] pub struct EthTxBuilder; impl TransactionCompat for EthTxBuilder diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 4d1833add3e..d8a5b95f55e 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -13,6 +13,9 @@ pub use core::EthApi; pub use filter::EthFilter; pub use pubsub::EthPubSub; -pub use helpers::{signer::DevSigner, types::EthTxBuilder}; +pub use helpers::{ + signer::DevSigner, + types::{EthTxBuilder, EthereumEthApiTypes}, +}; pub use reth_rpc_eth_api::{EthApiServer, EthApiTypes, FullEthApiServer, RpcNodeCore}; From 378e097aeadfce3b6506146a3af70b406b35e1ef Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Mon, 18 Nov 2024 18:56:10 +0700 Subject: [PATCH 523/970] chore(sdk): Add InMemorySize as super trait (#12615) --- crates/optimism/primitives/src/tx_type.rs | 10 +++++++++- crates/primitives-traits/src/receipt.rs | 3 +++ .../primitives-traits/src/transaction/signed.rs | 3 ++- crates/primitives-traits/src/tx_type.rs | 3 +++ crates/primitives/src/receipt.rs | 17 +++++++++++++++++ crates/primitives/src/transaction/mod.rs | 14 ++++++++------ crates/primitives/src/transaction/tx_type.rs | 9 +++++++++ crates/transaction-pool/src/test_utils/mock.rs | 1 + 8 files changed, 52 insertions(+), 8 deletions(-) diff --git a/crates/optimism/primitives/src/tx_type.rs b/crates/optimism/primitives/src/tx_type.rs index 
1b505920120..70f5fd32d8e 100644 --- a/crates/optimism/primitives/src/tx_type.rs +++ b/crates/optimism/primitives/src/tx_type.rs @@ -13,7 +13,7 @@ use derive_more::{ Display, }; use op_alloy_consensus::OpTxType as AlloyOpTxType; -use reth_primitives_traits::TxType; +use reth_primitives_traits::{InMemorySize, TxType}; #[cfg(feature = "reth-codec")] use alloy_consensus::constants::EIP7702_TX_TYPE_ID; @@ -57,6 +57,14 @@ impl TxType for OpTxType { } } +impl InMemorySize for OpTxType { + /// Calculates a heuristic for the in-memory size of the [`OpTxType`]. + #[inline] + fn size(&self) -> usize { + core::mem::size_of::() + } +} + impl From for U8 { fn from(tx_type: OpTxType) -> Self { Self::from(u8::from(tx_type)) diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 31bded015d4..b34590dff0e 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -7,6 +7,8 @@ use core::fmt; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; +use crate::InMemorySize; + /// Helper trait that unifies all behaviour required by receipt to support full node operations. pub trait FullReceipt: Receipt + Compact {} @@ -25,6 +27,7 @@ pub trait Receipt: + alloy_rlp::Encodable + alloy_rlp::Decodable + Serialize + + InMemorySize + for<'de> Deserialize<'de> { /// Returns transaction type. 
diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 958d5cd6c77..7b6abbaec0f 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -8,7 +8,7 @@ use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; use reth_codecs::Compact; use revm_primitives::TxEnv; -use crate::{FullTransaction, MaybeArbitrary, Transaction}; +use crate::{FullTransaction, InMemorySize, MaybeArbitrary, Transaction}; /// Helper trait that unifies all behaviour required by block to support full node operations. pub trait FullSignedTx: SignedTransaction + Compact {} @@ -35,6 +35,7 @@ pub trait SignedTransaction: + Decodable2718 + alloy_consensus::Transaction + MaybeArbitrary + + InMemorySize { /// Transaction type that is signed. type Transaction: Transaction; diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs index b1828ad57d9..d9ef687759e 100644 --- a/crates/primitives-traits/src/tx_type.rs +++ b/crates/primitives-traits/src/tx_type.rs @@ -3,6 +3,8 @@ use core::fmt; use alloy_primitives::{U64, U8}; use reth_codecs::Compact; +use crate::InMemorySize; + /// Helper trait that unifies all behaviour required by transaction type ID to support full node /// operations. pub trait FullTxType: TxType + Compact {} @@ -29,6 +31,7 @@ pub trait TxType: + TryFrom + alloy_rlp::Encodable + alloy_rlp::Decodable + + InMemorySize { /// Returns `true` if this is a legacy transaction. 
fn is_legacy(&self) -> bool; diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index b61ee7c14d2..f4567de421e 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,5 +1,6 @@ use alloc::{vec, vec::Vec}; use core::cmp::Ordering; +use reth_primitives_traits::InMemorySize; use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, @@ -109,6 +110,22 @@ impl ReceiptExt for Receipt { } } +impl InMemorySize for Receipt { + /// Calculates a heuristic for the in-memory size of the [Receipt]. + #[inline] + fn size(&self) -> usize { + let total_size = self.tx_type.size() + + core::mem::size_of::() + + core::mem::size_of::() + + self.logs.capacity() * core::mem::size_of::(); + + #[cfg(feature = "optimism")] + return total_size + 2 * core::mem::size_of::>(); + #[cfg(not(feature = "optimism"))] + total_size + } +} + /// A collection of receipts organized as a two-dimensional vector. #[derive( Clone, diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 015621cdcce..aa57ef8d81e 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1274,12 +1274,6 @@ impl TransactionSigned { initial_tx } - /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::() + self.transaction.size() + mem::size_of::() - } - /// Decodes legacy transaction from the data buffer into a tuple. /// /// This expects `rlp(legacy_tx)` @@ -1447,6 +1441,14 @@ impl SignedTransaction for TransactionSigned { } } +impl InMemorySize for TransactionSigned { + /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. 
+ #[inline] + fn size(&self) -> usize { + mem::size_of::() + self.transaction.size() + mem::size_of::() + } +} + impl alloy_consensus::Transaction for TransactionSigned { fn chain_id(&self) -> Option { self.deref().chain_id() diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 3445cb184c1..caa6d872854 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -5,6 +5,7 @@ use alloy_consensus::constants::{ use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; use derive_more::Display; +use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; /// Identifier parameter for legacy transaction @@ -118,6 +119,14 @@ impl reth_primitives_traits::TxType for TxType { } } +impl InMemorySize for TxType { + /// Calculates a heuristic for the in-memory size of the [`TxType`]. + #[inline] + fn size(&self) -> usize { + core::mem::size_of::() + } +} + impl From for u8 { fn from(value: TxType) -> Self { match value { diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index fc43349f3f1..69f1835edcf 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -28,6 +28,7 @@ use reth_primitives::{ transaction::TryFromRecoveredTransactionError, PooledTransactionsElementEcRecovered, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, }; +use reth_primitives_traits::InMemorySize; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; /// A transaction pool implementation using [`MockOrdering`] for transaction ordering. 
From 626224e301f3fe1bd1faf2a20c486af13692d8f8 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Mon, 18 Nov 2024 18:57:22 +0700 Subject: [PATCH 524/970] chore: refactor `MockTransaction` (#12627) --- crates/net/network/src/transactions/mod.rs | 2 +- .../network/tests/it/big_pooled_txs_req.rs | 2 +- crates/transaction-pool/src/pool/mod.rs | 2 +- crates/transaction-pool/src/pool/txpool.rs | 12 +- .../transaction-pool/src/test_utils/mock.rs | 156 ++---------------- crates/transaction-pool/tests/it/blobs.rs | 13 +- crates/transaction-pool/tests/it/evict.rs | 7 +- crates/transaction-pool/tests/it/listeners.rs | 4 +- crates/transaction-pool/tests/it/pending.rs | 4 +- 9 files changed, 40 insertions(+), 162 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 36abcd3d617..0ccb4252ac3 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -2230,7 +2230,7 @@ mod tests { .add_transaction(reth_transaction_pool::TransactionOrigin::External, tx.clone()) .await; - let request = GetPooledTransactions(vec![tx.get_hash()]); + let request = GetPooledTransactions(vec![*tx.get_hash()]); let (send, receive) = oneshot::channel::>(); diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 4d65e3f63ba..9e0f69160b6 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -35,7 +35,7 @@ async fn test_large_tx_req() { tx }) .collect(); - let txs_hashes: Vec = txs.iter().map(|tx| tx.get_hash()).collect(); + let txs_hashes: Vec = txs.iter().map(|tx| *tx.get_hash()).collect(); // setup testnet let mut net = Testnet::create_with(2, MockEthProvider::default()).await; diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 6441ed687f2..3f7ecfa7836 100644 --- 
a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -1302,7 +1302,7 @@ mod tests { // Insert the sidecar into the blob store if the current index is within the blob limit. if n < blob_limit.max_txs { - blob_store.insert(tx.get_hash(), sidecar.clone()).unwrap(); + blob_store.insert(*tx.get_hash(), sidecar.clone()).unwrap(); } // Add the transaction to the pool with external origin and valid outcome. diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 040deb15fcb..537162ac76c 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -2486,8 +2486,7 @@ mod tests { let tx = MockTransaction::eip1559().inc_price().inc_limit(); let first = f.validated(tx.clone()); pool.insert_tx(first, on_chain_balance, on_chain_nonce).unwrap(); - let tx = - MockTransaction::eip4844().set_sender(tx.get_sender()).inc_price_by(100).inc_limit(); + let tx = MockTransaction::eip4844().set_sender(tx.sender()).inc_price_by(100).inc_limit(); let blob = f.validated(tx); let err = pool.insert_tx(blob, on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::TxTypeConflict { .. }), "{err:?}"); @@ -2502,8 +2501,7 @@ mod tests { let tx = MockTransaction::eip4844().inc_price().inc_limit(); let first = f.validated(tx.clone()); pool.insert_tx(first, on_chain_balance, on_chain_nonce).unwrap(); - let tx = - MockTransaction::eip1559().set_sender(tx.get_sender()).inc_price_by(100).inc_limit(); + let tx = MockTransaction::eip1559().set_sender(tx.sender()).inc_price_by(100).inc_limit(); let tx = f.validated(tx); let err = pool.insert_tx(tx, on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::TxTypeConflict { .. 
}), "{err:?}"); @@ -2622,7 +2620,7 @@ mod tests { assert_eq!( pool.max_account_slots, - pool.tx_count(f.ids.sender_id(&tx.get_sender()).unwrap()) + pool.tx_count(f.ids.sender_id(tx.get_sender()).unwrap()) ); let err = @@ -2654,7 +2652,7 @@ mod tests { assert_eq!( pool.max_account_slots, - pool.tx_count(f.ids.sender_id(&tx.get_sender()).unwrap()) + pool.tx_count(f.ids.sender_id(tx.get_sender()).unwrap()) ); pool.insert_tx( @@ -2829,7 +2827,7 @@ mod tests { let mut changed_senders = HashMap::default(); changed_senders.insert( id.sender, - SenderInfo { state_nonce: next.get_nonce(), balance: U256::from(1_000) }, + SenderInfo { state_nonce: next.nonce(), balance: U256::from(1_000) }, ); let outcome = pool.update_accounts(changed_senders); assert_eq!(outcome.discarded.len(), 1); diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 69f1835edcf..56acbb107f3 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -69,7 +69,7 @@ macro_rules! get_value { MockTransaction::Legacy { $field, .. } | MockTransaction::Eip1559 { $field, .. } | MockTransaction::Eip4844 { $field, .. } | - MockTransaction::Eip2930 { $field, .. } => $field.clone(), + MockTransaction::Eip2930 { $field, .. } => $field, } }; } @@ -91,7 +91,7 @@ macro_rules! make_setters_getters { } /// Gets the value of the specified field. - pub fn [](&self) -> $t { + pub const fn [](&self) -> &$t { get_value!(self => $name) } )*} @@ -582,30 +582,15 @@ impl PoolTransaction for MockTransaction { } fn hash(&self) -> &TxHash { - match self { - Self::Legacy { hash, .. } | - Self::Eip1559 { hash, .. } | - Self::Eip4844 { hash, .. } | - Self::Eip2930 { hash, .. } => hash, - } + self.get_hash() } fn sender(&self) -> Address { - match self { - Self::Legacy { sender, .. } | - Self::Eip1559 { sender, .. } | - Self::Eip4844 { sender, .. } | - Self::Eip2930 { sender, .. 
} => *sender, - } + *self.get_sender() } fn nonce(&self) -> u64 { - match self { - Self::Legacy { nonce, .. } | - Self::Eip1559 { nonce, .. } | - Self::Eip4844 { nonce, .. } | - Self::Eip2930 { nonce, .. } => *nonce, - } + *self.get_nonce() } fn cost(&self) -> U256 { @@ -622,7 +607,7 @@ impl PoolTransaction for MockTransaction { } fn gas_limit(&self) -> u64 { - self.get_gas_limit() + *self.get_gas_limit() } fn max_fee_per_gas(&self) -> u128 { @@ -703,22 +688,12 @@ impl PoolTransaction for MockTransaction { /// Returns the input data associated with the transaction. fn input(&self) -> &[u8] { - match self { - Self::Legacy { .. } => &[], - Self::Eip1559 { input, .. } | - Self::Eip4844 { input, .. } | - Self::Eip2930 { input, .. } => input, - } + self.get_input() } /// Returns the size of the transaction. fn size(&self) -> usize { - match self { - Self::Legacy { size, .. } | - Self::Eip1559 { size, .. } | - Self::Eip4844 { size, .. } | - Self::Eip2930 { size, .. } => *size, - } + *self.get_size() } /// Returns the transaction type as a byte identifier. 
@@ -1007,109 +982,14 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { use proptest::prelude::Strategy; use proptest_arbitrary_interop::arb; - use reth_primitives_traits::size::InMemorySize; - - arb::<(Transaction, Address, B256)>() - .prop_map(|(tx, sender, tx_hash)| match &tx { - Transaction::Legacy(TxLegacy { - chain_id, - nonce, - gas_price, - gas_limit, - to, - value, - input, - }) => Self::Legacy { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - gas_price: *gas_price, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - size: tx.size(), - }, - - Transaction::Eip2930(TxEip2930 { - chain_id, - nonce, - gas_price, - gas_limit, - to, - value, - access_list, - input, - }) => Self::Eip2930 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - gas_price: *gas_price, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - size: tx.size(), - }, - Transaction::Eip1559(TxEip1559 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - input, - access_list, - }) => Self::Eip1559 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - max_fee_per_gas: *max_fee_per_gas, - max_priority_fee_per_gas: *max_priority_fee_per_gas, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - size: tx.size(), - }, - Transaction::Eip4844(TxEip4844 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - input, - max_fee_per_blob_gas, - access_list, - blob_versioned_hashes: _, - }) => Self::Eip4844 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - max_fee_per_gas: *max_fee_per_gas, - max_priority_fee_per_gas: *max_priority_fee_per_gas, - max_fee_per_blob_gas: *max_fee_per_blob_gas, - gas_limit: { *gas_limit }, 
- to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - // only generate a sidecar if it is a 4844 tx - also for the sake of - // performance just use a default sidecar - sidecar: BlobTransactionSidecar::default(), - size: tx.size(), - }, - #[allow(unreachable_patterns)] - _ => unimplemented!(), + + arb::<(TransactionSigned, Address)>() + .prop_map(|(signed_transaction, signer)| { + TransactionSignedEcRecovered::from_signed_transaction(signed_transaction, signer) + .try_into() + .expect( + "Failed to create an Arbitrary MockTransaction via TransactionSignedEcRecovered", + ) }) .boxed() } @@ -1128,8 +1008,8 @@ pub struct MockTransactionFactory { impl MockTransactionFactory { /// Generates a transaction ID for the given [`MockTransaction`]. pub fn tx_id(&mut self, tx: &MockTransaction) -> TransactionId { - let sender = self.ids.sender_id_or_create(tx.get_sender()); - TransactionId::new(sender, tx.get_nonce()) + let sender = self.ids.sender_id_or_create(tx.sender()); + TransactionId::new(sender, tx.nonce()) } /// Validates a [`MockTransaction`] and returns a [`MockValidTx`]. 
diff --git a/crates/transaction-pool/tests/it/blobs.rs b/crates/transaction-pool/tests/it/blobs.rs index 0cdc6d088c0..9417c62278b 100644 --- a/crates/transaction-pool/tests/it/blobs.rs +++ b/crates/transaction-pool/tests/it/blobs.rs @@ -3,7 +3,7 @@ use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{MockTransaction, MockTransactionFactory, TestPoolBuilder}, - TransactionOrigin, TransactionPool, + PoolTransaction, TransactionOrigin, TransactionPool, }; #[tokio::test(flavor = "multi_thread")] @@ -16,23 +16,22 @@ async fn blobs_exclusive() { .add_transaction(TransactionOrigin::External, blob_tx.transaction.clone()) .await .unwrap(); - assert_eq!(hash, blob_tx.transaction.get_hash()); + assert_eq!(hash, *blob_tx.transaction.get_hash()); let mut best_txns = txpool.best_transactions(); assert_eq!(best_txns.next().unwrap().transaction.get_hash(), blob_tx.transaction.get_hash()); assert!(best_txns.next().is_none()); - let eip1559_tx = MockTransaction::eip1559() - .set_sender(blob_tx.transaction.get_sender()) - .inc_price_by(10_000); + let eip1559_tx = + MockTransaction::eip1559().set_sender(blob_tx.transaction.sender()).inc_price_by(10_000); let res = txpool.add_transaction(TransactionOrigin::External, eip1559_tx.clone()).await.unwrap_err(); - assert_eq!(res.hash, eip1559_tx.get_hash()); + assert_eq!(res.hash, *eip1559_tx.get_hash()); match res.kind { PoolErrorKind::ExistingConflictingTransactionType(addr, tx_type) => { - assert_eq!(addr, eip1559_tx.get_sender()); + assert_eq!(addr, eip1559_tx.sender()); assert_eq!(tx_type, eip1559_tx.tx_type()); } _ => unreachable!(), diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index fea50962fd9..3b74b8cb230 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -8,7 +8,8 @@ use reth_transaction_pool::{ test_utils::{ MockFeeRange, MockTransactionDistribution, MockTransactionRatio, TestPool, TestPoolBuilder, }, - BlockInfo, 
PoolConfig, SubPoolLimit, TransactionOrigin, TransactionPool, TransactionPoolExt, + BlockInfo, PoolConfig, PoolTransaction, SubPoolLimit, TransactionOrigin, TransactionPool, + TransactionPoolExt, }; #[tokio::test(flavor = "multi_thread")] @@ -87,7 +88,7 @@ async fn only_blobs_eviction() { let set = set.into_vec(); // ensure that the first nonce is 0 - assert_eq!(set[0].get_nonce(), 0); + assert_eq!(set[0].nonce(), 0); // and finally insert it into the pool let results = pool.add_transactions(TransactionOrigin::External, set).await; @@ -194,7 +195,7 @@ async fn mixed_eviction() { ); let set = set.into_inner().into_vec(); - assert_eq!(set[0].get_nonce(), 0); + assert_eq!(set[0].nonce(), 0); let results = pool.add_transactions(TransactionOrigin::External, set).await; for (i, result) in results.iter().enumerate() { diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index ad13af22a6a..0f8a0b19e2b 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -33,11 +33,11 @@ async fn txpool_listener_all() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); assert_matches!( all_tx_events.next().await, - Some(FullTransactionEvent::Pending(hash)) if hash == transaction.transaction.get_hash() + Some(FullTransactionEvent::Pending(hash)) if hash == *transaction.transaction.get_hash() ); } diff --git a/crates/transaction-pool/tests/it/pending.rs b/crates/transaction-pool/tests/it/pending.rs index 0b6349b24cc..be559c71eec 100644 --- a/crates/transaction-pool/tests/it/pending.rs +++ b/crates/transaction-pool/tests/it/pending.rs @@ -12,7 +12,7 @@ async fn txpool_new_pending_txs() { let added_result = 
txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); let mut best_txns = txpool.best_transactions(); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); @@ -20,6 +20,6 @@ async fn txpool_new_pending_txs() { let transaction = mock_tx_factory.create_eip1559(); let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); } From a84f58bcb8e329999d842716e604ff97477c8970 Mon Sep 17 00:00:00 2001 From: Oliver Date: Mon, 18 Nov 2024 13:00:57 +0100 Subject: [PATCH 525/970] chore: use keccak256 from alloy (#12628) --- crates/trie/common/src/key.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/trie/common/src/key.rs b/crates/trie/common/src/key.rs index 9e440d199fa..71f8019bff5 100644 --- a/crates/trie/common/src/key.rs +++ b/crates/trie/common/src/key.rs @@ -1,5 +1,4 @@ -use alloy_primitives::B256; -use revm_primitives::keccak256; +use alloy_primitives::{keccak256, B256}; /// Trait for hashing keys in state. 
pub trait KeyHasher: Default + Clone + Send + Sync + 'static { From b5bb3157c31eb98363a9a0a541c67acd8e3a59a2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 18 Nov 2024 14:19:30 +0100 Subject: [PATCH 526/970] test: add tests for base l1 blockinfo (#12609) --- crates/optimism/rpc/src/eth/receipt.rs | 47 ++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 3 deletions(-) diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index f3d16b4adb5..a801a408fd5 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -233,13 +233,12 @@ impl OpReceiptBuilder { #[cfg(test)] mod test { + use super::*; use alloy_primitives::hex; use op_alloy_network::eip2718::Decodable2718; - use reth_optimism_chainspec::OP_MAINNET; + use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET}; use reth_primitives::{Block, BlockBody}; - use super::*; - /// OP Mainnet transaction at index 0 in block 124665056. /// /// @@ -342,4 +341,46 @@ mod test { "incorrect l1 blob base fee scalar" ); } + + // + #[test] + fn base_receipt_gas_fields() { + // https://basescan.org/tx/0x510fd4c47d78ba9f97c91b0f2ace954d5384c169c9545a77a373cf3ef8254e6e + let system = hex!("7ef8f8a0389e292420bcbf9330741f72074e39562a09ff5a00fd22e4e9eee7e34b81bca494deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000008dd00101c120000000000000004000000006721035b00000000014189960000000000000000000000000000000000000000000000000000000349b4dcdc000000000000000000000000000000000000000000000000000000004ef9325cc5991ce750960f636ca2ffbb6e209bb3ba91412f21dd78c14ff154d1930f1f9a0000000000000000000000005050f69a9786f081509234f1a7f4684b5e5b76c9"); + let tx_0 = TransactionSigned::decode_2718(&mut &system[..]).unwrap(); + + let block = Block { + body: BlockBody { transactions: vec![tx_0], ..Default::default() }, + ..Default::default() + }; + let l1_block_info = + 
reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info"); + + // https://basescan.org/tx/0xf9420cbaf66a2dda75a015488d37262cbfd4abd0aad7bb2be8a63e14b1fa7a94 + let tx = hex!("02f86c8221058034839a4ae283021528942f16386bb37709016023232523ff6d9daf444be380841249c58bc080a001b927eda2af9b00b52a57be0885e0303c39dd2831732e14051c2336470fd468a0681bf120baf562915841a48601c2b54a6742511e535cf8f71c95115af7ff63bd"); + let tx_1 = TransactionSigned::decode_2718(&mut &tx[..]).unwrap(); + + let receipt_meta = OpReceiptFieldsBuilder::new(1730216981) + .l1_block_info(&BASE_MAINNET, &tx_1, l1_block_info) + .expect("should parse revm l1 info") + .build(); + + let L1BlockInfo { + l1_gas_price, + l1_gas_used, + l1_fee, + l1_fee_scalar, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + } = receipt_meta.l1_block_info; + + assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)"); + assert_eq!(l1_gas_used, Some(1600), "incorrect l1 gas used"); + assert_eq!(l1_fee, Some(191150293412), "incorrect l1 fee"); + assert!(l1_fee_scalar.is_none(), "incorrect l1 fee scalar"); + assert_eq!(l1_base_fee_scalar, Some(2269), "incorrect l1 base fee scalar"); + assert_eq!(l1_blob_base_fee, Some(1324954204), "incorrect l1 blob base fee"); + assert_eq!(l1_blob_base_fee_scalar, Some(1055762), "incorrect l1 blob base fee scalar"); + } } From 32a4d9ea0838626799efe3fd7ef09cfb4380ffac Mon Sep 17 00:00:00 2001 From: wizard <112275929+famouswizard@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:27:35 +0300 Subject: [PATCH 527/970] Fix grammatical error in lib.rs (#12632) --- bin/reth/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 6b71f48de12..53c592063ec 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -15,7 +15,7 @@ //! - `min-error-logs`: Disables all logs below `error` level. //! - `min-warn-logs`: Disables all logs below `warn` level. //! 
- `min-info-logs`: Disables all logs below `info` level. This can speed up the node, since fewer -//! calls to the logging component is made. +//! calls to the logging component are made. //! - `min-debug-logs`: Disables all logs below `debug` level. //! - `min-trace-logs`: Disables all logs below `trace` level. From 66887bbfaa2f2cb906f117001d9452d2c1bae6b6 Mon Sep 17 00:00:00 2001 From: Dmitry <98899785+mdqst@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:28:15 +0300 Subject: [PATCH 528/970] Typo Update profiling.md (#12631) --- book/developers/profiling.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/developers/profiling.md b/book/developers/profiling.md index f1fdf520eb2..956bc563303 100644 --- a/book/developers/profiling.md +++ b/book/developers/profiling.md @@ -25,7 +25,7 @@ In this tutorial, we will be reviewing: [Jemalloc](https://jemalloc.net/) is a general-purpose allocator that is used [across the industry in production](https://engineering.fb.com/2011/01/03/core-data/scalable-memory-allocation-using-jemalloc/), well known for its performance benefits, predictability, and profiling capabilities. We've seen significant performance benefits in reth when using jemalloc, but will be primarily focusing on its profiling capabilities. -Jemalloc also provides tools for analyzing and visualizing its the allocation profiles it generates, notably `jeprof`. +Jemalloc also provides tools for analyzing and visualizing its allocation profiles it generates, notably `jeprof`. 
#### Enabling jemalloc in reth From 773f558ad647c6c0af2f8e7d573f064508beb16d Mon Sep 17 00:00:00 2001 From: Dmitry <98899785+mdqst@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:37:13 +0300 Subject: [PATCH 529/970] Fix Minor Documentation Errors (#12601) --- book/run/config.md | 6 +++--- book/run/sync-op-mainnet.md | 2 +- book/run/transactions.md | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/book/run/config.md b/book/run/config.md index 10fd40ca763..bb28d855de8 100644 --- a/book/run/config.md +++ b/book/run/config.md @@ -36,7 +36,7 @@ The defaults shipped with Reth try to be relatively reasonable, but may not be o ### `headers` -The headers section controls both the behavior of the header stage, which download historical headers, as well as the primary downloader that fetches headers over P2P. +The headers section controls both the behavior of the header stage, which downloads historical headers, as well as the primary downloader that fetches headers over P2P. ```toml [stages.headers] @@ -65,7 +65,7 @@ commit_threshold = 10000 ### `bodies` -The bodies section controls both the behavior of the bodies stage, which download historical block bodies, as well as the primary downloader that fetches block bodies over P2P. +The bodies section controls both the behavior of the bodies stage, which downloads historical block bodies, as well as the primary downloader that fetches block bodies over P2P. ```toml [stages.bodies] @@ -102,7 +102,7 @@ The sender recovery stage recovers the address of transaction senders using tran ```toml [stages.sender_recovery] -# The amount of transactions to recover senders for before +# The number of transactions to recover senders for before # writing the results to disk. 
# # Lower thresholds correspond to more frequent disk I/O (writes), diff --git a/book/run/sync-op-mainnet.md b/book/run/sync-op-mainnet.md index 2a862314a1d..0e2090acbcb 100644 --- a/book/run/sync-op-mainnet.md +++ b/book/run/sync-op-mainnet.md @@ -1,6 +1,6 @@ # Sync OP Mainnet -To sync OP mainnet, bedrock state needs to be imported as a starting point. There are currently two ways: +To sync OP mainnet, Bedrock state needs to be imported as a starting point. There are currently two ways: * Minimal bootstrap **(recommended)**: only state snapshot at Bedrock block is imported without any OVM historical data. * Full bootstrap **(not recommended)**: state, blocks and receipts are imported. *Not recommended for now: [storage consistency issue](https://github.com/paradigmxyz/reth/pull/11099) tldr: sudden crash may break the node diff --git a/book/run/transactions.md b/book/run/transactions.md index 61327b57300..edb3a24d76f 100644 --- a/book/run/transactions.md +++ b/book/run/transactions.md @@ -38,7 +38,7 @@ Alongside the `accessList` parameter and legacy parameters (except `gasPrice`), The base fee is burned, while the priority fee is paid to the miner who includes the transaction, incentivizing miners to include transactions with higher priority fees per gas. -## EIP-4844 Transaction +## EIP-4844 Transactions [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844) transactions (type `0x3`) was introduced in Ethereum's Dencun fork. This provides a temporary but significant scaling relief for rollups by allowing them to initially scale to 0.375 MB per slot, with a separate fee market allowing fees to be very low while usage of this system is limited. 
From 292e9d9812cd74662801252d6a9e08fe3b0ef738 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 18 Nov 2024 14:28:43 +0100 Subject: [PATCH 530/970] test(tx-pool): add more unit tests for blob pool (#12605) --- crates/transaction-pool/src/pool/blob.rs | 98 ++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index ac39c6ab781..e6c0cb245c3 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -693,4 +693,102 @@ mod tests { ); } } + + #[test] + fn test_empty_pool_operations() { + let mut pool: BlobTransactions = BlobTransactions::default(); + + // Ensure pool is empty + assert!(pool.is_empty()); + assert_eq!(pool.len(), 0); + assert_eq!(pool.size(), 0); + + // Attempt to remove a non-existent transaction + let non_existent_id = TransactionId::new(0.into(), 0); + assert!(pool.remove_transaction(&non_existent_id).is_none()); + + // Check contains method on empty pool + assert!(!pool.contains(&non_existent_id)); + } + + #[test] + fn test_transaction_removal() { + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + + // Add a transaction + let tx = factory.validated_arc(MockTransaction::eip4844()); + let tx_id = *tx.id(); + pool.add_transaction(tx); + + // Remove the transaction + let removed = pool.remove_transaction(&tx_id); + assert!(removed.is_some()); + assert_eq!(*removed.unwrap().id(), tx_id); + assert!(pool.is_empty()); + } + + #[test] + fn test_satisfy_attributes_empty_pool() { + let pool: BlobTransactions = BlobTransactions::default(); + let attributes = BestTransactionsAttributes { blob_fee: Some(100), basefee: 100 }; + // Satisfy attributes on an empty pool should return an empty vector + let satisfied = pool.satisfy_attributes(attributes); + assert!(satisfied.is_empty()); + } + + #[test] + 
#[should_panic(expected = "transaction is not a blob tx")] + fn test_add_non_blob_transaction() { + // Ensure that adding a non-blob transaction causes a panic + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx = factory.validated_arc(MockTransaction::eip1559()); // Not a blob transaction + pool.add_transaction(tx); + } + + #[test] + #[should_panic(expected = "transaction already included")] + fn test_add_duplicate_blob_transaction() { + // Ensure that adding a duplicate blob transaction causes a panic + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx = factory.validated_arc(MockTransaction::eip4844()); + pool.add_transaction(tx.clone()); // First addition + pool.add_transaction(tx); // Attempt to add the same transaction again + } + + #[test] + fn test_remove_transactions_until_limit() { + // Test truncating the pool until it satisfies the given size limit + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx1 = factory.validated_arc(MockTransaction::eip4844().with_size(100)); + let tx2 = factory.validated_arc(MockTransaction::eip4844().with_size(200)); + let tx3 = factory.validated_arc(MockTransaction::eip4844().with_size(300)); + + // Add transactions to the pool + pool.add_transaction(tx1); + pool.add_transaction(tx2); + pool.add_transaction(tx3); + + // Set a size limit that requires truncation + let limit = SubPoolLimit { max_txs: 2, max_size: 300 }; + let removed = pool.truncate_pool(limit); + + // Check that only one transaction was removed to satisfy the limit + assert_eq!(removed.len(), 1); + assert_eq!(pool.len(), 2); + assert!(pool.size() <= limit.max_size); + } + + #[test] + fn test_empty_pool_invariants() { + // Ensure that the invariants hold for an empty pool + let pool: BlobTransactions = BlobTransactions::default(); + pool.assert_invariants(); + assert!(pool.is_empty()); + 
assert_eq!(pool.size(), 0); + assert_eq!(pool.len(), 0); + } } From cee11dfb7c7a321a49151d013573299018fc58fe Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 18 Nov 2024 14:28:59 +0100 Subject: [PATCH 531/970] test(tx-pool): add more unit tests for pending pool (#12603) --- crates/transaction-pool/src/pool/pending.rs | 100 ++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index f4bce8c85a6..ee2bcd96e84 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -880,4 +880,104 @@ mod tests { } } } + + #[test] + fn test_empty_pool_behavior() { + let mut pool = PendingPool::::new(MockOrdering::default()); + + // Ensure the pool is empty + assert!(pool.is_empty()); + assert_eq!(pool.len(), 0); + assert_eq!(pool.size(), 0); + + // Verify that attempting to truncate an empty pool does not panic and returns an empty vec + let removed = pool.truncate_pool(SubPoolLimit { max_txs: 10, max_size: 1000 }); + assert!(removed.is_empty()); + + // Verify that retrieving transactions from an empty pool yields nothing + let all_txs: Vec<_> = pool.all().collect(); + assert!(all_txs.is_empty()); + } + + #[test] + fn test_add_remove_transaction() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add a transaction and check if it's in the pool + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx.clone(), 0); + assert!(pool.contains(tx.id())); + assert_eq!(pool.len(), 1); + + // Remove the transaction and ensure it's no longer in the pool + let removed_tx = pool.remove_transaction(tx.id()).unwrap(); + assert_eq!(removed_tx.id(), tx.id()); + assert!(!pool.contains(tx.id())); + assert_eq!(pool.len(), 0); + } + + #[test] + fn test_reorder_on_basefee_update() { + let mut f = 
MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add two transactions with different fees + let tx1 = f.validated_arc(MockTransaction::eip1559().inc_price()); + let tx2 = f.validated_arc(MockTransaction::eip1559().inc_price_by(20)); + pool.add_transaction(tx1.clone(), 0); + pool.add_transaction(tx2.clone(), 0); + + // Ensure the transactions are in the correct order + let mut best = pool.best(); + assert_eq!(best.next().unwrap().hash(), tx2.hash()); + assert_eq!(best.next().unwrap().hash(), tx1.hash()); + + // Update the base fee to a value higher than tx1's fee, causing it to be removed + let removed = pool.update_base_fee((tx1.max_fee_per_gas() + 1) as u64); + assert_eq!(removed.len(), 1); + assert_eq!(removed[0].hash(), tx1.hash()); + + // Verify that only tx2 remains in the pool + assert_eq!(pool.len(), 1); + assert!(pool.contains(tx2.id())); + assert!(!pool.contains(tx1.id())); + } + + #[test] + #[should_panic(expected = "transaction already included")] + fn test_handle_duplicates() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add the same transaction twice and ensure it only appears once + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx.clone(), 0); + assert!(pool.contains(tx.id())); + assert_eq!(pool.len(), 1); + + // Attempt to add the same transaction again, which should be ignored + pool.add_transaction(tx, 0); + } + + #[test] + fn test_update_blob_fee() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add transactions with varying blob fees + let tx1 = f.validated_arc(MockTransaction::eip4844().set_blob_fee(50).clone()); + let tx2 = f.validated_arc(MockTransaction::eip4844().set_blob_fee(150).clone()); + pool.add_transaction(tx1.clone(), 0); + pool.add_transaction(tx2.clone(), 0); + + // Update the blob fee to a value that causes 
tx1 to be removed + let removed = pool.update_blob_fee(100); + assert_eq!(removed.len(), 1); + assert_eq!(removed[0].hash(), tx1.hash()); + + // Verify that only tx2 remains in the pool + assert!(pool.contains(tx2.id())); + assert!(!pool.contains(tx1.id())); + } } From ff22c8eef83c54f5893d7ae8abe72a1bfdf77516 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 18 Nov 2024 14:30:44 +0100 Subject: [PATCH 532/970] chore(sdk): Define `MaybeSerde` (#12577) Co-authored-by: Matthias Seitz --- Cargo.lock | 73 +++++++++---------- crates/evm/execution-types/Cargo.toml | 3 +- crates/exex/exex/Cargo.toml | 3 +- crates/net/eth-wire-types/Cargo.toml | 1 + crates/net/eth-wire/Cargo.toml | 3 +- crates/net/network/Cargo.toml | 1 + crates/optimism/cli/Cargo.toml | 3 +- crates/optimism/primitives/Cargo.toml | 41 ++++++++--- crates/optimism/primitives/src/tx_type.rs | 10 +-- crates/primitives-traits/Cargo.toml | 19 ++++- crates/primitives-traits/src/account.rs | 7 +- crates/primitives-traits/src/block/body.rs | 5 +- crates/primitives-traits/src/block/header.rs | 10 ++- crates/primitives-traits/src/block/mod.rs | 14 +--- crates/primitives-traits/src/lib.rs | 13 ++++ crates/primitives-traits/src/node.rs | 8 +- crates/primitives-traits/src/receipt.rs | 7 +- .../primitives-traits/src/transaction/mod.rs | 9 +-- .../src/transaction/signed.rs | 5 +- crates/primitives/Cargo.toml | 2 +- crates/revm/Cargo.toml | 1 + crates/storage/codecs/Cargo.toml | 2 +- crates/storage/db-api/Cargo.toml | 2 +- crates/storage/db-models/Cargo.toml | 2 +- crates/storage/db/Cargo.toml | 2 +- crates/storage/provider/Cargo.toml | 1 + crates/transaction-pool/Cargo.toml | 1 + crates/trie/common/Cargo.toml | 2 +- 28 files changed, 143 insertions(+), 107 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ded071c5dc2..56f2864c5fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -182,9 +182,9 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.4.0" +version = "0.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fb9fd842fdf10a524bbf2c4de6942ad869c1c8c3d128a1b09e67ed5f7cedbd" +checksum = "5f6cee6a35793f3db8a5ffe60e86c695f321d081a567211245f503e8c498fce8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -776,9 +776,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40d8e28db02c006f7abb20f345ffb3cc99c465e36f676ba262534e654ae76042" +checksum = "b6b2e366c0debf0af77766c23694a3f863b02633050e71e096e257ffbd395e50" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -1525,9 +1525,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" +checksum = "1a68f1f47cdf0ec8ee4b941b2eee2a80cb796db73118c0dd09ac63fbe405be22" dependencies = [ "memchr", "regex-automata 0.4.9", @@ -1651,9 +1651,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aeb932158bd710538c73702db6945cb68a8fb08c519e6e12706b94263b36db8" +checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" dependencies = [ "jobserver", "libc", @@ -1752,9 +1752,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -1762,9 +1762,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstream", "anstyle", @@ -1786,9 +1786,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" [[package]] name = "coins-bip32" @@ -1859,14 +1859,14 @@ dependencies = [ [[package]] name = "comfy-table" -version = "7.1.1" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" +checksum = "24f165e7b643266ea80cb858aed492ad9280e3e05ce24d4a99d7d7b889b6a4d9" dependencies = [ - "crossterm 0.27.0", + "crossterm", "strum", "strum_macros", - "unicode-width", + "unicode-width 0.2.0", ] [[package]] @@ -2107,19 +2107,6 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" -[[package]] -name = "crossterm" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" -dependencies = [ - "bitflags 2.6.0", - "crossterm_winapi", - "libc", - "parking_lot", - "winapi", -] - [[package]] name = "crossterm" version = "0.28.1" @@ -3198,9 +3185,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.34" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "miniz_oxide", @@ -6165,7 +6152,7 @@ dependencies = [ "bitflags 2.6.0", 
"cassowary", "compact_str", - "crossterm 0.28.1", + "crossterm", "instability", "itertools 0.13.0", "lru", @@ -6174,7 +6161,7 @@ dependencies = [ "strum_macros", "unicode-segmentation", "unicode-truncate", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -6665,7 +6652,7 @@ dependencies = [ "backon", "clap", "comfy-table", - "crossterm 0.28.1", + "crossterm", "eyre", "fdlimit", "futures", @@ -8397,6 +8384,7 @@ dependencies = [ "reth-primitives", "reth-primitives-traits", "rstest", + "serde", ] [[package]] @@ -9629,6 +9617,7 @@ checksum = "8f4b84ba6e838ceb47b41de5194a60244fac43d9fe03b71dbe8c5a201081d6d1" dependencies = [ "bytemuck", "byteorder", + "serde", ] [[package]] @@ -9903,9 +9892,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.2.4" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8d25269dd3a12467afe2e510f69fb0b46b698e5afb296b59f2145259deaf8e8" +checksum = "66b202022bb57c049555430e11fc22fea12909276a80a4c3d368da36ac1d88ed" dependencies = [ "sdd", ] @@ -11349,7 +11338,7 @@ checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" dependencies = [ "itertools 0.13.0", "unicode-segmentation", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -11358,6 +11347,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 13b0aef8ad4..4d2d8214ff9 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -40,7 +40,8 @@ serde = [ "revm/serde", "alloy-eips/serde", 
"alloy-primitives/serde", - "rand/serde" + "rand/serde", + "reth-primitives-traits/serde", ] serde-bincode-compat = [ "reth-primitives/serde-bincode-compat", diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index f7ab4fce5df..3cbeb115b06 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -78,5 +78,6 @@ serde = [ "alloy-primitives/serde", "parking_lot/serde", "rand/serde", - "secp256k1/serde" + "secp256k1/serde", + "reth-primitives-traits/serde", ] diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index f9759ffc25a..8b89603167d 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -65,4 +65,5 @@ serde = [ "alloy-primitives/serde", "bytes/serde", "rand/serde", + "reth-primitives-traits/serde", ] diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 3999f658e0a..ffbd3017fa6 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -87,7 +87,8 @@ serde = [ "rand/serde", "secp256k1/serde", "reth-codecs/serde", - "alloy-chains/serde" + "alloy-chains/serde", + "reth-primitives-traits/serde", ] [[test]] diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index dde0b4a0b23..ad8e65dffc6 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -121,6 +121,7 @@ serde = [ "rand/serde", "smallvec/serde", "url/serde", + "reth-primitives-traits/serde", ] test-utils = [ "dep:reth-provider", diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 198e5377ec4..d090075927a 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -119,5 +119,6 @@ serde = [ "alloy-primitives/serde", "op-alloy-consensus?/serde", "reth-execution-types/serde", - "reth-provider/serde" + "reth-provider/serde", + "reth-optimism-primitives/serde", ] diff --git a/crates/optimism/primitives/Cargo.toml 
b/crates/optimism/primitives/Cargo.toml index 216e559a201..4c6d9f51406 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -12,21 +12,44 @@ description = "OP primitive types" workspace = true [dependencies] +# reth +reth-primitives-traits.workspace = true +reth-codecs = { workspace = true, optional = true } +reth-primitives = { workspace = true, features = ["reth-codec"], optional = true } + +# ethereum alloy-primitives.workspace = true alloy-consensus.workspace = true -op-alloy-consensus.workspace = true alloy-eips.workspace = true alloy-rlp.workspace = true -derive_more.workspace = true + +# op +op-alloy-consensus.workspace = true + +# codec bytes.workspace = true -reth-primitives-traits.workspace = true -reth-codecs = { workspace = true, optional = true } -reth-primitives = { workspace = true, features = ["reth-codec"], optional = true } +serde = { workspace = true, optional = true } -[features] -default = ["reth-codec"] -reth-codec = ["dep:reth-codecs", "dep:reth-primitives"] +# misc +derive_more.workspace = true [dev-dependencies] reth-codecs = { workspace = true, features = ["test-utils"] } -rstest.workspace = true \ No newline at end of file +rstest.workspace = true + +[features] +default = ["reth-codec"] +reth-codec = [ + "dep:reth-codecs", + "dep:reth-primitives" +] +serde = [ + "dep:serde", + "reth-primitives-traits/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "bytes/serde", + "reth-codecs/serde", + "op-alloy-consensus/serde", +] diff --git a/crates/optimism/primitives/src/tx_type.rs b/crates/optimism/primitives/src/tx_type.rs index 70f5fd32d8e..9ddfe77b192 100644 --- a/crates/optimism/primitives/src/tx_type.rs +++ b/crates/optimism/primitives/src/tx_type.rs @@ -3,8 +3,9 @@ //! This type is required because a `Compact` impl is needed on the deposit tx type. 
use core::fmt::Debug; -use std::convert::TryFrom; +#[cfg(feature = "reth-codec")] +use alloy_consensus::constants::EIP7702_TX_TYPE_ID; use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable, Error}; use bytes::BufMut; @@ -13,10 +14,6 @@ use derive_more::{ Display, }; use op_alloy_consensus::OpTxType as AlloyOpTxType; -use reth_primitives_traits::{InMemorySize, TxType}; - -#[cfg(feature = "reth-codec")] -use alloy_consensus::constants::EIP7702_TX_TYPE_ID; #[cfg(feature = "reth-codec")] use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; #[cfg(feature = "reth-codec")] @@ -24,8 +21,9 @@ use reth_primitives::transaction::{ COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY, }; +use reth_primitives_traits::{InMemorySize, TxType}; -/// Wrapper type for `AlloyOpTxType` to implement `TxType` trait. +/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement [`TxType`] trait. #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] #[into(u8)] pub struct OpTxType(AlloyOpTxType); diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 651583f8e4d..20430fbc882 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -12,15 +12,16 @@ description = "Common types in reth." 
workspace = true [dependencies] +# reth reth-codecs.workspace = true -alloy-consensus = { workspace = true, features = ["serde"] } +# ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true - -revm-primitives = { workspace = true, features = ["serde"] } +revm-primitives.workspace = true # misc byteorder = "1" @@ -76,7 +77,19 @@ arbitrary = [ "reth-codecs/arbitrary" ] serde-bincode-compat = [ + "serde", "serde_with", "alloy-consensus/serde-bincode-compat", "alloy-eips/serde-bincode-compat" ] +serde = [ + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "reth-codecs/serde", + "revm-primitives/serde", + "roaring/serde", + "revm-primitives/serde", +] \ No newline at end of file diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index ae58973edd7..927e39a52e1 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -6,7 +6,6 @@ use bytes::Buf; use derive_more::Deref; use reth_codecs::{add_arbitrary_tests, Compact}; use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, BytecodeDecodeError, JumpTable}; -use serde::{Deserialize, Serialize}; /// Identifier for [`LegacyRaw`](RevmBytecode::LegacyRaw). const LEGACY_RAW_BYTECODE_ID: u8 = 0; @@ -24,7 +23,8 @@ const EOF_BYTECODE_ID: u8 = 3; const EIP7702_BYTECODE_ID: u8 = 4; /// An Ethereum account. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Default, Compact)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct Account { @@ -60,7 +60,8 @@ impl Account { /// Bytecode for an account. 
/// /// A wrapper around [`revm::primitives::Bytecode`][RevmBytecode] with encoding/decoding support. -#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, Deref)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, Default, PartialEq, Eq, Deref)] pub struct Bytecode(pub RevmBytecode); impl Bytecode { diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index e9aadf40957..074efc4d514 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,6 +1,6 @@ //! Block body abstraction. -use crate::InMemorySize; +use crate::{InMemorySize, MaybeSerde}; use alloc::fmt; use alloy_consensus::Transaction; @@ -15,11 +15,10 @@ pub trait BlockBody: + fmt::Debug + PartialEq + Eq - + serde::Serialize - + for<'de> serde::Deserialize<'de> + alloy_rlp::Encodable + alloy_rlp::Decodable + InMemorySize + + MaybeSerde { /// Ordered list of signed transactions as committed in block. // todo: requires trait for signed transaction diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 779df442538..524835879f3 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -1,10 +1,12 @@ //! Block header data primitive. -use crate::InMemorySize; -use alloy_primitives::Sealable; use core::fmt; + +use alloy_primitives::Sealable; use reth_codecs::Compact; +use crate::{InMemorySize, MaybeSerde}; + /// Helper trait that unifies all behaviour required by block header to support full node /// operations. 
pub trait FullBlockHeader: BlockHeader + Compact {} @@ -26,6 +28,7 @@ pub trait BlockHeader: + alloy_consensus::BlockHeader + Sealable + InMemorySize + + MaybeSerde { } @@ -38,12 +41,11 @@ impl BlockHeader for T where + fmt::Debug + PartialEq + Eq - + serde::Serialize - + for<'de> serde::Deserialize<'de> + alloy_rlp::Encodable + alloy_rlp::Decodable + alloy_consensus::BlockHeader + Sealable + InMemorySize + + MaybeSerde { } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 6bef9ea167f..5b1faeafbb7 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -7,7 +7,7 @@ use alloc::fmt; use reth_codecs::Compact; -use crate::{BlockHeader, FullBlockHeader, InMemorySize}; +use crate::{BlockHeader, FullBlockHeader, InMemorySize, MaybeSerde}; /// Helper trait that unifies all behaviour required by block to support full node operations. pub trait FullBlock: Block + Compact {} @@ -20,17 +20,7 @@ impl FullBlock for T where T: Block + Compact {} // senders #[auto_impl::auto_impl(&, Arc)] pub trait Block: - Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + serde::Serialize - + for<'a> serde::Deserialize<'a> - + InMemorySize + Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + InMemorySize + MaybeSerde { /// Header part of the block. type Header: BlockHeader + 'static; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 584181f2c95..1c848b81413 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -92,3 +92,16 @@ pub trait MaybeArbitrary {} impl MaybeArbitrary for T where T: for<'a> arbitrary::Arbitrary<'a> {} #[cfg(not(any(feature = "test-utils", feature = "arbitrary")))] impl MaybeArbitrary for T {} + +/// Helper trait that requires de-/serialize implementation since `serde` feature is enabled. 
+#[cfg(feature = "serde")] +pub trait MaybeSerde: serde::Serialize + for<'de> serde::Deserialize<'de> {} +/// Noop. Helper trait that would require de-/serialize implementation if `serde` feature were +/// enabled. +#[cfg(not(feature = "serde"))] +pub trait MaybeSerde {} + +#[cfg(feature = "serde")] +impl MaybeSerde for T where T: serde::Serialize + for<'de> serde::Deserialize<'de> {} +#[cfg(not(feature = "serde"))] +impl MaybeSerde for T {} diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index ca490ac15aa..35c8ea0f693 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,17 +1,17 @@ use core::fmt; -use crate::{BlockBody, FullBlock, FullReceipt, FullSignedTx, FullTxType}; +use crate::{BlockBody, FullBlock, FullReceipt, FullSignedTx, FullTxType, MaybeSerde}; /// Configures all the primitive types of the node. pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static { /// Block primitive. - type Block: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + type Block: Send + Sync + Unpin + Clone + Default + fmt::Debug + MaybeSerde + 'static; /// Signed version of the transaction type. - type SignedTx: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + type SignedTx: Send + Sync + Unpin + Clone + Default + fmt::Debug + MaybeSerde + 'static; /// Transaction envelope type ID. type TxType: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; /// A receipt. - type Receipt: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + type Receipt: Send + Sync + Unpin + Clone + Default + fmt::Debug + MaybeSerde + 'static; } impl NodePrimitives for () { diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index b34590dff0e..64839ecb8b4 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,13 +1,11 @@ //! 
Receipt abstraction +use crate::{InMemorySize, MaybeSerde}; use alloc::vec::Vec; use alloy_consensus::TxReceipt; use alloy_primitives::B256; use core::fmt; use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; - -use crate::InMemorySize; /// Helper trait that unifies all behaviour required by receipt to support full node operations. pub trait FullReceipt: Receipt + Compact {} @@ -26,9 +24,8 @@ pub trait Receipt: + TxReceipt + alloy_rlp::Encodable + alloy_rlp::Decodable - + Serialize + + MaybeSerde + InMemorySize - + for<'de> Deserialize<'de> { /// Returns transaction type. fn tx_type(&self) -> u8; diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 33ee36090ac..9d60be0c32e 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -6,9 +6,8 @@ use core::{fmt, hash::Hash}; use alloy_primitives::B256; use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; -use crate::{FullTxType, InMemorySize, MaybeArbitrary, TxType}; +use crate::{FullTxType, InMemorySize, MaybeArbitrary, MaybeSerde, TxType}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. 
pub trait FullTransaction: Transaction + Compact {} @@ -26,10 +25,9 @@ pub trait Transaction: + Eq + PartialEq + Hash - + Serialize - + for<'de> Deserialize<'de> + TransactionExt + InMemorySize + + MaybeSerde + MaybeArbitrary { } @@ -44,10 +42,9 @@ impl Transaction for T where + Eq + PartialEq + Hash - + Serialize - + for<'de> Deserialize<'de> + TransactionExt + InMemorySize + + MaybeSerde + MaybeArbitrary { } diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 7b6abbaec0f..d860dbb92fc 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -8,7 +8,7 @@ use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; use reth_codecs::Compact; use revm_primitives::TxEnv; -use crate::{FullTransaction, InMemorySize, MaybeArbitrary, Transaction}; +use crate::{FullTransaction, InMemorySize, MaybeArbitrary, MaybeSerde, Transaction}; /// Helper trait that unifies all behaviour required by block to support full node operations. 
pub trait FullSignedTx: SignedTransaction + Compact {} @@ -27,13 +27,12 @@ pub trait SignedTransaction: + PartialEq + Eq + Hash - + serde::Serialize - + for<'a> serde::Deserialize<'a> + alloy_rlp::Encodable + alloy_rlp::Decodable + Encodable2718 + Decodable2718 + alloy_consensus::Transaction + + MaybeSerde + MaybeArbitrary + InMemorySize { diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index c9043a2bd11..89282c8f93d 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde"] } reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true revm-primitives = { workspace = true, features = ["serde"] } diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index d1202cd8b2c..4bc78b7b056 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -59,4 +59,5 @@ serde = [ "alloy-eips/serde", "alloy-primitives/serde", "alloy-consensus/serde", + "reth-primitives-traits/serde", ] diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 20a0673dff6..57fe9f726c7 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -81,7 +81,7 @@ serde = [ "alloy-primitives/serde", "alloy-trie?/serde", "bytes/serde", - "op-alloy-consensus?/serde" + "op-alloy-consensus?/serde", ] arbitrary = [ "alloy-consensus?/arbitrary", diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 9b8589cb6aa..bcc3e778984 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-codecs.workspace = true reth-db-models.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde"] } 
reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 59d95c2263d..44c0c3d962a 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde"] } # ethereum alloy-primitives.workspace = true diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 6042b5faa81..7dca8aa8475 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-db-api.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde"] } reth-fs-util.workspace = true reth-storage-errors.workspace = true reth-nippy-jar.workspace = true diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 399e3e000b9..eff0540638a 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -110,6 +110,7 @@ serde = [ "rand/serde", "revm/serde", "reth-codecs/serde", + "reth-optimism-primitives?/serde", ] test-utils = [ "reth-db/test-utils", diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 22df8253682..7c0f3476559 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -88,6 +88,7 @@ serde = [ "rand?/serde", "revm/serde", "smallvec/serde", + "reth-primitives-traits/serde", ] test-utils = [ "rand", diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 0616e259710..49d09d6f39b 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -12,7 +12,7 @@ description = 
"Commonly used types for trie usage in reth." workspace = true [dependencies] -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde"] } reth-codecs.workspace = true alloy-primitives.workspace = true From dc45aa9fffc869f1c865697a58843cc87113de57 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 13:40:44 +0000 Subject: [PATCH 533/970] chore(deps): weekly `cargo update` (#12611) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56f2864c5fa..054ad19dad5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4579,9 +4579,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.162" +version = "0.2.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" +checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" [[package]] name = "libloading" @@ -5286,9 +5286,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff54d1d790eca1f3aedbd666162e9c42eceff90b9f9d24b352ed9c2df1e901a" +checksum = "862db7293434837c1ca32ef509806a7b330bd24605da95438cd6e928a58b4b2c" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5304,9 +5304,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae84fd64fbc53b3e958ea5a96d7f5633e4a111092e41c51672c2d91835c09efb" +checksum = "8ebd0391a3123b47e44ccca8a6f63a39ead2d7ea52e4fc132ff1297f6184314e" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -5318,9 +5318,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71e777450ee3e9c5177e00865e9b4496472b623c50f146fc907b667c6b4ab37" +checksum = "6fd5d57f04f7ce1ba8be7704ba87fe7bea151a94ffc971f5a8a68b3bdf962471" dependencies = [ "alloy-consensus", "alloy-network", @@ -5333,9 +5333,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e854d2d4958d0a213731560172e8455536329ee9574473ff79fa953da91eb6a" +checksum = "0220768efb59871af53e1685b90983c9f3090cdf45df3d0107348362ba7055ee" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5343,6 +5343,7 @@ dependencies = [ "alloy-rlp", "alloy-serde", "async-trait", + "brotli", "derive_more 1.0.0", "op-alloy-consensus", "op-alloy-genesis", @@ -5353,9 +5354,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "981b7f8ab11fe85ba3c1723702f000429b8d0c16b5883c93d577895f262cbac6" +checksum = "03db591ad512fdc70170fcb2bff3517b64811443f9fb65d3a1a6344c60acdbf0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5372,9 +5373,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a227b16c9c5df68b112c8db9d268ebf46b3e26c744b4d59d4949575cd603a292" +checksum = "dd1a11a9cf2f2e8ed9ae11c93dce5990ff81ff98f17995772f567b586a864812" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9756,9 +9757,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.16" +version = "0.23.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" dependencies = [ "log", "once_cell", From 26ce7fbdb2aeb37e9009065f55ab826b2f4d2b56 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 18 Nov 2024 14:56:14 +0100 Subject: [PATCH 534/970] feat(trie): add extend method to MultiProof (#12467) --- crates/trie/common/Cargo.toml | 1 + crates/trie/common/src/proofs.rs | 78 +++++++++++++++++++++++++++++++- 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 49d09d6f39b..0161fc7ff3d 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -34,6 +34,7 @@ plain_hasher = { version = "0.2", optional = true } arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] +alloy-primitives = { workspace = true, features = ["getrandom"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index f6eaf3960ec..d0a5cd22042 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -12,7 +12,7 @@ use alloy_trie::{ use itertools::Itertools; use reth_primitives_traits::Account; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; +use std::collections::{hash_map, HashMap}; /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes @@ -76,6 +76,24 @@ impl MultiProof { } Ok(AccountProof { address, info, proof, storage_root, storage_proofs }) } + + /// Extends this multiproof with another one, merging both account and storage + /// proofs. 
+ pub fn extend(&mut self, other: Self) { + self.account_subtree.extend_from(other.account_subtree); + + for (hashed_address, storage) in other.storages { + match self.storages.entry(hashed_address) { + hash_map::Entry::Occupied(mut entry) => { + debug_assert_eq!(entry.get().root, storage.root); + entry.get_mut().subtree.extend_from(storage.subtree); + } + hash_map::Entry::Vacant(entry) => { + entry.insert(storage); + } + } + } + } } /// The merkle multiproof of storage trie. @@ -255,3 +273,61 @@ pub mod triehash { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_multiproof_extend_account_proofs() { + let mut proof1 = MultiProof::default(); + let mut proof2 = MultiProof::default(); + + let addr1 = B256::random(); + let addr2 = B256::random(); + + proof1.account_subtree.insert( + Nibbles::unpack(addr1), + alloy_rlp::encode_fixed_size(&U256::from(42)).to_vec().into(), + ); + proof2.account_subtree.insert( + Nibbles::unpack(addr2), + alloy_rlp::encode_fixed_size(&U256::from(43)).to_vec().into(), + ); + + proof1.extend(proof2); + + assert!(proof1.account_subtree.contains_key(&Nibbles::unpack(addr1))); + assert!(proof1.account_subtree.contains_key(&Nibbles::unpack(addr2))); + } + + #[test] + fn test_multiproof_extend_storage_proofs() { + let mut proof1 = MultiProof::default(); + let mut proof2 = MultiProof::default(); + + let addr = B256::random(); + let root = B256::random(); + + let mut subtree1 = ProofNodes::default(); + subtree1.insert( + Nibbles::from_nibbles(vec![0]), + alloy_rlp::encode_fixed_size(&U256::from(42)).to_vec().into(), + ); + proof1.storages.insert(addr, StorageMultiProof { root, subtree: subtree1 }); + + let mut subtree2 = ProofNodes::default(); + subtree2.insert( + Nibbles::from_nibbles(vec![1]), + alloy_rlp::encode_fixed_size(&U256::from(43)).to_vec().into(), + ); + proof2.storages.insert(addr, StorageMultiProof { root, subtree: subtree2 }); + + proof1.extend(proof2); + + let storage = proof1.storages.get(&addr).unwrap(); + 
assert_eq!(storage.root, root); + assert!(storage.subtree.contains_key(&Nibbles::from_nibbles(vec![0]))); + assert!(storage.subtree.contains_key(&Nibbles::from_nibbles(vec![1]))); + } +} From 8339c716b4edeecfa44620fce67978cad2f05342 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 18 Nov 2024 14:58:31 +0100 Subject: [PATCH 535/970] feat(engine): introduce sync implementation of StateRootTask (#12378) --- Cargo.lock | 4 +- crates/engine/tree/Cargo.toml | 8 +- crates/engine/tree/benches/channel_perf.rs | 132 ++++++++++++++++ crates/engine/tree/src/tree/root.rs | 167 +++++++++++++++------ 4 files changed, 257 insertions(+), 54 deletions(-) create mode 100644 crates/engine/tree/benches/channel_perf.rs diff --git a/Cargo.lock b/Cargo.lock index 054ad19dad5..e83e20a0399 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7224,9 +7224,10 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types-engine", "assert_matches", + "criterion", + "crossbeam-channel", "futures", "metrics", - "pin-project", "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", @@ -7261,7 +7262,6 @@ dependencies = [ "revm-primitives", "thiserror 1.0.69", "tokio", - "tokio-stream", "tracing", ] diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 278457145e7..d6e1c80a726 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -45,9 +45,7 @@ revm-primitives.workspace = true # common futures.workspace = true -pin-project.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } -tokio-stream.workspace = true thiserror.workspace = true # metrics @@ -82,6 +80,12 @@ reth-chainspec.workspace = true alloy-rlp.workspace = true assert_matches.workspace = true +criterion.workspace = true +crossbeam-channel = "0.5.13" + +[[bench]] +name = "channel_perf" +harness = false [features] test-utils = [ diff --git a/crates/engine/tree/benches/channel_perf.rs b/crates/engine/tree/benches/channel_perf.rs new file mode 100644 index 
00000000000..c1c65e0a68e --- /dev/null +++ b/crates/engine/tree/benches/channel_perf.rs @@ -0,0 +1,132 @@ +//! Benchmark comparing `std::sync::mpsc` and `crossbeam` channels for `StateRootTask`. + +#![allow(missing_docs)] + +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use revm_primitives::{ + Account, AccountInfo, AccountStatus, Address, EvmState, EvmStorage, EvmStorageSlot, HashMap, + B256, U256, +}; +use std::thread; + +/// Creates a mock state with the specified number of accounts for benchmarking +fn create_bench_state(num_accounts: usize) -> EvmState { + let mut state_changes = HashMap::default(); + + for i in 0..num_accounts { + let storage = + EvmStorage::from_iter([(U256::from(i), EvmStorageSlot::new(U256::from(i + 1)))]); + + let account = Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), + }, + storage, + status: AccountStatus::Loaded, + }; + + let address = Address::random(); + state_changes.insert(address, account); + } + + state_changes +} + +/// Simulated `StateRootTask` with `std::sync::mpsc` +struct StdStateRootTask { + rx: std::sync::mpsc::Receiver, +} + +impl StdStateRootTask { + const fn new(rx: std::sync::mpsc::Receiver) -> Self { + Self { rx } + } + + fn run(self) { + while let Ok(state) = self.rx.recv() { + criterion::black_box(state); + } + } +} + +/// Simulated `StateRootTask` with `crossbeam-channel` +struct CrossbeamStateRootTask { + rx: crossbeam_channel::Receiver, +} + +impl CrossbeamStateRootTask { + const fn new(rx: crossbeam_channel::Receiver) -> Self { + Self { rx } + } + + fn run(self) { + while let Ok(state) = self.rx.recv() { + criterion::black_box(state); + } + } +} + +/// Benchmarks the performance of different channel implementations for state streaming +fn bench_state_stream(c: &mut Criterion) { + let mut group = c.benchmark_group("state_stream_channels"); + group.sample_size(10); + + for size in &[1, 10, 
100] { + let bench_setup = || { + let states: Vec<_> = (0..100).map(|_| create_bench_state(*size)).collect(); + states + }; + + group.bench_with_input(BenchmarkId::new("std_channel", size), size, |b, _| { + b.iter_batched( + bench_setup, + |states| { + let (tx, rx) = std::sync::mpsc::channel(); + let task = StdStateRootTask::new(rx); + + let processor = thread::spawn(move || { + task.run(); + }); + + for state in states { + tx.send(state).unwrap(); + } + drop(tx); + + processor.join().unwrap(); + }, + BatchSize::LargeInput, + ); + }); + + group.bench_with_input(BenchmarkId::new("crossbeam_channel", size), size, |b, _| { + b.iter_batched( + bench_setup, + |states| { + let (tx, rx) = crossbeam_channel::unbounded(); + let task = CrossbeamStateRootTask::new(rx); + + let processor = thread::spawn(move || { + task.run(); + }); + + for state in states { + tx.send(state).unwrap(); + } + drop(tx); + + processor.join().unwrap(); + }, + BatchSize::LargeInput, + ); + }); + } + + group.finish(); +} + +criterion_group!(benches, bench_state_stream); +criterion_main!(benches); diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index fbf6c348138..45cf5a78031 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,18 +1,13 @@ //! State root task related functionality. 
-use futures::Stream; -use pin_project::pin_project; use reth_provider::providers::ConsistentDbView; use reth_trie::{updates::TrieUpdates, TrieInput}; use reth_trie_parallel::root::ParallelStateRootError; use revm_primitives::{EvmState, B256}; -use std::{ - future::Future, - pin::Pin, - sync::{mpsc, Arc}, - task::{Context, Poll}, +use std::sync::{ + mpsc::{self, Receiver, RecvError}, + Arc, }; -use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::debug; /// Result of the state root calculation @@ -28,12 +23,43 @@ pub(crate) struct StateRootHandle { #[allow(dead_code)] impl StateRootHandle { + /// Creates a new handle from a receiver. + pub(crate) const fn new(rx: mpsc::Receiver) -> Self { + Self { rx } + } + /// Waits for the state root calculation to complete. pub(crate) fn wait_for_result(self) -> StateRootResult { self.rx.recv().expect("state root task was dropped without sending result") } } +/// Common configuration for state root tasks +#[derive(Debug)] +pub(crate) struct StateRootConfig { + /// View over the state in the database. + pub consistent_view: ConsistentDbView, + /// Latest trie input. + pub input: Arc, +} + +/// Wrapper for std channel receiver to maintain compatibility with `UnboundedReceiverStream` +#[allow(dead_code)] +pub(crate) struct StdReceiverStream { + rx: Receiver, +} + +#[allow(dead_code)] +impl StdReceiverStream { + pub(crate) const fn new(rx: Receiver) -> Self { + Self { rx } + } + + pub(crate) fn recv(&self) -> Result { + self.rx.recv() + } +} + /// Standalone task that receives a transaction state stream and updates relevant /// data structures to calculate state root. /// @@ -42,15 +68,12 @@ impl StateRootHandle { /// fetches the proofs for relevant accounts from the database and reveal them /// to the tree. /// Then it updates relevant leaves according to the result of the transaction. -#[pin_project] +#[allow(dead_code)] pub(crate) struct StateRootTask { - /// View over the state in the database. 
- consistent_view: ConsistentDbView, /// Incoming state updates. - #[pin] - state_stream: UnboundedReceiverStream, - /// Latest trie input. - input: Arc, + state_stream: StdReceiverStream, + /// Task configuration. + config: StateRootConfig, } #[allow(dead_code)] @@ -60,65 +83,109 @@ where { /// Creates a new `StateRootTask`. pub(crate) const fn new( - consistent_view: ConsistentDbView, - input: Arc, - state_stream: UnboundedReceiverStream, + config: StateRootConfig, + state_stream: StdReceiverStream, ) -> Self { - Self { consistent_view, state_stream, input } + Self { config, state_stream } } /// Spawns the state root task and returns a handle to await its result. pub(crate) fn spawn(self) -> StateRootHandle { - let (tx, rx) = mpsc::channel(); - - // Spawn the task that will process state updates and calculate the root - tokio::spawn(async move { - debug!(target: "engine::tree", "Starting state root task"); - let result = self.await; - let _ = tx.send(result); - }); + let (tx, rx) = mpsc::sync_channel(1); + std::thread::Builder::new() + .name("State Root Task".to_string()) + .spawn(move || { + debug!(target: "engine::tree", "Starting state root task"); + let result = self.run(); + let _ = tx.send(result); + }) + .expect("failed to spawn state root thread"); - StateRootHandle { rx } + StateRootHandle::new(rx) } /// Handles state updates. fn on_state_update( - _view: &ConsistentDbView, - _input: &Arc, + _view: &reth_provider::providers::ConsistentDbView, + _input: &std::sync::Arc, _state: EvmState, ) { + // Default implementation of state update handling // TODO: calculate hashed state update and dispatch proof gathering for it. 
} } -impl Future for StateRootTask +#[allow(dead_code)] +impl StateRootTask where Factory: Send + 'static, { - type Output = StateRootResult; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - - // Process all items until the stream is closed - loop { - match this.state_stream.as_mut().poll_next(cx) { - Poll::Ready(Some(state)) => { - Self::on_state_update(this.consistent_view, this.input, state); - } - Poll::Ready(None) => { - // stream closed, return final result - return Poll::Ready(Ok((B256::default(), TrieUpdates::default()))); - } - Poll::Pending => { - return Poll::Pending; - } - } + fn run(self) -> StateRootResult { + while let Ok(state) = self.state_stream.recv() { + Self::on_state_update(&self.config.consistent_view, &self.config.input, state); } // TODO: // * keep track of proof calculation // * keep track of intermediate root computation // * return final state root result + Ok((B256::default(), TrieUpdates::default())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_provider::{providers::ConsistentDbView, test_utils::MockEthProvider}; + use reth_trie::TrieInput; + use revm_primitives::{ + Account, AccountInfo, AccountStatus, Address, EvmState, EvmStorage, EvmStorageSlot, + HashMap, B256, U256, + }; + use std::sync::Arc; + + fn create_mock_config() -> StateRootConfig { + let factory = MockEthProvider::default(); + let view = ConsistentDbView::new(factory, None); + let input = Arc::new(TrieInput::default()); + StateRootConfig { consistent_view: view, input } + } + + fn create_mock_state() -> revm_primitives::EvmState { + let mut state_changes: EvmState = HashMap::default(); + let storage = EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); + let account = Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), + }, + storage, + status: AccountStatus::Loaded, + }; + + let address = 
Address::random(); + state_changes.insert(address, account); + + state_changes + } + + #[test] + fn test_state_root_task() { + let config = create_mock_config(); + let (tx, rx) = std::sync::mpsc::channel(); + let stream = StdReceiverStream::new(rx); + + let task = StateRootTask::new(config, stream); + let handle = task.spawn(); + + for _ in 0..10 { + tx.send(create_mock_state()).expect("failed to send state"); + } + drop(tx); + + let result = handle.wait_for_result(); + assert!(result.is_ok(), "sync block execution failed"); } } From 2dc75fb9966a9afc93a235d42ee2a6195144cebb Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 18 Nov 2024 18:09:44 +0400 Subject: [PATCH 536/970] fix: correctly prune transactions during on-disk reorgs (#12630) --- crates/storage/provider/src/writer/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 1c3894e9cfd..17dea5a6d51 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -285,7 +285,8 @@ where let tx_range = self .database() .transaction_range_by_block_range(block_number + 1..=highest_static_file_block)?; - let total_txs = tx_range.end().saturating_sub(*tx_range.start()); + // We are using end + 1 - start here because the returned range is inclusive. 
+ let total_txs = (tx_range.end() + 1).saturating_sub(*tx_range.start()); // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number"); From 1d2934ba698ef90d07ae4961ce7e72b0a078e48d Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 18 Nov 2024 16:02:39 +0100 Subject: [PATCH 537/970] feat(trie): turn TrieWitness methods into public functions (#12510) --- crates/trie/trie/src/witness.rs | 264 ++++++++++++++++---------------- 1 file changed, 131 insertions(+), 133 deletions(-) diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 8290f158062..b2364b385e1 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -118,16 +118,15 @@ where account_rlp.clone() }); let key = Nibbles::unpack(hashed_address); - account_trie_nodes.extend( - self.target_nodes( - key.clone(), - value, - account_multiproof - .account_subtree - .matching_nodes_iter(&key) - .sorted_by(|a, b| a.0.cmp(b.0)), - )?, - ); + account_trie_nodes.extend(target_nodes( + key.clone(), + value, + Some(&mut self.witness), + account_multiproof + .account_subtree + .matching_nodes_iter(&key) + .sorted_by(|a, b| a.0.cmp(b.0)), + )?); // Gather and record storage trie nodes for this account. 
let mut storage_trie_nodes = BTreeMap::default(); @@ -138,19 +137,18 @@ where .and_then(|s| s.storage.get(&hashed_slot)) .filter(|v| !v.is_zero()) .map(|v| alloy_rlp::encode_fixed_size(v).to_vec()); - storage_trie_nodes.extend( - self.target_nodes( - slot_nibbles.clone(), - slot_value, - storage_multiproof - .subtree - .matching_nodes_iter(&slot_nibbles) - .sorted_by(|a, b| a.0.cmp(b.0)), - )?, - ); + storage_trie_nodes.extend(target_nodes( + slot_nibbles.clone(), + slot_value, + Some(&mut self.witness), + storage_multiproof + .subtree + .matching_nodes_iter(&slot_nibbles) + .sorted_by(|a, b| a.0.cmp(b.0)), + )?); } - Self::next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { + next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { // Right pad the target with 0s. let mut padded_key = key.pack(); padded_key.resize(32, 0); @@ -177,7 +175,7 @@ where })?; } - Self::next_root_from_proofs(account_trie_nodes, |key: Nibbles| { + next_root_from_proofs(account_trie_nodes, |key: Nibbles| { // Right pad the target with 0s. let mut padded_key = key.pack(); padded_key.resize(32, 0); @@ -197,63 +195,6 @@ where Ok(self.witness) } - /// Decodes and unrolls all nodes from the proof. Returns only sibling nodes - /// in the path of the target and the final leaf node with updated value. - fn target_nodes<'b>( - &mut self, - key: Nibbles, - value: Option>, - proof: impl IntoIterator, - ) -> Result>>, TrieWitnessError> { - let mut trie_nodes = BTreeMap::default(); - let mut proof_iter = proof.into_iter().enumerate().peekable(); - while let Some((idx, (path, encoded))) = proof_iter.next() { - // Record the node in witness. - self.witness.insert(keccak256(encoded.as_ref()), encoded.clone()); - - let mut next_path = path.clone(); - match TrieNode::decode(&mut &encoded[..])? 
{ - TrieNode::Branch(branch) => { - next_path.push(key[path.len()]); - let children = branch_node_children(path.clone(), &branch); - for (child_path, value) in children { - if !key.starts_with(&child_path) { - let value = if value.len() < B256::len_bytes() { - Either::Right(value.to_vec()) - } else { - Either::Left(B256::from_slice(&value[1..])) - }; - trie_nodes.insert(child_path, value); - } - } - } - TrieNode::Extension(extension) => { - next_path.extend_from_slice(&extension.key); - } - TrieNode::Leaf(leaf) => { - next_path.extend_from_slice(&leaf.key); - if next_path != key { - trie_nodes.insert( - next_path.clone(), - Either::Right(leaf.value.as_slice().to_vec()), - ); - } - } - TrieNode::EmptyRoot => { - if idx != 0 || proof_iter.peek().is_some() { - return Err(TrieWitnessError::UnexpectedEmptyRoot(next_path)) - } - } - }; - } - - if let Some(value) = value { - trie_nodes.insert(key, Either::Right(value)); - } - - Ok(trie_nodes) - } - /// Retrieve proof targets for incoming hashed state. /// This method will aggregate all accounts and slots present in the hash state as well as /// select all existing slots from the database for the accounts that have been destroyed. @@ -283,73 +224,130 @@ where } Ok(proof_targets) } +} - fn next_root_from_proofs( - trie_nodes: BTreeMap>>, - mut trie_node_provider: impl FnMut(Nibbles) -> Result, - ) -> Result { - // Ignore branch child hashes in the path of leaves or lower child hashes. - let mut keys = trie_nodes.keys().peekable(); - let mut ignored = HashSet::::default(); - while let Some(key) = keys.next() { - if keys.peek().is_some_and(|next| next.starts_with(key)) { - ignored.insert(key.clone()); +/// Decodes and unrolls all nodes from the proof. Returns only sibling nodes +/// in the path of the target and the final leaf node with updated value. 
+pub fn target_nodes<'b>( + key: Nibbles, + value: Option>, + mut witness: Option<&mut HashMap>, + proof: impl IntoIterator, +) -> Result>>, TrieWitnessError> { + let mut trie_nodes = BTreeMap::default(); + let mut proof_iter = proof.into_iter().enumerate().peekable(); + while let Some((idx, (path, encoded))) = proof_iter.next() { + // Record the node in witness. + if let Some(witness) = witness.as_mut() { + witness.insert(keccak256(encoded.as_ref()), encoded.clone()); + } + + let mut next_path = path.clone(); + match TrieNode::decode(&mut &encoded[..])? { + TrieNode::Branch(branch) => { + next_path.push(key[path.len()]); + let children = branch_node_children(path.clone(), &branch); + for (child_path, value) in children { + if !key.starts_with(&child_path) { + let value = if value.len() < B256::len_bytes() { + Either::Right(value.to_vec()) + } else { + Either::Left(B256::from_slice(&value[1..])) + }; + trie_nodes.insert(child_path, value); + } + } + } + TrieNode::Extension(extension) => { + next_path.extend_from_slice(&extension.key); + } + TrieNode::Leaf(leaf) => { + next_path.extend_from_slice(&leaf.key); + if next_path != key { + trie_nodes + .insert(next_path.clone(), Either::Right(leaf.value.as_slice().to_vec())); + } + } + TrieNode::EmptyRoot => { + if idx != 0 || proof_iter.peek().is_some() { + return Err(TrieWitnessError::UnexpectedEmptyRoot(next_path)) + } } + }; + } + + if let Some(value) = value { + trie_nodes.insert(key, Either::Right(value)); + } + + Ok(trie_nodes) +} + +/// Computes the next root hash of a trie by processing a set of trie nodes and +/// their provided values. +pub fn next_root_from_proofs( + trie_nodes: BTreeMap>>, + mut trie_node_provider: impl FnMut(Nibbles) -> Result, +) -> Result { + // Ignore branch child hashes in the path of leaves or lower child hashes. 
+ let mut keys = trie_nodes.keys().peekable(); + let mut ignored = HashSet::::default(); + while let Some(key) = keys.next() { + if keys.peek().is_some_and(|next| next.starts_with(key)) { + ignored.insert(key.clone()); } + } - let mut hash_builder = HashBuilder::default(); - let mut trie_nodes = trie_nodes.into_iter().filter(|e| !ignored.contains(&e.0)).peekable(); - while let Some((path, value)) = trie_nodes.next() { - match value { - Either::Left(branch_hash) => { - let parent_branch_path = path.slice(..path.len() - 1); - if hash_builder.key.starts_with(&parent_branch_path) || - trie_nodes - .peek() - .is_some_and(|next| next.0.starts_with(&parent_branch_path)) - { - hash_builder.add_branch(path, branch_hash, false); - } else { - // Parent is a branch node that needs to be turned into an extension node. - let mut path = path.clone(); - loop { - let node = trie_node_provider(path.clone())?; - match TrieNode::decode(&mut &node[..])? { - TrieNode::Branch(branch) => { - let children = branch_node_children(path, &branch); - for (child_path, value) in children { - if value.len() < B256::len_bytes() { - hash_builder.add_leaf(child_path, value); - } else { - let hash = B256::from_slice(&value[1..]); - hash_builder.add_branch(child_path, hash, false); - } + let mut hash_builder = HashBuilder::default(); + let mut trie_nodes = trie_nodes.into_iter().filter(|e| !ignored.contains(&e.0)).peekable(); + while let Some((path, value)) = trie_nodes.next() { + match value { + Either::Left(branch_hash) => { + let parent_branch_path = path.slice(..path.len() - 1); + if hash_builder.key.starts_with(&parent_branch_path) || + trie_nodes.peek().is_some_and(|next| next.0.starts_with(&parent_branch_path)) + { + hash_builder.add_branch(path, branch_hash, false); + } else { + // Parent is a branch node that needs to be turned into an extension node. + let mut path = path.clone(); + loop { + let node = trie_node_provider(path.clone())?; + match TrieNode::decode(&mut &node[..])? 
{ + TrieNode::Branch(branch) => { + let children = branch_node_children(path, &branch); + for (child_path, value) in children { + if value.len() < B256::len_bytes() { + hash_builder.add_leaf(child_path, value); + } else { + let hash = B256::from_slice(&value[1..]); + hash_builder.add_branch(child_path, hash, false); } - break - } - TrieNode::Leaf(leaf) => { - let mut child_path = path; - child_path.extend_from_slice(&leaf.key); - hash_builder.add_leaf(child_path, &leaf.value); - break - } - TrieNode::Extension(ext) => { - path.extend_from_slice(&ext.key); - } - TrieNode::EmptyRoot => { - return Err(TrieWitnessError::UnexpectedEmptyRoot(path)) } + break + } + TrieNode::Leaf(leaf) => { + let mut child_path = path; + child_path.extend_from_slice(&leaf.key); + hash_builder.add_leaf(child_path, &leaf.value); + break + } + TrieNode::Extension(ext) => { + path.extend_from_slice(&ext.key); + } + TrieNode::EmptyRoot => { + return Err(TrieWitnessError::UnexpectedEmptyRoot(path)) } } } } - Either::Right(leaf_value) => { - hash_builder.add_leaf(path, &leaf_value); - } + } + Either::Right(leaf_value) => { + hash_builder.add_leaf(path, &leaf_value); } } - Ok(hash_builder.root()) } + Ok(hash_builder.root()) } /// Returned branch node children with keys in order. From 8aa9b71ef78e39759c3907ab4df3ebd4f6f43cdd Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 18 Nov 2024 16:51:46 +0100 Subject: [PATCH 538/970] chore(witness): simplify wiped storage retrieval (#12637) --- crates/trie/trie/src/witness.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index b2364b385e1..6f6a66a16eb 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -213,11 +213,10 @@ where let mut storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(*hashed_address)?; // position cursor at the start - if let Some((hashed_slot, _)) = storage_cursor.seek(B256::ZERO)? 
{ - storage_keys.insert(hashed_slot); - } - while let Some((hashed_slot, _)) = storage_cursor.next()? { + let mut current_entry = storage_cursor.seek(B256::ZERO)?; + while let Some((hashed_slot, _)) = current_entry { storage_keys.insert(hashed_slot); + current_entry = storage_cursor.next()?; } } proof_targets.insert(*hashed_address, storage_keys); From 55b51364b094077dd93d6e44477216f21e13d583 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 18 Nov 2024 17:27:39 +0100 Subject: [PATCH 539/970] fix(discv5): warning discv5 config socket override (#12636) --- crates/net/discv5/src/config.rs | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 4a534afbef5..61ab94b4f2f 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -412,11 +412,13 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0)); if let Some(discv5_addr) = discv5_addr_ipv4 { - warn!(target: "net::discv5", - %discv5_addr, - %rlpx_addr, - "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" - ); + if discv5_addr != rlpx_addr { + warn!(target: "net::discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" + ); + } } // overwrite discv5 ipv4 addr with RLPx address. 
this is since there is no @@ -429,11 +431,13 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4)); if let Some(discv5_addr) = discv5_addr_ipv6 { - warn!(target: "net::discv5", - %discv5_addr, - %rlpx_addr, - "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" - ); + if discv5_addr != rlpx_addr { + warn!(target: "net::discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" + ); + } } // overwrite discv5 ipv6 addr with RLPx address. this is since there is no From f1279b35493d4086ee0c71d48416e6b22adfb11e Mon Sep 17 00:00:00 2001 From: Jennifer Date: Mon, 18 Nov 2024 20:34:09 +0000 Subject: [PATCH 540/970] Run kurtosis e2e test 2x/day (#12641) --- .github/assets/kurtosis_network_params.yaml | 2 -- .github/workflows/kurtosis.yml | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/assets/kurtosis_network_params.yaml b/.github/assets/kurtosis_network_params.yaml index 9c104de4950..e8cc1b51dc8 100644 --- a/.github/assets/kurtosis_network_params.yaml +++ b/.github/assets/kurtosis_network_params.yaml @@ -2,8 +2,6 @@ participants: - el_type: geth cl_type: lighthouse - el_type: reth - el_extra_params: - - --engine.experimental el_image: "ghcr.io/paradigmxyz/reth:kurtosis-ci" cl_type: teku additional_services: diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index 74d26dbd3ee..3e1b7432111 100644 --- a/.github/workflows/kurtosis.yml +++ b/.github/workflows/kurtosis.yml @@ -5,8 +5,8 @@ name: kurtosis on: workflow_dispatch: schedule: - # every day - - cron: "0 1 * * *" + # run every 12 hours + - cron: "0 */12 * * *" env: CARGO_TERM_COLOR: always From 7fb862cbde4137b372458e7302f270e220b44843 Mon Sep 17 00:00:00 2001 From: witty <131909329+0xwitty@users.noreply.github.com> Date: Mon, 18 Nov 2024 23:59:58 +0300 Subject: [PATCH 
541/970] Typo Update private-testnet.md (#12633) --- book/run/private-testnet.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md index 3a987e52c73..28253ca9f01 100644 --- a/book/run/private-testnet.md +++ b/book/run/private-testnet.md @@ -6,7 +6,7 @@ This guide uses [Kurtosis' ethereum-package](https://github.com/ethpandaops/ethe * Go [here](https://docs.kurtosis.com/install/) to install Kurtosis * Go [here](https://docs.docker.com/get-docker/) to install Docker -The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. +The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth and various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/ethpandaops/ethereum-package#configuration). 
From 641d1288e9314ecc360605bcda9c2cac7885a64e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 18 Nov 2024 22:43:26 +0100 Subject: [PATCH 542/970] chore(sdk): limit `FillTxEnv` to super trait of `FullSignedTx` (#12614) --- Cargo.lock | 70 ++++++++++--------- crates/optimism/payload/src/payload.rs | 2 +- crates/primitives-traits/src/lib.rs | 1 + .../src/transaction/execute.rs | 10 +++ .../primitives-traits/src/transaction/mod.rs | 1 + .../src/transaction/signed.rs | 16 +++-- crates/primitives/src/transaction/mod.rs | 2 + 7 files changed, 60 insertions(+), 42 deletions(-) create mode 100644 crates/primitives-traits/src/transaction/execute.rs diff --git a/Cargo.lock b/Cargo.lock index e83e20a0399..20fc477ea22 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4964,9 +4964,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" dependencies = [ "cfg-if", "downcast", @@ -4978,9 +4978,9 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" dependencies = [ "cfg-if", "proc-macro2", @@ -5286,9 +5286,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862db7293434837c1ca32ef509806a7b330bd24605da95438cd6e928a58b4b2c" +checksum = "72da577a88d35b893fae6467112651f26ef023434c196b2a0b3dc75bc853e0e4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5299,14 +5299,14 @@ dependencies 
= [ "derive_more 1.0.0", "serde", "serde_with", - "spin", + "thiserror 2.0.3", ] [[package]] name = "op-alloy-genesis" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ebd0391a3123b47e44ccca8a6f63a39ead2d7ea52e4fc132ff1297f6184314e" +checksum = "818180672dd14ca6642fb57942e1cbd602669f42b6e0222b7ea9bbcae065d67e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5314,13 +5314,14 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", + "thiserror 2.0.3", ] [[package]] name = "op-alloy-network" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd5d57f04f7ce1ba8be7704ba87fe7bea151a94ffc971f5a8a68b3bdf962471" +checksum = "12f82e805bad171ceae2af45efaecf8d0b50622cff3473e3c998ff1dd340de35" dependencies = [ "alloy-consensus", "alloy-network", @@ -5333,10 +5334,11 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0220768efb59871af53e1685b90983c9f3090cdf45df3d0107348362ba7055ee" +checksum = "1803a1ac96203b8f713b1fa9b7509c46c645ca7bc22b582761a7495e999d4301" dependencies = [ + "alloc-no-stdlib", "alloy-consensus", "alloy-eips", "alloy-primitives", @@ -5344,19 +5346,20 @@ dependencies = [ "alloy-serde", "async-trait", "brotli", - "derive_more 1.0.0", + "miniz_oxide", "op-alloy-consensus", "op-alloy-genesis", "serde", + "thiserror 2.0.3", "tracing", "unsigned-varint", ] [[package]] name = "op-alloy-rpc-types" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03db591ad512fdc70170fcb2bff3517b64811443f9fb65d3a1a6344c60acdbf0" +checksum = "a838c125256e02e2f9da88c51e263b02a06cda7e60382fe2551a3385b516f5bb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5373,9 +5376,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = 
"0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd1a11a9cf2f2e8ed9ae11c93dce5990ff81ff98f17995772f567b586a864812" +checksum = "c227fcc7d81d4023363ba12406e57ebcc1c7cbb1075c38ea471ae32138d4706d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5387,6 +5390,7 @@ dependencies = [ "op-alloy-protocol", "serde", "snap", + "thiserror 2.0.3", ] [[package]] @@ -5466,9 +5470,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" dependencies = [ "arbitrary", "arrayvec", @@ -5477,19 +5481,20 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", + "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.87", ] [[package]] @@ -9744,9 +9749,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.40" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -10057,9 +10062,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = 
"c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "indexmap 2.6.0", "itoa", @@ -10384,9 +10389,6 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] [[package]] name = "spki" @@ -10990,9 +10992,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", "base64 0.22.1", @@ -11592,9 +11594,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb4f099acbc1043cc752b91615b24b02d7f6fcd975bd781fed9f50b3c3e15bf7" +checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" dependencies = [ "futures", "js-sys", diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 36f11ee628b..1a951abadca 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -7,7 +7,7 @@ use alloy_eips::{ use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; -use op_alloy_consensus::eip1559::{decode_holocene_extra_data, EIP1559ParamError}; +use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; /// Re-export for use in downstream arguments. 
pub use op_alloy_rpc_types_engine::OpPayloadAttributes; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 1c848b81413..33becad2fea 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -26,6 +26,7 @@ pub use receipt::{FullReceipt, Receipt}; pub mod transaction; pub use transaction::{ + execute::FillTxEnv, signed::{FullSignedTx, SignedTransaction}, FullTransaction, Transaction, TransactionExt, }; diff --git a/crates/primitives-traits/src/transaction/execute.rs b/crates/primitives-traits/src/transaction/execute.rs new file mode 100644 index 00000000000..c7350f1941b --- /dev/null +++ b/crates/primitives-traits/src/transaction/execute.rs @@ -0,0 +1,10 @@ +//! Abstraction of an executable transaction. + +use alloy_primitives::Address; +use revm_primitives::TxEnv; + +/// Loads transaction into execution environment. +pub trait FillTxEnv { + /// Fills [`TxEnv`] with an [`Address`] and transaction. + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); +} diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 9d60be0c32e..53b77278571 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -1,5 +1,6 @@ //! 
Transaction abstraction +pub mod execute; pub mod signed; use core::{fmt, hash::Hash}; diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index d860dbb92fc..633b0caf7b2 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -6,14 +6,19 @@ use core::hash::Hash; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; use reth_codecs::Compact; -use revm_primitives::TxEnv; -use crate::{FullTransaction, InMemorySize, MaybeArbitrary, MaybeSerde, Transaction}; +use crate::{FillTxEnv, FullTransaction, InMemorySize, MaybeArbitrary, MaybeSerde, Transaction}; /// Helper trait that unifies all behaviour required by block to support full node operations. -pub trait FullSignedTx: SignedTransaction + Compact {} +pub trait FullSignedTx: + SignedTransaction + FillTxEnv + Compact +{ +} -impl FullSignedTx for T where T: SignedTransaction + Compact {} +impl FullSignedTx for T where + T: SignedTransaction + FillTxEnv + Compact +{ +} /// A signed transaction. #[auto_impl::auto_impl(&, Arc)] @@ -71,9 +76,6 @@ pub trait SignedTransaction: fn recalculate_hash(&self) -> B256 { keccak256(self.encoded_2718()) } - - /// Fills [`TxEnv`] with an [`Address`] and transaction. - fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); } /// Helper trait used in testing. 
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index aa57ef8d81e..41522744a2f 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1360,7 +1360,9 @@ impl SignedTransaction for TransactionSigned { let signature_hash = self.signature_hash(); recover_signer_unchecked(&self.signature, signature_hash) } +} +impl reth_primitives_traits::FillTxEnv for TransactionSigned { fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { tx_env.caller = sender; match self.as_ref() { From e859e1711d815c866273bb68f465e7c73bc85b28 Mon Sep 17 00:00:00 2001 From: AJStonewee Date: Mon, 18 Nov 2024 20:13:03 -0400 Subject: [PATCH 543/970] docs: small fix in HARDFORK-CHECKLIST.md (#12646) --- HARDFORK-CHECKLIST.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/HARDFORK-CHECKLIST.md b/HARDFORK-CHECKLIST.md index 80ebfc20c98..17c639f0d5e 100644 --- a/HARDFORK-CHECKLIST.md +++ b/HARDFORK-CHECKLIST.md @@ -17,5 +17,5 @@ ### Updates to the engine API - Add new endpoints to the `EngineApi` trait and implement endpoints. -- Update the `ExceuctionPayload` + `ExecutionPayloadSidecar` to `Block` conversion if there are any additional parameters. -- Update version specific validation checks in the `EngineValidator` trait. \ No newline at end of file +- Update the `ExecutionPayload` + `ExecutionPayloadSidecar` to `Block` conversion if there are any additional parameters. +- Update version specific validation checks in the `EngineValidator` trait. 
From 06bf5c77839972a8df7f9195499a67d38e840d29 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Nov 2024 10:27:23 +0100 Subject: [PATCH 544/970] chore(sdk): make `Chain` generic over data primitives (#12635) --- crates/ethereum/node/src/node.rs | 16 +---- crates/evm/execution-types/src/chain.rs | 60 ++++++++++--------- crates/optimism/evm/src/lib.rs | 2 +- crates/optimism/node/src/node.rs | 2 +- crates/primitives-traits/src/node.rs | 43 +++++++++++-- crates/primitives/src/lib.rs | 13 +++- .../transaction-pool/src/blobstore/tracker.rs | 2 +- 7 files changed, 86 insertions(+), 52 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 5265329f19a..1615ef0e686 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -13,8 +13,7 @@ use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_network::{NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodePrimitives, - NodeTypesWithDB, + AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodeTypesWithDB, }; use reth_node_builder::{ components::{ @@ -26,7 +25,7 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Receipt, TransactionSigned, TxType}; +use reth_primitives::EthPrimitives; use reth_provider::CanonStateSubscriptions; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; @@ -38,17 +37,6 @@ use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; -/// Ethereum primitive types. 
-#[derive(Debug, Default, Clone)] -pub struct EthPrimitives; - -impl NodePrimitives for EthPrimitives { - type Block = Block; - type SignedTx = TransactionSigned; - type TxType = TxType; - type Receipt = Receipt; -} - /// Type configuration for a regular Ethereum node. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index dc633e2d7ab..b32b53b885e 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -7,9 +7,10 @@ use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ - Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionSigned, + SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, }; +use reth_primitives_traits::NodePrimitives; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; @@ -25,7 +26,7 @@ use revm::db::BundleState; /// A chain of blocks should not be empty. #[derive(Clone, Debug, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct Chain { +pub struct Chain { /// All blocks in this chain. blocks: BTreeMap, /// The outcome of block execution for this chain. @@ -34,14 +35,14 @@ pub struct Chain { /// chain, ranging from the [`Chain::first`] block to the [`Chain::tip`] block, inclusive. /// /// Additionally, it includes the individual state changes that led to the current state. - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, /// State trie updates after block is added to the chain. /// NOTE: Currently, trie updates are present only for /// single-block chains that extend the canonical chain. trie_updates: Option, } -impl Chain { +impl Chain { /// Create new Chain from blocks and state. 
/// /// # Warning @@ -49,7 +50,7 @@ impl Chain { /// A chain of blocks should not be empty. pub fn new( blocks: impl IntoIterator, - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { let blocks = blocks.into_iter().map(|b| (b.number, b)).collect::>(); @@ -61,7 +62,7 @@ impl Chain { /// Create new Chain from a single block and its state. pub fn from_block( block: SealedBlockWithSenders, - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { Self::new([block], execution_outcome, trie_updates) @@ -93,12 +94,12 @@ impl Chain { } /// Get execution outcome of this chain - pub const fn execution_outcome(&self) -> &ExecutionOutcome { + pub const fn execution_outcome(&self) -> &ExecutionOutcome { &self.execution_outcome } /// Get mutable execution outcome of this chain - pub fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome { + pub fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome { &mut self.execution_outcome } @@ -132,7 +133,7 @@ impl Chain { pub fn execution_outcome_at_block( &self, block_number: BlockNumber, - ) -> Option { + ) -> Option> { if self.tip().number == block_number { return Some(self.execution_outcome.clone()) } @@ -149,19 +150,21 @@ impl Chain { /// 1. The blocks contained in the chain. /// 2. The execution outcome representing the final state. /// 3. The optional trie updates. - pub fn into_inner(self) -> (ChainBlocks<'static>, ExecutionOutcome, Option) { + pub fn into_inner( + self, + ) -> (ChainBlocks<'static>, ExecutionOutcome, Option) { (ChainBlocks { blocks: Cow::Owned(self.blocks) }, self.execution_outcome, self.trie_updates) } /// Destructure the chain into its inner components: /// 1. A reference to the blocks contained in the chain. /// 2. A reference to the execution outcome representing the final state. 
- pub const fn inner(&self) -> (ChainBlocks<'_>, &ExecutionOutcome) { + pub const fn inner(&self) -> (ChainBlocks<'_>, &ExecutionOutcome) { (ChainBlocks { blocks: Cow::Borrowed(&self.blocks) }, &self.execution_outcome) } /// Returns an iterator over all the receipts of the blocks in the chain. - pub fn block_receipts_iter(&self) -> impl Iterator>> + '_ { + pub fn block_receipts_iter(&self) -> impl Iterator>> + '_ { self.execution_outcome.receipts().iter() } @@ -173,7 +176,7 @@ impl Chain { /// Returns an iterator over all blocks and their receipts in the chain. pub fn blocks_and_receipts( &self, - ) -> impl Iterator>)> + '_ { + ) -> impl Iterator>)> + '_ { self.blocks_iter().zip(self.block_receipts_iter()) } @@ -219,7 +222,7 @@ impl Chain { } /// Get all receipts for the given block. - pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { + pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { let num = self.block_number(block_hash)?; self.execution_outcome.receipts_by_block(num).iter().map(Option::as_ref).collect() } @@ -227,7 +230,7 @@ impl Chain { /// Get all receipts with attachment. /// /// Attachment includes block number, block hash, transaction hash and transaction index. - pub fn receipts_with_attachment(&self) -> Vec { + pub fn receipts_with_attachment(&self) -> Vec> { let mut receipt_attach = Vec::with_capacity(self.blocks().len()); for ((block_num, block), receipts) in self.blocks().iter().zip(self.execution_outcome.receipts().iter()) @@ -250,7 +253,7 @@ impl Chain { pub fn append_block( &mut self, block: SealedBlockWithSenders, - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, ) { self.blocks.insert(block.number, block); self.execution_outcome.extend(execution_outcome); @@ -300,7 +303,7 @@ impl Chain { /// /// If chain doesn't have any blocks. 
#[track_caller] - pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { + pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { let chain_tip = *self.blocks.last_entry().expect("chain is never empty").key(); let block_number = match split_at { ChainSplitTarget::Hash(block_hash) => { @@ -454,11 +457,11 @@ impl IntoIterator for ChainBlocks<'_> { /// Used to hold receipts and their attachment. #[derive(Default, Clone, Debug, PartialEq, Eq)] -pub struct BlockReceipts { +pub struct BlockReceipts { /// Block identifier pub block: BlockNumHash, /// Transaction identifier and receipt. - pub tx_receipts: Vec<(TxHash, Receipt)>, + pub tx_receipts: Vec<(TxHash, T)>, } /// The target block where the chain should be split. @@ -484,26 +487,26 @@ impl From for ChainSplitTarget { /// Result of a split chain. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum ChainSplit { +pub enum ChainSplit { /// Chain is not split. Pending chain is returned. /// Given block split is higher than last block. /// Or in case of split by hash when hash is unknown. - NoSplitPending(Chain), + NoSplitPending(Chain), /// Chain is not split. Canonical chain is returned. /// Given block split is lower than first block. - NoSplitCanonical(Chain), + NoSplitCanonical(Chain), /// Chain is split into two: `[canonical]` and `[pending]` /// The target of this chain split [`ChainSplitTarget`] belongs to the `canonical` chain. Split { /// Contains lower block numbers that are considered canonicalized. It ends with /// the [`ChainSplitTarget`] block. The state of this chain is now empty and no longer /// usable. - canonical: Chain, + canonical: Chain, /// Right contains all subsequent blocks __after__ the [`ChainSplitTarget`] that are still /// pending. /// /// The state of the original chain is moved here. 
- pending: Chain, + pending: Chain, }, } @@ -678,7 +681,7 @@ mod tests { block3.set_parent_hash(block2_hash); - let mut chain1 = + let mut chain1: Chain = Chain { blocks: BTreeMap::from([(1, block1), (2, block2)]), ..Default::default() }; let chain2 = @@ -692,7 +695,7 @@ mod tests { #[test] fn test_number_split() { - let execution_outcome1 = ExecutionOutcome::new( + let execution_outcome1: ExecutionOutcome = ExecutionOutcome::new( BundleState::new( vec![( Address::new([2; 20]), @@ -739,7 +742,8 @@ mod tests { let mut block_state_extended = execution_outcome1; block_state_extended.extend(execution_outcome2); - let chain = Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, None); + let chain: Chain = + Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, None); let (split1_execution_outcome, split2_execution_outcome) = chain.execution_outcome.clone().split_at(2); @@ -838,7 +842,7 @@ mod tests { // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, // including block1_hash and block2_hash, and the execution_outcome - let chain = Chain { + let chain: Chain = Chain { blocks: BTreeMap::from([(10, block1), (11, block2)]), execution_outcome: execution_outcome.clone(), ..Default::default() diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 9569c1cb8b5..55dc3fc7deb 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -602,7 +602,7 @@ mod tests { // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, // including block1_hash and block2_hash, and the execution_outcome - let chain = Chain::new([block1, block2], execution_outcome.clone(), None); + let chain: Chain = Chain::new([block1, block2], execution_outcome.clone(), None); // Assert that the proper receipt vector is returned for block1_hash assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1])); diff --git a/crates/optimism/node/src/node.rs 
b/crates/optimism/node/src/node.rs index 238953c9d57..699239a43b2 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -44,7 +44,7 @@ use reth_trie_db::MerklePatriciaTrie; use std::sync::Arc; /// Optimism primitive types. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct OpPrimitives; impl NodePrimitives for OpPrimitives { diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 35c8ea0f693..c11a19a105a 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -3,15 +3,44 @@ use core::fmt; use crate::{BlockBody, FullBlock, FullReceipt, FullSignedTx, FullTxType, MaybeSerde}; /// Configures all the primitive types of the node. -pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static { +pub trait NodePrimitives: + Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static +{ /// Block primitive. - type Block: Send + Sync + Unpin + Clone + Default + fmt::Debug + MaybeSerde + 'static; + type Block: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + 'static; /// Signed version of the transaction type. - type SignedTx: Send + Sync + Unpin + Clone + Default + fmt::Debug + MaybeSerde + 'static; + type SignedTx: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + 'static; /// Transaction envelope type ID. - type TxType: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + type TxType: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static; /// A receipt. 
- type Receipt: Send + Sync + Unpin + Clone + Default + fmt::Debug + MaybeSerde + 'static; + type Receipt: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + 'static; } impl NodePrimitives for () { @@ -22,7 +51,9 @@ impl NodePrimitives for () { } /// Helper trait that sets trait bounds on [`NodePrimitives`]. -pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static { +pub trait FullNodePrimitives: + Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static +{ /// Block primitive. type Block: FullBlock>; /// Signed version of the transaction type. diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 45067d60079..2618f671927 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -40,7 +40,7 @@ pub use receipt::{ }; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, HeaderError, Log, LogData, - SealedHeader, StorageEntry, + NodePrimitives, SealedHeader, StorageEntry, }; pub use static_file::StaticFileSegment; @@ -74,3 +74,14 @@ pub mod serde_bincode_compat { transaction::{serde_bincode_compat as transaction, serde_bincode_compat::*}, }; } + +/// Temp helper struct for integrating [`NodePrimitives`]. 
+#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct EthPrimitives; + +impl NodePrimitives for EthPrimitives { + type Block = crate::Block; + type SignedTx = crate::TransactionSigned; + type TxType = crate::TxType; + type Receipt = crate::Receipt; +} diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index f22dcf5706e..63d6e30eea0 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -178,7 +178,7 @@ mod tests { }; // Extract blocks from the chain - let chain = Chain::new(vec![block1, block2], Default::default(), None); + let chain: Chain = Chain::new(vec![block1, block2], Default::default(), None); let blocks = chain.into_inner().0; // Add new chain blocks to the tracker From 496bf0bf715f0a1fafc198f8d72ccd71913d1a40 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 19 Nov 2024 11:13:59 +0100 Subject: [PATCH 545/970] chore: bump version 1.1.2 (#12651) --- Cargo.lock | 240 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 121 insertions(+), 121 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20fc477ea22..f0d116c29ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2596,7 +2596,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5395,7 +5395,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.1.1" +version = "1.1.2" dependencies = [ "clap", "reth-cli-util", @@ -6342,7 +6342,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6415,7 +6415,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6443,7 +6443,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.1.1" 
+version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6497,7 +6497,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -6533,7 +6533,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6571,7 +6571,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6584,7 +6584,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6613,7 +6613,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -6634,7 +6634,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-genesis", "clap", @@ -6647,7 +6647,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.1.1" +version = "1.1.2" dependencies = [ "ahash", "alloy-consensus", @@ -6714,7 +6714,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-tasks", "tokio", @@ -6723,7 +6723,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6741,7 +6741,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6763,7 +6763,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.1.1" +version = "1.1.2" dependencies = [ "convert_case", "proc-macro2", @@ -6774,7 +6774,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", 
"eyre", @@ -6790,7 +6790,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6803,7 +6803,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6819,7 +6819,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6842,7 +6842,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -6883,7 +6883,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -6911,7 +6911,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -6940,7 +6940,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6957,7 +6957,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6984,7 +6984,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7008,7 +7008,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7036,7 +7036,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7075,7 +7075,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ 
-7113,7 +7113,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.1.1" +version = "1.1.2" dependencies = [ "aes", "alloy-primitives", @@ -7143,7 +7143,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -7174,7 +7174,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -7192,7 +7192,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.1.1" +version = "1.1.2" dependencies = [ "futures", "pin-project", @@ -7221,7 +7221,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7272,7 +7272,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7303,7 +7303,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7315,7 +7315,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-eips", @@ -7352,7 +7352,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7376,7 +7376,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.1.1" +version = "1.1.2" dependencies = [ "clap", "eyre", @@ -7387,7 +7387,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7402,7 +7402,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", 
"alloy-primitives", @@ -7421,7 +7421,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7441,7 +7441,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7467,7 +7467,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "rayon", @@ -7477,7 +7477,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7505,7 +7505,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7528,7 +7528,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7543,7 +7543,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7561,7 +7561,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7604,7 +7604,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "eyre", @@ -7637,7 +7637,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7653,7 +7653,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "serde", "serde_json", @@ -7662,7 +7662,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ 
-7687,7 +7687,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.1.1" +version = "1.1.2" dependencies = [ "async-trait", "bytes", @@ -7709,7 +7709,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.1.1" +version = "1.1.2" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -7730,7 +7730,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.1.1" +version = "1.1.2" dependencies = [ "bindgen", "cc", @@ -7738,7 +7738,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.1.1" +version = "1.1.2" dependencies = [ "futures", "metrics", @@ -7749,14 +7749,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.1.1" +version = "1.1.2" dependencies = [ "futures-util", "if-addrs", @@ -7770,7 +7770,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7832,7 +7832,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -7854,7 +7854,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7876,7 +7876,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7892,7 +7892,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -7905,7 +7905,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.1.1" +version = "1.1.2" dependencies = [ "anyhow", "bincode", @@ -7923,7 +7923,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ 
"alloy-consensus", "alloy-rpc-types-engine", @@ -7944,7 +7944,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8009,7 +8009,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8059,7 +8059,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-contract", @@ -8104,7 +8104,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8127,7 +8127,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.1.1" +version = "1.1.2" dependencies = [ "eyre", "http", @@ -8153,7 +8153,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-chainspec", "reth-db-api", @@ -8164,7 +8164,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8184,7 +8184,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8234,7 +8234,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8251,7 +8251,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8278,7 +8278,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8289,7 +8289,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.1.1" +version = 
"1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8340,7 +8340,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8377,7 +8377,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8395,7 +8395,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8439,7 +8439,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-codecs", "reth-db-api", @@ -8450,7 +8450,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8472,7 +8472,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-rpc-types-engine", "async-trait", @@ -8485,7 +8485,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8503,7 +8503,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8512,7 +8512,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8522,7 +8522,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8569,7 +8569,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8597,7 +8597,7 @@ dependencies = [ 
[[package]] name = "reth-provider" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8646,7 +8646,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "assert_matches", @@ -8676,7 +8676,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "arbitrary", @@ -8696,7 +8696,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8714,7 +8714,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8786,7 +8786,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -8810,7 +8810,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8829,7 +8829,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8880,7 +8880,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8918,7 +8918,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8960,7 +8960,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9002,7 +9002,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-rpc-types-engine", "http", @@ -9019,7 +9019,7 @@ dependencies = [ [[package]] name = 
"reth-rpc-server-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9034,7 +9034,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9052,7 +9052,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9102,7 +9102,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "aquamarine", @@ -9130,7 +9130,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "arbitrary", @@ -9147,7 +9147,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "assert_matches", @@ -9169,7 +9169,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "clap", @@ -9180,7 +9180,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9201,7 +9201,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9213,7 +9213,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.1.1" +version = "1.1.2" dependencies = [ "auto_impl", "dyn-clone", @@ -9230,7 +9230,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9243,7 +9243,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "tokio", "tokio-stream", @@ -9252,7 +9252,7 @@ dependencies = [ [[package]] name = "reth-tracing" 
-version = "1.1.1" +version = "1.1.2" dependencies = [ "clap", "eyre", @@ -9266,7 +9266,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9313,7 +9313,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9343,7 +9343,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9367,7 +9367,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9396,7 +9396,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9423,7 +9423,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", diff --git a/Cargo.toml b/Cargo.toml index 2f2f9aa884a..58cdd1f8ca7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.1.1" +version = "1.1.2" edition = "2021" rust-version = "1.82" license = "MIT OR Apache-2.0" From 206ba29f0b9e74ad77a2b0a3f5ebec07bc8b7462 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:06:33 +0100 Subject: [PATCH 546/970] tx-pool: add `all` method for `AllPoolTransactions` (#12643) --- crates/transaction-pool/src/traits.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 6c247a84cdb..a7e9010d693 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -551,6 +551,11 @@ impl AllPoolTransactions { pub fn queued_recovered(&self) -> impl Iterator + '_ { 
self.queued.iter().map(|tx| tx.transaction.clone().into()) } + + /// Returns an iterator over all transactions, both pending and queued. + pub fn all(&self) -> impl Iterator + '_ { + self.pending.iter().chain(self.queued.iter()).map(|tx| tx.transaction.clone().into()) + } } impl Default for AllPoolTransactions { From b78f20f5cb0c79ada804c70c162b76fc70bee2c0 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 19 Nov 2024 11:07:30 +0100 Subject: [PATCH 547/970] fix: do not delegate is_optimism check for Ethereum ChainSpec (#12650) --- crates/chainspec/src/api.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index f0cc31bb44d..94b4285f92d 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -109,6 +109,6 @@ impl EthChainSpec for ChainSpec { } fn is_optimism(&self) -> bool { - self.chain.is_optimism() + false } } From e924bdab37dd05f66395f5f316bb3e318337c8fb Mon Sep 17 00:00:00 2001 From: "0xriazaka.eth" <168359025+0xriazaka@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:42:40 +0100 Subject: [PATCH 548/970] Header validator (#12648) Co-authored-by: Arsenii Kulikov --- crates/consensus/consensus/src/lib.rs | 73 ++++++++------- crates/consensus/consensus/src/noop.rs | 6 +- crates/consensus/consensus/src/test_utils.rs | 42 ++++----- crates/ethereum/consensus/src/lib.rs | 46 +++++----- .../src/headers/reverse_headers.rs | 2 +- crates/net/p2p/src/headers/downloader.rs | 4 +- crates/net/p2p/src/test_utils/headers.rs | 2 +- crates/optimism/consensus/src/lib.rs | 88 ++++++++++--------- 8 files changed, 139 insertions(+), 124 deletions(-) diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index ec296f3ed49..e059305911f 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -46,7 +46,42 @@ impl<'a> PostExecutionInput<'a> { /// Consensus is a protocol that chooses canonical chain. 
#[auto_impl::auto_impl(&, Arc)] -pub trait Consensus: Debug + Send + Sync { +pub trait Consensus: HeaderValidator + Debug + Send + Sync { + /// Ensures that body field values match the header. + fn validate_body_against_header( + &self, + body: &B, + header: &SealedHeader, + ) -> Result<(), ConsensusError>; + + /// Validate a block disregarding world state, i.e. things that can be checked before sender + /// recovery and execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and + /// 11.1 "Ommer Validation". + /// + /// **This should not be called for the genesis block**. + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_pre_execution(&self, block: &SealedBlock) + -> Result<(), ConsensusError>; + + /// Validate a block considering world state, i.e. things that can not be checked before + /// execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity". + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError>; +} + +/// HeaderValidator is a protocol that validates headers and their relationships. +#[auto_impl::auto_impl(&, Arc)] +pub trait HeaderValidator: Debug + Send + Sync { /// Validate if header is correct and follows consensus specification. /// /// This is called on standalone header to check if all hashes are correct. @@ -60,7 +95,8 @@ pub trait Consensus: Debug + Send + Sync { /// /// **This should not be called for the genesis block**. /// - /// Note: Validating header against its parent does not include other Consensus validations. + /// Note: Validating header against its parent does not include other HeaderValidator + /// validations. 
fn validate_header_against_parent( &self, header: &SealedHeader, @@ -99,43 +135,12 @@ pub trait Consensus: Debug + Send + Sync { /// /// Some consensus engines may want to do additional checks here. /// - /// Note: validating headers with TD does not include other Consensus validation. + /// Note: validating headers with TD does not include other HeaderValidator validation. fn validate_header_with_total_difficulty( &self, header: &H, total_difficulty: U256, ) -> Result<(), ConsensusError>; - - /// Ensures that body field values match the header. - fn validate_body_against_header( - &self, - body: &B, - header: &SealedHeader, - ) -> Result<(), ConsensusError>; - - /// Validate a block disregarding world state, i.e. things that can be checked before sender - /// recovery and execution. - /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and - /// 11.1 "Ommer Validation". - /// - /// **This should not be called for the genesis block**. - /// - /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_pre_execution(&self, block: &SealedBlock) - -> Result<(), ConsensusError>; - - /// Validate a block considering world state, i.e. things that can not be checked before - /// execution. - /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity". 
- /// - /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError>; } /// Consensus Errors diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 9b72f89b176..6d12af08d51 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,4 +1,4 @@ -use crate::{Consensus, ConsensusError, PostExecutionInput}; +use crate::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; @@ -7,7 +7,7 @@ use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; #[non_exhaustive] pub struct NoopConsensus; -impl Consensus for NoopConsensus { +impl HeaderValidator for NoopConsensus { fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { Ok(()) } @@ -27,7 +27,9 @@ impl Consensus for NoopConsensus { ) -> Result<(), ConsensusError> { Ok(()) } +} +impl Consensus for NoopConsensus { fn validate_body_against_header( &self, _body: &B, diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index 52926ec323e..ba683dd255f 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,4 +1,4 @@ -use crate::{Consensus, ConsensusError, PostExecutionInput}; +use crate::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; use core::sync::atomic::{AtomicBool, Ordering}; use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; @@ -47,18 +47,21 @@ impl TestConsensus { } impl Consensus for TestConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { - if self.fail_validation() { + fn validate_body_against_header( + &self, + _body: &B, 
+ _header: &SealedHeader, + ) -> Result<(), ConsensusError> { + if self.fail_body_against_header() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } - fn validate_header_against_parent( + fn validate_block_pre_execution( &self, - _header: &SealedHeader, - _parent: &SealedHeader, + _block: &SealedBlock, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -67,10 +70,10 @@ impl Consensus for TestConsensus { } } - fn validate_header_with_total_difficulty( + fn validate_block_post_execution( &self, - _header: &H, - _total_difficulty: U256, + _block: &BlockWithSenders, + _input: PostExecutionInput<'_>, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -78,22 +81,21 @@ impl Consensus for TestConsensus { Ok(()) } } +} - fn validate_body_against_header( - &self, - _body: &B, - _header: &SealedHeader, - ) -> Result<(), ConsensusError> { - if self.fail_body_against_header() { +impl HeaderValidator for TestConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { + if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } - fn validate_block_pre_execution( + fn validate_header_against_parent( &self, - _block: &SealedBlock, + _header: &SealedHeader, + _parent: &SealedHeader, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -102,10 +104,10 @@ impl Consensus for TestConsensus { } } - fn validate_block_post_execution( + fn validate_header_with_total_difficulty( &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, + _header: &H, + _total_difficulty: U256, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 7198a703672..ffabe5b1952 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ 
b/crates/ethereum/consensus/src/lib.rs @@ -11,7 +11,7 @@ use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; +use reth_consensus::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use reth_consensus_common::validation::{ validate_4844_header_standalone, validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, @@ -92,6 +92,30 @@ impl EthBeaconConsensus impl Consensus for EthBeaconConsensus +{ + fn validate_body_against_header( + &self, + body: &BlockBody, + header: &SealedHeader, + ) -> Result<(), ConsensusError> { + validate_body_against_header(body, header) + } + + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + validate_block_pre_execution(block, &self.chain_spec) + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) + } +} + +impl HeaderValidator + for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validate_header_gas(header)?; @@ -210,26 +234,6 @@ impl Consensu Ok(()) } - - fn validate_body_against_header( - &self, - body: &BlockBody, - header: &SealedHeader, - ) -> Result<(), ConsensusError> { - validate_body_against_header(body, header) - } - - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - validate_block_pre_execution(block, &self.chain_spec) - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) - } } #[cfg(test)] 
diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 0f8111e4395..2d79e0a7af6 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -9,7 +9,7 @@ use futures::{stream::Stream, FutureExt}; use futures_util::{stream::FuturesUnordered, StreamExt}; use rayon::prelude::*; use reth_config::config::HeadersConfig; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, HeaderValidator}; use reth_network_p2p::{ error::{DownloadError, DownloadResult, PeerRequestResult}, headers::{ diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index f02d9461fc1..03ab467bafb 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -4,7 +4,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::Stream; -use reth_consensus::Consensus; +use reth_consensus::HeaderValidator; use reth_primitives::SealedHeader; use reth_primitives_traits::BlockWithParent; /// A downloader capable of fetching and yielding block headers. 
@@ -83,7 +83,7 @@ impl SyncTarget { /// /// Returns Ok(false) if the pub fn validate_header_download( - consensus: &dyn Consensus, + consensus: &dyn HeaderValidator, header: &SealedHeader, parent: &SealedHeader, ) -> DownloadResult<()> { diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index bc5262abef4..5809ad6bdd4 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -147,7 +147,7 @@ impl Stream for TestDownload { let empty: SealedHeader = SealedHeader::default(); if let Err(error) = - Consensus::<_>::validate_header_against_parent(&this.consensus, &empty, &empty) + >::validate_header_against_parent(&this.consensus, &empty, &empty) { this.done = true; return Poll::Ready(Some(Err(DownloadError::HeaderValidation { diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 72f67dcb450..e8b7959dd27 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -12,7 +12,7 @@ use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; +use reth_consensus::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use reth_consensus_common::validation::{ validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, validate_against_parent_timestamp, @@ -47,6 +47,50 @@ impl OpBeaconConsensus { } impl Consensus for OpBeaconConsensus { + fn validate_body_against_header( + &self, + body: &BlockBody, + header: &SealedHeader, + ) -> Result<(), ConsensusError> { + validate_body_against_header(body, header) + } + + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + // Check ommers hash + let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.body.ommers); + if 
block.header.ommers_hash != ommers_hash { + return Err(ConsensusError::BodyOmmersHashDiff( + GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(), + )) + } + + // Check transaction root + if let Err(error) = block.ensure_transaction_root_valid() { + return Err(ConsensusError::BodyTransactionRootDiff(error.into())) + } + + // EIP-4895: Beacon chain push withdrawals as operations + if self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { + validate_shanghai_withdrawals(block)?; + } + + if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp) { + validate_cancun_gas(block)?; + } + + Ok(()) + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, input.receipts) + } +} + +impl HeaderValidator for OpBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validate_header_gas(header)?; validate_header_base_fee(header, &self.chain_spec) @@ -118,46 +162,4 @@ impl Consensus for OpBeaconConsensus { Ok(()) } - - fn validate_body_against_header( - &self, - body: &BlockBody, - header: &SealedHeader, - ) -> Result<(), ConsensusError> { - validate_body_against_header(body, header) - } - - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - // Check ommers hash - let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.body.ommers); - if block.header.ommers_hash != ommers_hash { - return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(), - )) - } - - // Check transaction root - if let Err(error) = block.ensure_transaction_root_valid() { - return Err(ConsensusError::BodyTransactionRootDiff(error.into())) - } - - // EIP-4895: Beacon chain push withdrawals as operations - if 
self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { - validate_shanghai_withdrawals(block)?; - } - - if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp) { - validate_cancun_gas(block)?; - } - - Ok(()) - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts) - } } From 0db10a13a53bf477777f4af7fba2bed805c915cf Mon Sep 17 00:00:00 2001 From: Cypher Pepe <125112044+cypherpepe@users.noreply.github.com> Date: Tue, 19 Nov 2024 15:05:30 +0300 Subject: [PATCH 549/970] fix: typos in troubleshooting.md (#12652) --- book/run/troubleshooting.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/book/run/troubleshooting.md b/book/run/troubleshooting.md index 7368b6631ab..cab39cb1165 100644 --- a/book/run/troubleshooting.md +++ b/book/run/troubleshooting.md @@ -8,7 +8,7 @@ This page tries to answer how to deal with the most popular issues. If you're: 1. Running behind the tip -2. Have slow canonical commit time according to the `Canonical Commit Latency time` chart on [Grafana dashboard](./observability.md#prometheus--grafana) (more than 2-3 seconds) +2. Have slow canonical commit time according to the `Canonical Commit Latency Time` chart on [Grafana dashboard](./observability.md#prometheus--grafana) (more than 2-3 seconds) 3. Seeing warnings in your logs such as ```console 2023-11-08T15:17:24.789731Z WARN providers::db: Transaction insertion took too long block_number=18528075 tx_num=2150227643 hash=0xb7de1d6620efbdd3aa8547c47a0ff09a7fd3e48ba3fd2c53ce94c6683ed66e7c elapsed=6.793759034s @@ -48,7 +48,7 @@ equal to the [freshly synced node](../installation/installation.md#hardware-requ mv reth_compact.dat $(reth db path)/mdbx.dat ``` 7. Start Reth -8. 
Confirm that the values on the `Freelist` chart is near zero and the values on the `Canonical Commit Latency time` chart +8. Confirm that the values on the `Freelist` chart are near zero and the values on the `Canonical Commit Latency Time` chart is less than 1 second. 9. Delete original database ```bash From da77ffc9515dd575bab6238e65e089053072fe67 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Nov 2024 13:53:34 +0100 Subject: [PATCH 550/970] chore(sdk): Move `reth_optimism_node::OpPrimitives` into `reth-optimism-primitives` (#12649) --- Cargo.lock | 81 +++++++++++----------- crates/optimism/evm/Cargo.toml | 1 + crates/optimism/evm/src/lib.rs | 4 +- crates/optimism/node/src/node.rs | 30 +++----- crates/optimism/payload/src/payload.rs | 2 +- crates/optimism/primitives/Cargo.toml | 5 +- crates/optimism/primitives/src/lib.rs | 16 +++++ crates/primitives-traits/src/block/body.rs | 10 ++- crates/primitives-traits/src/lib.rs | 2 +- 9 files changed, 84 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f0d116c29ae..adacf448f20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4579,9 +4579,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.164" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" @@ -4964,9 +4964,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" +checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" dependencies = [ "cfg-if", "downcast", @@ -4978,9 +4978,9 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.1" +version = "0.13.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" +checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" dependencies = [ "cfg-if", "proc-macro2", @@ -5286,9 +5286,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.7" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72da577a88d35b893fae6467112651f26ef023434c196b2a0b3dc75bc853e0e4" +checksum = "bff54d1d790eca1f3aedbd666162e9c42eceff90b9f9d24b352ed9c2df1e901a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5299,14 +5299,14 @@ dependencies = [ "derive_more 1.0.0", "serde", "serde_with", - "thiserror 2.0.3", + "spin", ] [[package]] name = "op-alloy-genesis" -version = "0.6.7" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818180672dd14ca6642fb57942e1cbd602669f42b6e0222b7ea9bbcae065d67e" +checksum = "ae84fd64fbc53b3e958ea5a96d7f5633e4a111092e41c51672c2d91835c09efb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5314,14 +5314,13 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", - "thiserror 2.0.3", ] [[package]] name = "op-alloy-network" -version = "0.6.7" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f82e805bad171ceae2af45efaecf8d0b50622cff3473e3c998ff1dd340de35" +checksum = "d71e777450ee3e9c5177e00865e9b4496472b623c50f146fc907b667c6b4ab37" dependencies = [ "alloy-consensus", "alloy-network", @@ -5334,32 +5333,29 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.7" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1803a1ac96203b8f713b1fa9b7509c46c645ca7bc22b582761a7495e999d4301" +checksum = "1e854d2d4958d0a213731560172e8455536329ee9574473ff79fa953da91eb6a" dependencies = [ - 
"alloc-no-stdlib", "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", "async-trait", - "brotli", - "miniz_oxide", + "derive_more 1.0.0", "op-alloy-consensus", "op-alloy-genesis", "serde", - "thiserror 2.0.3", "tracing", "unsigned-varint", ] [[package]] name = "op-alloy-rpc-types" -version = "0.6.7" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a838c125256e02e2f9da88c51e263b02a06cda7e60382fe2551a3385b516f5bb" +checksum = "981b7f8ab11fe85ba3c1723702f000429b8d0c16b5883c93d577895f262cbac6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5376,9 +5372,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.7" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c227fcc7d81d4023363ba12406e57ebcc1c7cbb1075c38ea471ae32138d4706d" +checksum = "a227b16c9c5df68b112c8db9d268ebf46b3e26c744b4d59d4949575cd603a292" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5390,7 +5386,6 @@ dependencies = [ "op-alloy-protocol", "serde", "snap", - "thiserror 2.0.3", ] [[package]] @@ -5470,9 +5465,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arbitrary", "arrayvec", @@ -5481,20 +5476,19 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", - "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate", 
"proc-macro2", "quote", - "syn 2.0.87", + "syn 1.0.109", ] [[package]] @@ -8268,6 +8262,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-optimism-consensus", "reth-optimism-forks", + "reth-optimism-primitives", "reth-primitives", "reth-prune-types", "reth-revm", @@ -8387,6 +8382,7 @@ dependencies = [ "derive_more 1.0.0", "op-alloy-consensus", "reth-codecs", + "reth-node-types", "reth-primitives", "reth-primitives-traits", "rstest", @@ -9749,9 +9745,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.41" +version = "0.38.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" dependencies = [ "bitflags 2.6.0", "errno", @@ -9762,9 +9758,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.17" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", "once_cell", @@ -10062,9 +10058,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "indexmap 2.6.0", "itoa", @@ -10389,6 +10385,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spki" @@ -10992,9 +10991,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.2" +version = "0.6.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ "async-compression", "base64 0.22.1", @@ -11594,9 +11593,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +checksum = "bb4f099acbc1043cc752b91615b24b02d7f6fcd975bd781fed9f50b3c3e15bf7" dependencies = [ "futures", "js-sys", diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index f6b22ad14c8..98496bb2653 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -48,6 +48,7 @@ reth-primitives = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec.workspace = true alloy-genesis.workspace = true alloy-consensus.workspace = true +reth-optimism-primitives.workspace = true [features] default = ["std"] diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 55dc3fc7deb..be1fb6d3227 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -211,6 +211,7 @@ mod tests { AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit, }; use reth_optimism_chainspec::BASE_MAINNET; + use reth_optimism_primitives::OpPrimitives; use reth_primitives::{Account, Log, Receipt, Receipts, SealedBlockWithSenders, TxType}; use reth_revm::{ @@ -602,7 +603,8 @@ mod tests { // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, // including block1_hash and block2_hash, and the execution_outcome - let chain: Chain = Chain::new([block1, block2], execution_outcome.clone(), None); + let chain: Chain = + Chain::new([block1, block2], execution_outcome.clone(), None); // Assert that the proper receipt vector is returned for 
block1_hash assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1])); diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 699239a43b2..70f32c01ffd 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -1,18 +1,14 @@ //! Optimism Node types config. -use crate::{ - args::RollupArgs, - engine::OpEngineValidator, - txpool::{OpTransactionPool, OpTransactionValidator}, - OpEngineTypes, -}; +use std::sync::Arc; + use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::{EthChainSpec, Hardforks}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, PayloadBuilder, + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, PayloadBuilder, }; use reth_node_builder::{ components::{ @@ -32,7 +28,6 @@ use reth_optimism_rpc::{ OpEthApi, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Receipt, TransactionSigned, TxType}; use reth_provider::CanonStateSubscriptions; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -41,18 +36,13 @@ use reth_transaction_pool::{ TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; -use std::sync::Arc; -/// Optimism primitive types. -#[derive(Debug, Default, Clone, PartialEq, Eq)] -pub struct OpPrimitives; - -impl NodePrimitives for OpPrimitives { - type Block = Block; - type SignedTx = TransactionSigned; - type TxType = TxType; - type Receipt = Receipt; -} +use crate::{ + args::RollupArgs, + engine::OpEngineValidator, + txpool::{OpTransactionPool, OpTransactionValidator}, + OpEngineTypes, +}; /// Type configuration for a regular Optimism node. 
#[derive(Debug, Default, Clone)] @@ -125,7 +115,7 @@ where } impl NodeTypes for OpNode { - type Primitives = OpPrimitives; + type Primitives = reth_primitives::EthPrimitives; // todo: replace with OpPrimitives when EthPrimitives is only used in reth-ethereum-* crates type ChainSpec = OpChainSpec; type StateCommitment = MerklePatriciaTrie; } diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 1a951abadca..36f11ee628b 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -7,7 +7,7 @@ use alloy_eips::{ use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; -use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; +use op_alloy_consensus::eip1559::{decode_holocene_extra_data, EIP1559ParamError}; /// Re-export for use in downstream arguments. pub use op_alloy_rpc_types_engine::OpPayloadAttributes; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 4c6d9f51406..ade6d4eb6bc 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -13,9 +13,10 @@ workspace = true [dependencies] # reth +reth-node-types.workspace = true +reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-codecs = { workspace = true, optional = true } -reth-primitives = { workspace = true, features = ["reth-codec"], optional = true } # ethereum alloy-primitives.workspace = true @@ -41,7 +42,7 @@ rstest.workspace = true default = ["reth-codec"] reth-codec = [ "dep:reth-codecs", - "dep:reth-primitives" + "reth-primitives/reth-codec" ] serde = [ "dep:serde", diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 
a0745e7ac7d..5f6b1848e64 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -9,3 +9,19 @@ pub mod bedrock; pub mod tx_type; + +pub use tx_type::OpTxType; + +use reth_node_types::NodePrimitives; +use reth_primitives::{Block, Receipt, TransactionSigned}; + +/// Optimism primitive types. +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct OpPrimitives; + +impl NodePrimitives for OpPrimitives { + type Block = Block; + type SignedTx = TransactionSigned; + type TxType = OpTxType; + type Receipt = Receipt; +} diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 074efc4d514..66c9c2d2e3a 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,8 +1,16 @@ //! Block body abstraction. -use crate::{InMemorySize, MaybeSerde}; use alloc::fmt; + use alloy_consensus::Transaction; +use reth_codecs::Compact; + +use crate::{FullSignedTx, InMemorySize, MaybeSerde}; + +/// Helper trait that unifies all behaviour required by transaction to support full node operations. +pub trait FullBlockBody: BlockBody + Compact {} + +impl FullBlockBody for T where T: BlockBody + Compact {} /// Abstraction for block's body. 
#[auto_impl::auto_impl(&, Arc)] diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 33becad2fea..acee2fd04d1 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -36,7 +36,7 @@ pub use integer_list::{IntegerList, IntegerListError}; pub mod block; pub use block::{ - body::BlockBody, + body::{BlockBody, FullBlockBody}, header::{BlockHeader, FullBlockHeader}, Block, FullBlock, }; From 03992a53ec7f504d3e0d211160d8aa00e699b4fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 13:32:52 +0000 Subject: [PATCH 551/970] chore(deps): bump dcarbone/install-jq-action from 2 to 3 (#12645) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index fa7b4f9f45c..7e6b8747fff 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -71,7 +71,7 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - uses: dcarbone/install-jq-action@v2 + - uses: dcarbone/install-jq-action@v3 - name: Run Wasm checks run: .github/assets/check_wasm.sh From 6615fd2efc8f3f94f1a4924ff3be2f65c474aba9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 19 Nov 2024 14:46:31 +0100 Subject: [PATCH 552/970] chore: re-export header from primitives traits (#12657) --- crates/primitives-traits/src/header/mod.rs | 2 +- crates/primitives-traits/src/header/sealed.rs | 9 ++++----- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives/src/lib.rs | 4 ++-- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index b36a74471ff..ea5f7eafb51 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ 
b/crates/primitives-traits/src/header/mod.rs @@ -1,5 +1,5 @@ mod sealed; -pub use sealed::{BlockWithParent, SealedHeader}; +pub use sealed::{BlockWithParent, Header, SealedHeader}; mod error; pub use error::HeaderError; diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index f4a365e1512..d9931fc95c5 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,16 +1,15 @@ -use core::mem; - -use alloy_consensus::{Header, Sealed}; +use crate::InMemorySize; +pub use alloy_consensus::Header; +use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; use alloy_primitives::{keccak256, BlockHash, Sealable, B256}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; +use core::mem; use derive_more::{AsRef, Deref}; use reth_codecs::add_arbitrary_tests; use serde::{Deserialize, Serialize}; -use crate::InMemorySize; - /// A helper struct to store the block number/hash and its parent hash. #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct BlockWithParent { diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index acee2fd04d1..819825d635f 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -60,7 +60,7 @@ pub use tx_type::{FullTxType, TxType}; pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{BlockWithParent, HeaderError, SealedHeader}; +pub use header::{BlockWithParent, Header, HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. 
/// diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 2618f671927..b2f43892018 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -39,8 +39,8 @@ pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; pub use reth_primitives_traits::{ - logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, HeaderError, Log, LogData, - NodePrimitives, SealedHeader, StorageEntry, + logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, + LogData, NodePrimitives, SealedHeader, StorageEntry, }; pub use static_file::StaticFileSegment; From 66a9d3e424a0f59d7fa71a6e801891ef8e372f69 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 19 Nov 2024 16:33:19 +0100 Subject: [PATCH 553/970] fix: run upkeep manually (#12664) --- crates/node/builder/src/launch/common.rs | 15 +++--- crates/node/metrics/src/recorder.rs | 69 +++++++++++++++++++++--- crates/node/metrics/src/server.rs | 2 +- 3 files changed, 71 insertions(+), 15 deletions(-) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 41fbf93e05d..e01d117e7bc 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -2,6 +2,11 @@ use std::{sync::Arc, thread::available_parallelism}; +use crate::{ + components::{NodeComponents, NodeComponentsBuilder}, + hooks::OnComponentInitializedHook, + BuilderContext, NodeAdapter, +}; use alloy_primitives::{BlockNumber, B256}; use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; @@ -34,6 +39,7 @@ use reth_node_core::{ use reth_node_metrics::{ chain::ChainSpecInfo, hooks::Hooks, + recorder::install_prometheus_recorder, server::{MetricServer, MetricServerConfig}, version::VersionInfo, }; @@ -58,12 +64,6 @@ use tokio::sync::{ oneshot, watch, }; -use crate::{ - components::{NodeComponents, NodeComponentsBuilder}, - hooks::OnComponentInitializedHook, - 
BuilderContext, NodeAdapter, -}; - /// Allows to set a tree viewer for a configured blockchain provider. // TODO: remove this helper trait once the engine revamp is done, the new // blockchain provider won't require a TreeViewer. @@ -509,6 +509,9 @@ where /// Starts the prometheus endpoint. pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { + // ensure recorder runs upkeep periodically + install_prometheus_recorder().spawn_upkeep(); + let listen_addr = self.node_config().metrics; if let Some(addr) = listen_addr { info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); diff --git a/crates/node/metrics/src/recorder.rs b/crates/node/metrics/src/recorder.rs index a7421ab355c..e62b98c81cd 100644 --- a/crates/node/metrics/src/recorder.rs +++ b/crates/node/metrics/src/recorder.rs @@ -3,25 +3,78 @@ use eyre::WrapErr; use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; use metrics_util::layers::{PrefixLayer, Stack}; -use std::sync::LazyLock; +use std::sync::{atomic::AtomicBool, LazyLock}; /// Installs the Prometheus recorder as the global recorder. -pub fn install_prometheus_recorder() -> &'static PrometheusHandle { +/// +/// Note: This must be installed before any metrics are `described`. +/// +/// Caution: This only configures the global recorder and does not spawn the exporter. +/// Callers must run [`PrometheusRecorder::spawn_upkeep`] manually. +pub fn install_prometheus_recorder() -> &'static PrometheusRecorder { &PROMETHEUS_RECORDER_HANDLE } /// The default Prometheus recorder handle. We use a global static to ensure that it is only /// installed once. -static PROMETHEUS_RECORDER_HANDLE: LazyLock = +static PROMETHEUS_RECORDER_HANDLE: LazyLock = LazyLock::new(|| PrometheusRecorder::install().unwrap()); -/// Prometheus recorder installer +/// A handle to the Prometheus recorder. +/// +/// This is intended to be used as the global recorder. +/// Callers must ensure that [`PrometheusRecorder::spawn_upkeep`] is called once. 
#[derive(Debug)] -pub struct PrometheusRecorder; +pub struct PrometheusRecorder { + handle: PrometheusHandle, + upkeep: AtomicBool, +} impl PrometheusRecorder { + const fn new(handle: PrometheusHandle) -> Self { + Self { handle, upkeep: AtomicBool::new(false) } + } + + /// Returns a reference to the [`PrometheusHandle`]. + pub const fn handle(&self) -> &PrometheusHandle { + &self.handle + } + + /// Spawns the upkeep task if there hasn't been one spawned already. + /// + /// ## Panics + /// + /// This method must be called from within an existing Tokio runtime or it will panic. + /// + /// See also [`PrometheusHandle::run_upkeep`] + pub fn spawn_upkeep(&self) { + if self + .upkeep + .compare_exchange( + false, + true, + std::sync::atomic::Ordering::SeqCst, + std::sync::atomic::Ordering::Acquire, + ) + .is_err() + { + return; + } + + let handle = self.handle.clone(); + tokio::spawn(async move { + loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + handle.run_upkeep(); + } + }); + } + /// Installs Prometheus as the metrics recorder. - pub fn install() -> eyre::Result { + /// + /// Caution: This only configures the global recorder and does not spawn the exporter. + /// Callers must run [`Self::spawn_upkeep`] manually. 
+ pub fn install() -> eyre::Result { let recorder = PrometheusBuilder::new().build_recorder(); let handle = recorder.handle(); @@ -31,7 +84,7 @@ impl PrometheusRecorder { .install() .wrap_err("Couldn't set metrics recorder.")?; - Ok(handle) + Ok(Self::new(handle)) } } @@ -52,7 +105,7 @@ mod tests { process.describe(); process.collect(); - let metrics = PROMETHEUS_RECORDER_HANDLE.render(); + let metrics = PROMETHEUS_RECORDER_HANDLE.handle.render(); assert!(metrics.contains("process_cpu_seconds_total"), "{metrics:?}"); } } diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index 87521349d4d..22c064d62f8 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -103,7 +103,7 @@ impl MetricServer { let hook = hook.clone(); let service = tower::service_fn(move |_| { (hook)(); - let metrics = handle.render(); + let metrics = handle.handle().render(); let mut response = Response::new(metrics); response .headers_mut() From 1e7189d3e4f2b7e744bf59906a791784621cf6e9 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 19 Nov 2024 19:39:28 +0400 Subject: [PATCH 554/970] feat: trait-based storage API (#12616) Co-authored-by: joshie <93316087+joshieDo@users.noreply.github.com> --- Cargo.lock | 5 + bin/reth/src/commands/debug_cmd/execution.rs | 2 +- crates/cli/commands/Cargo.toml | 1 + crates/cli/commands/src/common.rs | 26 ++++- crates/cli/commands/src/import.rs | 2 +- crates/cli/commands/src/stage/unwind.rs | 2 +- crates/e2e-test-utils/Cargo.toml | 2 + crates/e2e-test-utils/src/lib.rs | 15 ++- crates/ethereum/node/src/node.rs | 11 +- crates/exex/test-utils/src/lib.rs | 16 ++- crates/node/builder/src/builder/mod.rs | 29 +++-- crates/node/builder/src/launch/common.rs | 29 +++-- crates/node/builder/src/launch/engine.rs | 6 +- crates/node/builder/src/launch/mod.rs | 10 +- crates/node/builder/src/node.rs | 2 + crates/node/builder/src/setup.rs | 9 +- crates/node/types/src/lib.rs | 106 +++++++++++++----- 
.../cli/src/commands/build_pipeline.rs | 3 +- crates/optimism/node/Cargo.toml | 1 + crates/optimism/node/src/node.rs | 61 +++++++++- crates/primitives-traits/src/block/mod.rs | 4 +- crates/primitives/src/lib.rs | 9 ++ crates/stages/stages/src/stages/bodies.rs | 5 +- .../stages/stages/src/test_utils/test_db.rs | 4 +- crates/storage/provider/Cargo.toml | 3 + .../provider/src/providers/database/chain.rs | 26 +++++ .../provider/src/providers/database/mod.rs | 22 +++- .../src/providers/database/provider.rs | 74 ++++++------ crates/storage/provider/src/providers/mod.rs | 32 +++++- .../storage/provider/src/test_utils/mock.rs | 11 +- crates/storage/provider/src/test_utils/mod.rs | 12 +- crates/storage/provider/src/traits/block.rs | 2 +- crates/storage/storage-api/Cargo.toml | 1 + crates/storage/storage-api/src/chain.rs | 72 ++++++++++++ crates/storage/storage-api/src/lib.rs | 3 + examples/custom-engine-types/src/main.rs | 15 ++- 36 files changed, 485 insertions(+), 148 deletions(-) create mode 100644 crates/storage/provider/src/providers/database/chain.rs create mode 100644 crates/storage/storage-api/src/chain.rs diff --git a/Cargo.lock b/Cargo.lock index adacf448f20..1b38e056655 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6683,6 +6683,7 @@ dependencies = [ "reth-network", "reth-network-p2p", "reth-network-peers", + "reth-node-api", "reth-node-builder", "reth-node-core", "reth-node-events", @@ -7089,10 +7090,12 @@ dependencies = [ "reth-db", "reth-engine-local", "reth-network-peers", + "reth-node-api", "reth-node-builder", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", + "reth-primitives", "reth-provider", "reth-rpc-layer", "reth-stages-types", @@ -8626,6 +8629,7 @@ dependencies = [ "reth-node-types", "reth-optimism-primitives", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-stages-types", "reth-storage-api", @@ -9189,6 +9193,7 @@ dependencies = [ "reth-db-models", "reth-execution-types", "reth-primitives", 
+ "reth-primitives-traits", "reth-prune-types", "reth-stages-types", "reth-storage-errors", diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index da928645b9f..a6203ea2a73 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -59,7 +59,7 @@ pub struct Command { } impl> Command { - fn build_pipeline, Client>( + fn build_pipeline + CliNodeTypes, Client>( &self, config: &Config, client: Client, diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 7e27d9b4e2e..90acb82d71d 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -32,6 +32,7 @@ reth-fs-util.workspace = true reth-network = { workspace = true, features = ["serde"] } reth-network-p2p.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } +reth-node-api.workspace = true reth-node-builder.workspace = true reth-node-core.workspace = true reth-node-events.workspace = true diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 0e4eb2723c3..e557f15da6b 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -10,12 +10,16 @@ use reth_db::{init_db, open_db_read_only, DatabaseEnv}; use reth_db_common::init::init_genesis; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; +use reth_node_api::FullNodePrimitives; use reth_node_builder::{NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_core::{ args::{DatabaseArgs, DatadirArgs}, dirs::{ChainPath, DataDirPath}, }; -use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; +use reth_provider::{ + providers::{NodeTypesForProvider, StaticFileProvider}, + ProviderFactory, StaticFileProviderFactory, +}; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; use 
reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; @@ -191,5 +195,21 @@ impl AccessRights { /// Helper trait with a common set of requirements for the /// [`NodeTypes`](reth_node_builder::NodeTypes) in CLI. -pub trait CliNodeTypes: NodeTypesWithEngine {} -impl CliNodeTypes for N where N: NodeTypesWithEngine {} +pub trait CliNodeTypes: + NodeTypesWithEngine + + NodeTypesForProvider< + Primitives: FullNodePrimitives< + Block: reth_node_api::Block, + >, + > +{ +} +impl CliNodeTypes for N where + N: NodeTypesWithEngine + + NodeTypesForProvider< + Primitives: FullNodePrimitives< + Block: reth_node_api::Block, + >, + > +{ +} diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index 539211a22f7..c1f6408b49b 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -167,7 +167,7 @@ pub fn build_import_pipeline( executor: E, ) -> eyre::Result<(Pipeline, impl Stream)> where - N: ProviderNodeTypes, + N: ProviderNodeTypes + CliNodeTypes, C: Consensus + 'static, E: BlockExecutorProvider, { diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index e71861a988d..4f47a70b02d 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -113,7 +113,7 @@ impl> Command Ok(()) } - fn build_pipeline>( + fn build_pipeline + CliNodeTypes>( self, config: Config, provider_factory: ProviderFactory, diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index c4c74ebcdf1..9c40e2ba99d 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -19,7 +19,9 @@ reth-rpc-layer.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true +reth-primitives.workspace = true reth-provider.workspace = true +reth-node-api.workspace = true reth-node-builder = { workspace 
= true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-stages-types.workspace = true diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 1e9b39058e6..df459f641b4 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -5,12 +5,12 @@ use std::sync::Arc; use node::NodeTestContext; use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - builder::{NodeBuilder, NodeConfig, NodeHandle}, + builder::{FullNodePrimitives, NodeBuilder, NodeConfig, NodeHandle}, network::PeersHandleProvider, rpc::server_types::RpcModuleSelection, tasks::TaskManager, }; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_node_builder::{ @@ -18,7 +18,7 @@ use reth_node_builder::{ FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodeTypesWithDBAdapter, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, }; -use reth_provider::providers::{BlockchainProvider, BlockchainProvider2}; +use reth_provider::providers::{BlockchainProvider, BlockchainProvider2, NodeTypesForProvider}; use tracing::{span, Level}; use wallet::Wallet; @@ -53,12 +53,14 @@ pub async fn setup( attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + Node> + NodeTypesWithEngine, + N: Default + Node> + NodeTypesForProvider + NodeTypesWithEngine, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, >, N::AddOns: RethRpcAddOns>, + N::Primitives: + FullNodePrimitives>, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -120,7 +122,8 @@ pub async fn setup_engine( where N: Default + Node>>> - + NodeTypesWithEngine, + + NodeTypesWithEngine + + NodeTypesForProvider, 
N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< @@ -132,6 +135,8 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, + N::Primitives: + FullNodePrimitives>, { let tasks = TaskManager::current(); let exec = tasks.executor(); diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 1615ef0e686..a2ae2374b96 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -26,7 +26,7 @@ use reth_node_builder::{ }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::EthPrimitives; -use reth_provider::CanonStateSubscriptions; +use reth_provider::{CanonStateSubscriptions, EthStorage}; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -74,6 +74,7 @@ impl NodeTypes for EthereumNode { type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } impl NodeTypesWithEngine for EthereumNode { @@ -94,7 +95,13 @@ pub type EthereumAddOns = RpcAddOns< impl Node for EthereumNode where - Types: NodeTypesWithDB + NodeTypesWithEngine, + Types: NodeTypesWithDB + + NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 5c3468a3c1c..5b2267505c5 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -45,10 +45,10 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{Head, SealedBlockWithSenders}; +use reth_primitives::{EthPrimitives, Head, SealedBlockWithSenders}; use reth_provider::{ providers::{BlockchainProvider, 
StaticFileProvider}, - BlockReader, ProviderFactory, + BlockReader, EthStorage, ProviderFactory, }; use reth_tasks::TaskManager; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; @@ -118,9 +118,10 @@ where pub struct TestNode; impl NodeTypes for TestNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = reth_trie_db::MerklePatriciaTrie; + type Storage = EthStorage; } impl NodeTypesWithEngine for TestNode { @@ -129,7 +130,14 @@ impl NodeTypesWithEngine for TestNode { impl Node for TestNode where - N: FullNodeTypes>, + N: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, + >, { type ComponentsBuilder = ComponentsBuilder< N, diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 89892ed5985..3ad90a493f1 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -22,8 +22,8 @@ use reth_network::{ NetworkHandle, NetworkManager, }; use reth_node_api::{ - FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, NodeTypesWithDBAdapter, - NodeTypesWithEngine, + FullNodePrimitives, FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, + NodeTypesWithDBAdapter, NodeTypesWithEngine, }; use reth_node_core::{ cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, @@ -31,7 +31,10 @@ use reth_node_core::{ node_config::NodeConfig, primitives::Head, }; -use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, FullProvider}; +use reth_provider::{ + providers::{BlockchainProvider, NodeTypesForProvider}, + ChainSpecProvider, FullProvider, +}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; use revm_primitives::EnvKzgSettings; @@ -240,7 +243,7 @@ where /// Configures the types of the node. 
pub fn with_types(self) -> NodeBuilderWithTypes> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, { self.with_types_and_provider() } @@ -250,7 +253,7 @@ where self, ) -> NodeBuilderWithTypes, P>> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, P: FullProvider>, { NodeBuilderWithTypes::new(self.config, self.database) @@ -264,7 +267,7 @@ where node: N, ) -> NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns> where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -301,7 +304,7 @@ where /// Configures the types of the node. pub fn with_types(self) -> WithLaunchContext>> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, { WithLaunchContext { builder: self.builder.with_types(), task_executor: self.task_executor } } @@ -313,7 +316,7 @@ where NodeBuilderWithTypes, P>>, > where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, P: FullProvider>, { WithLaunchContext { @@ -332,7 +335,7 @@ where NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns>, > where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -355,13 +358,15 @@ where >, > where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, N::AddOns: RethRpcAddOns< NodeAdapter< RethFullAdapter, >>::Components, >, >, + N::Primitives: + FullNodePrimitives>, { self.node(node).launch().await } @@ -549,9 +554,11 @@ where impl WithLaunchContext, CB, AO>> where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, CB: NodeComponentsBuilder>, AO: RethRpcAddOns, CB::Components>>, + 
T::Primitives: + FullNodePrimitives>, { /// Launches the node with the [`DefaultNodeLauncher`] that sets up engine API consensus and rpc pub async fn launch( diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index e01d117e7bc..903b0980354 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -26,7 +26,7 @@ use reth_evm::noop::NoopBlockExecutorProvider; use reth_fs_util as fs; use reth_invalid_block_hooks::InvalidBlockWitnessHook; use reth_network_p2p::headers::client::HeadersClient; -use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithDB}; +use reth_node_api::{FullNodePrimitives, FullNodeTypes, NodeTypes, NodeTypesWithDB}; use reth_node_core::{ args::InvalidBlockHookType, dirs::{ChainPath, DataDirPath}, @@ -404,9 +404,12 @@ where /// Returns the [`ProviderFactory`] for the attached storage after executing a consistent check /// between the database and static files. **It may execute a pipeline unwind if it fails this /// check.** - pub async fn create_provider_factory>( - &self, - ) -> eyre::Result> { + pub async fn create_provider_factory(&self) -> eyre::Result> + where + N: ProviderNodeTypes, + N::Primitives: + FullNodePrimitives>, + { let factory = ProviderFactory::new( self.right().clone(), self.chain_spec(), @@ -467,9 +470,14 @@ where } /// Creates a new [`ProviderFactory`] and attaches it to the launch context. - pub async fn with_provider_factory>( + pub async fn with_provider_factory( self, - ) -> eyre::Result, ProviderFactory>>> { + ) -> eyre::Result, ProviderFactory>>> + where + N: ProviderNodeTypes, + N::Primitives: + FullNodePrimitives>, + { let factory = self.create_provider_factory().await?; let ctx = LaunchContextWith { inner: self.inner, @@ -482,7 +490,7 @@ where impl LaunchContextWith, ProviderFactory>> where - T: NodeTypesWithDB, + T: ProviderNodeTypes, { /// Returns access to the underlying database. 
pub const fn database(&self) -> &T::DB { @@ -748,10 +756,7 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes< - Provider: WithTree, - Types: NodeTypes, - >, + T: FullNodeTypes, CB: NodeComponentsBuilder, { /// Returns the configured `ProviderFactory`. @@ -913,7 +918,7 @@ impl where T: FullNodeTypes< Provider: WithTree + StateProviderFactory + ChainSpecProvider, - Types: NodeTypes, + Types: ProviderNodeTypes, >, CB: NodeComponentsBuilder, { diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 86ab0b9a3d7..5a8405047b0 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -19,8 +19,8 @@ use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; use reth_network_api::{BlockDownloaderProvider, NetworkEventListenerProvider}; use reth_node_api::{ - BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadBuilder, - PayloadTypes, + BuiltPayload, FullNodePrimitives, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, + PayloadBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -77,6 +77,8 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, + Types::Primitives: + FullNodePrimitives>, { type Node = NodeHandle, AO>; diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 4f9e850c97f..c4146f48306 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -17,18 +17,18 @@ use reth_beacon_consensus::{ BeaconConsensusEngine, }; use reth_blockchain_tree::{noop::NoopBlockchainTree, BlockchainTreeConfig}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; use 
reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider}; -use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_api::{AddOnsContext, FullNodePrimitives, FullNodeTypes, NodeTypesWithEngine}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_provider::providers::BlockchainProvider; +use reth_provider::providers::{BlockchainProvider, ProviderNodeTypes}; use reth_rpc::eth::RpcNodeCore; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; @@ -98,10 +98,12 @@ impl DefaultNodeLauncher { impl LaunchNode> for DefaultNodeLauncher where - Types: NodeTypesWithDB + NodeTypesWithEngine, + Types: ProviderNodeTypes + NodeTypesWithEngine, T: FullNodeTypes, Types = Types>, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, + Types::Primitives: + FullNodePrimitives>, { type Node = NodeHandle, AO>; diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 62c710ea802..ce7d12fee3d 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -71,6 +71,8 @@ where type ChainSpec = ::ChainSpec; type StateCommitment = ::StateCommitment; + + type Storage = ::Storage; } impl NodeTypesWithEngine for AnyNode diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index db188402ca8..337e37eeedd 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -14,6 +14,7 @@ use reth_exex::ExExManagerHandle; use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, EthBlockClient, }; +use reth_node_api::FullNodePrimitives; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use 
reth_static_file::StaticFileProducer; @@ -40,6 +41,8 @@ where N: ProviderNodeTypes, Client: EthBlockClient + 'static, Executor: BlockExecutorProvider, + N::Primitives: + FullNodePrimitives>, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -85,8 +88,12 @@ pub fn build_pipeline( where N: ProviderNodeTypes, H: HeaderDownloader
+ 'static, - B: BodyDownloader + 'static, + B: BodyDownloader< + Body = <::Block as reth_node_api::Block>::Body, + > + 'static, Executor: BlockExecutorProvider, + N::Primitives: + FullNodePrimitives>, { let mut builder = Pipeline::::builder(); diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index f8770a3c014..2da8180a956 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -9,12 +9,11 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] +use core::{fmt::Debug, marker::PhantomData}; pub use reth_primitives_traits::{ Block, BlockBody, FullBlock, FullNodePrimitives, FullReceipt, FullSignedTx, NodePrimitives, }; -use core::marker::PhantomData; - use reth_chainspec::EthChainSpec; use reth_db_api::{ database_metrics::{DatabaseMetadata, DatabaseMetrics}, @@ -35,6 +34,8 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static { type ChainSpec: EthChainSpec; /// The type used to perform state commitment operations. type StateCommitment: StateCommitment; + /// The type responsible for writing chain primitives to storage. + type Storage: Default + Send + Sync + Unpin + Debug + 'static; } /// The type that configures an Ethereum-like node with an engine for consensus. @@ -86,6 +87,7 @@ where type Primitives = Types::Primitives; type ChainSpec = Types::ChainSpec; type StateCommitment = Types::StateCommitment; + type Storage = Types::Storage; } impl NodeTypesWithEngine for NodeTypesWithDBAdapter @@ -105,86 +107,128 @@ where } /// A [`NodeTypes`] type builder. -#[derive(Default, Debug)] -pub struct AnyNodeTypes

(PhantomData

, PhantomData, PhantomData); +#[derive(Debug)] +pub struct AnyNodeTypes

( + PhantomData

, + PhantomData, + PhantomData, + PhantomData, +); + +impl Default for AnyNodeTypes { + fn default() -> Self { + Self::new() + } +} + +impl AnyNodeTypes { + /// Creates a new instance of [`AnyNodeTypes`]. + pub const fn new() -> Self { + Self(PhantomData, PhantomData, PhantomData, PhantomData) + } -impl AnyNodeTypes { /// Sets the `Primitives` associated type. - pub const fn primitives(self) -> AnyNodeTypes { - AnyNodeTypes::(PhantomData::, PhantomData::, PhantomData::) + pub const fn primitives(self) -> AnyNodeTypes { + AnyNodeTypes::new() } /// Sets the `ChainSpec` associated type. - pub const fn chain_spec(self) -> AnyNodeTypes { - AnyNodeTypes::(PhantomData::

, PhantomData::, PhantomData::) + pub const fn chain_spec(self) -> AnyNodeTypes { + AnyNodeTypes::new() } /// Sets the `StateCommitment` associated type. - pub const fn state_commitment(self) -> AnyNodeTypes { - AnyNodeTypes::(PhantomData::

, PhantomData::, PhantomData::) + pub const fn state_commitment(self) -> AnyNodeTypes { + AnyNodeTypes::new() + } + + /// Sets the `Storage` associated type. + pub const fn storage(self) -> AnyNodeTypes { + AnyNodeTypes::new() } } -impl NodeTypes for AnyNodeTypes +impl NodeTypes for AnyNodeTypes where P: NodePrimitives + Send + Sync + Unpin + 'static, C: EthChainSpec + 'static, - S: StateCommitment, + SC: StateCommitment, + S: Default + Send + Sync + Unpin + Debug + 'static, { type Primitives = P; type ChainSpec = C; - type StateCommitment = S; + type StateCommitment = SC; + type Storage = S; } /// A [`NodeTypesWithEngine`] type builder. -#[derive(Default, Debug)] -pub struct AnyNodeTypesWithEngine

{ +#[derive(Debug)] +pub struct AnyNodeTypesWithEngine

{ /// Embedding the basic node types. - base: AnyNodeTypes, + _base: AnyNodeTypes, /// Phantom data for the engine. _engine: PhantomData, } -impl AnyNodeTypesWithEngine { +impl Default for AnyNodeTypesWithEngine { + fn default() -> Self { + Self::new() + } +} + +impl AnyNodeTypesWithEngine { + /// Creates a new instance of [`AnyNodeTypesWithEngine`]. + pub const fn new() -> Self { + Self { _base: AnyNodeTypes::new(), _engine: PhantomData } + } + /// Sets the `Primitives` associated type. - pub const fn primitives(self) -> AnyNodeTypesWithEngine { - AnyNodeTypesWithEngine { base: self.base.primitives::(), _engine: PhantomData } + pub const fn primitives(self) -> AnyNodeTypesWithEngine { + AnyNodeTypesWithEngine::new() } /// Sets the `Engine` associated type. - pub const fn engine(self) -> AnyNodeTypesWithEngine { - AnyNodeTypesWithEngine { base: self.base, _engine: PhantomData:: } + pub const fn engine(self) -> AnyNodeTypesWithEngine { + AnyNodeTypesWithEngine::new() } /// Sets the `ChainSpec` associated type. - pub const fn chain_spec(self) -> AnyNodeTypesWithEngine { - AnyNodeTypesWithEngine { base: self.base.chain_spec::(), _engine: PhantomData } + pub const fn chain_spec(self) -> AnyNodeTypesWithEngine { + AnyNodeTypesWithEngine::new() } /// Sets the `StateCommitment` associated type. - pub const fn state_commitment(self) -> AnyNodeTypesWithEngine { - AnyNodeTypesWithEngine { base: self.base.state_commitment::(), _engine: PhantomData } + pub const fn state_commitment(self) -> AnyNodeTypesWithEngine { + AnyNodeTypesWithEngine::new() + } + + /// Sets the `Storage` associated type. 
+ pub const fn storage(self) -> AnyNodeTypesWithEngine { + AnyNodeTypesWithEngine::new() } } -impl NodeTypes for AnyNodeTypesWithEngine +impl NodeTypes for AnyNodeTypesWithEngine where P: NodePrimitives + Send + Sync + Unpin + 'static, E: EngineTypes + Send + Sync + Unpin, C: EthChainSpec + 'static, - S: StateCommitment, + SC: StateCommitment, + S: Default + Send + Sync + Unpin + Debug + 'static, { type Primitives = P; type ChainSpec = C; - type StateCommitment = S; + type StateCommitment = SC; + type Storage = S; } -impl NodeTypesWithEngine for AnyNodeTypesWithEngine +impl NodeTypesWithEngine for AnyNodeTypesWithEngine where P: NodePrimitives + Send + Sync + Unpin + 'static, E: EngineTypes + Send + Sync + Unpin, C: EthChainSpec + 'static, - S: StateCommitment, + SC: StateCommitment, + S: Default + Send + Sync + Unpin + Debug + 'static, { type Engine = E; } diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs index 88dc0989717..8ebefdcc0b4 100644 --- a/crates/optimism/cli/src/commands/build_pipeline.rs +++ b/crates/optimism/cli/src/commands/build_pipeline.rs @@ -1,5 +1,6 @@ use alloy_primitives::B256; use futures_util::{Stream, StreamExt}; +use reth_cli_commands::common::CliNodeTypes; use reth_config::Config; use reth_consensus::Consensus; use reth_downloaders::{ @@ -38,7 +39,7 @@ pub(crate) async fn build_import_pipeline( disable_exec: bool, ) -> eyre::Result<(Pipeline, impl Stream)> where - N: ProviderNodeTypes, + N: CliNodeTypes + ProviderNodeTypes, C: Consensus + 'static, { if !file_client.has_canonical_blocks() { diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 03ea75a26cd..2e3e9fb4f1d 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-db.workspace = true reth-engine-local.workspace = true reth-primitives.workspace = true 
reth-payload-builder.workspace = true diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 70f32c01ffd..6cdffd09059 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -5,10 +5,12 @@ use std::sync::Arc; use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::{EthChainSpec, Hardforks}; +use reth_db::transaction::{DbTx, DbTxMut}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, PayloadBuilder, + AddOnsContext, EngineValidator, FullNodeComponents, FullNodePrimitives, NodeAddOns, + PayloadBuilder, }; use reth_node_builder::{ components::{ @@ -28,7 +30,11 @@ use reth_optimism_rpc::{ OpEthApi, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_provider::CanonStateSubscriptions; +use reth_primitives::{Block, BlockBody, Receipt, TransactionSigned, TxType}; +use reth_provider::{ + providers::ChainStorage, BlockBodyWriter, CanonStateSubscriptions, DBProvider, EthStorage, + ProviderResult, +}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -43,7 +49,42 @@ use crate::{ txpool::{OpTransactionPool, OpTransactionValidator}, OpEngineTypes, }; +/// Optimism primitive types. +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct OpPrimitives; + +impl FullNodePrimitives for OpPrimitives { + type Block = Block; + type SignedTx = TransactionSigned; + type TxType = TxType; + type Receipt = Receipt; +} + +/// Storage implementation for Optimism. 
+#[derive(Debug, Default, Clone)] +pub struct OpStorage(EthStorage); + +impl> BlockBodyWriter for OpStorage { + fn write_block_bodies( + &self, + provider: &Provider, + bodies: Vec<(u64, Option)>, + ) -> ProviderResult<()> { + self.0.write_block_bodies(provider, bodies) + } +} +impl ChainStorage for OpStorage { + fn writer( + &self, + ) -> impl reth_provider::ChainStorageWriter, OpPrimitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypes, + { + self + } +} /// Type configuration for a regular Optimism node. #[derive(Debug, Default, Clone)] #[non_exhaustive] @@ -90,7 +131,14 @@ impl OpNode { impl Node for OpNode where - N: FullNodeTypes>, + N: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Storage = OpStorage, + >, + >, { type ComponentsBuilder = ComponentsBuilder< N, @@ -115,9 +163,10 @@ where } impl NodeTypes for OpNode { - type Primitives = reth_primitives::EthPrimitives; // todo: replace with OpPrimitives when EthPrimitives is only used in reth-ethereum-* crates + type Primitives = OpPrimitives; type ChainSpec = OpChainSpec; type StateCommitment = MerklePatriciaTrie; + type Storage = OpStorage; } impl NodeTypesWithEngine for OpNode { @@ -144,7 +193,7 @@ impl OpAddOns { impl NodeAddOns for OpAddOns where N: FullNodeComponents< - Types: NodeTypes, + Types: NodeTypes, PayloadBuilder: PayloadBuilder::Engine>, >, OpEngineValidator: EngineValidator<::Engine>, @@ -172,7 +221,7 @@ where impl RethRpcAddOns for OpAddOns where N: FullNodeComponents< - Types: NodeTypes, + Types: NodeTypes, PayloadBuilder: PayloadBuilder::Engine>, >, OpEngineValidator: EngineValidator<::Engine>, diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 5b1faeafbb7..67658c39e07 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -10,9 +10,9 @@ use reth_codecs::Compact; use crate::{BlockHeader, 
FullBlockHeader, InMemorySize, MaybeSerde}; /// Helper trait that unifies all behaviour required by block to support full node operations. -pub trait FullBlock: Block + Compact {} +pub trait FullBlock: Block {} -impl FullBlock for T where T: Block + Compact {} +impl FullBlock for T where T: Block {} /// Abstraction of block data type. // todo: make sealable super-trait, depends on diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index b2f43892018..c3682ecba1d 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -79,6 +79,15 @@ pub mod serde_bincode_compat { #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct EthPrimitives; +#[cfg(feature = "reth-codec")] +impl reth_primitives_traits::FullNodePrimitives for EthPrimitives { + type Block = crate::Block; + type SignedTx = crate::TransactionSigned; + type TxType = crate::TxType; + type Receipt = crate::Receipt; +} + +#[cfg(not(feature = "reth-codec"))] impl NodePrimitives for EthPrimitives { type Block = crate::Block; type SignedTx = crate::TransactionSigned; diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 07b97574972..80185eade87 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -198,7 +198,10 @@ where // Write bodies to database. This will NOT write transactions to database as we've already // written them directly to static files. 
provider.append_block_bodies( - buffer.into_iter().map(|response| (response.block_number(), response.into_body())), + buffer + .into_iter() + .map(|response| (response.block_number(), response.into_body())) + .collect(), )?; // The stage is "done" if: diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 772e9cb78d0..2f9712f8436 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -15,7 +15,7 @@ use reth_db_api::{ DatabaseError as DbError, }; use reth_primitives::{ - Account, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, + Account, EthPrimitives, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, @@ -142,7 +142,7 @@ impl TestStageDB { /// Insert header to static file if `writer` exists, otherwise to DB. pub fn insert_header( - writer: Option<&mut StaticFileProviderRWRefMut<'_, ()>>, + writer: Option<&mut StaticFileProviderRWRefMut<'_, EthPrimitives>>, tx: &TX, header: &SealedHeader, td: U256, diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index eff0540638a..674f02adabc 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -17,6 +17,7 @@ reth-chainspec.workspace = true reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true reth-primitives = { workspace = true, features = ["reth-codec", "secp256k1"] } +reth-primitives-traits.workspace = true reth-fs-util.workspace = true reth-errors.workspace = true reth-storage-errors.workspace = true @@ -111,6 +112,7 @@ serde = [ "revm/serde", "reth-codecs/serde", "reth-optimism-primitives?/serde", + "reth-primitives-traits/serde", ] test-utils = [ "reth-db/test-utils", @@ -122,6 +124,7 @@ test-utils = [ "reth-evm/test-utils", "reth-network-p2p/test-utils", 
"reth-primitives/test-utils", + "reth-primitives-traits/test-utils", "reth-codecs/test-utils", "reth-db-api/test-utils", "reth-trie-db/test-utils", diff --git a/crates/storage/provider/src/providers/database/chain.rs b/crates/storage/provider/src/providers/database/chain.rs new file mode 100644 index 00000000000..8f9a6395a9d --- /dev/null +++ b/crates/storage/provider/src/providers/database/chain.rs @@ -0,0 +1,26 @@ +use crate::{providers::NodeTypes, DatabaseProvider}; +use reth_db::transaction::{DbTx, DbTxMut}; +use reth_node_types::FullNodePrimitives; +use reth_primitives::EthPrimitives; +use reth_storage_api::{ChainStorageWriter, EthStorage}; + +/// Trait that provides access to implementations of [`ChainStorage`] +pub trait ChainStorage: Send + Sync { + /// Provides access to the chain writer. + fn writer(&self) -> impl ChainStorageWriter, Primitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypes; +} + +impl ChainStorage for EthStorage { + fn writer( + &self, + ) -> impl ChainStorageWriter, EthPrimitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypes, + { + self + } +} diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 94c83bbb442..a64bb2578dd 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -44,6 +44,9 @@ use super::ProviderNodeTypes; mod metrics; +mod chain; +pub use chain::*; + /// A common provider that fetches data from a database or static file. /// /// This provider implements most provider or provider factory traits. @@ -56,19 +59,22 @@ pub struct ProviderFactory { static_file_provider: StaticFileProvider, /// Optional pruning configuration prune_modes: PruneModes, + /// The node storage handler. 
+ storage: Arc, } impl fmt::Debug for ProviderFactory where - N: NodeTypesWithDB, + N: NodeTypesWithDB, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Self { db, chain_spec, static_file_provider, prune_modes } = self; + let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self; f.debug_struct("ProviderFactory") .field("db", &db) .field("chain_spec", &chain_spec) .field("static_file_provider", &static_file_provider) .field("prune_modes", &prune_modes) + .field("storage", &storage) .finish() } } @@ -80,7 +86,13 @@ impl ProviderFactory { chain_spec: Arc, static_file_provider: StaticFileProvider, ) -> Self { - Self { db, chain_spec, static_file_provider, prune_modes: PruneModes::none() } + Self { + db, + chain_spec, + static_file_provider, + prune_modes: PruneModes::none(), + storage: Default::default(), + } } /// Enables metrics on the static file provider. @@ -121,6 +133,7 @@ impl>> ProviderFactory { chain_spec, static_file_provider, prune_modes: PruneModes::none(), + storage: Default::default(), }) } } @@ -139,6 +152,7 @@ impl ProviderFactory { self.chain_spec.clone(), self.static_file_provider.clone(), self.prune_modes.clone(), + self.storage.clone(), )) } @@ -153,6 +167,7 @@ impl ProviderFactory { self.chain_spec.clone(), self.static_file_provider.clone(), self.prune_modes.clone(), + self.storage.clone(), ))) } @@ -617,6 +632,7 @@ impl Clone for ProviderFactory { chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), prune_modes: self.prune_modes.clone(), + storage: self.storage.clone(), } } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index b93112e7084..4690e27821e 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,19 +1,24 @@ use crate::{ bundle_state::StorageRevertsIter, - providers::{database::metrics, 
static_file::StaticFileWriter, StaticFileProvider}, + providers::{ + database::{chain::ChainStorage, metrics}, + static_file::StaticFileWriter, + ProviderNodeTypes, StaticFileProvider, + }, to_range, traits::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, writer::UnifiedStorageWriter, - AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, - BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, EvmEnvProvider, - HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, - HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, - StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, - StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + AccountReader, BlockBodyWriter, BlockExecutionWriter, BlockHashReader, BlockNumReader, + BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, + DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, + HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, + LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, + PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, + StateChangeWriter, StateProviderBox, StateReader, StateWriter, StaticFileProviderFactory, + StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, + TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; use alloy_consensus::Header; use alloy_eips::{ @@ -47,6 +52,7 @@ use reth_primitives::{ SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, 
TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, }; +use reth_primitives_traits::{BlockBody as _, FullNodePrimitives}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider}; @@ -138,6 +144,8 @@ pub struct DatabaseProvider { static_file_provider: StaticFileProvider, /// Pruning configuration prune_modes: PruneModes, + /// Node storage handler. + storage: Arc, } impl DatabaseProvider { @@ -224,8 +232,9 @@ impl DatabaseProvider { chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, + storage: Arc, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes } + Self { tx, chain_spec, static_file_provider, prune_modes, storage } } } @@ -277,9 +286,7 @@ impl TryIntoHistoricalStateProvider for Databa } } -impl + 'static> - DatabaseProvider -{ +impl DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] /// Inserts an historical block. **Used for setting up test environments** @@ -367,8 +374,9 @@ impl DatabaseProvider { chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, + storage: Arc, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes } + Self { tx, chain_spec, static_file_provider, prune_modes, storage } } /// Consume `DbTx` or `DbTxMut`. 
@@ -2899,8 +2907,8 @@ impl StateReader for DatabaseProvider { } } -impl + 'static> - BlockExecutionWriter for DatabaseProvider +impl BlockExecutionWriter + for DatabaseProvider { fn take_block_and_execution_range( &self, @@ -3101,10 +3109,11 @@ impl + } } -impl + 'static> BlockWriter +impl BlockWriter for DatabaseProvider { - type Body = BlockBody; + type Body = + <::Block as reth_primitives_traits::Block>::Body; /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) @@ -3266,45 +3275,32 @@ impl + fn append_block_bodies( &self, - bodies: impl Iterator)>, + bodies: Vec<(BlockNumber, Option)>, ) -> ProviderResult<()> { let mut block_indices_cursor = self.tx.cursor_write::()?; let mut tx_block_cursor = self.tx.cursor_write::()?; - let mut ommers_cursor = self.tx.cursor_write::()?; - let mut withdrawals_cursor = self.tx.cursor_write::()?; // Get id for the next tx_num of zero if there are no transactions. let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); - for (block_number, body) in bodies { - let tx_count = body.as_ref().map(|b| b.transactions.len() as u64).unwrap_or_default(); + for (block_number, body) in &bodies { + let tx_count = body.as_ref().map(|b| b.transactions().len() as u64).unwrap_or_default(); let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count }; // insert block meta - block_indices_cursor.append(block_number, block_indices)?; + block_indices_cursor.append(*block_number, block_indices)?; next_tx_num += tx_count; let Some(body) = body else { continue }; // write transaction block index - if !body.transactions.is_empty() { - tx_block_cursor.append(block_indices.last_tx_num(), block_number)?; - } - - // Write ommers if any - if !body.ommers.is_empty() { - ommers_cursor.append(block_number, StoredBlockOmmers { ommers: body.ommers })?; - } - - // Write withdrawals if any - if let Some(withdrawals) = 
body.withdrawals { - if !withdrawals.is_empty() { - withdrawals_cursor - .append(block_number, StoredBlockWithdrawals { withdrawals })?; - } + if !body.transactions().is_empty() { + tx_block_cursor.append(block_indices.last_tx_num(), *block_number)?; } } + self.storage.writer().write_block_bodies(self, bodies)?; + Ok(()) } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 3bf3e7b247f..d049243377e 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -22,7 +22,7 @@ use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubs use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB}; use reth_primitives::{ Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, @@ -37,6 +37,7 @@ use std::{ sync::Arc, time::Instant, }; + use tracing::trace; mod database; @@ -67,10 +68,35 @@ pub use blockchain_provider::BlockchainProvider2; mod consistent; pub use consistent::ConsistentProvider; +/// Helper trait to bound [`NodeTypes`] so that combined with database they satisfy +/// [`ProviderNodeTypes`]. +pub trait NodeTypesForProvider +where + Self: NodeTypes< + ChainSpec: EthereumHardforks, + Storage: ChainStorage, + Primitives: FullNodePrimitives, + >, +{ +} + +impl NodeTypesForProvider for T where + T: NodeTypes< + ChainSpec: EthereumHardforks, + Storage: ChainStorage, + Primitives: FullNodePrimitives, + > +{ +} + /// Helper trait keeping common requirements of providers for [`NodeTypesWithDB`]. 
-pub trait ProviderNodeTypes: NodeTypesWithDB {} +pub trait ProviderNodeTypes +where + Self: NodeTypesForProvider + NodeTypesWithDB, +{ +} -impl ProviderNodeTypes for T where T: NodeTypesWithDB {} +impl ProviderNodeTypes for T where T: NodeTypesForProvider + NodeTypesWithDB {} /// The main type for interacting with the blockchain. /// diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 9661ab2057c..43bb1e80942 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,9 +1,9 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, ChangeSetReader, DatabaseProvider, EvmEnvProvider, HeaderProvider, - ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateReader, - StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + ChainSpecProvider, ChangeSetReader, DatabaseProvider, EthStorage, EvmEnvProvider, + HeaderProvider, ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, + StateReader, StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_consensus::{constants::EMPTY_ROOT_HASH, Header}; use alloy_eips::{ @@ -23,7 +23,7 @@ use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypes; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, + Account, Block, BlockWithSenders, Bytecode, EthPrimitives, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; @@ -158,9 +158,10 @@ impl MockEthProvider { pub struct MockNode; impl NodeTypes for MockNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type 
StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } impl DatabaseProviderFactory for MockEthProvider { diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index c0e80930b31..2c3795573c2 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,4 +1,7 @@ -use crate::{providers::StaticFileProvider, HashingWriter, ProviderFactory, TrieWriter}; +use crate::{ + providers::{ProviderNodeTypes, StaticFileProvider}, + HashingWriter, ProviderFactory, TrieWriter, +}; use alloy_primitives::B256; use reth_chainspec::{ChainSpec, MAINNET}; use reth_db::{ @@ -6,7 +9,7 @@ use reth_db::{ DatabaseEnv, }; use reth_errors::ProviderResult; -use reth_node_types::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_node_types::NodeTypesWithDBAdapter; use reth_primitives::{Account, StorageEntry}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -22,10 +25,11 @@ pub use reth_chain_state::test_utils::TestCanonStateSubscriptions; /// Mock [`reth_node_types::NodeTypes`] for testing. pub type MockNodeTypes = reth_node_types::AnyNodeTypesWithEngine< - (), + reth_primitives::EthPrimitives, reth_ethereum_engine_primitives::EthEngineTypes, reth_chainspec::ChainSpec, reth_trie_db::MerklePatriciaTrie, + crate::EthStorage, >; /// Mock [`reth_node_types::NodeTypesWithDB`] for testing. @@ -51,7 +55,7 @@ pub fn create_test_provider_factory_with_chain_spec( } /// Inserts the genesis alloc from the provided chain spec into the trie. 
-pub fn insert_genesis>( +pub fn insert_genesis>( provider_factory: &ProviderFactory, chain_spec: Arc, ) -> ProviderResult { diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 50fb032923d..a0dae1783ea 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -50,7 +50,7 @@ pub trait BlockWriter: Send + Sync { /// Bodies are passed as [`Option`]s, if body is `None` the corresponding block is empty. fn append_block_bodies( &self, - bodies: impl Iterator)>, + bodies: Vec<(BlockNumber, Option)>, ) -> ProviderResult<()>; /// Appends a batch of sealed blocks to the blockchain, including sender information, and diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 2b13f6332f8..c059eb0d6e9 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -18,6 +18,7 @@ reth-db-models.workspace = true reth-db-api.workspace = true reth-execution-types.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs new file mode 100644 index 00000000000..099f61f1bcb --- /dev/null +++ b/crates/storage/storage-api/src/chain.rs @@ -0,0 +1,72 @@ +use crate::DBProvider; +use alloy_primitives::BlockNumber; +use reth_db::{ + cursor::DbCursorRW, + models::{StoredBlockOmmers, StoredBlockWithdrawals}, + tables, + transaction::DbTxMut, +}; +use reth_primitives_traits::{Block, BlockBody, FullNodePrimitives}; +use reth_storage_errors::provider::ProviderResult; + +/// Trait that implements how block bodies are written to the storage. +/// +/// Note: Within the current abstraction, this should only write to tables unrelated to +/// transactions. 
Writing of transactions is handled separately. +#[auto_impl::auto_impl(&, Arc)] +pub trait BlockBodyWriter { + /// Writes a set of block bodies to the storage. + fn write_block_bodies( + &self, + provider: &Provider, + bodies: Vec<(BlockNumber, Option)>, + ) -> ProviderResult<()>; +} + +/// Trait that implements how chain-specific types are written to the storage. +pub trait ChainStorageWriter: + BlockBodyWriter::Body> +{ +} +impl ChainStorageWriter for T where + T: BlockBodyWriter::Body> +{ +} + +/// Ethereum storage implementation. +#[derive(Debug, Default, Clone, Copy)] +pub struct EthStorage; + +impl BlockBodyWriter for EthStorage +where + Provider: DBProvider, +{ + fn write_block_bodies( + &self, + provider: &Provider, + bodies: Vec<(u64, Option)>, + ) -> ProviderResult<()> { + let mut ommers_cursor = provider.tx_ref().cursor_write::()?; + let mut withdrawals_cursor = + provider.tx_ref().cursor_write::()?; + + for (block_number, body) in bodies { + let Some(body) = body else { continue }; + + // Write ommers if any + if !body.ommers.is_empty() { + ommers_cursor.append(block_number, StoredBlockOmmers { ommers: body.ommers })?; + } + + // Write withdrawals if any + if let Some(withdrawals) = body.withdrawals { + if !withdrawals.is_empty() { + withdrawals_cursor + .append(block_number, StoredBlockWithdrawals { withdrawals })?; + } + } + } + + Ok(()) + } +} diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 7b7ad761476..be52a817e93 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -22,6 +22,9 @@ pub use block_id::*; mod block_hash; pub use block_hash::*; +mod chain; +pub use chain::*; + mod header; pub use header::*; diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 704ecb7e3c4..c21e893e05a 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -42,7 +42,8 @@ use reth::{ 
PayloadBuilderConfig, }, network::NetworkHandle, - providers::{CanonStateSubscriptions, StateProviderFactory}, + primitives::EthPrimitives, + providers::{CanonStateSubscriptions, EthStorage, StateProviderFactory}, rpc::eth::EthApi, tasks::TaskManager, transaction_pool::TransactionPool, @@ -227,9 +228,10 @@ struct MyCustomNode; /// Configure the node types impl NodeTypes for MyCustomNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } /// Configure the node types with the custom engine types @@ -254,7 +256,14 @@ pub type MyNodeAddOns = RpcAddOns< /// This provides a preset configuration for the node impl Node for MyCustomNode where - N: FullNodeTypes>, + N: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, + >, { type ComponentsBuilder = ComponentsBuilder< N, From 50c875b33c089db92c4a9c088e0427c003bcefe2 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 19 Nov 2024 15:39:41 +0000 Subject: [PATCH 555/970] feat(trie): short-circuit account/storage reveal in sparse trie (#12663) --- crates/trie/sparse/src/state.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index d7557a7a365..0b0db140115 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -42,6 +42,10 @@ impl SparseStateTrie { account: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { + if self.revealed.contains_key(&account) { + return Ok(()); + } + let mut proof = proof.into_iter().peekable(); let Some(root_node) = self.validate_proof(&mut proof)? 
else { return Ok(()) }; @@ -69,6 +73,10 @@ impl SparseStateTrie { slot: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { + if self.revealed.get(&account).is_some_and(|v| v.contains(&slot)) { + return Ok(()); + } + let mut proof = proof.into_iter().peekable(); let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; From 3408059393bcf03f6727f790ec52f28114e25d02 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 19 Nov 2024 17:01:44 +0100 Subject: [PATCH 556/970] feat(trie): introduce `TRIE_ACCOUNT_RLP_MAX_SIZE` constant (#12638) --- crates/trie/parallel/src/proof.rs | 4 ++-- crates/trie/parallel/src/root.rs | 4 ++-- crates/trie/trie/src/constants.rs | 24 ++++++++++++++++++++++++ crates/trie/trie/src/lib.rs | 4 ++++ crates/trie/trie/src/proof.rs | 4 ++-- crates/trie/trie/src/trie.rs | 4 ++-- crates/trie/trie/src/witness.rs | 4 ++-- 7 files changed, 38 insertions(+), 10 deletions(-) create mode 100644 crates/trie/trie/src/constants.rs diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index bafb9917c60..88321c821a8 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -17,7 +17,7 @@ use reth_trie::{ proof::StorageProof, trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, walker::TrieWalker, - HashBuilder, MultiProof, Nibbles, TrieAccount, TrieInput, + HashBuilder, MultiProof, Nibbles, TrieAccount, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::proof::ProofRetainer; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; @@ -153,7 +153,7 @@ where let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); let mut storages = HashMap::default(); - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::new( walker, hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, diff 
--git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index e432b91062c..7a316d8b15f 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -14,7 +14,7 @@ use reth_trie::{ trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdates, walker::TrieWalker, - HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, + HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::{collections::HashMap, sync::Arc}; @@ -149,7 +149,7 @@ where ); let mut hash_builder = HashBuilder::default().with_updates(retain_updates); - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); while let Some(node) = account_node_iter.try_next().map_err(ProviderError::Database)? { match node { TrieElement::Branch(node) => { diff --git a/crates/trie/trie/src/constants.rs b/crates/trie/trie/src/constants.rs new file mode 100644 index 00000000000..7354290d959 --- /dev/null +++ b/crates/trie/trie/src/constants.rs @@ -0,0 +1,24 @@ +/// The maximum size of RLP encoded trie account in bytes. 
+/// 2 (header) + 4 * 1 (field lens) + 8 (nonce) + 32 * 3 (balance, storage root, code hash) +pub const TRIE_ACCOUNT_RLP_MAX_SIZE: usize = 110; + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{B256, U256}; + use alloy_rlp::Encodable; + use reth_trie_common::TrieAccount; + + #[test] + fn account_rlp_max_size() { + let account = TrieAccount { + nonce: u64::MAX, + balance: U256::MAX, + storage_root: B256::from_slice(&[u8::MAX; 32]), + code_hash: B256::from_slice(&[u8::MAX; 32]), + }; + let mut encoded = Vec::new(); + account.encode(&mut encoded); + assert_eq!(encoded.len(), TRIE_ACCOUNT_RLP_MAX_SIZE); + } +} diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index bb568ae8b8c..26bdc751124 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -13,6 +13,10 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +/// Constants related to the trie computation. +mod constants; +pub use constants::*; + /// The implementation of a container for storing intermediate changes to a trie. /// The container indicates when the trie has been modified. pub mod prefix_set; diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index e99d686aca7..895a3de153d 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -4,7 +4,7 @@ use crate::{ prefix_set::{PrefixSetMut, TriePrefixSetsMut}, trie_cursor::TrieCursorFactory, walker::TrieWalker, - HashBuilder, Nibbles, + HashBuilder, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use alloy_primitives::{ keccak256, @@ -104,7 +104,7 @@ where let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); let mut storages = HashMap::default(); - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::new(walker, hashed_account_cursor); while let Some(account_node) = account_node_iter.try_next()? 
{ match account_node { diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index 1bf8cf1ce79..74faf7bbc60 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -7,7 +7,7 @@ use crate::{ trie_cursor::TrieCursorFactory, updates::{StorageTrieUpdates, TrieUpdates}, walker::TrieWalker, - HashBuilder, Nibbles, TrieAccount, + HashBuilder, Nibbles, TrieAccount, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{keccak256, Address, B256}; @@ -178,7 +178,7 @@ where } }; - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut hashed_entries_walked = 0; let mut updated_storage_nodes = 0; while let Some(node) = account_node_iter.try_next()? { diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 6f6a66a16eb..46f85c4d82e 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -3,7 +3,7 @@ use crate::{ prefix_set::TriePrefixSetsMut, proof::{Proof, StorageProof}, trie_cursor::TrieCursorFactory, - HashedPostState, + HashedPostState, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{ @@ -97,7 +97,7 @@ where // Attempt to compute state root from proofs and gather additional // information for the witness. 
- let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_trie_nodes = BTreeMap::default(); for (hashed_address, hashed_slots) in proof_targets { let storage_multiproof = account_multiproof From 565fd4d1336e7ae24fef4d8f38a0feae9a712f47 Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Wed, 20 Nov 2024 00:09:22 +0700 Subject: [PATCH 557/970] chore(exex): emit warn log when WAL grows beyond a certain number of blocks (#12634) --- crates/exex/exex/src/manager.rs | 15 ++++++++++++++- crates/exex/exex/src/wal/cache.rs | 5 +++++ crates/exex/exex/src/wal/mod.rs | 5 +++++ 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index a17de660862..e8902e0f352 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -10,7 +10,7 @@ use reth_chainspec::Head; use reth_metrics::{metrics::Counter, Metrics}; use reth_primitives::SealedHeader; use reth_provider::HeaderProvider; -use reth_tracing::tracing::debug; +use reth_tracing::tracing::{debug, warn}; use std::{ collections::VecDeque, fmt::Debug, @@ -35,6 +35,12 @@ use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; /// or 17 minutes of 1-second blocks. pub const DEFAULT_EXEX_MANAGER_CAPACITY: usize = 1024; +/// The maximum number of blocks allowed in the WAL before emitting a warning. +/// +/// This constant defines the threshold for the Write-Ahead Log (WAL) size. If the number of blocks +/// in the WAL exceeds this limit, a warning is logged to indicate potential issues. +pub const WAL_BLOCKS_WARNING: usize = 128; + /// The source of the notification. 
/// /// This distinguishment is needed to not commit any pipeline notificatations to [WAL](`Wal`), @@ -377,6 +383,13 @@ where .unwrap(); self.wal.finalize(lowest_finished_height)?; + if self.wal.num_blocks() > WAL_BLOCKS_WARNING { + warn!( + target: "exex::manager", + blocks = ?self.wal.num_blocks(), + "WAL contains too many blocks and is not getting cleared. That will lead to increased disk space usage. Check that you emit the FinishedHeight event from your ExExes." + ); + } } else { let unfinalized_exexes = exex_finished_heights .into_iter() diff --git a/crates/exex/exex/src/wal/cache.rs b/crates/exex/exex/src/wal/cache.rs index 882b65e1589..86943f33cfa 100644 --- a/crates/exex/exex/src/wal/cache.rs +++ b/crates/exex/exex/src/wal/cache.rs @@ -35,6 +35,11 @@ impl BlockCache { self.notification_max_blocks.is_empty() } + /// Returns the number of blocks in the cache. + pub(super) fn num_blocks(&self) -> usize { + self.committed_blocks.len() + } + /// Removes all files from the cache that has notifications with a tip block less than or equal /// to the given block number. /// diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index a2e8ee8e6c6..41a7829a70f 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -66,6 +66,11 @@ impl Wal { ) -> eyre::Result> + '_>> { self.inner.iter_notifications() } + + /// Returns the number of blocks in the WAL. + pub fn num_blocks(&self) -> usize { + self.inner.block_cache().num_blocks() + } } /// Inner type for the WAL. 
From d49f91378b19f17e5592eb56aca835656f234252 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 19 Nov 2024 18:31:56 +0100 Subject: [PATCH 558/970] chore: use jsonrpsee server crate directly (#12673) --- Cargo.lock | 2 +- Cargo.toml | 1 + crates/node/metrics/Cargo.toml | 2 +- crates/node/metrics/src/server.rs | 10 +++++----- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1b38e056655..fb9018a55b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8128,7 +8128,7 @@ version = "1.1.2" dependencies = [ "eyre", "http", - "jsonrpsee", + "jsonrpsee-server", "metrics", "metrics-exporter-prometheus", "metrics-process", diff --git a/Cargo.toml b/Cargo.toml index 58cdd1f8ca7..002b85f125a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -566,6 +566,7 @@ if-addrs = "0.13" # rpc jsonrpsee = "0.24" jsonrpsee-core = "0.24" +jsonrpsee-server = "0.24" jsonrpsee-http-client = "0.24" jsonrpsee-types = "0.24" diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml index a823db9b467..7e271f93ce5 100644 --- a/crates/node/metrics/Cargo.toml +++ b/crates/node/metrics/Cargo.toml @@ -21,7 +21,7 @@ metrics-util.workspace = true tokio.workspace = true -jsonrpsee = { workspace = true, features = ["server"] } +jsonrpsee-server.workspace = true http.workspace = true tower.workspace = true diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index 22c064d62f8..313329fb56a 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -113,12 +113,12 @@ impl MetricServer { let mut shutdown = signal.clone().ignore_guard(); tokio::task::spawn(async move { - if let Err(error) = - jsonrpsee::server::serve_with_graceful_shutdown(io, service, &mut shutdown) + let _ = + jsonrpsee_server::serve_with_graceful_shutdown(io, service, &mut shutdown) .await - { - tracing::debug!(%error, "failed to serve request") - } + .inspect_err( + |error| tracing::debug!(%error, "failed to serve request"), + 
); }); } }); From 2b21bcf42546746bc5d4581c562f49b36071e3a7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Nov 2024 18:44:10 +0100 Subject: [PATCH 559/970] chore(sdk): Add adapter type for `NodePrimitives::Receipt` (#12674) --- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives-traits/src/node.rs | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 819825d635f..79dff4ae36b 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -80,7 +80,7 @@ pub use size::InMemorySize; /// Node traits pub mod node; -pub use node::{FullNodePrimitives, NodePrimitives}; +pub use node::{FullNodePrimitives, NodePrimitives, ReceiptTy}; /// Helper trait that requires arbitrary implementation if the feature is enabled. #[cfg(any(feature = "test-utils", feature = "arbitrary"))] diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index c11a19a105a..180920d3934 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -73,3 +73,6 @@ where type TxType = T::TxType; type Receipt = T::Receipt; } + +/// Helper adapter type for accessing [`NodePrimitives`] receipt type. 
+pub type ReceiptTy = ::Receipt; From 37181c357a2e4e91fb7048671cdb09f5b5afdaaf Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 19 Nov 2024 18:58:46 +0100 Subject: [PATCH 560/970] feat(executor): add init methods to set TxEnv overrides (#12551) Co-authored-by: Matthias Seitz --- crates/ethereum/evm/src/execute.rs | 14 ++++++++-- crates/evm/src/either.rs | 8 ++++++ crates/evm/src/execute.rs | 44 +++++++++++++++++++++++++++--- crates/evm/src/lib.rs | 15 ++++++++++ crates/optimism/evm/src/execute.rs | 14 ++++++++-- 5 files changed, 87 insertions(+), 8 deletions(-) diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index fa14e260d65..e339268a99a 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -18,7 +18,7 @@ use reth_evm::{ }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, + ConfigureEvm, TxEnvOverrides, }; use reth_primitives::{BlockWithSenders, Receipt}; use reth_revm::db::State; @@ -83,6 +83,8 @@ where chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Optional overrides for the transactions environment. + tx_env_overrides: Option>, /// Current state for block execution. state: State, /// Utility to call system smart contracts. 
@@ -96,7 +98,7 @@ where /// Creates a new [`EthExecutionStrategy`] pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); - Self { state, chain_spec, evm_config, system_caller } + Self { state, chain_spec, evm_config, system_caller, tx_env_overrides: None } } } @@ -130,6 +132,10 @@ where { type Error = BlockExecutionError; + fn init(&mut self, tx_env_overrides: Box) { + self.tx_env_overrides = Some(tx_env_overrides); + } + fn apply_pre_execution_changes( &mut self, block: &BlockWithSenders, @@ -172,6 +178,10 @@ where self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); + if let Some(tx_env_overrides) = &mut self.tx_env_overrides { + tx_env_overrides.apply(evm.tx_mut()); + } + // Execute transaction. let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 82f84301f03..85bc7e7f9a7 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -6,6 +6,7 @@ use crate::{ execute::{BatchExecutor, BlockExecutorProvider, Executor}, system_calls::OnStateHook, }; +use alloc::boxed::Box; use alloy_primitives::BlockNumber; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; @@ -70,6 +71,13 @@ where type Output = BlockExecutionOutput; type Error = BlockExecutionError; + fn init(&mut self, tx_env_overrides: Box) { + match self { + Self::Left(a) => a.init(tx_env_overrides), + Self::Right(b) => b.init(tx_env_overrides), + } + } + fn execute(self, input: Self::Input<'_>) -> Result { match self { Self::Left(a) => a.execute(input), diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 677a15dfa1b..42c756f4d93 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -6,9 +6,8 @@ pub use reth_execution_errors::{ }; pub use 
reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; pub use reth_storage_errors::provider::ProviderError; -use revm::db::states::bundle_state::BundleRetention; -use crate::system_calls::OnStateHook; +use crate::{system_calls::OnStateHook, TxEnvOverrides}; use alloc::{boxed::Box, vec::Vec}; use alloy_eips::eip7685::Requests; use alloy_primitives::BlockNumber; @@ -17,7 +16,10 @@ use reth_consensus::ConsensusError; use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_revm::batch::BlockBatchRecord; -use revm::{db::BundleState, State}; +use revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + State, +}; use revm_primitives::{db::Database, U256}; /// A general purpose executor trait that executes an input (e.g. block) and produces an output @@ -32,6 +34,9 @@ pub trait Executor { /// The error type returned by the executor. type Error; + /// Initialize the executor with the given transaction environment overrides. + fn init(&mut self, _tx_env_overrides: Box) {} + /// Consumes the type and executes the block. /// /// # Note @@ -184,6 +189,9 @@ where /// The error type returned by this strategy's methods. type Error: From + core::error::Error; + /// Initialize the strategy with the given transaction environment overrides. + fn init(&mut self, _tx_env_overrides: Box) {} + /// Applies any necessary changes before executing the block's transactions. 
fn apply_pre_execution_changes( &mut self, @@ -329,6 +337,10 @@ where type Output = BlockExecutionOutput; type Error = S::Error; + fn init(&mut self, env_overrides: Box) { + self.strategy.init(env_overrides); + } + fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; @@ -480,7 +492,7 @@ mod tests { use alloy_primitives::U256; use reth_chainspec::{ChainSpec, MAINNET}; use revm::db::{CacheDB, EmptyDBTyped}; - use revm_primitives::bytes; + use revm_primitives::{bytes, TxEnv}; use std::sync::Arc; #[derive(Clone, Default)] @@ -703,4 +715,28 @@ mod tests { assert_eq!(block_execution_output.requests, expected_apply_post_execution_changes_result); assert_eq!(block_execution_output.state, expected_finish_result); } + + #[test] + fn test_tx_env_overrider() { + let strategy_factory = TestExecutorStrategyFactory { + execute_transactions_result: ExecuteOutput { + receipts: vec![Receipt::default()], + gas_used: 10, + }, + apply_post_execution_changes_result: Requests::new(vec![bytes!("deadbeef")]), + finish_result: BundleState::default(), + }; + let provider = BasicBlockExecutorProvider::new(strategy_factory); + let db = CacheDB::>::default(); + + // if we want to apply tx env overrides the executor must be mut. + let mut executor = provider.executor(db); + // execute consumes the executor, so we can only call it once. + // let result = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); + executor.init(Box::new(|tx_env: &mut TxEnv| { + tx_env.nonce.take(); + })); + let result = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); + assert!(result.is_ok()); + } } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index d20dbe4594a..f01701d5989 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -212,3 +212,18 @@ pub struct NextBlockEnvAttributes { /// The randomness value for the next block. 
pub prev_randao: B256, } + +/// Function hook that allows to modify a transaction environment. +pub trait TxEnvOverrides { + /// Apply the overrides by modifying the given `TxEnv`. + fn apply(&mut self, env: &mut TxEnv); +} + +impl TxEnvOverrides for F +where + F: FnMut(&mut TxEnv), +{ + fn apply(&mut self, env: &mut TxEnv) { + self(env) + } +} diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index b4c2e16f593..a9a4b301573 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -15,7 +15,7 @@ use reth_evm::{ }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, + ConfigureEvm, TxEnvOverrides, }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; @@ -78,6 +78,8 @@ where chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Optional overrides for the transactions environment. + tx_env_overrides: Option>, /// Current state for block execution. state: State, /// Utility to call system smart contracts. @@ -91,7 +93,7 @@ where /// Creates a new [`OpExecutionStrategy`] pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); - Self { state, chain_spec, evm_config, system_caller } + Self { state, chain_spec, evm_config, system_caller, tx_env_overrides: None } } } @@ -119,6 +121,10 @@ where { type Error = BlockExecutionError; + fn init(&mut self, tx_env_overrides: Box) { + self.tx_env_overrides = Some(tx_env_overrides); + } + fn apply_pre_execution_changes( &mut self, block: &BlockWithSenders, @@ -197,6 +203,10 @@ where self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); + if let Some(tx_env_overrides) = &mut self.tx_env_overrides { + tx_env_overrides.apply(evm.tx_mut()); + } + // Execute transaction. 
let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); From 8c467e42917d19d6fd7efe729283cc7f6c642584 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 19 Nov 2024 19:07:17 +0100 Subject: [PATCH 561/970] chore: genericify some net tx types (#12677) --- crates/net/network/src/transactions/mod.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 0ccb4252ac3..125818da33a 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1449,9 +1449,9 @@ impl PropagationMode { /// A transaction that's about to be propagated to multiple peers. #[derive(Debug, Clone)] -struct PropagateTransaction { +struct PropagateTransaction { size: usize, - transaction: Arc, + transaction: Arc, } // === impl PropagateTransaction === @@ -1477,9 +1477,9 @@ impl PropagateTransaction { /// Helper type to construct the appropriate message to send to the peer based on whether the peer /// should receive them in full or as pooled #[derive(Debug, Clone)] -enum PropagateTransactionsBuilder { +enum PropagateTransactionsBuilder { Pooled(PooledTransactionsHashesBuilder), - Full(FullTransactionsBuilder), + Full(FullTransactionsBuilder), } impl PropagateTransactionsBuilder { @@ -1528,11 +1528,11 @@ impl PropagateTransactionsBuilder { } /// Represents how the transactions should be sent to a peer if any. -struct PropagateTransactions { +struct PropagateTransactions { /// The pooled transaction hashes to send. pooled: Option, /// The transactions to send in full. - full: Option>>, + full: Option>>, } /// Helper type for constructing the full transaction message that enforces the @@ -1540,11 +1540,11 @@ struct PropagateTransactions { /// and enforces other propagation rules for EIP-4844 and tracks those transactions that can't be /// broadcasted in full. 
#[derive(Debug, Clone)] -struct FullTransactionsBuilder { +struct FullTransactionsBuilder { /// The soft limit to enforce for a single broadcast message of full transactions. total_size: usize, /// All transactions to be broadcasted. - transactions: Vec>, + transactions: Vec>, /// Transactions that didn't fit into the broadcast message pooled: PooledTransactionsHashesBuilder, } From 7c7baca9807e9a9556cb217d3df8cd03c21ba4ed Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 19 Nov 2024 19:25:01 +0100 Subject: [PATCH 562/970] chore: group tx manager functions (#12679) --- crates/net/network/src/network.rs | 2 +- crates/net/network/src/transactions/mod.rs | 831 +++++++++++---------- 2 files changed, 417 insertions(+), 416 deletions(-) diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 1715fa63e2f..2fa3fd90efe 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -252,7 +252,7 @@ impl PeersInfo for NetworkHandle { } } -impl Peers for NetworkHandle { +impl Peers for NetworkHandle { fn add_trusted_peer_id(&self, peer: PeerId) { self.send_message(NetworkHandleMessage::AddTrustedPeerId(peer)); } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 125818da33a..241f01ae8ab 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -313,15 +313,367 @@ impl TransactionsManager { } } -// === impl TransactionsManager === +impl TransactionsManager { + /// Returns a new handle that can send commands to this type. + pub fn handle(&self) -> TransactionsHandle { + TransactionsHandle { manager_tx: self.command_tx.clone() } + } + + /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns + /// `false` if [`TransactionsManager`] is operating close to full capacity. 
+ fn has_capacity_for_fetching_pending_hashes(&self) -> bool { + self.pending_pool_imports_info + .has_capacity(self.pending_pool_imports_info.max_pending_pool_imports) && + self.transaction_fetcher.has_capacity_for_fetching_pending_hashes() + } + + fn report_peer_bad_transactions(&self, peer_id: PeerId) { + self.report_peer(peer_id, ReputationChangeKind::BadTransactions); + self.metrics.reported_bad_transactions.increment(1); + } + + fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) { + trace!(target: "net::tx", ?peer_id, ?kind, "reporting reputation change"); + self.network.reputation_change(peer_id, kind); + } + + fn report_already_seen(&self, peer_id: PeerId) { + trace!(target: "net::tx", ?peer_id, "Penalizing peer for already seen transaction"); + self.network.reputation_change(peer_id, ReputationChangeKind::AlreadySeenTransaction); + } + + /// Clear the transaction + fn on_good_import(&mut self, hash: TxHash) { + self.transactions_by_peers.remove(&hash); + } + + /// Penalize the peers that intentionally sent the bad transaction, and cache it to avoid + /// fetching or importing it again. + /// + /// Errors that count as bad transactions are: + /// + /// - intrinsic gas too low + /// - exceeds gas limit + /// - gas uint overflow + /// - exceeds max init code size + /// - oversized data + /// - signer account has bytecode + /// - chain id mismatch + /// - old legacy chain id + /// - tx type not supported + /// + /// (and additionally for blobs txns...) 
+ /// + /// - no blobs + /// - too many blobs + /// - invalid kzg proof + /// - kzg error + /// - not blob transaction (tx type mismatch) + /// - wrong versioned kzg commitment hash + fn on_bad_import(&mut self, err: PoolError) { + let peers = self.transactions_by_peers.remove(&err.hash); + + // if we're _currently_ syncing, we ignore a bad transaction + if !err.is_bad_transaction() || self.network.is_syncing() { + return + } + // otherwise we penalize the peer that sent the bad transaction, with the assumption that + // the peer should have known that this transaction is bad (e.g. violating consensus rules) + if let Some(peers) = peers { + for peer_id in peers { + self.report_peer_bad_transactions(peer_id); + } + } + self.metrics.bad_imports.increment(1); + self.bad_imports.insert(err.hash); + } + + /// Runs an operation to fetch hashes that are cached in [`TransactionFetcher`]. + fn on_fetch_hashes_pending_fetch(&mut self) { + // try drain transaction hashes pending fetch + let info = &self.pending_pool_imports_info; + let max_pending_pool_imports = info.max_pending_pool_imports; + let has_capacity_wrt_pending_pool_imports = + |divisor| info.has_capacity(max_pending_pool_imports / divisor); + + self.transaction_fetcher + .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports); + } + + fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) { + let kind = match req_err { + RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol, + RequestError::Timeout => ReputationChangeKind::Timeout, + RequestError::ChannelClosed | RequestError::ConnectionDropped => { + // peer is already disconnected + return + } + RequestError::BadResponse => return self.report_peer_bad_transactions(peer_id), + }; + self.report_peer(peer_id, kind); + } + + #[inline] + fn update_poll_metrics(&self, start: Instant, poll_durations: TxManagerPollDurations) { + let metrics = &self.metrics; + + let TxManagerPollDurations { + acc_network_events, + 
acc_pending_imports, + acc_tx_events, + acc_imported_txns, + acc_fetch_events, + acc_pending_fetch, + acc_cmds, + } = poll_durations; + + // update metrics for whole poll function + metrics.duration_poll_tx_manager.set(start.elapsed().as_secs_f64()); + // update metrics for nested expressions + metrics.acc_duration_poll_network_events.set(acc_network_events.as_secs_f64()); + metrics.acc_duration_poll_pending_pool_imports.set(acc_pending_imports.as_secs_f64()); + metrics.acc_duration_poll_transaction_events.set(acc_tx_events.as_secs_f64()); + metrics.acc_duration_poll_imported_transactions.set(acc_imported_txns.as_secs_f64()); + metrics.acc_duration_poll_fetch_events.set(acc_fetch_events.as_secs_f64()); + metrics.acc_duration_fetch_pending_hashes.set(acc_pending_fetch.as_secs_f64()); + metrics.acc_duration_poll_commands.set(acc_cmds.as_secs_f64()); + } +} + +impl TransactionsManager +where + Pool: TransactionPool, + N: NetworkPrimitives, +{ + /// Processes a batch import results. + fn on_batch_import_result(&mut self, batch_results: Vec>) { + for res in batch_results { + match res { + Ok(hash) => { + self.on_good_import(hash); + } + Err(err) => { + self.on_bad_import(err); + } + } + } + } + + /// Request handler for an incoming `NewPooledTransactionHashes` + fn on_new_pooled_transaction_hashes( + &mut self, + peer_id: PeerId, + msg: NewPooledTransactionHashes, + ) { + // If the node is initially syncing, ignore transactions + if self.network.is_initially_syncing() { + return + } + if self.network.tx_gossip_disabled() { + return + } + + // get handle to peer's session, if the session is still active + let Some(peer) = self.peers.get_mut(&peer_id) else { + trace!( + peer_id = format!("{peer_id:#}"), + ?msg, + "discarding announcement from inactive peer" + ); + + return + }; + let client = peer.client_version.clone(); + + // keep track of the transactions the peer knows + let mut count_txns_already_seen_by_peer = 0; + for tx in msg.iter_hashes().copied() { + if 
!peer.seen_transactions.insert(tx) { + count_txns_already_seen_by_peer += 1; + } + } + if count_txns_already_seen_by_peer > 0 { + // this may occur if transactions are sent or announced to a peer, at the same time as + // the peer sends/announces those hashes to us. this is because, marking + // txns as seen by a peer is done optimistically upon sending them to the + // peer. + self.metrics.messages_with_hashes_already_seen_by_peer.increment(1); + self.metrics + .occurrences_hash_already_seen_by_peer + .increment(count_txns_already_seen_by_peer); + + trace!(target: "net::tx", + %count_txns_already_seen_by_peer, + peer_id=format!("{peer_id:#}"), + ?client, + "Peer sent hashes that have already been marked as seen by peer" + ); + + self.report_already_seen(peer_id); + } + + // 1. filter out spam + let (validation_outcome, mut partially_valid_msg) = + self.transaction_fetcher.filter_valid_message.partially_filter_valid_entries(msg); + + if validation_outcome == FilterOutcome::ReportPeer { + self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); + } + + // 2. filter out transactions pending import to pool + partially_valid_msg.retain_by_hash(|hash| !self.transactions_by_peers.contains_key(hash)); + + // 3. filter out known hashes + // + // known txns have already been successfully fetched or received over gossip. + // + // most hashes will be filtered out here since this the mempool protocol is a gossip + // protocol, healthy peers will send many of the same hashes. + // + let hashes_count_pre_pool_filter = partially_valid_msg.len(); + self.pool.retain_unknown(&mut partially_valid_msg); + if hashes_count_pre_pool_filter > partially_valid_msg.len() { + let already_known_hashes_count = + hashes_count_pre_pool_filter - partially_valid_msg.len(); + self.metrics + .occurrences_hashes_already_in_pool + .increment(already_known_hashes_count as u64); + } + + if partially_valid_msg.is_empty() { + // nothing to request + return + } + + // 4. 
filter out invalid entries (spam) + // + // validates messages with respect to the given network, e.g. allowed tx types + // + let (validation_outcome, mut valid_announcement_data) = if partially_valid_msg + .msg_version() + .expect("partially valid announcement should have version") + .is_eth68() + { + // validate eth68 announcement data + self.transaction_fetcher + .filter_valid_message + .filter_valid_entries_68(partially_valid_msg) + } else { + // validate eth66 announcement data + self.transaction_fetcher + .filter_valid_message + .filter_valid_entries_66(partially_valid_msg) + }; + + if validation_outcome == FilterOutcome::ReportPeer { + self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); + } + + if valid_announcement_data.is_empty() { + // no valid announcement data + return + } + + // 5. filter out already seen unknown hashes + // + // seen hashes are already in the tx fetcher, pending fetch. + // + // for any seen hashes add the peer as fallback. unseen hashes are loaded into the tx + // fetcher, hence they should be valid at this point. 
+ let bad_imports = &self.bad_imports; + self.transaction_fetcher.filter_unseen_and_pending_hashes( + &mut valid_announcement_data, + |hash| bad_imports.contains(hash), + &peer_id, + |peer_id| self.peers.contains_key(&peer_id), + &client, + ); + + if valid_announcement_data.is_empty() { + // nothing to request + return + } + + trace!(target: "net::tx::propagation", + peer_id=format!("{peer_id:#}"), + hashes_len=valid_announcement_data.iter().count(), + hashes=?valid_announcement_data.keys().collect::>(), + msg_version=%valid_announcement_data.msg_version(), + client_version=%client, + "received previously unseen and pending hashes in announcement from peer" + ); + + // only send request for hashes to idle peer, otherwise buffer hashes storing peer as + // fallback + if !self.transaction_fetcher.is_idle(&peer_id) { + // load message version before announcement data is destructed in packing + let msg_version = valid_announcement_data.msg_version(); + let (hashes, _version) = valid_announcement_data.into_request_hashes(); + + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + hashes=?*hashes, + %msg_version, + %client, + "buffering hashes announced by busy peer" + ); + + self.transaction_fetcher.buffer_hashes(hashes, Some(peer_id)); + + return + } + + // load message version before announcement data type is destructed in packing + let msg_version = valid_announcement_data.msg_version(); + // + // demand recommended soft limit on response, however the peer may enforce an arbitrary + // limit on the response (2MB) + // + // request buffer is shrunk via call to pack request! 
+ let init_capacity_req = + self.transaction_fetcher.approx_capacity_get_pooled_transactions_req(msg_version); + let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req); + let surplus_hashes = + self.transaction_fetcher.pack_request(&mut hashes_to_request, valid_announcement_data); -impl TransactionsManager -where - Pool: TransactionPool, -{ - /// Returns a new handle that can send commands to this type. - pub fn handle(&self) -> TransactionsHandle { - TransactionsHandle { manager_tx: self.command_tx.clone() } + if !surplus_hashes.is_empty() { + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + surplus_hashes=?*surplus_hashes, + %msg_version, + %client, + "some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes" + ); + + self.transaction_fetcher.buffer_hashes(surplus_hashes, Some(peer_id)); + } + + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + hashes=?*hashes_to_request, + %msg_version, + %client, + "sending hashes in `GetPooledTransactions` request to peer's session" + ); + + // request the missing transactions + // + // get handle to peer's session again, at this point we know it exists + let Some(peer) = self.peers.get_mut(&peer_id) else { return }; + if let Some(failed_to_request_hashes) = + self.transaction_fetcher.request_transactions_from_peer(hashes_to_request, peer) + { + let conn_eth_version = peer.version; + + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + failed_to_request_hashes=?*failed_to_request_hashes, + %conn_eth_version, + %client, + "sending `GetPooledTransactions` request to peer's session failed, buffering hashes" + ); + self.transaction_fetcher.buffer_hashes(failed_to_request_hashes, Some(peer_id)); + } } } @@ -329,32 +681,6 @@ impl TransactionsManager where Pool: TransactionPool + 'static, { - #[inline] - fn update_poll_metrics(&self, start: Instant, poll_durations: TxManagerPollDurations) { - let metrics = &self.metrics; - 
- let TxManagerPollDurations { - acc_network_events, - acc_pending_imports, - acc_tx_events, - acc_imported_txns, - acc_fetch_events, - acc_pending_fetch, - acc_cmds, - } = poll_durations; - - // update metrics for whole poll function - metrics.duration_poll_tx_manager.set(start.elapsed().as_secs_f64()); - // update metrics for nested expressions - metrics.acc_duration_poll_network_events.set(acc_network_events.as_secs_f64()); - metrics.acc_duration_poll_pending_pool_imports.set(acc_pending_imports.as_secs_f64()); - metrics.acc_duration_poll_transaction_events.set(acc_tx_events.as_secs_f64()); - metrics.acc_duration_poll_imported_transactions.set(acc_imported_txns.as_secs_f64()); - metrics.acc_duration_poll_fetch_events.set(acc_fetch_events.as_secs_f64()); - metrics.acc_duration_fetch_pending_hashes.set(acc_pending_fetch.as_secs_f64()); - metrics.acc_duration_poll_commands.set(acc_cmds.as_secs_f64()); - } - /// Request handler for an incoming request for transactions fn on_get_pooled_transactions( &mut self, @@ -575,294 +901,77 @@ where } // send full transactions - self.network.send_transactions(peer_id, new_full_transactions); - } - - // Update propagated transactions metrics - self.metrics.propagated_transactions.increment(propagated.0.len() as u64); - - Some(propagated) - } - - /// Propagate the transaction hashes to the given peer - /// - /// Note: This will only send the hashes for transactions that exist in the pool. - fn propagate_hashes_to( - &mut self, - hashes: Vec, - peer_id: PeerId, - propagation_mode: PropagationMode, - ) { - trace!(target: "net::tx", "Start propagating transactions as hashes"); - - // This fetches a transactions from the pool, including the blob transactions, which are - // only ever sent as hashes. 
- let propagated = { - let Some(peer) = self.peers.get_mut(&peer_id) else { - // no such peer - return - }; - - let to_propagate: Vec = - self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(); - - let mut propagated = PropagatedTransactions::default(); - - // check if transaction is known to peer - let mut hashes = PooledTransactionsHashesBuilder::new(peer.version); - - if propagation_mode.is_forced() { - hashes.extend(to_propagate) - } else { - for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { - // Include if the peer hasn't seen it - hashes.push(&tx); - } - } - } - - let new_pooled_hashes = hashes.build(); - - if new_pooled_hashes.is_empty() { - // nothing to propagate - return - } - - for hash in new_pooled_hashes.iter_hashes().copied() { - propagated.0.entry(hash).or_default().push(PropagateKind::Hash(peer_id)); - } - - trace!(target: "net::tx::propagation", ?peer_id, ?new_pooled_hashes, "Propagating transactions to peer"); - - // send hashes of transactions - self.network.send_transactions_hashes(peer_id, new_pooled_hashes); - - // Update propagated transactions metrics - self.metrics.propagated_transactions.increment(propagated.0.len() as u64); - - propagated - }; - - // notify pool so events get fired - self.pool.on_propagated(propagated); - } - - /// Request handler for an incoming `NewPooledTransactionHashes` - fn on_new_pooled_transaction_hashes( - &mut self, - peer_id: PeerId, - msg: NewPooledTransactionHashes, - ) { - // If the node is initially syncing, ignore transactions - if self.network.is_initially_syncing() { - return - } - if self.network.tx_gossip_disabled() { - return - } - - // get handle to peer's session, if the session is still active - let Some(peer) = self.peers.get_mut(&peer_id) else { - trace!( - peer_id = format!("{peer_id:#}"), - ?msg, - "discarding announcement from inactive peer" - ); - - return - }; - let client = peer.client_version.clone(); - - // keep track of the transactions 
the peer knows - let mut count_txns_already_seen_by_peer = 0; - for tx in msg.iter_hashes().copied() { - if !peer.seen_transactions.insert(tx) { - count_txns_already_seen_by_peer += 1; - } - } - if count_txns_already_seen_by_peer > 0 { - // this may occur if transactions are sent or announced to a peer, at the same time as - // the peer sends/announces those hashes to us. this is because, marking - // txns as seen by a peer is done optimistically upon sending them to the - // peer. - self.metrics.messages_with_hashes_already_seen_by_peer.increment(1); - self.metrics - .occurrences_hash_already_seen_by_peer - .increment(count_txns_already_seen_by_peer); - - trace!(target: "net::tx", - %count_txns_already_seen_by_peer, - peer_id=format!("{peer_id:#}"), - ?client, - "Peer sent hashes that have already been marked as seen by peer" - ); - - self.report_already_seen(peer_id); - } - - // 1. filter out spam - let (validation_outcome, mut partially_valid_msg) = - self.transaction_fetcher.filter_valid_message.partially_filter_valid_entries(msg); - - if validation_outcome == FilterOutcome::ReportPeer { - self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); - } - - // 2. filter out transactions pending import to pool - partially_valid_msg.retain_by_hash(|hash| !self.transactions_by_peers.contains_key(hash)); - - // 3. filter out known hashes - // - // known txns have already been successfully fetched or received over gossip. - // - // most hashes will be filtered out here since this the mempool protocol is a gossip - // protocol, healthy peers will send many of the same hashes. 
- // - let hashes_count_pre_pool_filter = partially_valid_msg.len(); - self.pool.retain_unknown(&mut partially_valid_msg); - if hashes_count_pre_pool_filter > partially_valid_msg.len() { - let already_known_hashes_count = - hashes_count_pre_pool_filter - partially_valid_msg.len(); - self.metrics - .occurrences_hashes_already_in_pool - .increment(already_known_hashes_count as u64); - } - - if partially_valid_msg.is_empty() { - // nothing to request - return - } - - // 4. filter out invalid entries (spam) - // - // validates messages with respect to the given network, e.g. allowed tx types - // - let (validation_outcome, mut valid_announcement_data) = if partially_valid_msg - .msg_version() - .expect("partially valid announcement should have version") - .is_eth68() - { - // validate eth68 announcement data - self.transaction_fetcher - .filter_valid_message - .filter_valid_entries_68(partially_valid_msg) - } else { - // validate eth66 announcement data - self.transaction_fetcher - .filter_valid_message - .filter_valid_entries_66(partially_valid_msg) - }; - - if validation_outcome == FilterOutcome::ReportPeer { - self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); + self.network.send_transactions(peer_id, new_full_transactions); } - if valid_announcement_data.is_empty() { - // no valid announcement data - return - } + // Update propagated transactions metrics + self.metrics.propagated_transactions.increment(propagated.0.len() as u64); - // 5. filter out already seen unknown hashes - // - // seen hashes are already in the tx fetcher, pending fetch. - // - // for any seen hashes add the peer as fallback. unseen hashes are loaded into the tx - // fetcher, hence they should be valid at this point. 
- let bad_imports = &self.bad_imports; - self.transaction_fetcher.filter_unseen_and_pending_hashes( - &mut valid_announcement_data, - |hash| bad_imports.contains(hash), - &peer_id, - |peer_id| self.peers.contains_key(&peer_id), - &client, - ); + Some(propagated) + } - if valid_announcement_data.is_empty() { - // nothing to request - return - } + /// Propagate the transaction hashes to the given peer + /// + /// Note: This will only send the hashes for transactions that exist in the pool. + fn propagate_hashes_to( + &mut self, + hashes: Vec, + peer_id: PeerId, + propagation_mode: PropagationMode, + ) { + trace!(target: "net::tx", "Start propagating transactions as hashes"); - trace!(target: "net::tx::propagation", - peer_id=format!("{peer_id:#}"), - hashes_len=valid_announcement_data.iter().count(), - hashes=?valid_announcement_data.keys().collect::>(), - msg_version=%valid_announcement_data.msg_version(), - client_version=%client, - "received previously unseen and pending hashes in announcement from peer" - ); + // This fetches a transactions from the pool, including the blob transactions, which are + // only ever sent as hashes. 
+ let propagated = { + let Some(peer) = self.peers.get_mut(&peer_id) else { + // no such peer + return + }; - // only send request for hashes to idle peer, otherwise buffer hashes storing peer as - // fallback - if !self.transaction_fetcher.is_idle(&peer_id) { - // load message version before announcement data is destructed in packing - let msg_version = valid_announcement_data.msg_version(); - let (hashes, _version) = valid_announcement_data.into_request_hashes(); + let to_propagate: Vec = + self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(); - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - hashes=?*hashes, - %msg_version, - %client, - "buffering hashes announced by busy peer" - ); + let mut propagated = PropagatedTransactions::default(); - self.transaction_fetcher.buffer_hashes(hashes, Some(peer_id)); + // check if transaction is known to peer + let mut hashes = PooledTransactionsHashesBuilder::new(peer.version); - return - } + if propagation_mode.is_forced() { + hashes.extend(to_propagate) + } else { + for tx in to_propagate { + if !peer.seen_transactions.contains(&tx.hash()) { + // Include if the peer hasn't seen it + hashes.push(&tx); + } + } + } - // load message version before announcement data type is destructed in packing - let msg_version = valid_announcement_data.msg_version(); - // - // demand recommended soft limit on response, however the peer may enforce an arbitrary - // limit on the response (2MB) - // - // request buffer is shrunk via call to pack request! 
- let init_capacity_req = - self.transaction_fetcher.approx_capacity_get_pooled_transactions_req(msg_version); - let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req); - let surplus_hashes = - self.transaction_fetcher.pack_request(&mut hashes_to_request, valid_announcement_data); + let new_pooled_hashes = hashes.build(); - if !surplus_hashes.is_empty() { - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - surplus_hashes=?*surplus_hashes, - %msg_version, - %client, - "some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes" - ); + if new_pooled_hashes.is_empty() { + // nothing to propagate + return + } - self.transaction_fetcher.buffer_hashes(surplus_hashes, Some(peer_id)); - } + for hash in new_pooled_hashes.iter_hashes().copied() { + propagated.0.entry(hash).or_default().push(PropagateKind::Hash(peer_id)); + } - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - hashes=?*hashes_to_request, - %msg_version, - %client, - "sending hashes in `GetPooledTransactions` request to peer's session" - ); + trace!(target: "net::tx::propagation", ?peer_id, ?new_pooled_hashes, "Propagating transactions to peer"); - // request the missing transactions - // - // get handle to peer's session again, at this point we know it exists - let Some(peer) = self.peers.get_mut(&peer_id) else { return }; - if let Some(failed_to_request_hashes) = - self.transaction_fetcher.request_transactions_from_peer(hashes_to_request, peer) - { - let conn_eth_version = peer.version; + // send hashes of transactions + self.network.send_transactions_hashes(peer_id, new_pooled_hashes); - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - failed_to_request_hashes=?*failed_to_request_hashes, - %conn_eth_version, - %client, - "sending `GetPooledTransactions` request to peer's session failed, buffering hashes" - ); - self.transaction_fetcher.buffer_hashes(failed_to_request_hashes, Some(peer_id)); - } + 
// Update propagated transactions metrics + self.metrics.propagated_transactions.increment(propagated.0.len() as u64); + + propagated + }; + + // notify pool so events get fired + self.pool.on_propagated(propagated); } /// Handles dedicated transaction events related to the `eth` protocol. @@ -1136,20 +1245,6 @@ where } } - /// Processes a batch import results. - fn on_batch_import_result(&mut self, batch_results: Vec>) { - for res in batch_results { - match res { - Ok(hash) => { - self.on_good_import(hash); - } - Err(err) => { - self.on_bad_import(err); - } - } - } - } - /// Processes a [`FetchEvent`]. fn on_fetch_event(&mut self, fetch_event: FetchEvent) { match fetch_event { @@ -1165,100 +1260,6 @@ where } } } - - /// Runs an operation to fetch hashes that are cached in [`TransactionFetcher`]. - fn on_fetch_hashes_pending_fetch(&mut self) { - // try drain transaction hashes pending fetch - let info = &self.pending_pool_imports_info; - let max_pending_pool_imports = info.max_pending_pool_imports; - let has_capacity_wrt_pending_pool_imports = - |divisor| info.has_capacity(max_pending_pool_imports / divisor); - - self.transaction_fetcher - .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports); - } - - fn report_peer_bad_transactions(&self, peer_id: PeerId) { - self.report_peer(peer_id, ReputationChangeKind::BadTransactions); - self.metrics.reported_bad_transactions.increment(1); - } - - fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) { - trace!(target: "net::tx", ?peer_id, ?kind, "reporting reputation change"); - self.network.reputation_change(peer_id, kind); - } - - fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) { - let kind = match req_err { - RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol, - RequestError::Timeout => ReputationChangeKind::Timeout, - RequestError::ChannelClosed | RequestError::ConnectionDropped => { - // peer is already disconnected - return - } - 
RequestError::BadResponse => return self.report_peer_bad_transactions(peer_id), - }; - self.report_peer(peer_id, kind); - } - - fn report_already_seen(&self, peer_id: PeerId) { - trace!(target: "net::tx", ?peer_id, "Penalizing peer for already seen transaction"); - self.network.reputation_change(peer_id, ReputationChangeKind::AlreadySeenTransaction); - } - - /// Clear the transaction - fn on_good_import(&mut self, hash: TxHash) { - self.transactions_by_peers.remove(&hash); - } - - /// Penalize the peers that intentionally sent the bad transaction, and cache it to avoid - /// fetching or importing it again. - /// - /// Errors that count as bad transactions are: - /// - /// - intrinsic gas too low - /// - exceeds gas limit - /// - gas uint overflow - /// - exceeds max init code size - /// - oversized data - /// - signer account has bytecode - /// - chain id mismatch - /// - old legacy chain id - /// - tx type not supported - /// - /// (and additionally for blobs txns...) - /// - /// - no blobs - /// - too many blobs - /// - invalid kzg proof - /// - kzg error - /// - not blob transaction (tx type mismatch) - /// - wrong versioned kzg commitment hash - fn on_bad_import(&mut self, err: PoolError) { - let peers = self.transactions_by_peers.remove(&err.hash); - - // if we're _currently_ syncing, we ignore a bad transaction - if !err.is_bad_transaction() || self.network.is_syncing() { - return - } - // otherwise we penalize the peer that sent the bad transaction, with the assumption that - // the peer should have known that this transaction is bad (e.g. violating consensus rules) - if let Some(peers) = peers { - for peer_id in peers { - self.report_peer_bad_transactions(peer_id); - } - } - self.metrics.bad_imports.increment(1); - self.bad_imports.insert(err.hash); - } - - /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns - /// `false` if [`TransactionsManager`] is operating close to full capacity. 
- fn has_capacity_for_fetching_pending_hashes(&self) -> bool { - self.pending_pool_imports_info - .has_capacity(self.pending_pool_imports_info.max_pending_pool_imports) && - self.transaction_fetcher.has_capacity_for_fetching_pending_hashes() - } } /// An endless future. Preemption ensure that future is non-blocking, nonetheless. See From fcb5050f87054a5bf3cbb0d3b677a4f42ffdbabd Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Nov 2024 20:59:21 +0100 Subject: [PATCH 563/970] Add SDK codeowners (#12685) --- .github/CODEOWNERS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 488e6c90cf7..5a1d1df7261 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -20,11 +20,11 @@ crates/fs-util/ @onbjerg @emhane crates/metrics/ @onbjerg crates/net/ @emhane @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk @emhane -crates/node/ @mattsse @Rjected @onbjerg +crates/node/ @mattsse @Rjected @onbjerg @emhane @klkvr crates/optimism/ @mattsse @Rjected @fgimenez @emhane crates/payload/ @mattsse @Rjected -crates/primitives/ @Rjected -crates/primitives-traits/ @Rjected @joshieDo +crates/primitives/ @Rjected @emhane @mattsse @klkvr +crates/primitives-traits/ @Rjected @joshieDo @emhane @mattsse @klkvr crates/prune/ @shekhirin @joshieDo crates/revm/ @mattsse @rakita crates/rpc/ @mattsse @Rjected @emhane From aa34a2795b1a2ed22a9ef58bf935092de77afc20 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Nov 2024 21:16:45 +0100 Subject: [PATCH 564/970] chore(sdk): make `reth-chain-state` types generic over receipt (#12667) --- Cargo.lock | 1 + crates/blockchain-tree/src/shareable.rs | 6 +- crates/chain-state/src/in_memory.rs | 164 ++++++++++++----------- crates/chain-state/src/lib.rs | 3 + crates/chain-state/src/memory_overlay.rs | 16 +-- crates/chain-state/src/notifications.rs | 41 +++--- crates/chain-state/src/test_utils.rs | 16 ++- crates/exex/types/Cargo.toml | 7 +- 
crates/exex/types/src/notification.rs | 15 ++- crates/primitives/src/lib.rs | 2 +- 10 files changed, 144 insertions(+), 127 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb9018a55b7..19e90852175 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7644,6 +7644,7 @@ dependencies = [ "reth-chain-state", "reth-execution-types", "reth-primitives", + "reth-primitives-traits", "serde", "serde_with", ] diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 8e6cceccdd1..ec1f3cccf97 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -13,8 +13,8 @@ use reth_evm::execute::BlockExecutorProvider; use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ - providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateSubscriptions, - FullExecutionDataProvider, ProviderError, + providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications, + CanonStateSubscriptions, FullExecutionDataProvider, ProviderError, }; use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; @@ -188,7 +188,7 @@ where N: ProviderNodeTypes, E: Send + Sync, { - fn subscribe_to_canonical_state(&self) -> reth_provider::CanonStateNotifications { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); self.tree.read().subscribe_canon_state() } diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 47443b36c67..e07eaeaa5d9 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -12,7 +12,7 @@ use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, Receipt, Receipts, 
SealedBlock, SealedBlockWithSenders, SealedHeader, + BlockWithSenders, NodePrimitives, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, }; use reth_storage_api::StateProviderBox; @@ -50,22 +50,22 @@ pub(crate) struct InMemoryStateMetrics { /// This holds, because only lookup by number functions need to acquire the numbers lock first to /// get the block hash. #[derive(Debug, Default)] -pub(crate) struct InMemoryState { +pub(crate) struct InMemoryState { /// All canonical blocks that are not on disk yet. - blocks: RwLock>>, + blocks: RwLock>>>, /// Mapping of block numbers to block hashes. numbers: RwLock>, /// The pending block that has not yet been made canonical. - pending: watch::Sender>, + pending: watch::Sender>>, /// Metrics for the in-memory state. metrics: InMemoryStateMetrics, } -impl InMemoryState { +impl InMemoryState { pub(crate) fn new( - blocks: HashMap>, + blocks: HashMap>>, numbers: BTreeMap, - pending: Option, + pending: Option>, ) -> Self { let (pending, _) = watch::channel(pending); let this = Self { @@ -95,12 +95,12 @@ impl InMemoryState { } /// Returns the state for a given block hash. - pub(crate) fn state_by_hash(&self, hash: B256) -> Option> { + pub(crate) fn state_by_hash(&self, hash: B256) -> Option>> { self.blocks.read().get(&hash).cloned() } /// Returns the state for a given block number. - pub(crate) fn state_by_number(&self, number: u64) -> Option> { + pub(crate) fn state_by_number(&self, number: u64) -> Option>> { let hash = self.hash_by_number(number)?; self.state_by_hash(hash) } @@ -111,14 +111,14 @@ impl InMemoryState { } /// Returns the current chain head state. - pub(crate) fn head_state(&self) -> Option> { + pub(crate) fn head_state(&self) -> Option>> { let hash = *self.numbers.read().last_key_value()?.1; self.state_by_hash(hash) } /// Returns the pending state corresponding to the current head plus one, /// from the payload received in newPayload that does not have a FCU yet. 
- pub(crate) fn pending_state(&self) -> Option { + pub(crate) fn pending_state(&self) -> Option> { self.pending.borrow().clone() } @@ -131,17 +131,17 @@ impl InMemoryState { /// Inner type to provide in memory state. It includes a chain tracker to be /// advanced internally by the tree. #[derive(Debug)] -pub(crate) struct CanonicalInMemoryStateInner { +pub(crate) struct CanonicalInMemoryStateInner { /// Tracks certain chain information, such as the canonical head, safe head, and finalized /// head. pub(crate) chain_info_tracker: ChainInfoTracker, /// Tracks blocks at the tip of the chain that have not been persisted to disk yet. - pub(crate) in_memory_state: InMemoryState, + pub(crate) in_memory_state: InMemoryState, /// A broadcast stream that emits events when the canonical chain is updated. - pub(crate) canon_state_notification_sender: CanonStateNotificationSender, + pub(crate) canon_state_notification_sender: CanonStateNotificationSender, } -impl CanonicalInMemoryStateInner { +impl CanonicalInMemoryStateInner { /// Clears all entries in the in memory state. fn clear(&self) { { @@ -162,17 +162,17 @@ impl CanonicalInMemoryStateInner { /// all canonical blocks not on disk yet and keeps track of the block range that /// is in memory. #[derive(Debug, Clone)] -pub struct CanonicalInMemoryState { - pub(crate) inner: Arc, +pub struct CanonicalInMemoryState { + pub(crate) inner: Arc>, } -impl CanonicalInMemoryState { +impl CanonicalInMemoryState { /// Create a new in-memory state with the given blocks, numbers, pending state, and optional /// finalized header. pub fn new( - blocks: HashMap>, + blocks: HashMap>>, numbers: BTreeMap, - pending: Option, + pending: Option>, finalized: Option, safe: Option, ) -> Self { @@ -236,7 +236,7 @@ impl CanonicalInMemoryState { /// Updates the pending block with the given block. /// /// Note: This assumes that the parent block of the pending block is canonical. 
- pub fn set_pending_block(&self, pending: ExecutedBlock) { + pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block let parent = self.state_by_hash(pending.block().parent_hash); let pending = BlockState::with_parent(pending, parent); @@ -252,7 +252,7 @@ impl CanonicalInMemoryState { /// them to their parent blocks. fn update_blocks(&self, new_blocks: I, reorged: I) where - I: IntoIterator, + I: IntoIterator>, { { // acquire locks, starting with the numbers lock @@ -288,7 +288,7 @@ impl CanonicalInMemoryState { } /// Update the in memory state with the given chain update. - pub fn update_chain(&self, new_chain: NewCanonicalChain) { + pub fn update_chain(&self, new_chain: NewCanonicalChain) { match new_chain { NewCanonicalChain::Commit { new } => { self.update_blocks(new, vec![]); @@ -359,22 +359,22 @@ impl CanonicalInMemoryState { } /// Returns in memory state corresponding the given hash. - pub fn state_by_hash(&self, hash: B256) -> Option> { + pub fn state_by_hash(&self, hash: B256) -> Option>> { self.inner.in_memory_state.state_by_hash(hash) } /// Returns in memory state corresponding the block number. - pub fn state_by_number(&self, number: u64) -> Option> { + pub fn state_by_number(&self, number: u64) -> Option>> { self.inner.in_memory_state.state_by_number(number) } /// Returns the in memory head state. - pub fn head_state(&self) -> Option> { + pub fn head_state(&self) -> Option>> { self.inner.in_memory_state.head_state() } /// Returns the in memory pending state. - pub fn pending_state(&self) -> Option { + pub fn pending_state(&self) -> Option> { self.inner.in_memory_state.pending_state() } @@ -479,14 +479,14 @@ impl CanonicalInMemoryState { /// Returns a tuple with the `SealedBlock` corresponding to the pending /// state and a vector of its `Receipt`s. 
- pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { + pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { self.pending_state().map(|block_state| { (block_state.block_ref().block().clone(), block_state.executed_block_receipts()) }) } /// Subscribe to new blocks events. - pub fn subscribe_canon_state(&self) -> CanonStateNotifications { + pub fn subscribe_canon_state(&self) -> CanonStateNotifications { self.inner.canon_state_notification_sender.subscribe() } @@ -501,7 +501,7 @@ impl CanonicalInMemoryState { } /// Attempts to send a new [`CanonStateNotification`] to all active Receiver handles. - pub fn notify_canon_state(&self, event: CanonStateNotification) { + pub fn notify_canon_state(&self, event: CanonStateNotification) { self.inner.canon_state_notification_sender.send(event).ok(); } @@ -513,7 +513,7 @@ impl CanonicalInMemoryState { &self, hash: B256, historical: StateProviderBox, - ) -> MemoryOverlayStateProvider { + ) -> MemoryOverlayStateProvider { let in_memory = if let Some(state) = self.state_by_hash(hash) { state.chain().map(|block_state| block_state.block()).collect() } else { @@ -527,7 +527,7 @@ impl CanonicalInMemoryState { /// oldest (highest to lowest). /// /// This iterator contains a snapshot of the in-memory state at the time of the call. - pub fn canonical_chain(&self) -> impl Iterator> { + pub fn canonical_chain(&self) -> impl Iterator>> { self.inner.in_memory_state.head_state().into_iter().flat_map(|head| head.iter()) } @@ -577,22 +577,22 @@ impl CanonicalInMemoryState { /// State after applying the given block, this block is part of the canonical chain that partially /// stored in memory and can be traced back to a canonical block on disk. #[derive(Debug, PartialEq, Eq, Clone)] -pub struct BlockState { +pub struct BlockState { /// The executed block that determines the state after this block has been executed. - block: ExecutedBlock, + block: ExecutedBlock, /// The block's parent block if it exists. 
- parent: Option>, + parent: Option>>, } #[allow(dead_code)] -impl BlockState { +impl BlockState { /// [`BlockState`] constructor. - pub const fn new(block: ExecutedBlock) -> Self { + pub const fn new(block: ExecutedBlock) -> Self { Self { block, parent: None } } /// [`BlockState`] constructor with parent. - pub const fn with_parent(block: ExecutedBlock, parent: Option>) -> Self { + pub const fn with_parent(block: ExecutedBlock, parent: Option>) -> Self { Self { block, parent } } @@ -606,12 +606,12 @@ impl BlockState { } /// Returns the executed block that determines the state. - pub fn block(&self) -> ExecutedBlock { + pub fn block(&self) -> ExecutedBlock { self.block.clone() } /// Returns a reference to the executed block that determines the state. - pub const fn block_ref(&self) -> &ExecutedBlock { + pub const fn block_ref(&self) -> &ExecutedBlock { &self.block } @@ -646,7 +646,7 @@ impl BlockState { } /// Returns the `Receipts` of executed block that determines the state. - pub fn receipts(&self) -> &Receipts { + pub fn receipts(&self) -> &Receipts { &self.block.execution_outcome().receipts } @@ -654,7 +654,7 @@ impl BlockState { /// We assume that the `Receipts` in the executed block `ExecutionOutcome` /// has only one element corresponding to the executed block associated to /// the state. - pub fn executed_block_receipts(&self) -> Vec { + pub fn executed_block_receipts(&self) -> Vec { let receipts = self.receipts(); debug_assert!( @@ -713,7 +713,7 @@ impl BlockState { /// /// This merges the state of all blocks that are part of the chain that the this block is /// the head of. This includes all blocks that connect back to the canonical block on disk. 
- pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { + pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { let in_memory = self.chain().map(|block_state| block_state.block()).collect(); MemoryOverlayStateProvider::new(historical, in_memory) @@ -771,25 +771,25 @@ impl BlockState { /// Represents an executed block stored in-memory. #[derive(Clone, Debug, PartialEq, Eq, Default)] -pub struct ExecutedBlock { +pub struct ExecutedBlock { /// Sealed block the rest of fields refer to. pub block: Arc, /// Block's senders. pub senders: Arc>, /// Block's execution outcome. - pub execution_output: Arc, + pub execution_output: Arc>, /// Block's hashed state. pub hashed_state: Arc, /// Trie updates that result of applying the block. pub trie: Arc, } -impl ExecutedBlock { +impl ExecutedBlock { /// [`ExecutedBlock`] constructor. pub const fn new( block: Arc, senders: Arc>, - execution_output: Arc, + execution_output: Arc>, hashed_state: Arc, trie: Arc, ) -> Self { @@ -814,7 +814,7 @@ impl ExecutedBlock { } /// Returns a reference to the block's execution outcome - pub fn execution_outcome(&self) -> &ExecutionOutcome { + pub fn execution_outcome(&self) -> &ExecutionOutcome { &self.execution_output } @@ -831,23 +831,23 @@ impl ExecutedBlock { /// Non-empty chain of blocks. #[derive(Debug)] -pub enum NewCanonicalChain { +pub enum NewCanonicalChain { /// A simple append to the current canonical head Commit { /// all blocks that lead back to the canonical head - new: Vec, + new: Vec>, }, /// A reorged chain consists of two chains that trace back to a shared ancestor block at which /// point they diverge. Reorg { /// All blocks of the _new_ chain - new: Vec, + new: Vec>, /// All blocks of the _old_ chain - old: Vec, + old: Vec>, }, } -impl NewCanonicalChain { +impl NewCanonicalChain { /// Returns the length of the new chain. 
pub fn new_block_count(&self) -> usize { match self { @@ -864,7 +864,7 @@ impl NewCanonicalChain { } /// Converts the new chain into a notification that will be emitted to listeners - pub fn to_chain_notification(&self) -> CanonStateNotification { + pub fn to_chain_notification(&self) -> CanonStateNotification { match self { Self::Commit { new } => { let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| { @@ -917,7 +917,7 @@ mod tests { use alloy_primitives::{map::HashSet, BlockNumber, Bytes, StorageKey, StorageValue}; use rand::Rng; use reth_errors::ProviderResult; - use reth_primitives::{Account, Bytecode, Receipt}; + use reth_primitives::{Account, Bytecode, EthPrimitives, Receipt}; use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, @@ -925,7 +925,7 @@ mod tests { use reth_trie::{AccountProof, HashedStorage, MultiProof, StorageProof, TrieInput}; fn create_mock_state( - test_block_builder: &mut TestBlockBuilder, + test_block_builder: &mut TestBlockBuilder, block_number: u64, parent_hash: B256, ) -> BlockState { @@ -935,7 +935,7 @@ mod tests { } fn create_mock_state_chain( - test_block_builder: &mut TestBlockBuilder, + test_block_builder: &mut TestBlockBuilder, num_blocks: u64, ) -> Vec { let mut chain = Vec::with_capacity(num_blocks as usize); @@ -1065,7 +1065,7 @@ mod tests { fn test_in_memory_state_impl_state_by_hash() { let mut state_by_hash = HashMap::default(); let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state = Arc::new(create_mock_state(&mut test_block_builder, number, B256::random())); state_by_hash.insert(state.hash(), state.clone()); @@ -1081,7 +1081,7 @@ mod tests { let mut hash_by_number = BTreeMap::new(); let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut 
test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state = Arc::new(create_mock_state(&mut test_block_builder, number, B256::random())); let hash = state.hash(); @@ -1098,7 +1098,7 @@ mod tests { fn test_in_memory_state_impl_head_state() { let mut state_by_hash = HashMap::default(); let mut hash_by_number = BTreeMap::new(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state1 = Arc::new(create_mock_state(&mut test_block_builder, 1, B256::random())); let hash1 = state1.hash(); let state2 = Arc::new(create_mock_state(&mut test_block_builder, 2, hash1)); @@ -1118,7 +1118,7 @@ mod tests { #[test] fn test_in_memory_state_impl_pending_state() { let pending_number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let pending_state = create_mock_state(&mut test_block_builder, pending_number, B256::random()); let pending_hash = pending_state.hash(); @@ -1135,7 +1135,8 @@ mod tests { #[test] fn test_in_memory_state_impl_no_pending_state() { - let in_memory_state = InMemoryState::new(HashMap::default(), BTreeMap::new(), None); + let in_memory_state: InMemoryState = + InMemoryState::new(HashMap::default(), BTreeMap::new(), None); assert_eq!(in_memory_state.pending_state(), None); } @@ -1143,7 +1144,7 @@ mod tests { #[test] fn test_state_new() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1154,7 +1155,7 @@ mod tests { #[test] fn test_state_block() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: 
TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1165,7 +1166,7 @@ mod tests { #[test] fn test_state_hash() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1176,7 +1177,7 @@ mod tests { #[test] fn test_state_number() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block); @@ -1187,7 +1188,7 @@ mod tests { #[test] fn test_state_state_root() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1198,7 +1199,7 @@ mod tests { #[test] fn test_state_receipts() { let receipts = Receipts { receipt_vec: vec![vec![Some(Receipt::default())]] }; - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_receipts(receipts.clone(), B256::random()); @@ -1209,8 +1210,8 @@ mod tests { #[test] fn test_in_memory_state_chain_update() { - let state = CanonicalInMemoryState::empty(); - let mut test_block_builder = TestBlockBuilder::default(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); + let mut test_block_builder: TestBlockBuilder = 
TestBlockBuilder::default(); let block1 = test_block_builder.get_executed_block_with_number(0, B256::random()); let block2 = test_block_builder.get_executed_block_with_number(0, B256::random()); let chain = NewCanonicalChain::Commit { new: vec![block1.clone()] }; @@ -1234,8 +1235,8 @@ mod tests { #[test] fn test_in_memory_state_set_pending_block() { - let state = CanonicalInMemoryState::empty(); - let mut test_block_builder = TestBlockBuilder::default(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); // First random block let block1 = test_block_builder.get_executed_block_with_number(0, B256::random()); @@ -1286,7 +1287,7 @@ mod tests { #[test] fn test_canonical_in_memory_state_state_provider() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block1 = test_block_builder.get_executed_block_with_number(1, B256::random()); let block2 = test_block_builder.get_executed_block_with_number(2, block1.block().hash()); let block3 = test_block_builder.get_executed_block_with_number(3, block2.block().hash()); @@ -1333,14 +1334,15 @@ mod tests { #[test] fn test_canonical_in_memory_state_canonical_chain_empty() { - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); let chain: Vec<_> = state.canonical_chain().collect(); assert!(chain.is_empty()); } #[test] fn test_canonical_in_memory_state_canonical_chain_single_block() { - let block = TestBlockBuilder::default().get_executed_block_with_number(1, B256::random()); + let block = TestBlockBuilder::::default() + .get_executed_block_with_number(1, B256::random()); let hash = block.block().hash(); let mut blocks = HashMap::default(); blocks.insert(hash, Arc::new(BlockState::new(block))); @@ -1359,7 +1361,7 @@ mod tests { fn 
test_canonical_in_memory_state_canonical_chain_multiple_blocks() { let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); for i in 1..=3 { let block = block_builder.get_executed_block_with_number(i, parent_hash); @@ -1381,7 +1383,7 @@ mod tests { fn test_canonical_in_memory_state_canonical_chain_with_pending_block() { let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); for i in 1..=2 { let block = block_builder.get_executed_block_with_number(i, parent_hash); @@ -1401,7 +1403,7 @@ mod tests { #[test] fn test_block_state_parent_blocks() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 4); let parents = chain[3].parent_state_chain(); @@ -1422,7 +1424,7 @@ mod tests { #[test] fn test_block_state_single_block_state_chain() { let single_block_number = 1; - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let single_block = create_mock_state(&mut test_block_builder, single_block_number, B256::random()); let single_block_hash = single_block.block().block.hash(); @@ -1438,7 +1440,7 @@ mod tests { #[test] fn test_block_state_chain() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 3); let block_state_chain = chain[2].chain().collect::>(); @@ -1460,7 +1462,7 @@ mod tests { #[test] fn test_to_chain_notification() { // Generate 4 blocks - let mut test_block_builder = 
TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block0 = test_block_builder.get_executed_block_with_number(0, B256::random()); let block1 = test_block_builder.get_executed_block_with_number(1, block0.block.hash()); let block1a = test_block_builder.get_executed_block_with_number(1, block0.block.hash()); diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs index bd9b43a59ea..519469d67f6 100644 --- a/crates/chain-state/src/lib.rs +++ b/crates/chain-state/src/lib.rs @@ -27,3 +27,6 @@ pub use memory_overlay::{MemoryOverlayStateProvider, MemoryOverlayStateProviderR #[cfg(any(test, feature = "test-utils"))] /// Common test helpers pub mod test_utils; + +// todo: remove when generic data prim integration complete +pub use reth_primitives::EthPrimitives; diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index ada0faee490..88cd411d38b 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -5,7 +5,7 @@ use alloy_primitives::{ Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, }; use reth_errors::ProviderResult; -use reth_primitives::{Account, Bytecode}; +use reth_primitives::{Account, Bytecode, NodePrimitives}; use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, @@ -18,11 +18,11 @@ use std::sync::OnceLock; /// A state provider that stores references to in-memory blocks along with their state as well as a /// reference of the historical state provider for fallback lookups. #[allow(missing_debug_implementations)] -pub struct MemoryOverlayStateProviderRef<'a> { +pub struct MemoryOverlayStateProviderRef<'a, N: NodePrimitives = reth_primitives::EthPrimitives> { /// Historical state provider for state lookups that are not found in in-memory blocks. 
pub(crate) historical: Box, /// The collection of executed parent blocks. Expected order is newest to oldest. - pub(crate) in_memory: Vec, + pub(crate) in_memory: Vec>, /// Lazy-loaded in-memory trie data. pub(crate) trie_state: OnceLock, } @@ -30,11 +30,11 @@ pub struct MemoryOverlayStateProviderRef<'a> { /// A state provider that stores references to in-memory blocks along with their state as well as /// the historical state provider for fallback lookups. #[allow(missing_debug_implementations)] -pub struct MemoryOverlayStateProvider { +pub struct MemoryOverlayStateProvider { /// Historical state provider for state lookups that are not found in in-memory blocks. pub(crate) historical: Box, /// The collection of executed parent blocks. Expected order is newest to oldest. - pub(crate) in_memory: Vec, + pub(crate) in_memory: Vec>, /// Lazy-loaded in-memory trie data. pub(crate) trie_state: OnceLock, } @@ -49,7 +49,7 @@ macro_rules! impl_state_provider { /// - `in_memory` - the collection of executed ancestor blocks in reverse. /// - `historical` - a historical state provider for the latest ancestor block stored in the /// database. - pub fn new(historical: $historical_type, in_memory: Vec) -> Self { + pub fn new(historical: $historical_type, in_memory: Vec>) -> Self { Self { historical, in_memory, trie_state: OnceLock::new() } } @@ -230,8 +230,8 @@ macro_rules! impl_state_provider { }; } -impl_state_provider!([], MemoryOverlayStateProvider, Box); -impl_state_provider!([<'a>], MemoryOverlayStateProviderRef<'a>, Box); +impl_state_provider!([], MemoryOverlayStateProvider, Box); +impl_state_provider!([<'a, N: NodePrimitives>], MemoryOverlayStateProviderRef<'a, N>, Box); /// The collection of data necessary for trie-related operations for [`MemoryOverlayStateProvider`]. 
#[derive(Clone, Default, Debug)] diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 582e1d2a05d..84fb120d4b2 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -3,7 +3,7 @@ use auto_impl::auto_impl; use derive_more::{Deref, DerefMut}; use reth_execution_types::{BlockReceipts, Chain}; -use reth_primitives::{SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; use std::{ pin::Pin, sync::Arc, @@ -17,10 +17,12 @@ use tokio_stream::{ use tracing::debug; /// Type alias for a receiver that receives [`CanonStateNotification`] -pub type CanonStateNotifications = broadcast::Receiver; +pub type CanonStateNotifications = + broadcast::Receiver>; /// Type alias for a sender that sends [`CanonStateNotification`] -pub type CanonStateNotificationSender = broadcast::Sender; +pub type CanonStateNotificationSender = + broadcast::Sender>; /// A type that allows to register chain related event subscriptions. #[auto_impl(&, Arc)] @@ -41,13 +43,13 @@ pub trait CanonStateSubscriptions: Send + Sync { /// A Stream of [`CanonStateNotification`]. #[derive(Debug)] #[pin_project::pin_project] -pub struct CanonStateNotificationStream { +pub struct CanonStateNotificationStream { #[pin] - st: BroadcastStream, + st: BroadcastStream>, } -impl Stream for CanonStateNotificationStream { - type Item = CanonStateNotification; +impl Stream for CanonStateNotificationStream { + type Item = CanonStateNotification; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { loop { @@ -68,11 +70,11 @@ impl Stream for CanonStateNotificationStream { /// The notification contains at least one [`Chain`] with the imported segment. If some blocks were /// reverted (e.g. during a reorg), the old chain is also returned. 
#[derive(Clone, Debug, PartialEq, Eq)] -pub enum CanonStateNotification { +pub enum CanonStateNotification { /// The canonical chain was extended. Commit { /// The newly added chain segment. - new: Arc, + new: Arc>, }, /// A chain segment was reverted or reorged. /// @@ -82,18 +84,18 @@ pub enum CanonStateNotification { /// chain segment. Reorg { /// The chain segment that was reverted. - old: Arc, + old: Arc>, /// The chain segment that was added on top of the canonical chain, minus the reverted /// blocks. /// /// In the case of a revert, not a reorg, this chain segment is empty. - new: Arc, + new: Arc>, }, } -impl CanonStateNotification { +impl CanonStateNotification { /// Get the chain segment that was reverted, if any. - pub fn reverted(&self) -> Option> { + pub fn reverted(&self) -> Option>> { match self { Self::Commit { .. } => None, Self::Reorg { old, .. } => Some(old.clone()), @@ -101,7 +103,7 @@ impl CanonStateNotification { } /// Get the newly imported chain segment, if any. - pub fn committed(&self) -> Arc { + pub fn committed(&self) -> Arc> { match self { Self::Commit { new } | Self::Reorg { new, .. } => new.clone(), } @@ -122,7 +124,7 @@ impl CanonStateNotification { /// /// The boolean in the tuple (2nd element) denotes whether the receipt was from the reverted /// chain segment. 
- pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> { + pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> { let mut receipts = Vec::new(); // get old receipts @@ -212,7 +214,7 @@ mod tests { block2.set_block_number(2); block2.set_hash(block2_hash); - let chain = Arc::new(Chain::new( + let chain: Arc = Arc::new(Chain::new( vec![block1.clone(), block2.clone()], ExecutionOutcome::default(), None, @@ -250,7 +252,7 @@ mod tests { block3.set_block_number(3); block3.set_hash(block3_hash); - let old_chain = + let old_chain: Arc = Arc::new(Chain::new(vec![block1.clone()], ExecutionOutcome::default(), None)); let new_chain = Arc::new(Chain::new( vec![block2.clone(), block3.clone()], @@ -313,7 +315,7 @@ mod tests { let execution_outcome = ExecutionOutcome { receipts, ..Default::default() }; // Create a new chain segment with `block1` and `block2` and the execution outcome. - let new_chain = + let new_chain: Arc = Arc::new(Chain::new(vec![block1.clone(), block2.clone()], execution_outcome, None)); // Create a commit notification containing the new chain segment. @@ -361,7 +363,8 @@ mod tests { ExecutionOutcome { receipts: old_receipts, ..Default::default() }; // Create an old chain segment to be reverted, containing `old_block1`. - let old_chain = Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); + let old_chain: Arc = + Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); // Define block2 for the new chain segment, which will be committed. 
let mut new_block1 = SealedBlockWithSenders::default(); diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 60a90e43fee..63689f07f03 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -1,3 +1,5 @@ +use core::marker::PhantomData; + use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, @@ -12,8 +14,8 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, - TransactionSigned, TransactionSignedEcRecovered, + BlockBody, NodePrimitives, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, + SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; @@ -27,7 +29,7 @@ use tokio::sync::broadcast::{self, Sender}; /// Functionality to build blocks for tests and help with assertions about /// their execution. #[derive(Debug)] -pub struct TestBlockBuilder { +pub struct TestBlockBuilder { /// The account that signs all the block's transactions. pub signer: Address, /// Private key for signing. 
@@ -40,9 +42,10 @@ pub struct TestBlockBuilder { pub signer_build_account_info: AccountInfo, /// Chain spec of the blocks generated by this builder pub chain_spec: ChainSpec, + _prims: PhantomData, } -impl Default for TestBlockBuilder { +impl Default for TestBlockBuilder { fn default() -> Self { let initial_account_info = AccountInfo::from_balance(U256::from(10).pow(U256::from(18))); let signer_pk = PrivateKeySigner::random(); @@ -53,6 +56,7 @@ impl Default for TestBlockBuilder { signer_pk, signer_execute_account_info: initial_account_info.clone(), signer_build_account_info: initial_account_info, + _prims: PhantomData, } } } @@ -289,8 +293,8 @@ impl TestBlockBuilder { } /// A test `ChainEventSubscriptions` #[derive(Clone, Debug, Default)] -pub struct TestCanonStateSubscriptions { - canon_notif_tx: Arc>>>, +pub struct TestCanonStateSubscriptions { + canon_notif_tx: Arc>>>>, } impl TestCanonStateSubscriptions { diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index 51097d6109c..3b67fd5aa50 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-chain-state.workspace = true reth-execution-types.workspace = true +reth-primitives-traits.workspace = true # reth alloy-primitives.workspace = true @@ -38,11 +39,13 @@ serde = [ "reth-execution-types/serde", "alloy-eips/serde", "alloy-primitives/serde", - "rand/serde" + "rand/serde", + "reth-primitives-traits/serde", ] serde-bincode-compat = [ "reth-execution-types/serde-bincode-compat", "serde_with", "reth-primitives/serde-bincode-compat", - "alloy-eips/serde-bincode-compat" + "alloy-eips/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", ] diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index 61d42a3319b..fb0762f04b3 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -2,27 +2,28 @@ use std::sync::Arc; use 
reth_chain_state::CanonStateNotification; use reth_execution_types::Chain; +use reth_primitives_traits::NodePrimitives; /// Notifications sent to an `ExEx`. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum ExExNotification { +pub enum ExExNotification { /// Chain got committed without a reorg, and only the new chain is returned. ChainCommitted { /// The new chain after commit. - new: Arc, + new: Arc>, }, /// Chain got reorged, and both the old and the new chains are returned. ChainReorged { /// The old chain before reorg. - old: Arc, + old: Arc>, /// The new chain after reorg. - new: Arc, + new: Arc>, }, /// Chain got reverted, and only the old chain is returned. ChainReverted { /// The old chain before reversion. - old: Arc, + old: Arc>, }, } @@ -60,8 +61,8 @@ impl ExExNotification { } } -impl From for ExExNotification { - fn from(notification: CanonStateNotification) -> Self { +impl From> for ExExNotification

{ + fn from(notification: CanonStateNotification

) -> Self { match notification { CanonStateNotification::Commit { new } => Self::ChainCommitted { new }, CanonStateNotification::Reorg { old, new } => Self::ChainReorged { old, new }, diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index c3682ecba1d..027bf97cfa5 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -76,7 +76,7 @@ pub mod serde_bincode_compat { } /// Temp helper struct for integrating [`NodePrimitives`]. -#[derive(Debug, Clone, Default, PartialEq, Eq)] +#[derive(Debug, Clone, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct EthPrimitives; #[cfg(feature = "reth-codec")] From 02a90e1c0b48c0af5842b92a49860e8446b5a0e2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 19 Nov 2024 21:46:28 +0100 Subject: [PATCH 565/970] chore: rm allowance from oog error (#12686) --- crates/rpc/rpc-eth-types/src/error/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 893bbdd6b9c..187e2d943f7 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -362,7 +362,7 @@ pub enum RpcInvalidTransactionError { SenderNoEOA, /// Gas limit was exceeded during execution. /// Contains the gas limit. - #[error("out of gas: gas required exceeds allowance: {0}")] + #[error("out of gas: gas required exceeds: {0}")] BasicOutOfGas(u64), /// Gas limit was exceeded during memory expansion. /// Contains the gas limit. 
From 10caa9f8465043f6a0ab31f4543d86136aa1c419 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 20 Nov 2024 01:12:43 +0000 Subject: [PATCH 566/970] fix: use `body.recover_signers_unchecked` instead on `try_with_senders_unchecked` (#12668) --- crates/primitives/src/block.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 94dd578493c..5c47c49f437 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -69,7 +69,7 @@ impl Block { let senders = if self.body.transactions.len() == senders.len() { senders } else { - let Some(senders) = self.body.recover_signers() else { return Err(self) }; + let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; senders }; @@ -379,7 +379,7 @@ impl SealedBlock { let senders = if self.body.transactions.len() == senders.len() { senders } else { - let Some(senders) = self.body.recover_signers() else { return Err(self) }; + let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; senders }; @@ -616,6 +616,15 @@ impl BlockBody { TransactionSigned::recover_signers(&self.transactions, self.transactions.len()) } + /// Recover signer addresses for all transactions in the block body _without ensuring that the + /// signature has a low `s` value_. + /// + /// Returns `None`, if some transaction's signature is invalid, see also + /// [`TransactionSigned::recover_signer_unchecked`]. + pub fn recover_signers_unchecked(&self) -> Option> { + TransactionSigned::recover_signers_unchecked(&self.transactions, self.transactions.len()) + } + /// Returns whether or not the block body contains any blob transactions. 
#[inline] pub fn has_blob_transactions(&self) -> bool { From 942ba7e823600828190aaf111a9a6bb9803a703e Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 20 Nov 2024 09:27:09 +0100 Subject: [PATCH 567/970] tx-pool: impl `From` for `Destination` (#12689) --- crates/transaction-pool/src/pool/txpool.rs | 4 ++-- crates/transaction-pool/src/pool/update.rs | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 537162ac76c..576672b91af 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1328,7 +1328,7 @@ impl AllTransactions { id: *tx.transaction.id(), hash: *tx.transaction.hash(), current: current_pool, - destination: Destination::Pool(tx.subpool), + destination: tx.subpool.into(), }) } } @@ -1738,7 +1738,7 @@ impl AllTransactions { id: *id, hash: *tx.transaction.hash(), current: current_pool, - destination: Destination::Pool(tx.subpool), + destination: tx.subpool.into(), }) } } diff --git a/crates/transaction-pool/src/pool/update.rs b/crates/transaction-pool/src/pool/update.rs index a5cce8291fa..d62b1792e7b 100644 --- a/crates/transaction-pool/src/pool/update.rs +++ b/crates/transaction-pool/src/pool/update.rs @@ -26,3 +26,9 @@ pub(crate) enum Destination { /// Move transaction to pool Pool(SubPool), } + +impl From for Destination { + fn from(sub_pool: SubPool) -> Self { + Self::Pool(sub_pool) + } +} From 3b120283192f276562de4f9019e6c3b59dc1efc4 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 20 Nov 2024 10:13:17 +0100 Subject: [PATCH 568/970] text(tx-pool): add unit tests for `DiskFileBlobStore` (#12692) --- crates/transaction-pool/src/blobstore/disk.rs | 137 +++++++++++++++++- crates/transaction-pool/src/blobstore/mod.rs | 2 +- 2 files changed, 134 insertions(+), 5 deletions(-) diff --git 
a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 9d02276db85..67c36a65998 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -75,10 +75,7 @@ impl BlobStore for DiskFileBlobStore { } fn cleanup(&self) -> BlobStoreCleanupStat { - let txs_to_delete = { - let mut txs_to_delete = self.inner.txs_to_delete.write(); - std::mem::take(&mut *txs_to_delete) - }; + let txs_to_delete = std::mem::take(&mut *self.inner.txs_to_delete.write()); let mut stat = BlobStoreCleanupStat::default(); let mut subsize = 0; debug!(target:"txpool::blob", num_blobs=%txs_to_delete.len(), "Removing blobs from disk"); @@ -554,4 +551,136 @@ mod tests { assert_eq!(store.data_size_hint(), Some(0)); assert_eq!(store.inner.size_tracker.num_blobs.load(Ordering::Relaxed), 0); } + + #[test] + fn disk_insert_and_retrieve() { + let (store, _dir) = tmp_store(); + + let (tx, blob) = rng_blobs(1).into_iter().next().unwrap(); + store.insert(tx, blob.clone()).unwrap(); + + assert!(store.is_cached(&tx)); + let retrieved_blob = store.get(tx).unwrap().map(Arc::unwrap_or_clone).unwrap(); + assert_eq!(retrieved_blob, blob); + } + + #[test] + fn disk_delete_blob() { + let (store, _dir) = tmp_store(); + + let (tx, blob) = rng_blobs(1).into_iter().next().unwrap(); + store.insert(tx, blob).unwrap(); + assert!(store.is_cached(&tx)); + + store.delete(tx).unwrap(); + assert!(store.inner.txs_to_delete.read().contains(&tx)); + store.cleanup(); + + let result = store.get(tx).unwrap(); + assert_eq!( + result, + Some(Arc::new(BlobTransactionSidecar { + blobs: vec![], + commitments: vec![], + proofs: vec![] + })) + ); + } + + #[test] + fn disk_insert_all_and_delete_all() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(5); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + for (tx, _) in &blobs { + assert!(store.is_cached(tx)); + } + + 
store.delete_all(txs.clone()).unwrap(); + store.cleanup(); + + for tx in txs { + let result = store.get(tx).unwrap(); + assert_eq!( + result, + Some(Arc::new(BlobTransactionSidecar { + blobs: vec![], + commitments: vec![], + proofs: vec![] + })) + ); + } + } + + #[test] + fn disk_get_all_blobs() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + let retrieved_blobs = store.get_all(txs.clone()).unwrap(); + for (tx, blob) in retrieved_blobs { + assert!(blobs.contains(&(tx, Arc::unwrap_or_clone(blob)))); + } + + store.delete_all(txs).unwrap(); + store.cleanup(); + } + + #[test] + fn disk_get_exact_blobs_success() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + let retrieved_blobs = store.get_exact(txs).unwrap(); + for (retrieved_blob, (_, original_blob)) in retrieved_blobs.into_iter().zip(blobs) { + assert_eq!(Arc::unwrap_or_clone(retrieved_blob), original_blob); + } + } + + #[test] + fn disk_get_exact_blobs_failure() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(2); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs).unwrap(); + + // Try to get a blob that was never inserted + let missing_tx = TxHash::random(); + let result = store.get_exact(vec![txs[0], missing_tx]); + assert!(result.is_err()); + } + + #[test] + fn disk_data_size_hint() { + let (store, _dir) = tmp_store(); + assert_eq!(store.data_size_hint(), Some(0)); + + let blobs = rng_blobs(2); + store.insert_all(blobs).unwrap(); + assert!(store.data_size_hint().unwrap() > 0); + } + + #[test] + fn disk_cleanup_stat() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs).unwrap(); + + store.delete_all(txs).unwrap(); + let stat 
= store.cleanup(); + assert_eq!(stat.delete_succeed, 3); + assert_eq!(stat.delete_failed, 0); + } } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index f1612bcd022..a21cea6e06c 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -152,7 +152,7 @@ impl PartialEq for BlobStoreSize { } /// Statistics for the cleanup operation. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct BlobStoreCleanupStat { /// the number of successfully deleted blobs pub delete_succeed: usize, From 2c885eee21a18e2f4ca078fe742eac7aeb1203cd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 10:46:47 +0100 Subject: [PATCH 569/970] chore: rm unused windows import (#12697) --- crates/storage/nippy-jar/src/lib.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index b1d174feb2c..98eddf22ee9 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -20,11 +20,6 @@ use std::{ ops::Range, path::{Path, PathBuf}, }; - -// Windows specific extension for std::fs -#[cfg(windows)] -use std::os::windows::prelude::OpenOptionsExt; - use tracing::*; /// Compression algorithms supported by `NippyJar`. 
From 11847b4f1e29a886b87890c1334dde5080e62469 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 20 Nov 2024 10:53:53 +0100 Subject: [PATCH 570/970] text(tx-pool): add unit tests for tx pool state (#12690) --- crates/transaction-pool/src/pool/state.rs | 76 +++++++++++++++++++---- 1 file changed, 64 insertions(+), 12 deletions(-) diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs index d0a3b10f8cb..d65fc05b03f 100644 --- a/crates/transaction-pool/src/pool/state.rs +++ b/crates/transaction-pool/src/pool/state.rs @@ -46,8 +46,6 @@ bitflags::bitflags! { } } -// === impl TxState === - impl TxState { /// The state of a transaction is considered `pending`, if the transaction has: /// - _No_ parked ancestors @@ -89,8 +87,6 @@ pub enum SubPool { Pending, } -// === impl SubPool === - impl SubPool { /// Whether this transaction is to be moved to the pending sub-pool. #[inline] @@ -126,16 +122,15 @@ impl SubPool { impl From for SubPool { fn from(value: TxState) -> Self { if value.is_pending() { - return Self::Pending - } - if value.is_blob() { + Self::Pending + } else if value.is_blob() { // all _non-pending_ blob transactions are in the blob sub-pool - return Self::Blob + Self::Blob + } else if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() { + Self::Queued + } else { + Self::BaseFee } - if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() { - return Self::Queued - } - Self::BaseFee } } @@ -204,4 +199,61 @@ mod tests { assert!(state.is_blob()); assert!(!state.is_pending()); } + + #[test] + fn test_tx_state_no_nonce_gap() { + let mut state = TxState::default(); + state |= TxState::NO_NONCE_GAPS; + assert!(!state.has_nonce_gap()); + } + + #[test] + fn test_tx_state_with_nonce_gap() { + let state = TxState::default(); + assert!(state.has_nonce_gap()); + } + + #[test] + fn test_tx_state_enough_balance() { + let mut state = TxState::default(); + 
state.insert(TxState::ENOUGH_BALANCE); + assert!(state.contains(TxState::ENOUGH_BALANCE)); + } + + #[test] + fn test_tx_state_not_too_much_gas() { + let mut state = TxState::default(); + state.insert(TxState::NOT_TOO_MUCH_GAS); + assert!(state.contains(TxState::NOT_TOO_MUCH_GAS)); + } + + #[test] + fn test_tx_state_enough_fee_cap_block() { + let mut state = TxState::default(); + state.insert(TxState::ENOUGH_FEE_CAP_BLOCK); + assert!(state.contains(TxState::ENOUGH_FEE_CAP_BLOCK)); + } + + #[test] + fn test_tx_base_fee() { + let state = TxState::BASE_FEE_POOL_BITS; + assert_eq!(SubPool::BaseFee, state.into()); + } + + #[test] + fn test_blob_transaction_only() { + let state = TxState::BLOB_TRANSACTION; + assert_eq!(SubPool::Blob, state.into()); + assert!(state.is_blob()); + assert!(!state.is_pending()); + } + + #[test] + fn test_blob_transaction_with_base_fee_bits() { + let mut state = TxState::BASE_FEE_POOL_BITS; + state.insert(TxState::BLOB_TRANSACTION); + assert_eq!(SubPool::Blob, state.into()); + assert!(state.is_blob()); + assert!(!state.is_pending()); + } } From 7b13a22698da1d9e6aab5496b09570883c813b00 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:56:12 +0700 Subject: [PATCH 571/970] perf(tx-pool): avoid copying tx cost (#12629) --- crates/transaction-pool/src/pool/txpool.rs | 6 +- .../transaction-pool/src/test_utils/mock.rs | 79 ++++++++++++------- crates/transaction-pool/src/traits.rs | 6 +- crates/transaction-pool/src/validate/eth.rs | 5 +- crates/transaction-pool/src/validate/mod.rs | 2 +- examples/network-txpool/src/main.rs | 2 +- 6 files changed, 62 insertions(+), 38 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 576672b91af..86bf5f741c3 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -657,7 +657,7 @@ impl TxPool { InsertErr::Overdraft { transaction } => 
Err(PoolError::new( *transaction.hash(), PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Overdraft { - cost: transaction.cost(), + cost: *transaction.cost(), balance: on_chain_balance, }), )), @@ -1229,7 +1229,7 @@ impl AllTransactions { tx.state.insert(TxState::NO_NONCE_GAPS); tx.state.insert(TxState::NO_PARKED_ANCESTORS); tx.cumulative_cost = U256::ZERO; - if tx.transaction.cost() > info.balance { + if tx.transaction.cost() > &info.balance { // sender lacks sufficient funds to pay for this transaction tx.state.remove(TxState::ENOUGH_BALANCE); } else { @@ -1542,7 +1542,7 @@ impl AllTransactions { } } } - } else if new_blob_tx.cost() > on_chain_balance { + } else if new_blob_tx.cost() > &on_chain_balance { // the transaction would go into overdraft return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) }) } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 56acbb107f3..72304910e15 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -59,6 +59,8 @@ macro_rules! set_value { *$field = new_value; } } + // Ensure the tx cost is always correct after each mutation. + $this.update_cost(); }; } @@ -123,6 +125,8 @@ pub enum MockTransaction { input: Bytes, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-2930 transaction type. Eip2930 { @@ -148,6 +152,8 @@ pub enum MockTransaction { access_list: AccessList, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-1559 transaction type. 
Eip1559 { @@ -175,6 +181,8 @@ pub enum MockTransaction { input: Bytes, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-4844 transaction type. Eip4844 { @@ -206,6 +214,8 @@ pub enum MockTransaction { sidecar: BlobTransactionSidecar, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, } @@ -235,6 +245,7 @@ impl MockTransaction { value: Default::default(), input: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -252,6 +263,7 @@ impl MockTransaction { gas_price: 0, access_list: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -270,6 +282,7 @@ impl MockTransaction { input: Bytes::new(), access_list: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -290,6 +303,7 @@ impl MockTransaction { access_list: Default::default(), sidecar: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -560,6 +574,19 @@ impl MockTransaction { pub const fn is_eip2930(&self) -> bool { matches!(self, Self::Eip2930 { .. }) } + + fn update_cost(&mut self) { + match self { + Self::Legacy { cost, gas_limit, gas_price, value, .. } | + Self::Eip2930 { cost, gas_limit, gas_price, value, .. } => { + *cost = U256::from(*gas_limit) * U256::from(*gas_price) + *value + } + Self::Eip1559 { cost, gas_limit, max_fee_per_gas, value, .. } | + Self::Eip4844 { cost, gas_limit, max_fee_per_gas, value, .. 
} => { + *cost = U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value + } + }; + } } impl PoolTransaction for MockTransaction { @@ -593,16 +620,16 @@ impl PoolTransaction for MockTransaction { *self.get_nonce() } - fn cost(&self) -> U256 { + // Having `get_cost` from `make_setters_getters` would be cleaner but we didn't + // want to also generate the error-prone cost setters. For now cost should be + // correct at construction and auto-updated per field update via `update_cost`, + // not to be manually set. + fn cost(&self) -> &U256 { match self { - Self::Legacy { gas_price, value, gas_limit, .. } | - Self::Eip2930 { gas_limit, gas_price, value, .. } => { - U256::from(*gas_limit) * U256::from(*gas_price) + *value - } - Self::Eip1559 { max_fee_per_gas, value, gas_limit, .. } | - Self::Eip4844 { max_fee_per_gas, value, gas_limit, .. } => { - U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value - } + Self::Legacy { cost, .. } | + Self::Eip2930 { cost, .. } | + Self::Eip1559 { cost, .. } | + Self::Eip4844 { cost, .. 
} => cost, } } @@ -783,6 +810,7 @@ impl TryFrom for MockTransaction { value, input, size, + cost: U256::from(gas_limit) * U256::from(gas_price) + value, }), Transaction::Eip2930(TxEip2930 { chain_id, @@ -805,6 +833,7 @@ impl TryFrom for MockTransaction { input, access_list, size, + cost: U256::from(gas_limit) * U256::from(gas_price) + value, }), Transaction::Eip1559(TxEip1559 { chain_id, @@ -829,6 +858,7 @@ impl TryFrom for MockTransaction { input, access_list, size, + cost: U256::from(gas_limit) * U256::from(max_fee_per_gas) + value, }), Transaction::Eip4844(TxEip4844 { chain_id, @@ -857,6 +887,7 @@ impl TryFrom for MockTransaction { access_list, sidecar: BlobTransactionSidecar::default(), size, + cost: U256::from(gas_limit) * U256::from(max_fee_per_gas) + value, }), _ => unreachable!("Invalid transaction type"), } @@ -888,28 +919,24 @@ impl From for Transaction { match mock { MockTransaction::Legacy { chain_id, - hash: _, - sender: _, nonce, gas_price, gas_limit, to, value, input, - size: _, + .. } => Self::Legacy(TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input }), MockTransaction::Eip2930 { chain_id, - hash: _, - sender: _, nonce, - to, + gas_price, gas_limit, - input, + to, value, - gas_price, access_list, - size: _, + input, + .. } => Self::Eip2930(TxEip2930 { chain_id, nonce, @@ -922,17 +949,15 @@ impl From for Transaction { }), MockTransaction::Eip1559 { chain_id, - hash: _, - sender: _, nonce, + gas_limit, max_fee_per_gas, max_priority_fee_per_gas, - gas_limit, to, value, access_list, input, - size: _, + .. } => Self::Eip1559(TxEip1559 { chain_id, nonce, @@ -946,19 +971,17 @@ impl From for Transaction { }), MockTransaction::Eip4844 { chain_id, - hash: _, - sender: _, nonce, + gas_limit, max_fee_per_gas, max_priority_fee_per_gas, - max_fee_per_blob_gas, - gas_limit, to, value, access_list, - input, sidecar, - size: _, + max_fee_per_blob_gas, + input, + .. 
} => Self::Eip4844(TxEip4844 { chain_id, nonce, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index a7e9010d693..23f28cc3fa7 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -961,7 +961,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// For legacy transactions: `gas_price * gas_limit + tx_value`. /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value + /// max_blob_fee_per_gas * blob_gas_used`. - fn cost(&self) -> U256; + fn cost(&self) -> &U256; /// Amount of gas that should be used in executing this transaction. This is paid up-front. fn gas_limit(&self) -> u64; @@ -1228,8 +1228,8 @@ impl PoolTransaction for EthPooledTransaction { /// For legacy transactions: `gas_price * gas_limit + tx_value`. /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value + /// max_blob_fee_per_gas * blob_gas_used`. - fn cost(&self) -> U256 { - self.cost + fn cost(&self) -> &U256 { + &self.cost } /// Amount of gas that should be used in executing this transaction. This is paid up-front. 
diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index d5f7101eb55..70298487694 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -384,11 +384,12 @@ where let cost = transaction.cost(); // Checks for max cost - if cost > account.balance { + if cost > &account.balance { + let expected = *cost; return TransactionValidationOutcome::Invalid( transaction, InvalidTransactionError::InsufficientFunds( - GotExpected { got: account.balance, expected: cost }.into(), + GotExpected { got: account.balance, expected }.into(), ) .into(), ) diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 8a5ecc9c419..35e3a85537e 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -312,7 +312,7 @@ impl ValidPoolTransaction { /// /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. 
- pub fn cost(&self) -> U256 { + pub fn cost(&self) -> &U256 { self.transaction.cost() } diff --git a/examples/network-txpool/src/main.rs b/examples/network-txpool/src/main.rs index 6f8d69eab02..e66185ad828 100644 --- a/examples/network-txpool/src/main.rs +++ b/examples/network-txpool/src/main.rs @@ -82,7 +82,7 @@ impl TransactionValidator for OkValidator { ) -> TransactionValidationOutcome { // Always return valid TransactionValidationOutcome::Valid { - balance: transaction.cost(), + balance: *transaction.cost(), state_nonce: transaction.nonce(), transaction: ValidTransaction::Valid(transaction), propagate: false, From b178f3a160f826b262e78081563e5dee73b3718e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 11:34:16 +0100 Subject: [PATCH 572/970] chore: add receipts to networkprimitives (#12699) --- crates/net/eth-wire-types/src/primitives.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index eab36c3b6a7..c8b62cb0a82 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -70,6 +70,18 @@ pub trait NetworkPrimitives: + PartialEq + Eq + 'static; + + /// The transaction type which peers return in `GetReceipts` messages. + type Receipt: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; } /// Primitive types used by Ethereum network. 
@@ -83,4 +95,5 @@ impl NetworkPrimitives for EthNetworkPrimitives { type Block = reth_primitives::Block; type BroadcastedTransaction = reth_primitives::TransactionSigned; type PooledTransaction = reth_primitives::PooledTransactionsElement; + type Receipt = reth_primitives::Receipt; } From 9c7536484c852cd5afe97a51d803e492a95b377c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 11:35:10 +0100 Subject: [PATCH 573/970] chore: bump op-alloy (#12696) --- Cargo.lock | 36 ++++++++++++++------------ Cargo.toml | 8 +++--- crates/optimism/payload/src/payload.rs | 2 +- 3 files changed, 24 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 19e90852175..0326a37f499 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5286,9 +5286,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.5" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff54d1d790eca1f3aedbd666162e9c42eceff90b9f9d24b352ed9c2df1e901a" +checksum = "72da577a88d35b893fae6467112651f26ef023434c196b2a0b3dc75bc853e0e4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5299,14 +5299,14 @@ dependencies = [ "derive_more 1.0.0", "serde", "serde_with", - "spin", + "thiserror 2.0.3", ] [[package]] name = "op-alloy-genesis" -version = "0.6.5" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae84fd64fbc53b3e958ea5a96d7f5633e4a111092e41c51672c2d91835c09efb" +checksum = "818180672dd14ca6642fb57942e1cbd602669f42b6e0222b7ea9bbcae065d67e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5314,13 +5314,14 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", + "thiserror 2.0.3", ] [[package]] name = "op-alloy-network" -version = "0.6.5" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71e777450ee3e9c5177e00865e9b4496472b623c50f146fc907b667c6b4ab37" +checksum 
= "12f82e805bad171ceae2af45efaecf8d0b50622cff3473e3c998ff1dd340de35" dependencies = [ "alloy-consensus", "alloy-network", @@ -5333,29 +5334,32 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.5" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e854d2d4958d0a213731560172e8455536329ee9574473ff79fa953da91eb6a" +checksum = "1803a1ac96203b8f713b1fa9b7509c46c645ca7bc22b582761a7495e999d4301" dependencies = [ + "alloc-no-stdlib", "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", "async-trait", - "derive_more 1.0.0", + "brotli", + "miniz_oxide", "op-alloy-consensus", "op-alloy-genesis", "serde", + "thiserror 2.0.3", "tracing", "unsigned-varint", ] [[package]] name = "op-alloy-rpc-types" -version = "0.6.5" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "981b7f8ab11fe85ba3c1723702f000429b8d0c16b5883c93d577895f262cbac6" +checksum = "a838c125256e02e2f9da88c51e263b02a06cda7e60382fe2551a3385b516f5bb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5372,9 +5376,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.5" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a227b16c9c5df68b112c8db9d268ebf46b3e26c744b4d59d4949575cd603a292" +checksum = "c227fcc7d81d4023363ba12406e57ebcc1c7cbb1075c38ea471ae32138d4706d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5386,6 +5390,7 @@ dependencies = [ "op-alloy-protocol", "serde", "snap", + "thiserror 2.0.3", ] [[package]] @@ -10391,9 +10396,6 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] [[package]] name = "spki" diff --git a/Cargo.toml b/Cargo.toml index 002b85f125a..f2565a1c92f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -471,10 
+471,10 @@ alloy-transport-ipc = { version = "0.6.4", default-features = false } alloy-transport-ws = { version = "0.6.4", default-features = false } # op -op-alloy-rpc-types = "0.6.5" -op-alloy-rpc-types-engine = "0.6.5" -op-alloy-network = "0.6.5" -op-alloy-consensus = "0.6.5" +op-alloy-rpc-types = "0.6.7" +op-alloy-rpc-types-engine = "0.6.7" +op-alloy-network = "0.6.7" +op-alloy-consensus = "0.6.7" # misc aquamarine = "0.6" diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 36f11ee628b..1a951abadca 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -7,7 +7,7 @@ use alloy_eips::{ use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; -use op_alloy_consensus::eip1559::{decode_holocene_extra_data, EIP1559ParamError}; +use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; /// Re-export for use in downstream arguments. 
pub use op_alloy_rpc_types_engine::OpPayloadAttributes; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; From ce4a32017a113051e6a872cbf593d5fd5d710856 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 11:48:09 +0100 Subject: [PATCH 574/970] chore: rm unused codec derive (#12700) --- crates/cli/commands/src/test_vectors/compact.rs | 4 +--- crates/optimism/storage/src/lib.rs | 4 +--- crates/primitives/src/receipt.rs | 3 --- crates/storage/db-api/src/models/mod.rs | 4 +--- 4 files changed, 3 insertions(+), 12 deletions(-) diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index c498718e9fc..5490f568d3a 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -22,8 +22,7 @@ use reth_db::{ }; use reth_fs_util as fs; use reth_primitives::{ - Account, Log, LogData, Receipt, ReceiptWithBloom, StorageEntry, Transaction, - TransactionSignedNoHash, TxType, + Account, Log, LogData, Receipt, StorageEntry, Transaction, TransactionSignedNoHash, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneMode}; use reth_stages_types::{ @@ -76,7 +75,6 @@ compact_types!( // reth-primitives Account, Receipt, - ReceiptWithBloom, // reth_codecs::alloy Authorization, GenesisAccount, diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index c3b8a71feea..391f26093ba 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -16,7 +16,7 @@ mod tests { CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }; - use reth_primitives::{Account, Receipt, ReceiptWithBloom}; + use reth_primitives::{Account, Receipt}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, @@ 
-40,7 +40,6 @@ mod tests { assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); assert_eq!(Receipt::bitflag_encoded_bytes(), 2); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); @@ -65,7 +64,6 @@ mod tests { validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); validate_bitflag_backwards_compat!(Receipt, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index f4567de421e..93c0af1d971 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -72,7 +72,6 @@ impl Receipt { } } -// todo: replace with alloy receipt impl TxReceipt for Receipt { fn status_or_post_state(&self) -> Eip658Value { self.success.into() @@ -191,8 +190,6 @@ impl From for ReceiptWithBloom { /// [`Receipt`] with calculated bloom filter. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct ReceiptWithBloom { /// Bloom filter build from logs. 
pub bloom: Bloom, diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 00787194c71..7b1cd5a1ddb 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -312,7 +312,7 @@ mod tests { fn test_ensure_backwards_compatibility() { use super::*; use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; - use reth_primitives::{Account, Receipt, ReceiptWithBloom}; + use reth_primitives::{Account, Receipt}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, @@ -333,7 +333,6 @@ mod tests { assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); assert_eq!(Receipt::bitflag_encoded_bytes(), 1); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); @@ -355,7 +354,6 @@ mod tests { validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); validate_bitflag_backwards_compat!(Receipt, UnusedBits::Zero); - validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); From 6977cf045349519d4acc7a0200cf0bf75968ce18 Mon Sep 17 00:00:00 2001 From: ftupas <35031356+ftupas@users.noreply.github.com> Date: Wed, 20 Nov 2024 11:56:44 +0100 Subject: [PATCH 575/970] feat: add `TaskSpawner` to spawn validation requests as blocking (#12543) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-builder/src/lib.rs | 2 + 
crates/rpc/rpc/src/validation.rs | 144 ++++++++++++++++++------------ 2 files changed, 91 insertions(+), 55 deletions(-) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 0d86c838d51..207bc9ec5be 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1252,6 +1252,7 @@ where Arc::new(self.consensus.clone()), self.block_executor.clone(), self.config.flashbots.clone(), + Box::new(self.executor.clone()), ) } } @@ -1416,6 +1417,7 @@ where Arc::new(self.consensus.clone()), self.block_executor.clone(), self.config.flashbots.clone(), + Box::new(self.executor.clone()), ) .into_rpc() .into(), diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index b997dec1e01..a5e29bb739f 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -5,13 +5,13 @@ use alloy_rpc_types_beacon::relay::{ BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, }; use alloy_rpc_types_engine::{ - BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, + BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, PayloadError, }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_consensus::{Consensus, PostExecutionInput}; -use reth_errors::{BlockExecutionError, ConsensusError, ProviderError, RethError}; +use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_payload_validator::ExecutionPayloadValidator; @@ -22,16 +22,16 @@ use reth_provider::{ }; use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; use reth_rpc_api::BlockSubmissionValidationApiServer; -use reth_rpc_eth_types::EthApiError; -use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use 
reth_rpc_server_types::result::internal_rpc_err; +use reth_tasks::TaskSpawner; use reth_trie::HashedPostState; use revm_primitives::{Address, B256, U256}; use serde::{Deserialize, Serialize}; use std::{collections::HashSet, sync::Arc}; -use tokio::sync::RwLock; +use tokio::sync::{oneshot, RwLock}; /// The type that implements the `validation` rpc namespace trait -#[derive(Debug, derive_more::Deref)] +#[derive(Clone, Debug, derive_more::Deref)] pub struct ValidationApi { #[deref] inner: Arc>, @@ -47,6 +47,7 @@ where consensus: Arc, executor_provider: E, config: ValidationApiConfig, + task_spawner: Box, ) -> Self { let ValidationApiConfig { disallow } = config; @@ -58,6 +59,7 @@ where executor_provider, disallow, cached_state: Default::default(), + task_spawner, }); Self { inner } @@ -338,55 +340,23 @@ where Ok(versioned_hashes) } -} - -#[async_trait] -impl BlockSubmissionValidationApiServer for ValidationApi -where - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory - + HeaderProvider - + AccountReader - + WithdrawalsProvider - + Clone - + 'static, - E: BlockExecutorProvider, -{ - async fn validate_builder_submission_v1( - &self, - _request: BuilderBlockValidationRequest, - ) -> RpcResult<()> { - Err(internal_rpc_err("unimplemented")) - } - async fn validate_builder_submission_v2( - &self, - _request: BuilderBlockValidationRequestV2, - ) -> RpcResult<()> { - Err(internal_rpc_err("unimplemented")) - } - - /// Validates a block submitted to the relay + /// Core logic for validating the builder submission v3 async fn validate_builder_submission_v3( &self, request: BuilderBlockValidationRequestV3, - ) -> RpcResult<()> { + ) -> Result<(), ValidationApiError> { let block = self .payload_validator .ensure_well_formed_payload( ExecutionPayload::V3(request.request.execution_payload), ExecutionPayloadSidecar::v3(CancunPayloadFields { parent_beacon_block_root: request.parent_beacon_block_root, - versioned_hashes: self - 
.validate_blobs_bundle(request.request.blobs_bundle) - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result()?, + versioned_hashes: self.validate_blobs_bundle(request.request.blobs_bundle)?, }), - ) - .to_rpc_result()? + )? .try_seal_with_senders() - .map_err(|_| EthApiError::InvalidTransactionSignature)?; + .map_err(|_| ValidationApiError::InvalidTransactionSignature)?; self.validate_message_against_block( block, @@ -394,15 +364,13 @@ where request.registered_gas_limit, ) .await - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result() } - /// Validates a block submitted to the relay + /// Core logic for validating the builder submission v4 async fn validate_builder_submission_v4( &self, request: BuilderBlockValidationRequestV4, - ) -> RpcResult<()> { + ) -> Result<(), ValidationApiError> { let block = self .payload_validator .ensure_well_formed_payload( @@ -411,16 +379,13 @@ where CancunPayloadFields { parent_beacon_block_root: request.parent_beacon_block_root, versioned_hashes: self - .validate_blobs_bundle(request.request.blobs_bundle) - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result()?, + .validate_blobs_bundle(request.request.blobs_bundle)?, }, request.request.execution_requests.into(), ), - ) - .to_rpc_result()? + )? 
.try_seal_with_senders() - .map_err(|_| EthApiError::InvalidTransactionSignature)?; + .map_err(|_| ValidationApiError::InvalidTransactionSignature)?; self.validate_message_against_block( block, @@ -428,8 +393,70 @@ where request.registered_gas_limit, ) .await - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result() + } +} + +#[async_trait] +impl BlockSubmissionValidationApiServer for ValidationApi +where + Provider: BlockReaderIdExt + + ChainSpecProvider + + StateProviderFactory + + HeaderProvider + + AccountReader + + WithdrawalsProvider + + Clone + + 'static, + E: BlockExecutorProvider, +{ + async fn validate_builder_submission_v1( + &self, + _request: BuilderBlockValidationRequest, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + async fn validate_builder_submission_v2( + &self, + _request: BuilderBlockValidationRequestV2, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> RpcResult<()> { + let this = self.clone(); + let (tx, rx) = oneshot::channel(); + + self.task_spawner.spawn_blocking(Box::pin(async move { + let result = Self::validate_builder_submission_v3(&this, request) + .await + .map_err(|err| internal_rpc_err(err.to_string())); + let _ = tx.send(result); + })); + + rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> RpcResult<()> { + let this = self.clone(); + let (tx, rx) = oneshot::channel(); + + self.task_spawner.spawn_blocking(Box::pin(async move { + let result = Self::validate_builder_submission_v4(&this, request) + .await + .map_err(|err| internal_rpc_err(err.to_string())); + let _ = tx.send(result); + })); + + rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? 
} } @@ -450,6 +477,8 @@ pub struct ValidationApiInner { /// latest head block state. Uses async `RwLock` to safely handle concurrent validation /// requests. cached_state: RwLock<(B256, CachedReads)>, + /// Task spawner for blocking operations + task_spawner: Box, } /// Configuration for validation API. @@ -476,6 +505,9 @@ pub enum ValidationApiError { ProposerPayment, #[error("invalid blobs bundle")] InvalidBlobsBundle, + /// When the transaction signature is invalid + #[error("invalid transaction signature")] + InvalidTransactionSignature, #[error("block accesses blacklisted address: {_0}")] Blacklist(Address), #[error(transparent)] @@ -486,4 +518,6 @@ pub enum ValidationApiError { Provider(#[from] ProviderError), #[error(transparent)] Execution(#[from] BlockExecutionError), + #[error(transparent)] + Payload(#[from] PayloadError), } From 868f3acdbcd4e7df4c351eff38fd644c1f285ba3 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 20 Nov 2024 15:07:24 +0400 Subject: [PATCH 576/970] feat: integrate `HeaderValidator` + make `FileClient` generic over block (#12681) --- bin/reth/src/commands/debug_cmd/execution.rs | 2 +- .../consensus/beacon/src/engine/test_utils.rs | 2 +- crates/consensus/consensus/src/lib.rs | 23 +++++- crates/net/downloaders/src/bodies/bodies.rs | 3 +- crates/net/downloaders/src/bodies/task.rs | 7 +- crates/net/downloaders/src/file_client.rs | 76 ++++++++++--------- crates/net/downloaders/src/file_codec.rs | 20 +++-- .../src/headers/reverse_headers.rs | 6 +- crates/net/downloaders/src/headers/task.rs | 7 +- crates/net/downloaders/src/test_utils/mod.rs | 2 +- crates/net/p2p/src/bodies/downloader.rs | 4 +- crates/net/p2p/src/headers/client.rs | 3 +- crates/net/p2p/src/headers/downloader.rs | 4 +- crates/node/builder/src/setup.rs | 4 +- crates/primitives-traits/src/block/body.rs | 5 +- crates/primitives-traits/src/block/mod.rs | 14 +++- crates/stages/stages/src/lib.rs | 2 +- crates/stages/stages/src/sets.rs | 40 +++++++--- 
crates/stages/stages/src/stages/headers.rs | 8 +- 19 files changed, 143 insertions(+), 89 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index a6203ea2a73..0210142be71 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -73,7 +73,7 @@ impl> Command { { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(client.clone(), Arc::clone(&consensus)) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 64daba2b453..0ebef1efe6e 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -370,7 +370,7 @@ where .with_tip_sender(tip_tx), TestPipelineConfig::Real => { let header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(client.clone(), consensus.clone()) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task(); let body_downloader = BodiesDownloaderBuilder::default() diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index e059305911f..da90439af7f 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -11,7 +11,7 @@ extern crate alloc; -use alloc::{fmt::Debug, vec::Vec}; +use alloc::{fmt::Debug, sync::Arc, vec::Vec}; use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; @@ -46,7 +46,9 @@ impl<'a> PostExecutionInput<'a> { /// Consensus is a protocol that chooses canonical chain. 
#[auto_impl::auto_impl(&, Arc)] -pub trait Consensus: HeaderValidator + Debug + Send + Sync { +pub trait Consensus: + AsHeaderValidator + HeaderValidator + Debug + Send + Sync +{ /// Ensures that body field values match the header. fn validate_body_against_header( &self, @@ -143,6 +145,23 @@ pub trait HeaderValidator: Debug + Send + Sync { ) -> Result<(), ConsensusError>; } +/// Helper trait to cast `Arc` to `Arc` +pub trait AsHeaderValidator: HeaderValidator { + /// Converts the [`Arc`] of self to [`Arc`] of [`HeaderValidator`] + fn as_header_validator<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a; +} + +impl, H> AsHeaderValidator for T { + fn as_header_validator<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a, + { + self + } +} + /// Consensus Errors #[derive(Debug, PartialEq, Eq, Clone, derive_more::Display, derive_more::Error)] pub enum ConsensusError { diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index bebc51ad772..82f45dd23bf 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -20,6 +20,7 @@ use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::Ordering, collections::BinaryHeap, + fmt::Debug, mem, ops::RangeInclusive, pin::Pin, @@ -298,7 +299,7 @@ where impl BodyDownloader for BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { type Body = B::Body; diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index de1638f3e66..a2b63c8ed18 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -8,6 +8,7 @@ use reth_network_p2p::{ }; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ + fmt::Debug, future::Future, ops::RangeInclusive, pin::Pin, @@ -47,10 +48,10 @@ impl TaskDownloader { /// use reth_network_p2p::bodies::client::BodiesClient; /// use 
reth_primitives_traits::InMemorySize; /// use reth_storage_api::HeaderProvider; - /// use std::sync::Arc; + /// use std::{fmt::Debug, sync::Arc}; /// /// fn t< - /// B: BodiesClient + 'static, + /// B: BodiesClient + 'static, /// Provider: HeaderProvider + Unpin + 'static, /// >( /// client: Arc, @@ -90,7 +91,7 @@ impl TaskDownloader { } } -impl BodyDownloader for TaskDownloader { +impl BodyDownloader for TaskDownloader { type Body = B; fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()> { diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 486d4a05127..ff352bc2304 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -1,8 +1,8 @@ use std::{collections::HashMap, io, path::Path}; -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockHash, BlockNumber, B256}; +use alloy_primitives::{BlockHash, BlockNumber, Sealable, B256}; use futures::Future; use itertools::Either; use reth_network_p2p::{ @@ -13,7 +13,8 @@ use reth_network_p2p::{ priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, SealedHeader}; +use reth_primitives::SealedHeader; +use reth_primitives_traits::{Block, BlockBody, FullBlock}; use thiserror::Error; use tokio::{fs::File, io::AsyncReadExt}; use tokio_stream::StreamExt; @@ -40,15 +41,15 @@ pub const DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE: u64 = 1_000_000_000; /// /// This reads the entire file into memory, so it is not suitable for large files. #[derive(Debug)] -pub struct FileClient { +pub struct FileClient { /// The buffered headers retrieved when fetching new bodies. - headers: HashMap, + headers: HashMap, /// A mapping between block hash and number. hash_to_number: HashMap, /// The buffered bodies retrieved when fetching new headers. 
- bodies: HashMap, + bodies: HashMap, } /// An error that can occur when constructing and using a [`FileClient`]. @@ -73,7 +74,7 @@ impl From<&'static str> for FileClientError { } } -impl FileClient { +impl FileClient { /// Create a new file client from a file path. pub async fn new>(path: P) -> Result { let file = File::open(path).await?; @@ -114,7 +115,7 @@ impl FileClient { /// Clones and returns the highest header of this client has or `None` if empty. Seals header /// before returning. - pub fn tip_header(&self) -> Option { + pub fn tip_header(&self) -> Option> { self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal(h.clone())) } @@ -137,13 +138,13 @@ impl FileClient { } /// Use the provided bodies as the file client's block body buffer. - pub fn with_bodies(mut self, bodies: HashMap) -> Self { + pub fn with_bodies(mut self, bodies: HashMap) -> Self { self.bodies = bodies; self } /// Use the provided headers as the file client's block body buffer. - pub fn with_headers(mut self, headers: HashMap) -> Self { + pub fn with_headers(mut self, headers: HashMap) -> Self { self.headers = headers; for (number, header) in &self.headers { self.hash_to_number.insert(header.hash_slow(), *number); @@ -162,14 +163,14 @@ impl FileClient { } /// Returns an iterator over headers in the client. - pub fn headers_iter(&self) -> impl Iterator { + pub fn headers_iter(&self) -> impl Iterator { self.headers.values() } /// Returns a mutable iterator over bodies in the client. /// /// Panics, if file client headers and bodies are not mapping 1-1. - pub fn bodies_iter_mut(&mut self) -> impl Iterator { + pub fn bodies_iter_mut(&mut self) -> impl Iterator { let bodies = &mut self.bodies; let numbers = &self.hash_to_number; bodies.iter_mut().map(|(hash, body)| (numbers[hash], body)) @@ -177,27 +178,28 @@ impl FileClient { /// Returns the current number of transactions in the client. 
pub fn total_transactions(&self) -> usize { - self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions.len()) + self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions().len()) } } -impl FromReader for FileClient { +impl FromReader for FileClient { type Error = FileClientError; /// Initialize the [`FileClient`] from bytes that have been read from file. - fn from_reader( - reader: B, + fn from_reader( + reader: R, num_bytes: u64, ) -> impl Future, Self::Error>> where - B: AsyncReadExt + Unpin, + R: AsyncReadExt + Unpin, { let mut headers = HashMap::default(); let mut hash_to_number = HashMap::default(); let mut bodies = HashMap::default(); // use with_capacity to make sure the internal buffer contains the entire chunk - let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize); + let mut stream = + FramedRead::with_capacity(reader, BlockFileCodec::::default(), num_bytes as usize); trace!(target: "downloaders::file", target_num_bytes=num_bytes, @@ -225,13 +227,13 @@ impl FromReader for FileClient { } Err(err) => return Err(err), }; - let block_number = block.header.number; - let block_hash = block.header.hash_slow(); + let block_number = block.header().number(); + let block_hash = block.header().hash_slow(); // add to the internal maps - headers.insert(block.header.number, block.header.clone()); - hash_to_number.insert(block_hash, block.header.number); - bodies.insert(block_hash, block.into()); + headers.insert(block.header().number(), block.header().clone()); + hash_to_number.insert(block_hash, block.header().number()); + bodies.insert(block_hash, block.body().clone()); if log_interval == 0 { trace!(target: "downloaders::file", @@ -260,9 +262,9 @@ impl FromReader for FileClient { } } -impl HeadersClient for FileClient { - type Header = Header; - type Output = HeadersFut; +impl HeadersClient for FileClient { + type Header = B::Header; + type Output = HeadersFut; fn get_headers_with_priority( &self, @@ -311,9 +313,9 
@@ impl HeadersClient for FileClient { } } -impl BodiesClient for FileClient { - type Body = BlockBody; - type Output = BodiesFut; +impl BodiesClient for FileClient { + type Body = B::Body; + type Output = BodiesFut; fn get_block_bodies_with_priority( &self, @@ -336,7 +338,7 @@ impl BodiesClient for FileClient { } } -impl DownloadClient for FileClient { +impl DownloadClient for FileClient { fn report_bad_message(&self, _peer_id: PeerId) { warn!("Reported a bad message on a file client, the file may be corrupted or invalid"); // noop @@ -542,7 +544,7 @@ mod tests { // create an empty file let file = tempfile::tempfile().unwrap(); - let client = + let client: Arc = Arc::new(FileClient::from_file(file.into()).await.unwrap().with_bodies(bodies.clone())); let mut downloader = BodiesDownloaderBuilder::default().build( client.clone(), @@ -567,14 +569,14 @@ mod tests { let p0 = child_header(&p1); let file = tempfile::tempfile().unwrap(); - let client = Arc::new(FileClient::from_file(file.into()).await.unwrap().with_headers( - HashMap::from([ + let client: Arc = Arc::new( + FileClient::from_file(file.into()).await.unwrap().with_headers(HashMap::from([ (0u64, p0.clone().unseal()), (1, p1.clone().unseal()), (2, p2.clone().unseal()), (3, p3.clone().unseal()), - ]), - )); + ])), + ); let mut downloader = ReverseHeadersDownloaderBuilder::default() .stream_batch_size(3) @@ -596,7 +598,7 @@ mod tests { // Generate some random blocks let (file, headers, _) = generate_bodies_file(0..=19).await; // now try to read them back - let client = Arc::new(FileClient::from_file(file).await.unwrap()); + let client: Arc = Arc::new(FileClient::from_file(file).await.unwrap()); // construct headers downloader and use first header let mut header_downloader = ReverseHeadersDownloaderBuilder::default() @@ -621,7 +623,7 @@ mod tests { let (file, headers, mut bodies) = generate_bodies_file(0..=19).await; // now try to read them back - let client = Arc::new(FileClient::from_file(file).await.unwrap()); + 
let client: Arc = Arc::new(FileClient::from_file(file).await.unwrap()); // insert headers in db for the bodies downloader insert_headers(factory.db_ref().db(), &headers); diff --git a/crates/net/downloaders/src/file_codec.rs b/crates/net/downloaders/src/file_codec.rs index 3e754f9cf49..57a15b6c888 100644 --- a/crates/net/downloaders/src/file_codec.rs +++ b/crates/net/downloaders/src/file_codec.rs @@ -3,7 +3,6 @@ use crate::file_client::FileClientError; use alloy_primitives::bytes::{Buf, BytesMut}; use alloy_rlp::{Decodable, Encodable}; -use reth_primitives::Block; use tokio_util::codec::{Decoder, Encoder}; /// Codec for reading raw block bodies from a file. @@ -19,10 +18,16 @@ use tokio_util::codec::{Decoder, Encoder}; /// /// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set /// the capacity of the framed reader to the size of the file. -pub(crate) struct BlockFileCodec; +pub(crate) struct BlockFileCodec(std::marker::PhantomData); -impl Decoder for BlockFileCodec { - type Item = Block; +impl Default for BlockFileCodec { + fn default() -> Self { + Self(std::marker::PhantomData) + } +} + +impl Decoder for BlockFileCodec { + type Item = B; type Error = FileClientError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -31,18 +36,17 @@ impl Decoder for BlockFileCodec { } let buf_slice = &mut src.as_ref(); - let body = - Block::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; + let body = B::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; src.advance(src.len() - buf_slice.len()); Ok(Some(body)) } } -impl Encoder for BlockFileCodec { +impl Encoder for BlockFileCodec { type Error = FileClientError; - fn encode(&mut self, item: Block, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: B, dst: &mut BytesMut) -> Result<(), Self::Error> { item.encode(dst); Ok(()) } diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs 
b/crates/net/downloaders/src/headers/reverse_headers.rs index 2d79e0a7af6..63a20ff27f5 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -9,7 +9,7 @@ use futures::{stream::Stream, FutureExt}; use futures_util::{stream::FuturesUnordered, StreamExt}; use rayon::prelude::*; use reth_config::config::HeadersConfig; -use reth_consensus::{Consensus, HeaderValidator}; +use reth_consensus::HeaderValidator; use reth_network_p2p::{ error::{DownloadError, DownloadResult, PeerRequestResult}, headers::{ @@ -68,7 +68,7 @@ impl From for ReverseHeadersDownloaderError { #[derive(Debug)] pub struct ReverseHeadersDownloader { /// Consensus client used to validate headers - consensus: Arc>, + consensus: Arc>, /// Client used to download headers. client: Arc, /// The local head of the chain. @@ -1165,7 +1165,7 @@ impl ReverseHeadersDownloaderBuilder { pub fn build( self, client: H, - consensus: Arc>, + consensus: Arc>, ) -> ReverseHeadersDownloader where H: HeadersClient + 'static, diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index 81c4cd80da3..3dbfd5e3615 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -8,6 +8,7 @@ use reth_network_p2p::headers::{ use reth_primitives::SealedHeader; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ + fmt::Debug, future::Future, pin::Pin, task::{ready, Context, Poll}, @@ -44,10 +45,10 @@ impl TaskDownloader { /// # use std::sync::Arc; /// # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloader; /// # use reth_downloaders::headers::task::TaskDownloader; - /// # use reth_consensus::Consensus; + /// # use reth_consensus::HeaderValidator; /// # use reth_network_p2p::headers::client::HeadersClient; /// # use reth_primitives_traits::BlockHeader; - /// # fn t + 'static>(consensus:Arc>, client: Arc) { + /// # fn t + 'static>(consensus:Arc>, 
client: Arc) { /// let downloader = ReverseHeadersDownloader::::builder().build( /// client, /// consensus @@ -82,7 +83,7 @@ impl TaskDownloader { } } -impl HeaderDownloader for TaskDownloader { +impl HeaderDownloader for TaskDownloader { type Header = H; fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index 7755c5e6017..635383ce3f3 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -43,7 +43,7 @@ pub(crate) async fn generate_bodies_file( let raw_block_bodies = create_raw_bodies(headers.iter().cloned(), &mut bodies.clone()); let file: File = tempfile::tempfile().unwrap().into(); - let mut writer = FramedWrite::new(file, BlockFileCodec); + let mut writer = FramedWrite::new(file, BlockFileCodec::default()); // rlp encode one after the other for block in raw_block_bodies { diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index f335b21438b..7008c08e522 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -2,7 +2,7 @@ use super::response::BlockResponse; use crate::error::DownloadResult; use alloy_primitives::BlockNumber; use futures::Stream; -use std::ops::RangeInclusive; +use std::{fmt::Debug, ops::RangeInclusive}; /// Body downloader return type. pub type BodyDownloaderResult = DownloadResult>>; @@ -16,7 +16,7 @@ pub trait BodyDownloader: Send + Sync + Stream> + Unpin { /// The type of the body that is being downloaded. - type Body: Send + Sync + Unpin + 'static; + type Body: Debug + Send + Sync + Unpin + 'static; /// Method for setting the download range. 
fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()>; diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index 3e8f9296e07..4be6208c4a2 100644 --- a/crates/net/p2p/src/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -50,7 +50,8 @@ impl HeadersRequest { } /// The headers future type -pub type HeadersFut = Pin>> + Send + Sync>>; +pub type HeadersFut = + Pin>> + Send + Sync>>; /// The block headers downloader client #[auto_impl::auto_impl(&, Arc, Box)] diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index 03ab467bafb..eca03bdb4e7 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -7,6 +7,8 @@ use futures::Stream; use reth_consensus::HeaderValidator; use reth_primitives::SealedHeader; use reth_primitives_traits::BlockWithParent; +use std::fmt::Debug; + /// A downloader capable of fetching and yielding block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block headers, @@ -21,7 +23,7 @@ pub trait HeaderDownloader: + Unpin { /// The header type being downloaded. 
- type Header: Send + Sync + Unpin + 'static; + type Header: Debug + Send + Sync + Unpin + 'static; /// Updates the gap to sync which ranges from local head to the sync target /// diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 337e37eeedd..400e3d84456 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -27,7 +27,7 @@ use tokio::sync::watch; pub fn build_networked_pipeline( config: &StageConfig, client: Client, - consensus: Arc, + consensus: Arc>, provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, @@ -46,7 +46,7 @@ where { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) - .build(client.clone(), Arc::clone(&consensus)) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::new(config.bodies) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 66c9c2d2e3a..11c4dd785dd 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -3,14 +3,13 @@ use alloc::fmt; use alloy_consensus::Transaction; -use reth_codecs::Compact; use crate::{FullSignedTx, InMemorySize, MaybeSerde}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. -pub trait FullBlockBody: BlockBody + Compact {} +pub trait FullBlockBody: BlockBody {} -impl FullBlockBody for T where T: BlockBody + Compact {} +impl FullBlockBody for T where T: BlockBody {} /// Abstraction for block's body. 
#[auto_impl::auto_impl(&, Arc)] diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 67658c39e07..01ed75bd967 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -5,14 +5,20 @@ pub mod header; use alloc::fmt; -use reth_codecs::Compact; +use alloy_rlp::{Decodable, Encodable}; -use crate::{BlockHeader, FullBlockHeader, InMemorySize, MaybeSerde}; +use crate::{BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeSerde}; /// Helper trait that unifies all behaviour required by block to support full node operations. -pub trait FullBlock: Block {} +pub trait FullBlock: + Block + Encodable + Decodable +{ +} -impl FullBlock for T where T: Block {} +impl FullBlock for T where + T: Block + Encodable + Decodable +{ +} /// Abstraction of block data type. // todo: make sealable super-trait, depends on diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs index 38a0f209dbd..ce6a96cf349 100644 --- a/crates/stages/stages/src/lib.rs +++ b/crates/stages/stages/src/lib.rs @@ -37,7 +37,7 @@ //! # let consensus: Arc = Arc::new(TestConsensus::default()); //! # let headers_downloader = ReverseHeadersDownloaderBuilder::default().build( //! # Arc::new(TestHeadersClient::default()), -//! # consensus.clone() +//! # consensus.clone().as_header_validator() //! # ); //! # let provider_factory = create_test_provider_factory(); //! 
# let bodies_downloader = BodiesDownloaderBuilder::default().build( diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index a25fcd4e1e5..d04a96470a0 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -76,7 +76,11 @@ use tokio::sync::watch; /// - [`PruneStage`] (execute) /// - [`FinishStage`] #[derive(Debug)] -pub struct DefaultStages { +pub struct DefaultStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Configuration for the online stages online: OnlineStages, /// Executor factory needs for execution stage @@ -87,13 +91,17 @@ pub struct DefaultStages { prune_modes: PruneModes, } -impl DefaultStages { +impl DefaultStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Create a new set of default stages with default values. #[allow(clippy::too_many_arguments)] pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, header_downloader: H, body_downloader: B, executor_factory: E, @@ -122,6 +130,8 @@ impl DefaultStages { impl DefaultStages where E: BlockExecutorProvider, + H: HeaderDownloader, + B: BodyDownloader, { /// Appends the default offline stages and default finish stage to the given builder. pub fn add_offline_stages( @@ -164,13 +174,17 @@ where /// These stages *can* be run without network access if the specified downloaders are /// themselves offline. #[derive(Debug)] -pub struct OnlineStages { +pub struct OnlineStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Sync gap provider for the headers stage. provider: Provider, /// The tip for the headers stage. tip: watch::Receiver, /// The consensus engine used to validate incoming data. 
- consensus: Arc, + consensus: Arc>, /// The block header downloader header_downloader: H, /// The block body downloader @@ -179,12 +193,16 @@ pub struct OnlineStages { stages_config: StageConfig, } -impl OnlineStages { +impl OnlineStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Create a new set of online stages with default values. pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, header_downloader: H, body_downloader: B, stages_config: StageConfig, @@ -196,7 +214,7 @@ impl OnlineStages { impl OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader + 'static, + H: HeaderDownloader

+ 'static, B: BodyDownloader + 'static, { /// Create a new builder using the given headers stage. @@ -229,7 +247,7 @@ where provider, header_downloader, tip, - consensus.clone(), + consensus.clone().as_header_validator(), stages_config.etl, )) .add_stage(bodies) @@ -239,7 +257,7 @@ where impl StageSet for OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader + 'static, + H: HeaderDownloader
+ 'static, B: BodyDownloader + 'static, HeaderStage: Stage, BodyStage: Stage, @@ -250,7 +268,7 @@ where self.provider, self.header_downloader, self.tip, - self.consensus.clone(), + self.consensus.clone().as_header_validator(), self.stages_config.etl.clone(), )) .add_stage(BodyStage::new(self.body_downloader)) diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 1ec55f7fd80..100fe4e979a 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -1,7 +1,7 @@ use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; -use reth_consensus::Consensus; +use reth_consensus::HeaderValidator; use reth_db::{tables, transaction::DbTx, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, @@ -48,7 +48,7 @@ pub struct HeaderStage { /// The tip for the stage. tip: watch::Receiver, /// Consensus client implementation - consensus: Arc, + consensus: Arc>, /// Current sync gap. sync_gap: Option, /// ETL collector with `HeaderHash` -> `BlockNumber` @@ -63,14 +63,14 @@ pub struct HeaderStage { impl HeaderStage where - Downloader: HeaderDownloader, + Downloader: HeaderDownloader
, { /// Create a new header stage pub fn new( database: Provider, downloader: Downloader, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, etl_config: EtlConfig, ) -> Self { Self { From f12d7a92647dd2fd60286104c3b73cc63f40e206 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 12:15:48 +0100 Subject: [PATCH 577/970] chore: use ethereum-forks types directly (#12702) --- Cargo.lock | 1 + crates/net/network/Cargo.toml | 1 + crates/net/network/src/builder.rs | 9 ++- crates/net/network/src/cache.rs | 3 +- crates/net/network/src/config.rs | 18 +++--- crates/net/network/src/discovery.rs | 24 ++++--- crates/net/network/src/error.rs | 6 +- crates/net/network/src/eth_requests.rs | 20 +++--- crates/net/network/src/fetch/client.rs | 12 ++-- crates/net/network/src/fetch/mod.rs | 20 +++--- crates/net/network/src/flattened_response.rs | 5 +- crates/net/network/src/import.rs | 6 +- crates/net/network/src/listener.rs | 3 +- crates/net/network/src/manager.rs | 62 +++++++++---------- crates/net/network/src/message.rs | 9 ++- crates/net/network/src/network.rs | 25 ++++---- crates/net/network/src/peers.rs | 28 ++++----- crates/net/network/src/protocol.rs | 13 ++-- crates/net/network/src/session/active.rs | 17 +++-- crates/net/network/src/session/conn.rs | 9 ++- crates/net/network/src/session/counter.rs | 3 +- crates/net/network/src/session/handle.rs | 14 ++--- crates/net/network/src/session/mod.rs | 15 +++-- crates/net/network/src/state.rs | 40 ++++++------ crates/net/network/src/swarm.rs | 30 +++++---- crates/net/network/src/test_utils/init.rs | 3 +- crates/net/network/src/test_utils/testnet.rs | 30 +++++---- crates/net/network/src/transactions/config.rs | 3 +- .../net/network/src/transactions/fetcher.rs | 36 +++++------ crates/net/network/src/transactions/mod.rs | 43 ++++++------- .../network/src/transactions/validation.rs | 3 +- 31 files changed, 234 insertions(+), 277 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0326a37f499..09bcccf652e 100644 
--- a/Cargo.lock +++ b/Cargo.lock @@ -7802,6 +7802,7 @@ dependencies = [ "reth-ecies", "reth-eth-wire", "reth-eth-wire-types", + "reth-ethereum-forks", "reth-fs-util", "reth-metrics", "reth-net-banlist", diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index ad8e65dffc6..ab9e89c2ca8 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -23,6 +23,7 @@ reth-network-p2p.workspace = true reth-discv4.workspace = true reth-discv5.workspace = true reth-dns-discovery.workspace = true +reth-ethereum-forks.workspace = true reth-eth-wire.workspace = true reth-eth-wire-types.workspace = true reth-ecies.workspace = true diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index 31038906b25..da003a2e290 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -1,15 +1,14 @@ //! Builder support for configuring the entire setup. -use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; -use reth_network_api::test_utils::PeersHandleProvider; -use reth_transaction_pool::TransactionPool; -use tokio::sync::mpsc; - use crate::{ eth_requests::EthRequestHandler, transactions::{TransactionsManager, TransactionsManagerConfig}, NetworkHandle, NetworkManager, }; +use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; +use reth_network_api::test_utils::PeersHandleProvider; +use reth_transaction_pool::TransactionPool; +use tokio::sync::mpsc; /// We set the max channel capacity of the `EthRequestHandler` to 256 /// 256 requests with malicious 10MB body requests is 2.6GB which can be absorbed by the node. diff --git a/crates/net/network/src/cache.rs b/crates/net/network/src/cache.rs index 758b4916790..32389ec4b7b 100644 --- a/crates/net/network/src/cache.rs +++ b/crates/net/network/src/cache.rs @@ -1,11 +1,10 @@ //! 
Network cache support use core::hash::BuildHasher; -use std::{fmt, hash::Hash}; - use derive_more::{Deref, DerefMut}; use itertools::Itertools; use schnellru::{ByLength, Limiter, RandomState, Unlimited}; +use std::{fmt, hash::Hash}; /// A minimal LRU cache based on a [`LruMap`](schnellru::LruMap) with limited capacity. /// diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index db7b384c2b3..e54000895a7 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -1,7 +1,11 @@ //! Network config support -use std::{collections::HashSet, net::SocketAddr, sync::Arc}; - +use crate::{ + error::NetworkError, + import::{BlockImport, ProofOfStakeBlockImport}, + transactions::TransactionsManagerConfig, + NetworkHandle, NetworkManager, +}; use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::NetworkStackId; @@ -9,19 +13,13 @@ use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{ EthNetworkPrimitives, HelloMessage, HelloMessageWithProtocols, NetworkPrimitives, Status, }; +use reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::{mainnet_nodes, pk2id, sepolia_nodes, PeerId, TrustedPeer}; use reth_network_types::{PeersConfig, SessionsConfig}; -use reth_primitives::{ForkFilter, Head}; use reth_storage_api::{noop::NoopBlockReader, BlockNumReader, BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; - -use crate::{ - error::NetworkError, - import::{BlockImport, ProofOfStakeBlockImport}, - transactions::TransactionsManagerConfig, - NetworkHandle, NetworkManager, -}; +use std::{collections::HashSet, net::SocketAddr, sync::Arc}; // re-export for convenience use crate::protocol::{IntoRlpxSubProtocol, RlpxSubProtocols}; diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 
5b2bb788f47..c0b9ffa7630 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -1,13 +1,9 @@ //! Discovery support for the network. -use std::{ - collections::VecDeque, - net::{IpAddr, SocketAddr}, - pin::Pin, - sync::Arc, - task::{ready, Context, Poll}, +use crate::{ + cache::LruMap, + error::{NetworkError, ServiceKind}, }; - use enr::Enr; use futures::StreamExt; use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config}; @@ -15,20 +11,22 @@ use reth_discv5::{DiscoveredPeer, Discv5}; use reth_dns_discovery::{ DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver, }; +use reth_ethereum_forks::{EnrForkIdEntry, ForkId}; use reth_network_api::{DiscoveredEvent, DiscoveryEvent}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::PeerAddr; -use reth_primitives::{EnrForkIdEntry, ForkId}; use secp256k1::SecretKey; +use std::{ + collections::VecDeque, + net::{IpAddr, SocketAddr}, + pin::Pin, + sync::Arc, + task::{ready, Context, Poll}, +}; use tokio::{sync::mpsc, task::JoinHandle}; use tokio_stream::{wrappers::ReceiverStream, Stream}; use tracing::trace; -use crate::{ - cache::LruMap, - error::{NetworkError, ServiceKind}, -}; - /// Default max capacity for cache of discovered peers. /// /// Default is 10 000 peers. diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 2709c4a2907..8156392b22f 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -1,7 +1,6 @@ //! Possible errors when interacting with the network. 
-use std::{fmt, io, io::ErrorKind, net::SocketAddr}; - +use crate::session::PendingSessionHandshakeError; use reth_dns_discovery::resolver::ResolveError; use reth_ecies::ECIESErrorImpl; use reth_eth_wire::{ @@ -9,8 +8,7 @@ use reth_eth_wire::{ DisconnectReason, }; use reth_network_types::BackoffKind; - -use crate::session::PendingSessionHandshakeError; +use std::{fmt, io, io::ErrorKind, net::SocketAddr}; /// Service kind. #[derive(Debug, PartialEq, Eq, Copy, Clone)] diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 8121b9675ed..0f9348a42ce 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -1,12 +1,9 @@ //! Blocks/Headers management for the p2p network. -use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, - time::Duration, +use crate::{ + budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, + metrics::EthRequestHandlerMetrics, }; - use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_rlp::Encodable; @@ -20,14 +17,15 @@ use reth_network_p2p::error::RequestResult; use reth_network_peers::PeerId; use reth_primitives::BlockBody; use reth_storage_api::{BlockReader, HeaderProvider, ReceiptProvider}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use tokio::sync::{mpsc::Receiver, oneshot}; use tokio_stream::wrappers::ReceiverStream; -use crate::{ - budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, - metrics::EthRequestHandlerMetrics, -}; - // Limits: /// Maximum number of receipts to serve. diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index 584c079b8d8..e24ea167f5f 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -1,10 +1,6 @@ //! A client implementation that can interact with the network and download data. 
-use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, -}; - +use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; use alloy_primitives::B256; use futures::{future, future::Either}; use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; @@ -18,10 +14,12 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; - #[cfg_attr(doc, aquamarine::aquamarine)] /// Front-end API for fetching data from the network. /// diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 8af6300b705..c5474587adf 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -4,15 +4,7 @@ mod client; pub use client::FetchClient; -use std::{ - collections::{HashMap, VecDeque}, - sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, - }, - task::{Context, Poll}, -}; - +use crate::message::BlockRequest; use alloy_primitives::B256; use futures::StreamExt; use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; @@ -24,11 +16,17 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; +use std::{ + collections::{HashMap, VecDeque}, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, +}; use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::message::BlockRequest; - type InflightHeadersRequest = Request>>; type InflightBodiesRequest = Request, PeerRequestResult>>; diff --git a/crates/net/network/src/flattened_response.rs b/crates/net/network/src/flattened_response.rs index 78c3c35f598..df2a9db78ae 100644 --- a/crates/net/network/src/flattened_response.rs 
+++ b/crates/net/network/src/flattened_response.rs @@ -1,10 +1,9 @@ +use futures::Future; +use pin_project::pin_project; use std::{ pin::Pin, task::{Context, Poll}, }; - -use futures::Future; -use pin_project::pin_project; use tokio::sync::oneshot::{error::RecvError, Receiver}; /// Flatten a [Receiver] message in order to get rid of the [RecvError] result diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index 749b3c347b3..f63bf2dd7a8 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -1,10 +1,8 @@ //! This module provides an abstraction over block import in the form of the `BlockImport` trait. -use std::task::{Context, Poll}; - -use reth_network_peers::PeerId; - use crate::message::NewBlockMessage; +use reth_network_peers::PeerId; +use std::task::{Context, Poll}; /// Abstraction over block import. pub trait BlockImport: std::fmt::Debug + Send + Sync { diff --git a/crates/net/network/src/listener.rs b/crates/net/network/src/listener.rs index e5094f68948..9fcc15a104b 100644 --- a/crates/net/network/src/listener.rs +++ b/crates/net/network/src/listener.rs @@ -1,13 +1,12 @@ //! Contains connection-oriented interfaces. +use futures::{ready, Stream}; use std::{ io, net::SocketAddr, pin::Pin, task::{Context, Poll}, }; - -use futures::{ready, Stream}; use tokio::net::{TcpListener, TcpStream}; /// A tcp connection listener. diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 0738be1bcac..c9caa412274 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -15,18 +15,26 @@ //! (IP+port) of our node is published via discovery, remote peers can initiate inbound connections //! to the local node. Once a (tcp) connection is established, both peers start to authenticate a [RLPx session](https://github.com/ethereum/devp2p/blob/master/rlpx.md) via a handshake. 
If the handshake was successful, both peers announce their capabilities and are now ready to exchange sub-protocol messages via the `RLPx` session. -use std::{ - net::SocketAddr, - path::Path, - pin::Pin, - sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, - }, - task::{Context, Poll}, - time::{Duration, Instant}, +use crate::{ + budget::{DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL, DEFAULT_BUDGET_TRY_DRAIN_SWARM}, + config::NetworkConfig, + discovery::Discovery, + error::{NetworkError, ServiceKind}, + eth_requests::IncomingEthRequest, + import::{BlockImport, BlockImportOutcome, BlockValidation}, + listener::ConnectionListener, + message::{NewBlockMessage, PeerMessage}, + metrics::{DisconnectMetrics, NetworkMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, + network::{NetworkHandle, NetworkHandleMessage}, + peers::PeersManager, + poll_nested_stream_with_budget, + protocol::IntoRlpxSubProtocol, + session::SessionManager, + state::NetworkState, + swarm::{Swarm, SwarmEvent}, + transactions::NetworkTransactionEvent, + FetchClient, NetworkBuilder, }; - use futures::{Future, StreamExt}; use parking_lot::Mutex; use reth_eth_wire::{ @@ -44,31 +52,21 @@ use reth_storage_api::BlockNumReader; use reth_tasks::shutdown::GracefulShutdown; use reth_tokio_util::EventSender; use secp256k1::SecretKey; +use std::{ + net::SocketAddr, + path::Path, + pin::Pin, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, + time::{Duration, Instant}, +}; use tokio::sync::mpsc::{self, error::TrySendError}; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, trace, warn}; -use crate::{ - budget::{DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL, DEFAULT_BUDGET_TRY_DRAIN_SWARM}, - config::NetworkConfig, - discovery::Discovery, - error::{NetworkError, ServiceKind}, - eth_requests::IncomingEthRequest, - import::{BlockImport, BlockImportOutcome, BlockValidation}, - listener::ConnectionListener, - message::{NewBlockMessage, 
PeerMessage}, - metrics::{DisconnectMetrics, NetworkMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, - network::{NetworkHandle, NetworkHandleMessage}, - peers::PeersManager, - poll_nested_stream_with_budget, - protocol::IntoRlpxSubProtocol, - session::SessionManager, - state::NetworkState, - swarm::{Swarm, SwarmEvent}, - transactions::NetworkTransactionEvent, - FetchClient, NetworkBuilder, -}; - #[cfg_attr(doc, aquamarine::aquamarine)] /// Manages the _entire_ state of the network. /// diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 3040577415c..4821e259292 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -3,11 +3,6 @@ //! An `RLPx` stream is multiplexed via the prepended message-id of a framed message. //! Capabilities are exchanged via the `RLPx` `Hello` message as pairs of `(id, version)`, -use std::{ - sync::Arc, - task::{ready, Context, Poll}, -}; - use alloy_consensus::BlockHeader; use alloy_primitives::{Bytes, B256}; use futures::FutureExt; @@ -20,6 +15,10 @@ use reth_eth_wire::{ use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; use reth_primitives::{PooledTransactionsElement, ReceiptWithBloom}; +use std::{ + sync::Arc, + task::{ready, Context, Poll}, +}; use tokio::sync::oneshot; /// Internal form of a `NewBlock` message diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 2fa3fd90efe..0af0cb1ad46 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,11 +1,7 @@ -use std::{ - net::SocketAddr, - sync::{ - atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, - Arc, - }, +use crate::{ + config::NetworkMode, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, + transactions::TransactionsHandle, FetchClient, }; - use alloy_primitives::B256; use enr::Enr; use parking_lot::Mutex; @@ -15,6 +11,7 @@ use reth_eth_wire::{ DisconnectReason, EthNetworkPrimitives, 
NetworkPrimitives, NewBlock, NewPooledTransactionHashes, SharedTransactions, }; +use reth_ethereum_forks::Head; use reth_network_api::{ test_utils::{PeersHandle, PeersHandleProvider}, BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, @@ -24,20 +21,22 @@ use reth_network_api::{ use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::{PeerAddr, PeerKind, Reputation, ReputationChangeKind}; -use reth_primitives::{Head, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_tokio_util::{EventSender, EventStream}; use secp256k1::SecretKey; +use std::{ + net::SocketAddr, + sync::{ + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, + Arc, + }, +}; use tokio::sync::{ mpsc::{self, UnboundedSender}, oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::{ - config::NetworkMode, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, - transactions::TransactionsHandle, FetchClient, -}; - /// A _shareable_ network frontend. Used to interact with the network. /// /// See also [`NetworkManager`](crate::NetworkManager). diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index 4855ff5e743..d4b762e3e12 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -1,16 +1,13 @@ //! 
Peer related implementations -use std::{ - collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, - fmt::Display, - io::{self}, - net::{IpAddr, SocketAddr}, - task::{Context, Poll}, - time::Duration, +use crate::{ + error::SessionError, + session::{Direction, PendingSessionHandshakeError}, + swarm::NetworkConnectionState, }; - use futures::StreamExt; use reth_eth_wire::{errors::EthStreamError, DisconnectReason}; +use reth_ethereum_forks::ForkId; use reth_net_banlist::BanList; use reth_network_api::test_utils::{PeerCommand, PeersHandle}; use reth_network_peers::{NodeRecord, PeerId}; @@ -22,7 +19,14 @@ use reth_network_types::{ ConnectionsConfig, Peer, PeerAddr, PeerConnectionState, PeerKind, PeersConfig, ReputationChangeKind, ReputationChangeOutcome, ReputationChangeWeights, }; -use reth_primitives::ForkId; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + fmt::Display, + io::{self}, + net::{IpAddr, SocketAddr}, + task::{Context, Poll}, + time::Duration, +}; use thiserror::Error; use tokio::{ sync::mpsc, @@ -31,12 +35,6 @@ use tokio::{ use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{trace, warn}; -use crate::{ - error::SessionError, - session::{Direction, PendingSessionHandshakeError}, - swarm::NetworkConnectionState, -}; - /// Maintains the state of _all_ the peers known to the network. /// /// This is supposed to be owned by the network itself, but can be reached via the [`PeersHandle`]. diff --git a/crates/net/network/src/protocol.rs b/crates/net/network/src/protocol.rs index eeffd1c95f4..aa0749c2c7b 100644 --- a/crates/net/network/src/protocol.rs +++ b/crates/net/network/src/protocol.rs @@ -2,19 +2,18 @@ //! //! 
See also -use std::{ - fmt, - net::SocketAddr, - ops::{Deref, DerefMut}, - pin::Pin, -}; - use alloy_primitives::bytes::BytesMut; use futures::Stream; use reth_eth_wire::{ capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol, }; use reth_network_api::{Direction, PeerId}; +use std::{ + fmt, + net::SocketAddr, + ops::{Deref, DerefMut}, + pin::Pin, +}; /// A trait that allows to offer additional RLPx-based application-level protocols when establishing /// a peer-to-peer connection. diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index f979a912cd4..76701f7e2ab 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -11,6 +11,14 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + message::{NewBlockMessage, PeerMessage, PeerResponse, PeerResponseResult}, + session::{ + conn::EthRlpxConnection, + handle::{ActiveSessionMessage, SessionCommand}, + SessionId, + }, +}; use alloy_primitives::Sealable; use futures::{stream::Fuse, SinkExt, StreamExt}; use metrics::Gauge; @@ -34,15 +42,6 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, trace}; -use crate::{ - message::{NewBlockMessage, PeerMessage, PeerResponse, PeerResponseResult}, - session::{ - conn::EthRlpxConnection, - handle::{ActiveSessionMessage, SessionCommand}, - SessionId, - }, -}; - // Constants for timeout updating. /// Minimum timeout value diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 5329f01028b..45b83d1c487 100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -1,10 +1,5 @@ //! 
Connection types for a session -use std::{ - pin::Pin, - task::{Context, Poll}, -}; - use futures::{Sink, Stream}; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ @@ -13,6 +8,10 @@ use reth_eth_wire::{ multiplex::{ProtocolProxy, RlpxSatelliteStream}, EthMessage, EthNetworkPrimitives, EthStream, EthVersion, NetworkPrimitives, P2PStream, }; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use tokio::net::TcpStream; /// The type of the underlying peer network connection. diff --git a/crates/net/network/src/session/counter.rs b/crates/net/network/src/session/counter.rs index 0d8f764f206..052cf1e2570 100644 --- a/crates/net/network/src/session/counter.rs +++ b/crates/net/network/src/session/counter.rs @@ -1,8 +1,7 @@ +use super::ExceedsSessionLimit; use reth_network_api::Direction; use reth_network_types::SessionLimits; -use super::ExceedsSessionLimit; - /// Keeps track of all sessions. #[derive(Debug, Clone)] pub struct SessionCounter { diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index f80428630d9..d167dc0e6ec 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -1,7 +1,10 @@ //! Session handles. 
-use std::{io, net::SocketAddr, sync::Arc, time::Instant}; - +use crate::{ + message::PeerMessage, + session::{conn::EthRlpxConnection, Direction, SessionId}, + PendingSessionHandshakeError, +}; use reth_ecies::ECIESError; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, @@ -10,17 +13,12 @@ use reth_eth_wire::{ use reth_network_api::PeerInfo; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::PeerKind; +use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::sync::{ mpsc::{self, error::SendError}, oneshot, }; -use crate::{ - message::PeerMessage, - session::{conn::EthRlpxConnection, Direction, SessionId}, - PendingSessionHandshakeError, -}; - /// A handler attached to a peer session that's not authenticated yet, pending Handshake and hello /// message which exchanges the `capabilities` of the peer. /// diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index a95f0e88910..816c540cee2 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -23,6 +23,12 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + message::PeerMessage, + metrics::SessionManagerMetrics, + protocol::{IntoRlpxSubProtocol, RlpxSubProtocolHandlers, RlpxSubProtocols}, + session::active::ActiveSession, +}; use counter::SessionCounter; use futures::{future::Either, io, FutureExt, StreamExt}; use reth_ecies::{stream::ECIESStream, ECIESError}; @@ -31,11 +37,11 @@ use reth_eth_wire::{ Capabilities, DisconnectReason, EthVersion, HelloMessageWithProtocols, NetworkPrimitives, Status, UnauthedEthStream, UnauthedP2PStream, }; +use reth_ethereum_forks::{ForkFilter, ForkId, ForkTransition, Head}; use reth_metrics::common::mpsc::MeteredPollSender; use reth_network_api::{PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use reth_network_types::SessionsConfig; -use reth_primitives::{ForkFilter, ForkId, ForkTransition, 
Head}; use reth_tasks::TaskSpawner; use rustc_hash::FxHashMap; use secp256k1::SecretKey; @@ -48,13 +54,6 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, instrument, trace}; -use crate::{ - message::PeerMessage, - metrics::SessionManagerMetrics, - protocol::{IntoRlpxSubProtocol, RlpxSubProtocolHandlers, RlpxSubProtocols}, - session::active::ActiveSession, -}; - /// Internal identifier for active sessions. #[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq, Hash)] pub struct SessionId(usize); diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index c51f115c52f..473c76c260f 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -1,17 +1,13 @@ //! Keeps track of the state of the network. -use std::{ - collections::{HashMap, VecDeque}, - fmt, - net::{IpAddr, SocketAddr}, - ops::Deref, - sync::{ - atomic::{AtomicU64, AtomicUsize}, - Arc, - }, - task::{Context, Poll}, +use crate::{ + cache::LruCache, + discovery::Discovery, + fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, + message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult}, + peers::{PeerAction, PeersManager}, + FetchClient, }; - use alloy_consensus::BlockHeader; use alloy_primitives::B256; use rand::seq::SliceRandom; @@ -19,23 +15,25 @@ use reth_eth_wire::{ BlockHashNumber, Capabilities, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, NewBlockHashes, Status, }; +use reth_ethereum_forks::ForkId; use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use reth_network_types::{PeerAddr, PeerKind}; -use reth_primitives::ForkId; use reth_primitives_traits::Block; +use std::{ + collections::{HashMap, VecDeque}, + fmt, + net::{IpAddr, SocketAddr}, + ops::Deref, + sync::{ + atomic::{AtomicU64, AtomicUsize}, + Arc, + }, + task::{Context, Poll}, +}; use tokio::sync::oneshot; use tracing::{debug, trace}; 
-use crate::{ - cache::LruCache, - discovery::Discovery, - fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, - message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult}, - peers::{PeerAction, PeersManager}, - FetchClient, -}; - /// Cache limit of blocks to keep track of for a single peer. const PEER_BLOCK_CACHE_LIMIT: u32 = 512; diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 655934f207a..47447783f42 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -1,11 +1,11 @@ -use std::{ - io, - net::SocketAddr, - pin::Pin, - sync::Arc, - task::{Context, Poll}, +use crate::{ + listener::{ConnectionListener, ListenerEvent}, + message::PeerMessage, + peers::InboundConnectionError, + protocol::IntoRlpxSubProtocol, + session::{Direction, PendingSessionHandshakeError, SessionEvent, SessionId, SessionManager}, + state::{NetworkState, StateAction}, }; - use futures::Stream; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, @@ -13,16 +13,14 @@ use reth_eth_wire::{ }; use reth_network_api::{PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; -use tracing::trace; - -use crate::{ - listener::{ConnectionListener, ListenerEvent}, - message::PeerMessage, - peers::InboundConnectionError, - protocol::IntoRlpxSubProtocol, - session::{Direction, PendingSessionHandshakeError, SessionEvent, SessionId, SessionManager}, - state::{NetworkState, StateAction}, +use std::{ + io, + net::SocketAddr, + pin::Pin, + sync::Arc, + task::{Context, Poll}, }; +use tracing::trace; #[cfg_attr(doc, aquamarine::aquamarine)] /// Contains the connectivity related state of the network. 
diff --git a/crates/net/network/src/test_utils/init.rs b/crates/net/network/src/test_utils/init.rs index 767f6818091..87ccbb5f9d7 100644 --- a/crates/net/network/src/test_utils/init.rs +++ b/crates/net/network/src/test_utils/init.rs @@ -1,7 +1,6 @@ -use std::{net::SocketAddr, time::Duration}; - use enr::{k256::ecdsa::SigningKey, Enr, EnrPublicKey}; use reth_network_peers::PeerId; +use std::{net::SocketAddr, time::Duration}; /// The timeout for tests that create a `GethInstance` pub const GETH_TIMEOUT: Duration = Duration::from_secs(60); diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index d92272a871e..a64084f2cf9 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -1,13 +1,13 @@ //! A network implementation for testing purposes. -use std::{ - fmt, - future::Future, - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - pin::Pin, - task::{Context, Poll}, +use crate::{ + builder::ETH_REQUEST_CHANNEL_CAPACITY, + error::NetworkError, + eth_requests::EthRequestHandler, + protocol::IntoRlpxSubProtocol, + transactions::{TransactionsHandle, TransactionsManager, TransactionsManagerConfig}, + NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, }; - use futures::{FutureExt, StreamExt}; use pin_project::pin_project; use reth_chainspec::{Hardforks, MAINNET}; @@ -27,6 +27,13 @@ use reth_transaction_pool::{ EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, }; use secp256k1::SecretKey; +use std::{ + fmt, + future::Future, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + pin::Pin, + task::{Context, Poll}, +}; use tokio::{ sync::{ mpsc::{channel, unbounded_channel}, @@ -35,15 +42,6 @@ use tokio::{ task::JoinHandle, }; -use crate::{ - builder::ETH_REQUEST_CHANNEL_CAPACITY, - error::NetworkError, - eth_requests::EthRequestHandler, - protocol::IntoRlpxSubProtocol, - transactions::{TransactionsHandle, TransactionsManager, 
TransactionsManagerConfig}, - NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, -}; - /// A test network consisting of multiple peers. pub struct Testnet { /// All running peers in the network. diff --git a/crates/net/network/src/transactions/config.rs b/crates/net/network/src/transactions/config.rs index b838f7cfe71..db59ffac5cc 100644 --- a/crates/net/network/src/transactions/config.rs +++ b/crates/net/network/src/transactions/config.rs @@ -1,5 +1,3 @@ -use derive_more::Constructor; - use super::{ DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, @@ -9,6 +7,7 @@ use crate::transactions::constants::tx_fetcher::{ DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, }; +use derive_more::Constructor; /// Configuration for managing transactions within the network. #[derive(Debug, Clone)] diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 4c4119c85c0..0833f677409 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -25,13 +25,18 @@ //! before it's re-tried. Nonetheless, the capacity of the buffered hashes cache must be large //! enough to buffer many hashes during network failure, to allow for recovery. 
-use std::{ - collections::HashMap, - pin::Pin, - task::{ready, Context, Poll}, - time::Duration, +use super::{ + config::TransactionFetcherConfig, + constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST}, + MessageFilter, PeerMetadata, PooledTransactions, + SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, +}; +use crate::{ + cache::{LruCache, LruMap}, + duration_metered_exec, + metrics::TransactionFetcherMetrics, + transactions::{validation, PartiallyFilterMessage}, }; - use alloy_primitives::TxHash; use derive_more::{Constructor, Deref}; use futures::{stream::FuturesUnordered, Future, FutureExt, Stream, StreamExt}; @@ -47,23 +52,16 @@ use reth_primitives::PooledTransactionsElement; use schnellru::ByLength; #[cfg(debug_assertions)] use smallvec::{smallvec, SmallVec}; +use std::{ + collections::HashMap, + pin::Pin, + task::{ready, Context, Poll}, + time::Duration, +}; use tokio::sync::{mpsc::error::TrySendError, oneshot, oneshot::error::RecvError}; use tracing::{debug, trace}; use validation::FilterOutcome; -use super::{ - config::TransactionFetcherConfig, - constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST}, - MessageFilter, PeerMetadata, PooledTransactions, - SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, -}; -use crate::{ - cache::{LruCache, LruMap}, - duration_metered_exec, - metrics::TransactionFetcherMetrics, - transactions::{validation, PartiallyFilterMessage}, -}; - /// The type responsible for fetching missing transactions from peers. 
/// /// This will keep track of unique transaction hashes that are currently being fetched and submits diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 241f01ae8ab..b499f0ac422 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -18,20 +18,19 @@ pub use validation::*; pub(crate) use fetcher::{FetchEvent, TransactionFetcher}; use self::constants::{tx_manager::*, DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_BROADCAST_MESSAGE}; -use constants::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE; - -use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, - pin::Pin, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, +use crate::{ + budget::{ + DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, + DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, + DEFAULT_BUDGET_TRY_DRAIN_STREAM, }, - task::{Context, Poll}, - time::{Duration, Instant}, + cache::LruCache, + duration_metered_exec, metered_poll_nested_stream_with_budget, + metrics::{TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, + NetworkHandle, }; - use alloy_primitives::{TxHash, B256}; +use constants::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE; use futures::{stream::FuturesUnordered, Future, StreamExt}; use reth_eth_wire::{ DedupPayload, EthNetworkPrimitives, EthVersion, GetPooledTransactions, HandleMempoolData, @@ -56,22 +55,20 @@ use reth_transaction_pool::{ GetPooledTransactionLimit, PoolTransaction, PropagateKind, PropagatedTransactions, TransactionPool, ValidPoolTransaction, }; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, + time::{Duration, Instant}, +}; use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; use 
tracing::{debug, trace}; -use crate::{ - budget::{ - DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - DEFAULT_BUDGET_TRY_DRAIN_STREAM, - }, - cache::LruCache, - duration_metered_exec, metered_poll_nested_stream_with_budget, - metrics::{TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, - NetworkHandle, -}; - /// The future for importing transactions into the pool. /// /// Resolves with the result of each transaction import. diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index 7bfe07761a2..1575d9f3374 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -2,8 +2,6 @@ //! and [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) //! announcements. Validation and filtering of announcements is network dependent. -use std::{fmt, fmt::Display, mem}; - use crate::metrics::{AnnouncedTxTypesMetrics, TxTypesCounter}; use alloy_primitives::{Signature, TxHash}; use derive_more::{Deref, DerefMut}; @@ -12,6 +10,7 @@ use reth_eth_wire::{ MAX_MESSAGE_SIZE, }; use reth_primitives::TxType; +use std::{fmt, fmt::Display, mem}; use tracing::trace; /// The size of a decoded signature in bytes. 
From 402f96600b20b2f103ecab32fee372a4a4a4f579 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 13:18:58 +0100 Subject: [PATCH 578/970] chore: move txtype constants to codecs (#12705) --- crates/optimism/primitives/src/tx_type.rs | 35 ++++++------- crates/primitives/src/receipt.rs | 4 +- crates/primitives/src/transaction/mod.rs | 20 +++----- crates/primitives/src/transaction/tx_type.rs | 54 +++++++------------- crates/storage/codecs/src/lib.rs | 5 +- crates/storage/codecs/src/txtype.rs | 15 ++++++ 6 files changed, 60 insertions(+), 73 deletions(-) create mode 100644 crates/storage/codecs/src/txtype.rs diff --git a/crates/optimism/primitives/src/tx_type.rs b/crates/optimism/primitives/src/tx_type.rs index 9ddfe77b192..c6e7fcc0a80 100644 --- a/crates/optimism/primitives/src/tx_type.rs +++ b/crates/optimism/primitives/src/tx_type.rs @@ -2,25 +2,15 @@ //! `OpTxType` implements `reth_primitives_traits::TxType`. //! This type is required because a `Compact` impl is needed on the deposit tx type. -use core::fmt::Debug; - -#[cfg(feature = "reth-codec")] -use alloy_consensus::constants::EIP7702_TX_TYPE_ID; use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable, Error}; use bytes::BufMut; +use core::fmt::Debug; use derive_more::{ derive::{From, Into}, Display, }; use op_alloy_consensus::OpTxType as AlloyOpTxType; -#[cfg(feature = "reth-codec")] -use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; -#[cfg(feature = "reth-codec")] -use reth_primitives::transaction::{ - COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, - COMPACT_IDENTIFIER_LEGACY, -}; use reth_primitives_traits::{InMemorySize, TxType}; /// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement [`TxType`] trait. 
@@ -145,16 +135,17 @@ impl reth_codecs::Compact for OpTxType { where B: bytes::BufMut + AsMut<[u8]>, { + use reth_codecs::txtype::*; match self.0 { AlloyOpTxType::Legacy => COMPACT_IDENTIFIER_LEGACY, AlloyOpTxType::Eip2930 => COMPACT_IDENTIFIER_EIP2930, AlloyOpTxType::Eip1559 => COMPACT_IDENTIFIER_EIP1559, AlloyOpTxType::Eip7702 => { - buf.put_u8(EIP7702_TX_TYPE_ID); + buf.put_u8(alloy_consensus::constants::EIP7702_TX_TYPE_ID); COMPACT_EXTENDED_IDENTIFIER_FLAG } AlloyOpTxType::Deposit => { - buf.put_u8(DEPOSIT_TX_TYPE_ID); + buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); COMPACT_EXTENDED_IDENTIFIER_FLAG } } @@ -164,14 +155,16 @@ impl reth_codecs::Compact for OpTxType { use bytes::Buf; ( match identifier { - COMPACT_IDENTIFIER_LEGACY => Self(AlloyOpTxType::Legacy), - COMPACT_IDENTIFIER_EIP2930 => Self(AlloyOpTxType::Eip2930), - COMPACT_IDENTIFIER_EIP1559 => Self(AlloyOpTxType::Eip1559), - COMPACT_EXTENDED_IDENTIFIER_FLAG => { + reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => Self(AlloyOpTxType::Legacy), + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => Self(AlloyOpTxType::Eip2930), + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => Self(AlloyOpTxType::Eip1559), + reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { let extended_identifier = buf.get_u8(); match extended_identifier { - EIP7702_TX_TYPE_ID => Self(AlloyOpTxType::Eip7702), - DEPOSIT_TX_TYPE_ID => Self(AlloyOpTxType::Deposit), + alloy_consensus::constants::EIP7702_TX_TYPE_ID => { + Self(AlloyOpTxType::Eip7702) + } + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self(AlloyOpTxType::Deposit), _ => panic!("Unsupported OpTxType identifier: {extended_identifier}"), } } @@ -185,8 +178,10 @@ impl reth_codecs::Compact for OpTxType { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::constants::EIP7702_TX_TYPE_ID; use bytes::BytesMut; - use reth_codecs::Compact; + use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; + use reth_codecs::{txtype::*, Compact}; use rstest::rstest; #[test] diff --git 
a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 93c0af1d971..77a44dc39e5 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -393,7 +393,7 @@ impl Decodable for ReceiptWithBloom { Self::decode_receipt(buf, TxType::Eip7702) } #[cfg(feature = "optimism")] - crate::transaction::DEPOSIT_TX_TYPE_ID => { + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => { buf.advance(1); Self::decode_receipt(buf, TxType::Deposit) } @@ -529,7 +529,7 @@ impl ReceiptWithBloomEncoder<'_> { } #[cfg(feature = "optimism")] TxType::Deposit => { - out.put_u8(crate::transaction::DEPOSIT_TX_TYPE_ID); + out.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); } } out.put_slice(payload.as_ref()); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 41522744a2f..d50aea14c46 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -57,17 +57,9 @@ pub mod signature; pub(crate) mod util; mod variant; +use alloc::vec::Vec; #[cfg(feature = "optimism")] use op_alloy_consensus::TxDeposit; -#[cfg(feature = "optimism")] -pub use tx_type::DEPOSIT_TX_TYPE_ID; -#[cfg(any(test, feature = "reth-codec"))] -pub use tx_type::{ - COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, - COMPACT_IDENTIFIER_LEGACY, -}; - -use alloc::vec::Vec; use reth_primitives_traits::{transaction::TransactionExt, SignedTransaction}; use revm_primitives::{AuthorizationList, TxEnv}; @@ -594,19 +586,19 @@ impl reth_codecs::Compact for Transaction { use bytes::Buf; match identifier { - COMPACT_IDENTIFIER_LEGACY => { + reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => { let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); (Self::Legacy(tx), buf) } - COMPACT_IDENTIFIER_EIP2930 => { + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => { let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); (Self::Eip2930(tx), buf) } - COMPACT_IDENTIFIER_EIP1559 => { + 
reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => { let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); (Self::Eip1559(tx), buf) } - COMPACT_EXTENDED_IDENTIFIER_FLAG => { + reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { // An identifier of 3 indicates that the transaction type did not fit into // the backwards compatible 2 bit identifier, their transaction types are // larger than 2 bits (eg. 4844 and Deposit Transactions). In this case, @@ -623,7 +615,7 @@ impl reth_codecs::Compact for Transaction { (Self::Eip7702(tx), buf) } #[cfg(feature = "optimism")] - DEPOSIT_TX_TYPE_ID => { + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => { let (tx, buf) = TxDeposit::from_compact(buf, buf.len()); (Self::Deposit(tx), buf) } diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index caa6d872854..597487564df 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -8,28 +8,6 @@ use derive_more::Display; use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; -/// Identifier parameter for legacy transaction -#[cfg(any(test, feature = "reth-codec"))] -pub const COMPACT_IDENTIFIER_LEGACY: usize = 0; - -/// Identifier parameter for EIP-2930 transaction -#[cfg(any(test, feature = "reth-codec"))] -pub const COMPACT_IDENTIFIER_EIP2930: usize = 1; - -/// Identifier parameter for EIP-1559 transaction -#[cfg(any(test, feature = "reth-codec"))] -pub const COMPACT_IDENTIFIER_EIP1559: usize = 2; - -/// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier -/// parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type is -/// read from the buffer as a single byte. -#[cfg(any(test, feature = "reth-codec"))] -pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; - -/// Identifier for [`TxDeposit`](op_alloy_consensus::TxDeposit) transaction. 
-#[cfg(feature = "optimism")] -pub const DEPOSIT_TX_TYPE_ID: u8 = 126; - /// Transaction Type /// /// Currently being used as 2-bit type when encoding it to `reth_codecs::Compact` on @@ -136,7 +114,7 @@ impl From for u8 { TxType::Eip4844 => EIP4844_TX_TYPE_ID, TxType::Eip7702 => EIP7702_TX_TYPE_ID, #[cfg(feature = "optimism")] - TxType::Deposit => DEPOSIT_TX_TYPE_ID, + TxType::Deposit => op_alloy_consensus::DEPOSIT_TX_TYPE_ID, } } } @@ -195,6 +173,8 @@ impl reth_codecs::Compact for TxType { where B: bytes::BufMut + AsMut<[u8]>, { + use reth_codecs::txtype::*; + match self { Self::Legacy => COMPACT_IDENTIFIER_LEGACY, Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, @@ -209,7 +189,7 @@ impl reth_codecs::Compact for TxType { } #[cfg(feature = "optimism")] Self::Deposit => { - buf.put_u8(DEPOSIT_TX_TYPE_ID); + buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); COMPACT_EXTENDED_IDENTIFIER_FLAG } } @@ -222,16 +202,16 @@ impl reth_codecs::Compact for TxType { use bytes::Buf; ( match identifier { - COMPACT_IDENTIFIER_LEGACY => Self::Legacy, - COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, - COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, - COMPACT_EXTENDED_IDENTIFIER_FLAG => { + reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => Self::Legacy, + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, + reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { let extended_identifier = buf.get_u8(); match extended_identifier { EIP4844_TX_TYPE_ID => Self::Eip4844, EIP7702_TX_TYPE_ID => Self::Eip7702, #[cfg(feature = "optimism")] - DEPOSIT_TX_TYPE_ID => Self::Deposit, + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self::Deposit, _ => panic!("Unsupported TxType identifier: {extended_identifier}"), } } @@ -274,19 +254,21 @@ impl Decodable for TxType { #[cfg(test)] mod tests { + use super::*; use alloy_primitives::hex; - use reth_codecs::Compact; + use reth_codecs::{txtype::*, Compact}; use rstest::rstest; - use 
super::*; - #[rstest] #[case(U64::from(LEGACY_TX_TYPE_ID), Ok(TxType::Legacy))] #[case(U64::from(EIP2930_TX_TYPE_ID), Ok(TxType::Eip2930))] #[case(U64::from(EIP1559_TX_TYPE_ID), Ok(TxType::Eip1559))] #[case(U64::from(EIP4844_TX_TYPE_ID), Ok(TxType::Eip4844))] #[case(U64::from(EIP7702_TX_TYPE_ID), Ok(TxType::Eip7702))] - #[cfg_attr(feature = "optimism", case(U64::from(DEPOSIT_TX_TYPE_ID), Ok(TxType::Deposit)))] + #[cfg_attr( + feature = "optimism", + case(U64::from(op_alloy_consensus::DEPOSIT_TX_TYPE_ID), Ok(TxType::Deposit)) + )] #[case(U64::MAX, Err("invalid tx type"))] fn test_u64_to_tx_type(#[case] input: U64, #[case] expected: Result) { let tx_type_result = TxType::try_from(input); @@ -299,7 +281,7 @@ mod tests { #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] - #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]))] + #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))] fn test_txtype_to_compact( #[case] tx_type: TxType, #[case] expected_identifier: usize, @@ -318,7 +300,7 @@ mod tests { #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] - #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]))] + #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))] fn test_txtype_from_compact( #[case] expected_type: TxType, #[case] identifier: usize, @@ -337,7 +319,7 @@ mod tests { #[case(&[EIP4844_TX_TYPE_ID], Ok(TxType::Eip4844))] 
#[case(&[EIP7702_TX_TYPE_ID], Ok(TxType::Eip7702))] #[case(&[u8::MAX], Err(alloy_rlp::Error::InputTooShort))] - #[cfg_attr(feature = "optimism", case(&[DEPOSIT_TX_TYPE_ID], Ok(TxType::Deposit)))] + #[cfg_attr(feature = "optimism", case(&[op_alloy_consensus::DEPOSIT_TX_TYPE_ID], Ok(TxType::Deposit)))] fn decode_tx_type(#[case] input: &[u8], #[case] expected: Result) { let tx_type_result = TxType::decode(&mut &input[..]); assert_eq!(tx_type_result, expected) diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 284c6454f83..86d397ad24f 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -17,13 +17,14 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + pub use reth_codecs_derive::*; use serde as _; use alloy_primitives::{Address, Bloom, Bytes, FixedBytes, U256}; use bytes::{Buf, BufMut}; -extern crate alloc; use alloc::vec::Vec; #[cfg(feature = "test-utils")] @@ -33,6 +34,8 @@ pub mod alloy; #[cfg(any(test, feature = "alloy"))] mod alloy; +pub mod txtype; + #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; diff --git a/crates/storage/codecs/src/txtype.rs b/crates/storage/codecs/src/txtype.rs new file mode 100644 index 00000000000..ce392b59cd0 --- /dev/null +++ b/crates/storage/codecs/src/txtype.rs @@ -0,0 +1,15 @@ +//! Commonly used constants for transaction types. + +/// Identifier parameter for legacy transaction +pub const COMPACT_IDENTIFIER_LEGACY: usize = 0; + +/// Identifier parameter for EIP-2930 transaction +pub const COMPACT_IDENTIFIER_EIP2930: usize = 1; + +/// Identifier parameter for EIP-1559 transaction +pub const COMPACT_IDENTIFIER_EIP1559: usize = 2; + +/// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier +/// parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type is +/// read from the buffer as a single byte. 
+pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; From f9b764f6e80661f283f81d69d32e0429392c5263 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 13:28:47 +0100 Subject: [PATCH 579/970] chore: misc lint suggestion (#12706) --- crates/net/network/src/flattened_response.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crates/net/network/src/flattened_response.rs b/crates/net/network/src/flattened_response.rs index df2a9db78ae..61dae9c7c72 100644 --- a/crates/net/network/src/flattened_response.rs +++ b/crates/net/network/src/flattened_response.rs @@ -23,10 +23,7 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); - this.receiver.poll(cx).map(|r| match r { - Ok(r) => r, - Err(err) => Err(err.into()), - }) + this.receiver.poll(cx).map(|r| r.unwrap_or_else(|err| Err(err.into()))) } } From e3702cfc87449294da061f02a571a23061b8ab50 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 13:28:58 +0100 Subject: [PATCH 580/970] chore: move tracing futures to workspace (#12707) --- Cargo.toml | 1 + crates/rpc/rpc/Cargo.toml | 2 +- crates/tasks/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f2565a1c92f..e4ca1b7bc28 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -555,6 +555,7 @@ hyper = "1.3" hyper-util = "0.1.5" pin-project = "1.0.12" reqwest = { version = "0.12", default-features = false } +tracing-futures = "0.2" tower = "0.4" tower-http = "0.6" diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 5418cd1eb3a..804ecd11120 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -82,7 +82,7 @@ parking_lot.workspace = true # misc tracing.workspace = true -tracing-futures = "0.2" +tracing-futures.workspace = true futures.workspace = true rand.workspace = true serde.workspace = true diff --git a/crates/tasks/Cargo.toml b/crates/tasks/Cargo.toml index 82c80c0932b..68d8e958979 
100644 --- a/crates/tasks/Cargo.toml +++ b/crates/tasks/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # async tokio = { workspace = true, features = ["sync", "rt"] } -tracing-futures = "0.2" +tracing-futures.workspace = true futures-util.workspace = true # metrics From 0d4b1e73d43bbdf901673c63ba2a1e4194e89bfe Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 20 Nov 2024 15:19:54 +0100 Subject: [PATCH 581/970] test(tx-pool): add more unit tests for tx-pool best (#12691) --- crates/transaction-pool/src/pool/best.rs | 124 ++++++++++++++++++++++- 1 file changed, 123 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 7c2e5a025b7..171faccf7c2 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -401,7 +401,7 @@ mod tests { use crate::{ pool::pending::PendingPool, test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}, - Priority, + BestTransactions, Priority, }; use alloy_primitives::U256; use reth_payload_util::{PayloadTransactionsChain, PayloadTransactionsFixed}; @@ -897,5 +897,127 @@ mod tests { assert_eq!(block.next(()).unwrap().signer(), address_regular); } + #[test] + fn test_best_with_fees_iter_no_blob_fee_required() { + // Tests transactions without blob fees where base fees are checked. 
+ let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 0; // No blob fee requirement + + // Insert transactions with max_fee_per_gas above the base fee + for nonce in 0..5 { + let tx = MockTransaction::eip1559() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 + 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // All transactions should be returned as no blob fee requirement is imposed + for nonce in 0..5 { + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.nonce(), nonce); + } + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_with_fees_iter_mix_of_blob_and_non_blob_transactions() { + // Tests mixed scenarios with both blob and non-blob transactions. 
+ let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 15; + + // Add a non-blob transaction that satisfies the base fee + let tx_non_blob = + MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(base_fee as u128 + 5); + pool.add_transaction(Arc::new(f.validated(tx_non_blob.clone())), 0); + + // Add a blob transaction that satisfies both base fee and blob fee + let tx_blob = MockTransaction::eip4844() + .rng_hash() + .with_nonce(1) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 + 5); + pool.add_transaction(Arc::new(f.validated(tx_blob.clone())), 0); + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // Verify both transactions are returned + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_non_blob); + + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_blob); + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_transactions_with_skipping_blobs() { + // Tests the skip_blobs functionality to ensure blob transactions are skipped. 
+ let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add a blob transaction + let tx_blob = MockTransaction::eip4844().rng_hash().with_nonce(0).with_blob_fee(100); + let valid_blob_tx = f.validated(tx_blob); + pool.add_transaction(Arc::new(valid_blob_tx), 0); + + // Add a non-blob transaction + let tx_non_blob = MockTransaction::eip1559().rng_hash().with_nonce(1).with_max_fee(200); + let valid_non_blob_tx = f.validated(tx_non_blob.clone()); + pool.add_transaction(Arc::new(valid_non_blob_tx), 0); + + let mut best = pool.best(); + best.skip_blobs(); + + // Only the non-blob transaction should be returned + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_non_blob); + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_transactions_no_updates() { + // Tests the no_updates functionality to ensure it properly clears the + // new_transaction_receiver. 
+ let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add a transaction + let tx = MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(100); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + + let mut best = pool.best(); + + // Use a broadcast channel for transaction updates + let (_tx_sender, tx_receiver) = + tokio::sync::broadcast::channel::>(1000); + best.new_transaction_receiver = Some(tx_receiver); + + // Ensure receiver is set + assert!(best.new_transaction_receiver.is_some()); + + // Call no_updates to clear the receiver + best.no_updates(); + + // Ensure receiver is cleared + assert!(best.new_transaction_receiver.is_none()); + } + // TODO: Same nonce test } From ce0bcee416088251436a8e273563296dcf89863b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 15:25:16 +0100 Subject: [PATCH 582/970] chore: move tx builder fns (#12709) --- crates/net/network/src/transactions/mod.rs | 66 +++++++++++----------- 1 file changed, 34 insertions(+), 32 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index b499f0ac422..d72b657bffc 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1480,7 +1480,7 @@ enum PropagateTransactionsBuilder { Full(FullTransactionsBuilder), } -impl PropagateTransactionsBuilder { +impl PropagateTransactionsBuilder { /// Create a builder for pooled transactions fn pooled(version: EthVersion) -> Self { Self::Pooled(PooledTransactionsHashesBuilder::new(version)) @@ -1491,21 +1491,6 @@ impl PropagateTransactionsBuilder { Self::Full(FullTransactionsBuilder::new(version)) } - /// Appends all transactions - fn extend<'a>(&mut self, txs: impl IntoIterator) { - for tx in txs { - self.push(tx); - } - } - - /// Appends a transaction to the list. 
- fn push(&mut self, transaction: &PropagateTransaction) { - match self { - Self::Pooled(builder) => builder.push(transaction), - Self::Full(builder) => builder.push(transaction), - } - } - /// Returns true if no transactions are recorded. fn is_empty(&self) -> bool { match self { @@ -1515,7 +1500,7 @@ impl PropagateTransactionsBuilder { } /// Consumes the type and returns the built messages that should be sent to the peer. - fn build(self) -> PropagateTransactions { + fn build(self) -> PropagateTransactions { match self { Self::Pooled(pooled) => { PropagateTransactions { pooled: Some(pooled.build()), full: None } @@ -1525,6 +1510,23 @@ impl PropagateTransactionsBuilder { } } +impl PropagateTransactionsBuilder { + /// Appends all transactions + fn extend<'a>(&mut self, txs: impl IntoIterator) { + for tx in txs { + self.push(tx); + } + } + + /// Appends a transaction to the list. + fn push(&mut self, transaction: &PropagateTransaction) { + match self { + Self::Pooled(builder) => builder.push(transaction), + Self::Full(builder) => builder.push(transaction), + } + } +} + /// Represents how the transactions should be sent to a peer if any. struct PropagateTransactions { /// The pooled transaction hashes to send. @@ -1547,9 +1549,7 @@ struct FullTransactionsBuilder { pooled: PooledTransactionsHashesBuilder, } -// === impl FullTransactionsBuilder === - -impl FullTransactionsBuilder { +impl FullTransactionsBuilder { /// Create a builder for the negotiated version of the peer's session fn new(version: EthVersion) -> Self { Self { @@ -1559,6 +1559,20 @@ impl FullTransactionsBuilder { } } + /// Returns whether or not any transactions are in the [`FullTransactionsBuilder`]. + fn is_empty(&self) -> bool { + self.transactions.is_empty() && self.pooled.is_empty() + } + + /// Returns the messages that should be propagated to the peer. 
+ fn build(self) -> PropagateTransactions { + let pooled = Some(self.pooled.build()).filter(|pooled| !pooled.is_empty()); + let full = Some(self.transactions).filter(|full| !full.is_empty()); + PropagateTransactions { pooled, full } + } +} + +impl FullTransactionsBuilder { /// Appends all transactions. fn extend(&mut self, txs: impl IntoIterator) { for tx in txs { @@ -1600,18 +1614,6 @@ impl FullTransactionsBuilder { self.total_size = new_size; self.transactions.push(Arc::clone(&transaction.transaction)); } - - /// Returns whether or not any transactions are in the [`FullTransactionsBuilder`]. - fn is_empty(&self) -> bool { - self.transactions.is_empty() && self.pooled.is_empty() - } - - /// Returns the messages that should be propagated to the peer. - fn build(self) -> PropagateTransactions { - let pooled = Some(self.pooled.build()).filter(|pooled| !pooled.is_empty()); - let full = Some(self.transactions).filter(|full| !full.is_empty()); - PropagateTransactions { pooled, full } - } } /// A helper type to create the pooled transactions message based on the negotiated version of the From b3b083fb82df484db030f2b14f46d640ec57bf9c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 15:33:56 +0100 Subject: [PATCH 583/970] chore: add generics to broadcast (#12714) --- crates/net/eth-wire-types/src/broadcast.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index f37c5e74a04..7d74085d355 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -90,9 +90,9 @@ generate_tests!(#[rlp, 25] NewBlock, EthNewBlockTests); #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(rlp, 10)] -pub struct Transactions( +pub struct Transactions( /// New transactions for the peer to 
include in its mempool. - pub Vec, + pub Vec, ); impl Transactions { @@ -102,14 +102,14 @@ impl Transactions { } } -impl From> for Transactions { - fn from(txs: Vec) -> Self { +impl From> for Transactions { + fn from(txs: Vec) -> Self { Self(txs) } } -impl From for Vec { - fn from(txs: Transactions) -> Self { +impl From> for Vec { + fn from(txs: Transactions) -> Self { txs.0 } } @@ -121,9 +121,9 @@ impl From for Vec { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(rlp, 20)] -pub struct SharedTransactions( +pub struct SharedTransactions( /// New transactions for the peer to include in its mempool. - pub Vec>, + pub Vec>, ); /// A wrapper type for all different new pooled transaction types From 68abcb1fe9a47e8d42f30f433e6deb7c671698cf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 15:39:19 +0100 Subject: [PATCH 584/970] chore: rm unused file (#12713) --- crates/net/eth-wire-types/src/message.rs | 1 - crates/net/eth-wire-types/src/primitives.rs | 3 +-- crates/net/eth-wire-types/src/response.rs | 29 --------------------- 3 files changed, 1 insertion(+), 32 deletions(-) delete mode 100644 crates/net/eth-wire-types/src/response.rs diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index f83e21124e3..93d42fb3ea0 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -12,7 +12,6 @@ use super::{ NewPooledTransactionHashes68, NodeData, PooledTransactions, Receipts, Status, Transactions, }; use crate::{EthNetworkPrimitives, EthVersion, NetworkPrimitives, SharedTransactions}; - use alloy_primitives::bytes::{Buf, BufMut}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use std::{fmt::Debug, sync::Arc}; diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index 
c8b62cb0a82..ff7ab1c801b 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -1,9 +1,8 @@ //! Abstraction over primitive types in network messages. -use std::fmt::Debug; - use alloy_rlp::{Decodable, Encodable}; use reth_primitives_traits::{Block, BlockHeader}; +use std::fmt::Debug; /// Abstraction over primitive types which might appear in network messages. See /// [`crate::EthMessage`] for more context. diff --git a/crates/net/eth-wire-types/src/response.rs b/crates/net/eth-wire-types/src/response.rs deleted file mode 100644 index dfcf5ed56a8..00000000000 --- a/crates/net/eth-wire-types/src/response.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::{ - BlockBodies, BlockHeaders, NodeData, PooledTransactions, Receipts, RequestPair, Status, -}; - -// This type is analogous to the `zebra_network::Response` type. -/// An ethereum network response for version 66. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum Response { - /// The request does not have a response. - Nil, - - /// The [`Status`](super::Status) message response in the eth protocol handshake. - Status(Status), - - /// The response to a [`Request::GetBlockHeaders`](super::Request::GetBlockHeaders) request. - BlockHeaders(RequestPair), - - /// The response to a [`Request::GetBlockBodies`](super::Request::GetBlockBodies) request. - BlockBodies(RequestPair), - - /// The response to a [`Request::GetPooledTransactions`](super::Request::GetPooledTransactions) request. - PooledTransactions(RequestPair), - - /// The response to a [`Request::GetNodeData`](super::Request::GetNodeData) request. - NodeData(RequestPair), - - /// The response to a [`Request::GetReceipts`](super::Request::GetReceipts) request. 
- Receipts(RequestPair), -} From 04729f3c6655426f23e6ae8edd2abc1d2ca84e74 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 15:39:32 +0100 Subject: [PATCH 585/970] chore: introduce network primitives to transactions handle (#12711) --- crates/net/network/src/transactions/mod.rs | 63 ++++++++++++---------- 1 file changed, 34 insertions(+), 29 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index d72b657bffc..5bdf200e20f 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -82,42 +82,26 @@ pub type PoolImportFuture = Pin>> /// For example [`TransactionsHandle::get_peer_transaction_hashes`] returns the transaction hashes /// known by a specific peer. #[derive(Debug, Clone)] -pub struct TransactionsHandle { +pub struct TransactionsHandle { /// Command channel to the [`TransactionsManager`] - manager_tx: mpsc::UnboundedSender, + manager_tx: mpsc::UnboundedSender>, } /// Implementation of the `TransactionsHandle` API for use in testnet via type /// [`PeerHandle`](crate::test_utils::PeerHandle). -impl TransactionsHandle { - fn send(&self, cmd: TransactionsCommand) { +impl TransactionsHandle { + fn send(&self, cmd: TransactionsCommand) { let _ = self.manager_tx.send(cmd); } /// Fetch the [`PeerRequestSender`] for the given peer. - async fn peer_handle(&self, peer_id: PeerId) -> Result, RecvError> { - let (tx, rx) = oneshot::channel(); - self.send(TransactionsCommand::GetPeerSender { peer_id, peer_request_sender: tx }); - rx.await - } - - /// Requests the transactions directly from the given peer. - /// - /// Returns `None` if the peer is not connected. - /// - /// **Note**: this returns the response from the peer as received. - pub async fn get_pooled_transactions_from( + async fn peer_handle( &self, peer_id: PeerId, - hashes: Vec, - ) -> Result>, RequestError> { - let Some(peer) = self.peer_handle(peer_id).await? 
else { return Ok(None) }; - + ) -> Result>>, RecvError> { let (tx, rx) = oneshot::channel(); - let request = PeerRequest::GetPooledTransactions { request: hashes.into(), response: tx }; - peer.try_send(request).ok(); - - rx.await?.map(|res| Some(res.0)) + self.send(TransactionsCommand::GetPeerSender { peer_id, peer_request_sender: tx }); + rx.await } /// Manually propagate the transaction that belongs to the hash. @@ -179,6 +163,27 @@ impl TransactionsHandle { } } +impl TransactionsHandle { + /// Requests the transactions directly from the given peer. + /// + /// Returns `None` if the peer is not connected. + /// + /// **Note**: this returns the response from the peer as received. + pub async fn get_pooled_transactions_from( + &self, + peer_id: PeerId, + hashes: Vec, + ) -> Result>, RequestError> { + let Some(peer) = self.peer_handle(peer_id).await? else { return Ok(None) }; + + let (tx, rx) = oneshot::channel(); + let request = PeerRequest::GetPooledTransactions { request: hashes.into(), response: tx }; + peer.try_send(request).ok(); + + rx.await?.map(|res| Some(res.0)) + } +} + /// Manages transactions on top of the p2p network. /// /// This can be spawned to another task and is supposed to be run as background service. @@ -235,12 +240,12 @@ pub struct TransactionsManager, + command_tx: mpsc::UnboundedSender>, /// Incoming commands from [`TransactionsHandle`]. /// /// This will only receive commands if a user manually sends a command to the manager through /// the [`TransactionsHandle`] to interact with this type directly. - command_rx: UnboundedReceiverStream, + command_rx: UnboundedReceiverStream>, /// A stream that yields new __pending__ transactions. /// /// A transaction is considered __pending__ if it is executable on the current state of the @@ -312,7 +317,7 @@ impl TransactionsManager { impl TransactionsManager { /// Returns a new handle that can send commands to this type. 
- pub fn handle(&self) -> TransactionsHandle { + pub fn handle(&self) -> TransactionsHandle { TransactionsHandle { manager_tx: self.command_tx.clone() } } @@ -1732,7 +1737,7 @@ impl PeerMetadata { /// Commands to send to the [`TransactionsManager`] #[derive(Debug)] -enum TransactionsCommand { +enum TransactionsCommand { /// Propagate a transaction hash to the network. PropagateHash(B256), /// Propagate transaction hashes to a specific peer. @@ -1751,7 +1756,7 @@ enum TransactionsCommand { /// Requests a clone of the sender sender channel to the peer. GetPeerSender { peer_id: PeerId, - peer_request_sender: oneshot::Sender>, + peer_request_sender: oneshot::Sender>>>, }, } From 8df9045fd8de4d7f9d71e4798dec2d302e964702 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 17:02:02 +0100 Subject: [PATCH 586/970] fix: use correct timestamp for op receipt (#12716) --- crates/optimism/rpc/src/eth/receipt.rs | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index a801a408fd5..2cc771d0e44 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -54,10 +54,10 @@ where /// L1 fee and data gas for a non-deposit transaction, or deposit nonce and receipt version for a /// deposit transaction. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] pub struct OpReceiptFieldsBuilder { /// Block timestamp. - pub l1_block_timestamp: u64, + pub block_timestamp: u64, /// The L1 fee for transaction. pub l1_fee: Option, /// L1 gas used by transaction. @@ -84,8 +84,19 @@ pub struct OpReceiptFieldsBuilder { impl OpReceiptFieldsBuilder { /// Returns a new builder. 
- pub fn new(block_timestamp: u64) -> Self { - Self { l1_block_timestamp: block_timestamp, ..Default::default() } + pub const fn new(block_timestamp: u64) -> Self { + Self { + block_timestamp, + l1_fee: None, + l1_data_gas: None, + l1_fee_scalar: None, + l1_base_fee: None, + deposit_nonce: None, + deposit_receipt_version: None, + l1_base_fee_scalar: None, + l1_blob_base_fee: None, + l1_blob_base_fee_scalar: None, + } } /// Applies [`L1BlockInfo`](revm::L1BlockInfo). @@ -96,7 +107,7 @@ impl OpReceiptFieldsBuilder { l1_block_info: revm::L1BlockInfo, ) -> Result { let raw_tx = tx.encoded_2718(); - let timestamp = self.l1_block_timestamp; + let timestamp = self.block_timestamp; self.l1_fee = Some( l1_block_info @@ -140,7 +151,7 @@ impl OpReceiptFieldsBuilder { /// Builds the [`OpTransactionReceiptFields`] object. pub const fn build(self) -> OpTransactionReceiptFields { let Self { - l1_block_timestamp: _, // used to compute other fields + block_timestamp: _, // used to compute other fields l1_fee, l1_data_gas: l1_gas_used, l1_fee_scalar, @@ -187,6 +198,7 @@ impl OpReceiptBuilder { all_receipts: &[Receipt], l1_block_info: revm::L1BlockInfo, ) -> Result { + let timestamp = meta.timestamp; let core_receipt = build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| { match receipt.tx_type { @@ -211,7 +223,7 @@ impl OpReceiptBuilder { } })?; - let op_receipt_fields = OpReceiptFieldsBuilder::default() + let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp) .l1_block_info(chain_spec, transaction, l1_block_info)? 
.deposit_nonce(receipt.deposit_nonce) .deposit_version(receipt.deposit_receipt_version) From a0d7503eb1ffd467d333a05c4ff21ef5771323cf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Nov 2024 18:52:06 +0100 Subject: [PATCH 587/970] feat: use network primitives pooled transaction AT (#12718) --- crates/net/eth-wire-types/src/message.rs | 6 +++++- crates/net/network-api/src/events.rs | 2 +- crates/net/network/src/manager.rs | 6 +++--- crates/net/network/src/message.rs | 6 +++--- crates/net/network/src/network.rs | 4 ++-- crates/net/network/src/transactions/mod.rs | 10 ++++------ 6 files changed, 18 insertions(+), 16 deletions(-) diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 93d42fb3ea0..3d34b8cae80 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -207,7 +207,11 @@ pub enum EthMessage { /// Represents a `GetPooledTransactions` request-response pair. GetPooledTransactions(RequestPair), /// Represents a `PooledTransactions` request-response pair. - PooledTransactions(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::PooledTransaction: serde::Serialize + serde::de::DeserializeOwned") + )] + PooledTransactions(RequestPair>), /// Represents a `GetNodeData` request-response pair. GetNodeData(RequestPair), /// Represents a `NodeData` request-response pair. diff --git a/crates/net/network-api/src/events.rs b/crates/net/network-api/src/events.rs index af392b6f9ea..624c43f5e1b 100644 --- a/crates/net/network-api/src/events.rs +++ b/crates/net/network-api/src/events.rs @@ -154,7 +154,7 @@ pub enum PeerRequest { /// The request for pooled transactions. request: GetPooledTransactions, /// The channel to send the response for pooled transactions. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Requests `NodeData` from the peer. 
/// diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index c9caa412274..c1db91773e3 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -90,7 +90,7 @@ pub struct NetworkManager { event_sender: EventSender>>, /// Sender half to send events to the /// [`TransactionsManager`](crate::transactions::TransactionsManager) task, if configured. - to_transactions_manager: Option>, + to_transactions_manager: Option>>, /// Sender half to send events to the /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) task, if configured. /// @@ -120,7 +120,7 @@ pub struct NetworkManager { impl NetworkManager { /// Sets the dedicated channel for events indented for the /// [`TransactionsManager`](crate::transactions::TransactionsManager). - pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender) { + pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender>) { self.to_transactions_manager = Some(UnboundedMeteredSender::new(tx, NETWORK_POOL_TRANSACTIONS_SCOPE)); } @@ -409,7 +409,7 @@ impl NetworkManager { /// Sends an event to the [`TransactionsManager`](crate::transactions::TransactionsManager) if /// configured. 
- fn notify_tx_manager(&self, event: NetworkTransactionEvent) { + fn notify_tx_manager(&self, event: NetworkTransactionEvent) { if let Some(ref tx) = self.to_transactions_manager { let _ = tx.send(event); } diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 4821e259292..c2511f4e16a 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -14,7 +14,7 @@ use reth_eth_wire::{ }; use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; -use reth_primitives::{PooledTransactionsElement, ReceiptWithBloom}; +use reth_primitives::ReceiptWithBloom; use std::{ sync::Arc, task::{ready, Context, Poll}, @@ -89,7 +89,7 @@ pub enum PeerResponse { /// Represents a response to a request for pooled transactions. PooledTransactions { /// The receiver channel for the response to a pooled transactions request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for `NodeData`. NodeData { @@ -146,7 +146,7 @@ pub enum PeerResponseResult { /// Represents a result containing block bodies or an error. BlockBodies(RequestResult>), /// Represents a result containing pooled transactions or an error. - PooledTransactions(RequestResult>), + PooledTransactions(RequestResult>), /// Represents a result containing node data or an error. NodeData(RequestResult>), /// Represents a result containing receipts or an error. diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 0af0cb1ad46..496b4250ffd 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -140,7 +140,7 @@ impl NetworkHandle { /// Send message to get the [`TransactionsHandle`]. /// /// Returns `None` if no transaction task is installed. 
- pub async fn transactions_handle(&self) -> Option { + pub async fn transactions_handle(&self) -> Option> { let (tx, rx) = oneshot::channel(); let _ = self.manager().send(NetworkHandleMessage::GetTransactionsHandle(tx)); rx.await.unwrap() @@ -504,7 +504,7 @@ pub(crate) enum NetworkHandleMessage>), /// Retrieves the `TransactionsHandle` via a oneshot sender. - GetTransactionsHandle(oneshot::Sender>), + GetTransactionsHandle(oneshot::Sender>>), /// Initiates a graceful shutdown of the network via a oneshot sender. Shutdown(oneshot::Sender<()>), /// Sets the network state between hibernation and active. diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 5bdf200e20f..f7a6fb8805e 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -161,9 +161,7 @@ impl TransactionsHandle { let res = self.get_transaction_hashes(vec![peer]).await?; Ok(res.into_values().next().unwrap_or_default()) } -} -impl TransactionsHandle { /// Requests the transactions directly from the given peer. /// /// Returns `None` if the peer is not connected. @@ -173,7 +171,7 @@ impl TransactionsHandle { &self, peer_id: PeerId, hashes: Vec, - ) -> Result>, RequestError> { + ) -> Result>, RequestError> { let Some(peer) = self.peer_handle(peer_id).await? else { return Ok(None) }; let (tx, rx) = oneshot::channel(); @@ -1762,7 +1760,7 @@ enum TransactionsCommand { /// All events related to transactions emitted by the network. #[derive(Debug)] -pub enum NetworkTransactionEvent { +pub enum NetworkTransactionEvent { /// Represents the event of receiving a list of transactions from a peer. /// /// This indicates transactions that were broadcasted to us from the peer. @@ -1786,10 +1784,10 @@ pub enum NetworkTransactionEvent { /// The received `GetPooledTransactions` request. request: GetPooledTransactions, /// The sender for responding to the request with a result of `PooledTransactions`. 
- response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Represents the event of receiving a `GetTransactionsHandle` request. - GetTransactionsHandle(oneshot::Sender>), + GetTransactionsHandle(oneshot::Sender>>), } /// Tracks stats about the [`TransactionsManager`]. From 749f98e021099b6ff53232532b4941de524a258d Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 20 Nov 2024 22:56:35 +0400 Subject: [PATCH 588/970] chore: add header and body generics to `SealedBlockWithSenders` (#12717) --- crates/blockchain-tree/src/state.rs | 18 +++++++++--------- crates/chain-state/src/notifications.rs | 10 +++++----- crates/evm/execution-types/src/chain.rs | 8 ++++---- crates/exex/exex/src/manager.rs | 12 ++++++------ crates/optimism/evm/src/lib.rs | 2 +- crates/primitives/src/block.rs | 22 +++++++++++++++------- 6 files changed, 40 insertions(+), 32 deletions(-) diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index ca8af6f9b58..a8e43240f4f 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -184,7 +184,7 @@ mod tests { let mut tree_state = TreeState::new(0, vec![], 5); // Create a chain with two blocks - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::random(); let block2_hash = B256::random(); @@ -254,8 +254,8 @@ mod tests { let block1_hash = B256::random(); let block2_hash = B256::random(); - let mut block1 = SealedBlockWithSenders::default(); - let mut block2 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(block1_hash); block1.block.header.set_block_number(9); @@ -296,8 +296,8 @@ mod tests { let block1_hash = B256::random(); let block2_hash = B256::random(); - let mut block1 = SealedBlockWithSenders::default(); - let mut block2 = 
SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(block1_hash); block1.block.header.set_block_number(9); @@ -336,7 +336,7 @@ mod tests { // Create a block with a random hash and add it to the buffer let block_hash = B256::random(); - let mut block = SealedBlockWithSenders::default(); + let mut block: SealedBlockWithSenders = Default::default(); block.block.header.set_hash(block_hash); // Add the block to the buffered blocks in the TreeState @@ -363,8 +363,8 @@ mod tests { let ancestor_hash = B256::random(); let descendant_hash = B256::random(); - let mut ancestor_block = SealedBlockWithSenders::default(); - let mut descendant_block = SealedBlockWithSenders::default(); + let mut ancestor_block: SealedBlockWithSenders = Default::default(); + let mut descendant_block: SealedBlockWithSenders = Default::default(); ancestor_block.block.header.set_hash(ancestor_hash); descendant_block.block.header.set_hash(descendant_hash); @@ -397,7 +397,7 @@ mod tests { let receipt1 = Receipt::default(); let receipt2 = Receipt::default(); - let mut block = SealedBlockWithSenders::default(); + let mut block: SealedBlockWithSenders = Default::default(); block.block.header.set_hash(block_hash); let receipts = vec![receipt1, receipt2]; diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 84fb120d4b2..865f2bd6584 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -202,7 +202,7 @@ mod tests { #[test] fn test_commit_notification() { - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); @@ -235,7 +235,7 @@ mod tests { #[test] fn test_reorg_notification() { - let block = SealedBlockWithSenders::default(); + let block: 
SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); let block3_hash = B256::new([0x03; 32]); @@ -277,7 +277,7 @@ mod tests { #[test] fn test_block_receipts_commit() { // Create a default block instance for use in block definitions. - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); // Define unique hashes for two blocks to differentiate them in the chain. let block1_hash = B256::new([0x01; 32]); @@ -343,7 +343,7 @@ mod tests { #[test] fn test_block_receipts_reorg() { // Define block1 for the old chain segment, which will be reverted. - let mut old_block1 = SealedBlockWithSenders::default(); + let mut old_block1: SealedBlockWithSenders = Default::default(); old_block1.set_block_number(1); old_block1.set_hash(B256::new([0x01; 32])); old_block1.block.body.transactions.push(TransactionSigned::default()); @@ -367,7 +367,7 @@ mod tests { Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); // Define block2 for the new chain segment, which will be committed. 
- let mut new_block1 = SealedBlockWithSenders::default(); + let mut new_block1: SealedBlockWithSenders = Default::default(); new_block1.set_block_number(2); new_block1.set_hash(B256::new([0x02; 32])); new_block1.block.body.transactions.push(TransactionSigned::default()); diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index b32b53b885e..2c672884d60 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -663,7 +663,7 @@ mod tests { #[test] fn chain_append() { - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); let block3_hash = B256::new([0x03; 32]); @@ -727,13 +727,13 @@ mod tests { vec![], ); - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([15; 32]); block1.set_block_number(1); block1.set_hash(block1_hash); block1.senders.push(Address::new([4; 20])); - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); let block2_hash = B256::new([16; 32]); block2.set_block_number(2); block2.set_hash(block2_hash); @@ -797,7 +797,7 @@ mod tests { use reth_primitives::{Receipt, Receipts, TxType}; // Create a default SealedBlockWithSenders object - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); // Define block hashes for block1 and block2 let block1_hash = B256::new([0x01; 32]); diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index e8902e0f352..c8c06021efa 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -736,7 +736,7 @@ mod tests { ExExManager::new((), vec![exex_handle], 10, wal, empty_finalized_header_stream()); // Define the notification for testing - let mut block1 = 
SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -754,7 +754,7 @@ mod tests { assert_eq!(exex_manager.next_id, 1); // Push another notification - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block2.block.header.set_hash(B256::new([0x02; 32])); block2.block.header.set_block_number(20); @@ -792,7 +792,7 @@ mod tests { ); // Push some notifications to fill part of the buffer - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -1051,11 +1051,11 @@ mod tests { assert_eq!(exex_handle.next_notification_id, 0); // Setup two blocks for the chain commit notification - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block2.block.header.set_hash(B256::new([0x02; 32])); block2.block.header.set_block_number(11); @@ -1104,7 +1104,7 @@ mod tests { // Set finished_height to a value higher than the block tip exex_handle.finished_height = Some(BlockNumHash::new(15, B256::random())); - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index be1fb6d3227..52b974e6c86 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -551,7 +551,7 @@ mod tests { #[test] fn 
receipts_by_block_hash() { // Create a default SealedBlockWithSenders object - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); // Define block hashes for block1 and block2 let block1_hash = B256::new([0x01; 32]); diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 5c47c49f437..57c63d53a43 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -2,7 +2,7 @@ use crate::{GotExpected, SealedHeader, TransactionSigned, TransactionSignedEcRec use alloc::vec::Vec; use alloy_consensus::Header; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; -use alloy_primitives::{Address, Bytes, B256}; +use alloy_primitives::{Address, Bytes, Sealable, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] @@ -493,22 +493,30 @@ where } /// Sealed block with senders recovered from transactions. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)] -pub struct SealedBlockWithSenders { +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] +pub struct SealedBlockWithSenders { /// Sealed block #[deref] #[deref_mut] - pub block: SealedBlock, + pub block: SealedBlock, /// List of senders that match transactions from block. pub senders: Vec
, } -impl SealedBlockWithSenders { +impl Default for SealedBlockWithSenders { + fn default() -> Self { + Self { block: SealedBlock::default(), senders: Default::default() } + } +} + +impl SealedBlockWithSenders { /// New sealed block with sender. Return none if len of tx and senders does not match - pub fn new(block: SealedBlock, senders: Vec
) -> Option { - (block.body.transactions.len() == senders.len()).then_some(Self { block, senders }) + pub fn new(block: SealedBlock, senders: Vec
) -> Option { + (block.body.transactions().len() == senders.len()).then_some(Self { block, senders }) } +} +impl SealedBlockWithSenders { /// Split Structure to its components #[inline] pub fn into_components(self) -> (SealedBlock, Vec
) { From 0c5984179e0304c11ad4c5bc0057508b65441338 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 21 Nov 2024 02:48:39 +0400 Subject: [PATCH 589/970] refactor(storage): unify blocks insertion logic (#12694) --- Cargo.lock | 1 + bin/reth/src/commands/debug_cmd/merkle.rs | 4 +- crates/blockchain-tree/src/blockchain_tree.rs | 23 ++- crates/blockchain-tree/src/externals.rs | 14 +- crates/blockchain-tree/src/shareable.rs | 8 +- .../commands/src/init_state/without_evm.rs | 15 +- crates/consensus/beacon/src/engine/mod.rs | 9 +- crates/engine/local/src/service.rs | 4 +- crates/engine/service/src/service.rs | 4 +- crates/engine/tree/Cargo.toml | 2 + crates/engine/tree/src/persistence.rs | 16 +- crates/exex/exex/src/backfill/test_utils.rs | 6 +- crates/exex/exex/src/manager.rs | 4 +- crates/exex/exex/src/notifications.rs | 5 +- crates/node/builder/src/launch/common.rs | 5 +- crates/stages/stages/src/stages/bodies.rs | 39 +---- .../stages/src/stages/hashing_account.rs | 5 +- crates/storage/db-api/src/models/mod.rs | 5 +- .../src/providers/database/metrics.rs | 28 --- .../provider/src/providers/database/mod.rs | 17 +- .../src/providers/database/provider.rs | 163 ++++++++---------- crates/storage/provider/src/providers/mod.rs | 5 +- crates/storage/provider/src/traits/block.rs | 34 +++- crates/storage/provider/src/writer/mod.rs | 79 +-------- 24 files changed, 226 insertions(+), 269 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 09bcccf652e..017b84f6e49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7254,6 +7254,7 @@ dependencies = [ "reth-payload-primitives", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-prune-types", diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index db4cd952e8d..bb8a6a2c4a1 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -21,7 +21,7 @@ use 
reth_node_ethereum::EthExecutorProvider; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, + OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, StorageLocation, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -148,7 +148,7 @@ impl> Command { .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; trace!(target: "reth::cli", block_number, "Executing block"); - provider_rw.insert_block(sealed_block.clone())?; + provider_rw.insert_block(sealed_block.clone(), StorageLocation::Database)?; td += sealed_block.difficulty; let mut executor = executor_provider.batch_executor(StateProviderDatabase::new( diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 1a8a390e99d..8e192492593 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1,6 +1,7 @@ //! 
Implementation of [`BlockchainTree`] use crate::{ + externals::TreeNodeTypes, metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics}, state::{SidechainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals, @@ -21,10 +22,10 @@ use reth_primitives::{ SealedHeader, StaticFileSegment, }; use reth_provider::{ - providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter, - CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - ChainSpecProvider, ChainSplit, ChainSplitTarget, DBProvider, DisplayBlocksChain, - HeaderProvider, ProviderError, StaticFileProviderFactory, + BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, + CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, + ChainSplitTarget, DBProvider, DisplayBlocksChain, HeaderProvider, ProviderError, + StaticFileProviderFactory, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; @@ -93,7 +94,7 @@ impl BlockchainTree { impl BlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. 
@@ -1386,16 +1387,18 @@ mod tests { use reth_db_api::transaction::DbTxMut; use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_node_types::FullNodePrimitives; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, Account, BlockBody, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use reth_provider::{ + providers::ProviderNodeTypes, test_utils::{ blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }, - ProviderFactory, + ProviderFactory, StorageLocation, }; use reth_revm::primitives::AccountInfo; use reth_stages_api::StageCheckpoint; @@ -1420,7 +1423,12 @@ mod tests { TreeExternals::new(provider_factory, consensus, executor_factory) } - fn setup_genesis(factory: &ProviderFactory, mut genesis: SealedBlock) { + fn setup_genesis< + N: ProviderNodeTypes>, + >( + factory: &ProviderFactory, + mut genesis: SealedBlock, + ) { // insert genesis to db. 
genesis.header.set_block_number(10); @@ -1551,6 +1559,7 @@ mod tests { SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()) .try_seal_with_senders() .unwrap(), + StorageLocation::Database, ) .unwrap(); let account = Account { balance: initial_signer_balance, ..Default::default() }; diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 4e22fcb78b6..76b65824854 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -4,8 +4,8 @@ use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::Consensus; use reth_db::{static_file::HeaderMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; -use reth_node_types::NodeTypesWithDB; -use reth_primitives::StaticFileSegment; +use reth_node_types::{Block, FullNodePrimitives, NodeTypesWithDB}; +use reth_primitives::{BlockBody, StaticFileSegment}; use reth_provider::{ providers::ProviderNodeTypes, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, StatsReader, @@ -13,6 +13,16 @@ use reth_provider::{ use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; +/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within [`TreeExternals`]. +pub trait TreeNodeTypes: + ProviderNodeTypes>> +{ +} +impl TreeNodeTypes for T where + T: ProviderNodeTypes>> +{ +} + /// A container for external components. /// /// This is a simple container for external components used throughout the blockchain tree diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index ec1f3cccf97..f997e0a062d 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -1,5 +1,7 @@ //! Wrapper around `BlockchainTree` that allows for it to be shared. 
+use crate::externals::TreeNodeTypes; + use super::BlockchainTree; use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; @@ -36,7 +38,7 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { @@ -107,7 +109,7 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { @@ -170,7 +172,7 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { fn find_pending_state_provider( diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index c6e1f9a51dd..e3594a59363 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -3,12 +3,10 @@ use alloy_rlp::Decodable; use alloy_consensus::Header; use reth_node_builder::NodePrimitives; -use reth_primitives::{ - BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, -}; +use reth_primitives::{SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment}; use reth_provider::{ providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileProviderFactory, - StaticFileWriter, + StaticFileWriter, StorageLocation, }; use reth_stages::{StageCheckpoint, StageId}; @@ -33,7 +31,9 @@ pub fn setup_without_evm( total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: StaticFileProviderFactory + StageCheckpointWriter + BlockWriter, + Provider: StaticFileProviderFactory + + StageCheckpointWriter + + BlockWriter, { info!(target: "reth::cli", "Setting up dummy EVM chain before importing state."); @@ -64,11 +64,12 @@ fn 
append_first_block( total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: BlockWriter + StaticFileProviderFactory, + Provider: BlockWriter + StaticFileProviderFactory, { provider_rw.insert_block( - SealedBlockWithSenders::new(SealedBlock::new(header.clone(), BlockBody::default()), vec![]) + SealedBlockWithSenders::new(SealedBlock::new(header.clone(), Default::default()), vec![]) .expect("no senders or txes"), + StorageLocation::Database, )?; let sf_provider = provider_rw.static_file_provider(); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 0b93ae0f29a..2ad06e68b67 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1991,7 +1991,8 @@ mod tests { use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use assert_matches::assert_matches; use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_provider::{BlockWriter, ProviderFactory}; + use reth_node_types::FullNodePrimitives; + use reth_provider::{BlockWriter, ProviderFactory, StorageLocation}; use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; use reth_stages_api::StageCheckpoint; @@ -2169,7 +2170,10 @@ mod tests { assert_matches!(rx.await, Ok(Ok(()))); } - fn insert_blocks<'a, N: ProviderNodeTypes>( + fn insert_blocks< + 'a, + N: ProviderNodeTypes>, + >( provider_factory: ProviderFactory, mut blocks: impl Iterator, ) { @@ -2179,6 +2183,7 @@ mod tests { provider .insert_block( b.clone().try_seal_with_senders().expect("invalid tx signature in block"), + StorageLocation::Database, ) .map(drop) }) diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 4e4826be31d..3575bc133c6 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -27,7 +27,7 @@ use reth_engine_tree::{ EngineApiKind, EngineApiRequest, 
EngineApiRequestHandler, EngineRequestHandler, FromEngine, RequestHandlerEvent, }, - persistence::PersistenceHandle, + persistence::{PersistenceHandle, PersistenceNodeTypes}, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; use reth_evm::execute::BlockExecutorProvider; @@ -59,7 +59,7 @@ where impl LocalEngineService where - N: EngineNodeTypes, + N: EngineNodeTypes + PersistenceNodeTypes, { /// Constructor for [`LocalEngineService`]. #[allow(clippy::too_many_arguments)] diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index cec9d981f1b..49233439e0a 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -8,7 +8,7 @@ use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, engine::{EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineHandler}, - persistence::PersistenceHandle, + persistence::{PersistenceHandle, PersistenceNodeTypes}, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; pub use reth_engine_tree::{ @@ -59,7 +59,7 @@ where impl EngineService where - N: EngineNodeTypes, + N: EngineNodeTypes + PersistenceNodeTypes, Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index d6e1c80a726..70be84a9f79 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -27,6 +27,7 @@ reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-revm.workspace = true @@ -107,4 +108,5 @@ test-utils = [ "reth-provider/test-utils", "reth-trie/test-utils", "reth-prune-types?/test-utils", + "reth-primitives-traits/test-utils", ] diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs 
index e0c9e0362d0..0199ae3f461 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -2,6 +2,8 @@ use crate::metrics::PersistenceMetrics; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; +use reth_primitives::BlockBody; +use reth_primitives_traits::{Block, FullNodePrimitives}; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory, @@ -16,6 +18,16 @@ use thiserror::Error; use tokio::sync::oneshot; use tracing::{debug, error}; +/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within +/// [`PersistenceService`]. +pub trait PersistenceNodeTypes: + ProviderNodeTypes>> +{ +} +impl PersistenceNodeTypes for T where + T: ProviderNodeTypes>> +{ +} /// Writes parts of reth's in memory tree state to the database and static files. /// /// This is meant to be a spawned service that listens for various incoming persistence operations, @@ -60,7 +72,7 @@ impl PersistenceService { } } -impl PersistenceService { +impl PersistenceService { /// This is the main loop, that will listen to database events and perform the requested /// database actions pub fn run(mut self) -> Result<(), PersistenceError> { @@ -198,7 +210,7 @@ impl PersistenceHandle { } /// Create a new [`PersistenceHandle`], and spawn the persistence service. 
- pub fn spawn_service( + pub fn spawn_service( provider_factory: ProviderFactory, pruner: PrunerWithFactory>, sync_metrics_tx: MetricEventsSender, diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 80af408c5c8..5d0f88f517d 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -9,6 +9,7 @@ use reth_evm::execute::{ BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }; use reth_evm_ethereum::execute::EthExecutorProvider; +use reth_node_api::FullNodePrimitives; use reth_primitives::{ Block, BlockBody, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction, }; @@ -57,7 +58,7 @@ pub(crate) fn execute_block_and_commit_to_database( block: &BlockWithSenders, ) -> eyre::Result> where - N: ProviderNodeTypes, + N: ProviderNodeTypes>, { let provider = provider_factory.provider()?; @@ -161,7 +162,7 @@ pub(crate) fn blocks_and_execution_outputs( key_pair: Keypair, ) -> eyre::Result)>> where - N: ProviderNodeTypes, + N: ProviderNodeTypes>, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; @@ -183,6 +184,7 @@ pub(crate) fn blocks_and_execution_outcome( ) -> eyre::Result<(Vec, ExecutionOutcome)> where N: ProviderNodeTypes, + N::Primitives: FullNodePrimitives, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index c8c06021efa..e3d3a3c0690 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -657,7 +657,7 @@ mod tests { use reth_primitives::SealedBlockWithSenders; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockReader, - BlockWriter, Chain, DatabaseProviderFactory, TransactionVariant, + BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, TransactionVariant, }; use reth_testing_utils::generators::{self, random_block, 
BlockParams}; @@ -1238,7 +1238,7 @@ mod tests { .seal_with_senders() .unwrap(); let provider_rw = provider_factory.database_provider_rw().unwrap(); - provider_rw.insert_block(block.clone()).unwrap(); + provider_rw.insert_block(block.clone(), StorageLocation::Database).unwrap(); provider_rw.commit().unwrap(); let provider = BlockchainProvider2::new(provider_factory).unwrap(); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 14cfe9be4d9..baf504166d1 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -403,7 +403,7 @@ mod tests { use reth_primitives::Block; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockWriter, - Chain, DatabaseProviderFactory, + Chain, DatabaseProviderFactory, StorageLocation, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; use tokio::sync::mpsc; @@ -431,6 +431,7 @@ mod tests { let provider_rw = provider_factory.provider_rw()?; provider_rw.insert_block( node_head_block.clone().seal_with_senders().ok_or_eyre("failed to recover senders")?, + StorageLocation::Database, )?; provider_rw.commit()?; @@ -574,7 +575,7 @@ mod tests { ..Default::default() }; let provider_rw = provider.database_provider_rw()?; - provider_rw.insert_block(node_head_block)?; + provider_rw.insert_block(node_head_block, StorageLocation::Database)?; provider_rw.commit()?; let node_head_notification = ExExNotification::ChainCommitted { new: Arc::new( diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 903b0980354..225f2029c28 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -12,7 +12,8 @@ use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, + 
externals::TreeNodeTypes, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, + TreeExternals, }; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; @@ -631,7 +632,7 @@ impl Attached::ChainSpec>, WithMeteredProviders>, > where - T: FullNodeTypes, + T: FullNodeTypes, { /// Returns access to the underlying database. pub const fn database(&self) -> &::DB { diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 80185eade87..b6eab349e16 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -19,6 +19,7 @@ use reth_primitives::StaticFileSegment; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockReader, BlockWriter, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, + StorageLocation, }; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, @@ -122,7 +123,7 @@ where let (from_block, to_block) = input.next_block_range().into_inner(); // Get id for the next tx_num of zero if there are no transactions. - let mut next_tx_num = provider + let next_tx_num = provider .tx_ref() .cursor_read::()? .last()? @@ -130,8 +131,6 @@ where .unwrap_or_default(); let static_file_provider = provider.static_file_provider(); - let mut static_file_producer = - static_file_provider.get_writer(from_block, StaticFileSegment::Transactions)?; // Make sure Transactions static file is at the same height. If it's further, this // input execution was interrupted previously and we need to unwind the static file. @@ -145,6 +144,8 @@ where // stage run. So, our only solution is to unwind the static files and proceed from the // database expected height. 
Ordering::Greater => { + let mut static_file_producer = + static_file_provider.get_writer(from_block, StaticFileSegment::Transactions)?; static_file_producer .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?; // Since this is a database <-> static file inconsistency, we commit the change @@ -168,40 +169,16 @@ where let buffer = self.buffer.take().ok_or(StageError::MissingDownloadBuffer)?; trace!(target: "sync::stages::bodies", bodies_len = buffer.len(), "Writing blocks"); - let mut highest_block = from_block; - - // Firstly, write transactions to static files - for response in &buffer { - let block_number = response.block_number(); - - // Increment block on static file header. - if block_number > 0 { - static_file_producer.increment_block(block_number)?; - } - - match response { - BlockResponse::Full(block) => { - // Write transactions - for transaction in block.body.transactions() { - static_file_producer.append_transaction(next_tx_num, transaction)?; - - // Increment transaction id for each transaction. - next_tx_num += 1; - } - } - BlockResponse::Empty(_) => {} - }; - - highest_block = block_number; - } + let highest_block = buffer.last().map(|r| r.block_number()).unwrap_or(from_block); - // Write bodies to database. This will NOT write transactions to database as we've already - // written them directly to static files. + // Write bodies to database. provider.append_block_bodies( buffer .into_iter() .map(|response| (response.block_number(), response.into_body())) .collect(), + // We are writing transactions directly to static files. 
+ StorageLocation::StaticFiles, )?; // The stage is "done" if: diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 1ca0e1aa132..ecca1e0716c 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -61,7 +61,10 @@ impl AccountHashingStage { pub fn seed( provider: &reth_provider::DatabaseProvider, opts: SeedOpts, - ) -> Result, StageError> { + ) -> Result, StageError> + where + N::Primitives: reth_primitives_traits::FullNodePrimitives, + { use alloy_primitives::U256; use reth_db_api::models::AccountBeforeTx; use reth_provider::{StaticFileProviderFactory, StaticFileWriter}; diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 7b1cd5a1ddb..5d18711922e 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -8,7 +8,9 @@ use alloy_consensus::Header; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::{Account, Bytecode, Receipt, StorageEntry, TransactionSignedNoHash, TxType}; +use reth_primitives::{ + Account, Bytecode, Receipt, StorageEntry, TransactionSigned, TransactionSignedNoHash, TxType, +}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; @@ -225,6 +227,7 @@ impl_compression_for_compact!( Bytecode, AccountBeforeTx, TransactionSignedNoHash, + TransactionSigned, CompactU256, StageCheckpoint, PruneCheckpoint, diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index 7e9ee7202c0..4ee8f1ce5b1 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -22,14 +22,6 @@ impl 
Default for DurationsRecorder { } impl DurationsRecorder { - /// Saves the provided duration for future logging and instantly reports as a metric with - /// `action` label. - pub(crate) fn record_duration(&mut self, action: Action, duration: Duration) { - self.actions.push((action, duration)); - self.current_metrics.record_duration(action, duration); - self.latest = Some(self.start.elapsed()); - } - /// Records the duration since last record, saves it for future logging and instantly reports as /// a metric with `action` label. pub(crate) fn record_relative(&mut self, action: Action) { @@ -56,11 +48,6 @@ pub(crate) enum Action { InsertHeaders, InsertHeaderNumbers, InsertHeaderTerminalDifficulties, - InsertBlockOmmers, - InsertTransactionSenders, - InsertTransactions, - InsertTransactionHashNumbers, - InsertBlockWithdrawals, InsertBlockBodyIndices, InsertTransactionBlocks, GetNextTxNum, @@ -95,16 +82,6 @@ struct DatabaseProviderMetrics { insert_header_numbers: Histogram, /// Duration of insert header TD insert_header_td: Histogram, - /// Duration of insert block ommers - insert_block_ommers: Histogram, - /// Duration of insert tx senders - insert_tx_senders: Histogram, - /// Duration of insert transactions - insert_transactions: Histogram, - /// Duration of insert transaction hash numbers - insert_tx_hash_numbers: Histogram, - /// Duration of insert block withdrawals - insert_block_withdrawals: Histogram, /// Duration of insert block body indices insert_block_body_indices: Histogram, /// Duration of insert transaction blocks @@ -131,11 +108,6 @@ impl DatabaseProviderMetrics { Action::InsertHeaders => self.insert_headers.record(duration), Action::InsertHeaderNumbers => self.insert_header_numbers.record(duration), Action::InsertHeaderTerminalDifficulties => self.insert_header_td.record(duration), - Action::InsertBlockOmmers => self.insert_block_ommers.record(duration), - Action::InsertTransactionSenders => self.insert_tx_senders.record(duration), - 
Action::InsertTransactions => self.insert_transactions.record(duration), - Action::InsertTransactionHashNumbers => self.insert_tx_hash_numbers.record(duration), - Action::InsertBlockWithdrawals => self.insert_block_withdrawals.record(duration), Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration), Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration), Action::GetNextTxNum => self.get_next_tx_num.record(duration), diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index a64bb2578dd..cc50aa35145 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -644,7 +644,7 @@ mod tests { providers::{StaticFileProvider, StaticFileWriter}, test_utils::{blocks::TEST_BLOCK, create_test_provider_factory, MockNodeTypesWithDB}, BlockHashReader, BlockNumReader, BlockWriter, DBProvider, HeaderSyncGapProvider, - TransactionsProvider, + StorageLocation, TransactionsProvider, }; use alloy_primitives::{TxNumber, B256, U256}; use assert_matches::assert_matches; @@ -715,7 +715,10 @@ mod tests { { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap()), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); assert_matches!( @@ -733,7 +736,10 @@ mod tests { }; let provider = factory.with_prune_modes(prune_modes).provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap(),), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); assert_matches!(provider.transaction_sender(0), Ok(None)); @@ -754,7 +760,10 @@ mod tests { let provider = factory.provider_rw().unwrap(); assert_matches!( - 
provider.insert_block(block.clone().try_seal_with_senders().unwrap()), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 4690e27821e..66bc4c05341 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -17,8 +17,8 @@ use crate::{ LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, StaticFileProviderFactory, - StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + StatsReader, StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, + TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; use alloy_consensus::Header; use alloy_eips::{ @@ -37,7 +37,7 @@ use reth_db_api::{ database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - ShardedKey, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, + ShardedKey, StoredBlockBodyIndices, }, table::Table, transaction::{DbTx, DbTxMut}, @@ -52,7 +52,7 @@ use reth_primitives::{ SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, }; -use reth_primitives_traits::{BlockBody as _, FullNodePrimitives}; +use reth_primitives_traits::{BlockBody as _, FullNodePrimitives, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{StateProvider, 
StorageChangeSetReader, TryIntoHistoricalStateProvider}; @@ -73,10 +73,9 @@ use std::{ fmt::Debug, ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::{mpsc, Arc}, - time::{Duration, Instant}, }; use tokio::sync::watch; -use tracing::{debug, error, trace, warn}; +use tracing::{debug, error, trace}; /// A [`DatabaseProvider`] that holds a read-only database transaction. pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; @@ -292,7 +291,7 @@ impl DatabaseProvi /// Inserts an historical block. **Used for setting up test environments** pub fn insert_historical_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders::Body>, ) -> ProviderResult { let ttd = if block.number == 0 { block.difficulty @@ -316,7 +315,7 @@ impl DatabaseProvi writer.append_header(block.header.as_ref(), ttd, &block.hash())?; - self.insert_block(block) + self.insert_block(block, StorageLocation::Database) } } @@ -3137,7 +3136,8 @@ impl BlockWriter /// [`TransactionHashNumbers`](tables::TransactionHashNumbers). fn insert_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, + write_transactions_to: StorageLocation, ) -> ProviderResult { let block_number = block.number; @@ -3166,15 +3166,6 @@ impl BlockWriter self.tx.put::(block_number, ttd.into())?; durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties); - // insert body ommers data - if !block.body.ommers.is_empty() { - self.tx.put::( - block_number, - StoredBlockOmmers { ommers: block.block.body.ommers }, - )?; - durations_recorder.record_relative(metrics::Action::InsertBlockOmmers); - } - let mut next_tx_num = self .tx .cursor_read::()? 
@@ -3184,84 +3175,28 @@ impl BlockWriter durations_recorder.record_relative(metrics::Action::GetNextTxNum); let first_tx_num = next_tx_num; - let tx_count = block.block.body.transactions.len() as u64; + let tx_count = block.block.body.transactions().len() as u64; // Ensures we have all the senders for the block's transactions. - let mut tx_senders_elapsed = Duration::default(); - let mut transactions_elapsed = Duration::default(); - let mut tx_hash_numbers_elapsed = Duration::default(); - for (transaction, sender) in - block.block.body.transactions.into_iter().zip(block.senders.iter()) + block.block.body.transactions().iter().zip(block.senders.iter()) { - let hash = transaction.hash(); - - if self - .prune_modes - .sender_recovery - .as_ref() - .filter(|prune_mode| prune_mode.is_full()) - .is_none() - { - let start = Instant::now(); - self.tx.put::(next_tx_num, *sender)?; - tx_senders_elapsed += start.elapsed(); - } + let hash = transaction.tx_hash(); - let start = Instant::now(); - self.tx.put::(next_tx_num, transaction.into())?; - let elapsed = start.elapsed(); - if elapsed > Duration::from_secs(1) { - warn!( - target: "providers::db", - ?block_number, - tx_num = %next_tx_num, - hash = %hash, - ?elapsed, - "Transaction insertion took too long" - ); + if self.prune_modes.sender_recovery.as_ref().is_none_or(|m| !m.is_full()) { + self.tx.put::(next_tx_num, *sender)?; } - transactions_elapsed += elapsed; - if self - .prune_modes - .transaction_lookup - .filter(|prune_mode| prune_mode.is_full()) - .is_none() - { - let start = Instant::now(); - self.tx.put::(hash, next_tx_num)?; - tx_hash_numbers_elapsed += start.elapsed(); + if self.prune_modes.transaction_lookup.is_none_or(|m| !m.is_full()) { + self.tx.put::(*hash, next_tx_num)?; } next_tx_num += 1; } - durations_recorder - .record_duration(metrics::Action::InsertTransactionSenders, tx_senders_elapsed); - durations_recorder - .record_duration(metrics::Action::InsertTransactions, transactions_elapsed); - 
durations_recorder.record_duration( - metrics::Action::InsertTransactionHashNumbers, - tx_hash_numbers_elapsed, - ); - if let Some(withdrawals) = block.block.body.withdrawals { - if !withdrawals.is_empty() { - self.tx.put::( - block_number, - StoredBlockWithdrawals { withdrawals }, - )?; - durations_recorder.record_relative(metrics::Action::InsertBlockWithdrawals); - } - } - - let block_indices = StoredBlockBodyIndices { first_tx_num, tx_count }; - self.tx.put::(block_number, block_indices)?; - durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); - - if !block_indices.is_empty() { - self.tx.put::(block_indices.last_tx_num(), block_number)?; - durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); - } + self.append_block_bodies( + vec![(block_number, Some(block.block.body))], + write_transactions_to, + )?; debug!( target: "providers::db", @@ -3270,33 +3205,83 @@ impl BlockWriter "Inserted block" ); - Ok(block_indices) + Ok(StoredBlockBodyIndices { first_tx_num, tx_count }) } fn append_block_bodies( &self, bodies: Vec<(BlockNumber, Option)>, + write_transactions_to: StorageLocation, ) -> ProviderResult<()> { + let Some(from_block) = bodies.first().map(|(block, _)| *block) else { return Ok(()) }; + + // Initialize writer if we will be writing transactions to staticfiles + let mut tx_static_writer = write_transactions_to + .static_files() + .then(|| { + self.static_file_provider.get_writer(from_block, StaticFileSegment::Transactions) + }) + .transpose()?; + let mut block_indices_cursor = self.tx.cursor_write::()?; let mut tx_block_cursor = self.tx.cursor_write::()?; + // Initialize cursor if we will be writing transactions to database + let mut tx_cursor = write_transactions_to + .database() + .then(|| { + self.tx.cursor_write::::Transaction, + >>() + }) + .transpose()?; + // Get id for the next tx_num of zero if there are no transactions. 
let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); for (block_number, body) in &bodies { + // Increment block on static file header. + if let Some(writer) = tx_static_writer.as_mut() { + writer.increment_block(*block_number)?; + } + let tx_count = body.as_ref().map(|b| b.transactions().len() as u64).unwrap_or_default(); let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count }; + let mut durations_recorder = metrics::DurationsRecorder::default(); + // insert block meta block_indices_cursor.append(*block_number, block_indices)?; - next_tx_num += tx_count; + durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); + let Some(body) = body else { continue }; // write transaction block index if !body.transactions().is_empty() { tx_block_cursor.append(block_indices.last_tx_num(), *block_number)?; + durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); + } + + // write transactions + for transaction in body.transactions() { + if let Some(writer) = tx_static_writer.as_mut() { + writer.append_transaction(next_tx_num, transaction)?; + } + if let Some(cursor) = tx_cursor.as_mut() { + cursor.append(next_tx_num, transaction.clone())?; + } + + // Increment transaction id for each transaction. 
+ next_tx_num += 1; } + + debug!( + target: "providers::db", + ?block_number, + actions = ?durations_recorder.actions, + "Inserted block body" + ); } self.storage.writer().write_block_bodies(self, bodies)?; @@ -3307,7 +3292,7 @@ impl BlockWriter /// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually fn append_blocks_with_state( &self, - blocks: Vec, + blocks: Vec>, execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, @@ -3326,7 +3311,7 @@ impl BlockWriter // Insert the blocks for block in blocks { - self.insert_block(block)?; + self.insert_block(block, StorageLocation::Database)?; durations_recorder.record_relative(metrics::Action::InsertBlock); } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index d049243377e..d530917909c 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -20,6 +20,7 @@ use reth_blockchain_tree_api::{ }; use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubscriptions}; use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB}; @@ -75,7 +76,7 @@ where Self: NodeTypes< ChainSpec: EthereumHardforks, Storage: ChainStorage, - Primitives: FullNodePrimitives, + Primitives: FullNodePrimitives, >, { } @@ -84,7 +85,7 @@ impl NodeTypesForProvider for T where T: NodeTypes< ChainSpec: EthereumHardforks, Storage: ChainStorage, - Primitives: FullNodePrimitives, + Primitives: FullNodePrimitives, > { } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index a0dae1783ea..c84534e7a5d 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,3 +1,4 @@ +use 
alloy_consensus::Header; use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; @@ -6,6 +7,29 @@ use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; use std::ops::RangeInclusive; +/// An enum that represents the storage location for a piece of data. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum StorageLocation { + /// Write only to static files. + StaticFiles, + /// Write only to the database. + Database, + /// Write to both the database and static files. + Both, +} + +impl StorageLocation { + /// Returns true if the storage location includes static files. + pub const fn static_files(&self) -> bool { + matches!(self, Self::StaticFiles | Self::Both) + } + + /// Returns true if the storage location includes the database. + pub const fn database(&self) -> bool { + matches!(self, Self::Database | Self::Both) + } +} + /// BlockExecution Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockExecutionWriter: BlockWriter + Send + Sync { @@ -40,8 +64,11 @@ pub trait BlockWriter: Send + Sync { /// /// Return [StoredBlockBodyIndices] that contains indices of the first and last transactions and /// transition in the block. - fn insert_block(&self, block: SealedBlockWithSenders) - -> ProviderResult; + fn insert_block( + &self, + block: SealedBlockWithSenders, + write_transactions_to: StorageLocation, + ) -> ProviderResult; /// Appends a batch of block bodies extending the canonical chain. 
This is invoked during /// `Bodies` stage and does not write to `TransactionHashNumbers` and `TransactionSenders` @@ -51,6 +78,7 @@ pub trait BlockWriter: Send + Sync { fn append_block_bodies( &self, bodies: Vec<(BlockNumber, Option)>, + write_transactions_to: StorageLocation, ) -> ProviderResult<()>; /// Appends a batch of sealed blocks to the blockchain, including sender information, and @@ -69,7 +97,7 @@ pub trait BlockWriter: Send + Sync { /// Returns `Ok(())` on success, or an error if any operation fails. fn append_blocks_with_state( &self, - blocks: Vec, + blocks: Vec>, execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 17dea5a6d51..3878cf2a9e3 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -2,7 +2,7 @@ use crate::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter as SfWriter}, writer::static_file::StaticFileWriter, BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, - StaticFileProviderFactory, TrieWriter, + StaticFileProviderFactory, StorageLocation, TrieWriter, }; use alloy_consensus::Header; use alloy_primitives::{BlockNumber, B256, U256}; @@ -15,7 +15,7 @@ use reth_db::{ }; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{SealedBlock, StaticFileSegment, TransactionSignedNoHash}; +use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, @@ -148,7 +148,7 @@ impl UnifiedStorageWriter<'_, (), ()> { impl UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider> where ProviderDB: DBProvider - + BlockWriter + + BlockWriter + TransactionsProviderExt + 
StateChangeWriter + TrieWriter @@ -195,7 +195,7 @@ where for block in blocks { let sealed_block = block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); - self.database().insert_block(sealed_block)?; + self.database().insert_block(sealed_block, StorageLocation::Both)?; self.save_header_and_transactions(block.block.clone())?; // Write state and changesets to the database. @@ -246,25 +246,8 @@ where .save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(block.number))?; } - { - let transactions_writer = - self.static_file().get_writer(block.number, StaticFileSegment::Transactions)?; - let mut storage_writer = - UnifiedStorageWriter::from(self.database(), transactions_writer); - let no_hash_transactions = block - .body - .transactions - .clone() - .into_iter() - .map(TransactionSignedNoHash::from) - .collect(); - storage_writer.append_transactions_from_blocks( - block.header().number, - std::iter::once(&no_hash_transactions), - )?; - self.database() - .save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block.number))?; - } + self.database() + .save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block.number))?; Ok(()) } @@ -378,56 +361,6 @@ where Ok(td) } - - /// Appends transactions to static files, using the - /// [`BlockBodyIndices`](tables::BlockBodyIndices) table to determine the transaction number - /// when appending to static files. - /// - /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a - /// writer for the Transactions segment. 
- pub fn append_transactions_from_blocks( - &mut self, - initial_block_number: BlockNumber, - transactions: impl Iterator, - ) -> ProviderResult<()> - where - T: Borrow>, - { - self.ensure_static_file_segment(StaticFileSegment::Transactions)?; - - let mut bodies_cursor = - self.database().tx_ref().cursor_read::()?; - - let mut last_tx_idx = None; - for (idx, transactions) in transactions.enumerate() { - let block_number = initial_block_number + idx as u64; - - let mut first_tx_index = - bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); - - // If there are no indices, that means there have been no transactions - // - // So instead of returning an error, use zero - if block_number == initial_block_number && first_tx_index.is_none() { - first_tx_index = Some(0); - } - - let mut tx_index = first_tx_index - .or(last_tx_idx) - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - - for tx in transactions.borrow() { - self.static_file_mut().append_transaction(tx_index, tx)?; - tx_index += 1; - } - - self.static_file_mut().increment_block(block_number)?; - - // update index - last_tx_idx = Some(tx_index); - } - Ok(()) - } } impl From 80268a1ce775cb8a9955a581526bd5bc2d5a999f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Nov 2024 10:30:21 +0100 Subject: [PATCH 590/970] chore(sdk): move `reth_primitives_traits::TxType` into transaction module (#12722) --- crates/primitives-traits/src/lib.rs | 5 +---- crates/primitives-traits/src/transaction/mod.rs | 1 + crates/primitives-traits/src/{ => transaction}/tx_type.rs | 2 ++ 3 files changed, 4 insertions(+), 4 deletions(-) rename crates/primitives-traits/src/{ => transaction}/tx_type.rs (96%) diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 79dff4ae36b..38e83f8ccdf 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -28,6 +28,7 @@ pub mod transaction; pub use transaction::{ execute::FillTxEnv, 
signed::{FullSignedTx, SignedTransaction}, + tx_type::{FullTxType, TxType}, FullTransaction, Transaction, TransactionExt, }; @@ -52,10 +53,6 @@ pub use alloy_primitives::{logs_bloom, Log, LogData}; mod storage; pub use storage::StorageEntry; -/// Transaction types -pub mod tx_type; -pub use tx_type::{FullTxType, TxType}; - /// Common header types pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 53b77278571..8bd0027a8b2 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -2,6 +2,7 @@ pub mod execute; pub mod signed; +pub mod tx_type; use core::{fmt, hash::Hash}; diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs similarity index 96% rename from crates/primitives-traits/src/tx_type.rs rename to crates/primitives-traits/src/transaction/tx_type.rs index d9ef687759e..dc3dba7fdcf 100644 --- a/crates/primitives-traits/src/tx_type.rs +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -1,3 +1,5 @@ +//! Abstraction of transaction envelope type ID. 
+ use core::fmt; use alloy_primitives::{U64, U8}; From 33730536f5e5de1403f2deb3c7dbe551879b9724 Mon Sep 17 00:00:00 2001 From: Ivan Date: Thu, 21 Nov 2024 12:35:32 +0300 Subject: [PATCH 591/970] Do not print whole block in `debug!` logs (#12729) --- crates/ethereum/payload/src/lib.rs | 2 +- crates/optimism/payload/src/builder.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 4ec1e212c8d..80f6786c404 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -447,7 +447,7 @@ where }; let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", ?sealed_block, "sealed built block"); + debug!(target: "payload_builder", sealed_block_header = ?sealed_block.header, "sealed built block"); // create the executed block data let executed = ExecutedBlock { diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 3644d8f71a5..1050a55eb6e 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -424,7 +424,7 @@ where }; let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", ?sealed_block, "sealed built block"); + debug!(target: "payload_builder", sealed_block_header = ?sealed_block.header, "sealed built block"); // create the executed block data let executed = ExecutedBlock { From 1b874dcc6c352c05b5e2f1085001a090b8f03448 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 11:14:31 +0100 Subject: [PATCH 592/970] feat: use broadcast tx generic (#12733) --- crates/net/eth-wire-types/src/message.rs | 8 ++++++-- crates/net/network/src/message.rs | 4 ++-- crates/net/network/src/network.rs | 5 ++--- crates/net/network/src/transactions/mod.rs | 2 +- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 
3d34b8cae80..9a866720310 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -182,7 +182,11 @@ pub enum EthMessage { )] NewBlock(Box>), /// Represents a Transactions message broadcast to the network. - Transactions(Transactions), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BroadcastedTransaction: serde::Serialize + serde::de::DeserializeOwned") + )] + Transactions(Transactions), /// Represents a `NewPooledTransactionHashes` message for eth/66 version. NewPooledTransactionHashes66(NewPooledTransactionHashes66), /// Represents a `NewPooledTransactionHashes` message for eth/68 version. @@ -302,7 +306,7 @@ pub enum EthBroadcastMessage { /// Represents a new block broadcast message. NewBlock(Arc>), /// Represents a transactions broadcast message. - Transactions(SharedTransactions), + Transactions(SharedTransactions), } // === impl EthBroadcastMessage === diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index c2511f4e16a..199498b0b4c 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -48,9 +48,9 @@ pub enum PeerMessage { /// Broadcast new block. NewBlock(NewBlockMessage), /// Received transactions _from_ the peer - ReceivedTransaction(Transactions), + ReceivedTransaction(Transactions), /// Broadcast transactions _from_ local _to_ a peer. - SendTransactions(SharedTransactions), + SendTransactions(SharedTransactions), /// Send new pooled transactions PooledTransactions(NewPooledTransactionHashes), /// All `eth` request variants. 
diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 496b4250ffd..eadeccb1549 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -21,7 +21,6 @@ use reth_network_api::{ use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::{PeerAddr, PeerKind, Reputation, ReputationChangeKind}; -use reth_primitives::TransactionSigned; use reth_tokio_util::{EventSender, EventStream}; use secp256k1::SecretKey; use std::{ @@ -130,7 +129,7 @@ impl NetworkHandle { } /// Send full transactions to the peer - pub fn send_transactions(&self, peer_id: PeerId, msg: Vec>) { + pub fn send_transactions(&self, peer_id: PeerId, msg: Vec>) { self.send_message(NetworkHandleMessage::SendTransaction { peer_id, msg: SharedTransactions(msg), @@ -466,7 +465,7 @@ pub(crate) enum NetworkHandleMessage, }, /// Sends a list of transaction hashes to the given peer. SendPooledTransactionHashes { diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index f7a6fb8805e..c3ffea58d01 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1768,7 +1768,7 @@ pub enum NetworkTransactionEvent { /// The ID of the peer from which the transactions were received. peer_id: PeerId, /// The received transactions. - msg: Transactions, + msg: Transactions, }, /// Represents the event of receiving a list of transaction hashes from a peer. 
IncomingPooledTransactionHashes { From b9169399f303f3d1b50fe467e4ee9a6553a5d2ea Mon Sep 17 00:00:00 2001 From: Jun Song <87601811+syjn99@users.noreply.github.com> Date: Thu, 21 Nov 2024 19:22:42 +0900 Subject: [PATCH 593/970] chore: add pretty printing for pruned segment info (#12710) --- crates/node/events/src/node.rs | 6 +++++- crates/prune/types/src/pruner.rs | 10 +++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 285e28d0f2e..d7f78ba1f7e 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -307,7 +307,11 @@ impl NodeState { info!(tip_block_number, "Pruner started"); } PrunerEvent::Finished { tip_block_number, elapsed, stats } => { - info!(tip_block_number, ?elapsed, ?stats, "Pruner finished"); + let stats = format!( + "[{}]", + stats.iter().map(|item| item.to_string()).collect::>().join(", ") + ); + info!(tip_block_number, ?elapsed, %stats, "Pruner finished"); } } } diff --git a/crates/prune/types/src/pruner.rs b/crates/prune/types/src/pruner.rs index 3046dda0679..fb907925729 100644 --- a/crates/prune/types/src/pruner.rs +++ b/crates/prune/types/src/pruner.rs @@ -1,5 +1,6 @@ use crate::{PruneCheckpoint, PruneLimiter, PruneMode, PruneSegment}; use alloy_primitives::{BlockNumber, TxNumber}; +use derive_more::Display; /// Pruner run output. #[derive(Debug)] @@ -17,7 +18,8 @@ impl From for PrunerOutput { } /// Represents information of a pruner run for a segment. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Display)] +#[display("(table={segment}, pruned={pruned}, status={progress})")] pub struct PrunedSegmentInfo { /// The pruned segment pub segment: PruneSegment, @@ -77,16 +79,18 @@ impl SegmentOutputCheckpoint { } /// Progress of pruning. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Display)] pub enum PruneProgress { /// There is more data to prune. 
+ #[display("HasMoreData({_0})")] HasMoreData(PruneInterruptReason), /// Pruning has been finished. + #[display("Finished")] Finished, } /// Reason for interrupting a prune run. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Display)] pub enum PruneInterruptReason { /// Prune run timed out. Timeout, From c2e6938606863e5a6f2fe5fb2ef7d993a1a1c69c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 11:41:34 +0100 Subject: [PATCH 594/970] chore: move prune event type (#12732) --- Cargo.lock | 2 +- crates/node/events/Cargo.toml | 2 +- crates/node/events/src/node.rs | 2 +- crates/prune/prune/src/event.rs | 12 ------------ crates/prune/prune/src/lib.rs | 2 -- crates/prune/types/src/event.rs | 22 ++++++++++++++++++++++ crates/prune/types/src/lib.rs | 2 ++ 7 files changed, 27 insertions(+), 17 deletions(-) delete mode 100644 crates/prune/prune/src/event.rs create mode 100644 crates/prune/types/src/event.rs diff --git a/Cargo.lock b/Cargo.lock index 017b84f6e49..02302e14bf7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8122,7 +8122,7 @@ dependencies = [ "reth-engine-primitives", "reth-network-api", "reth-primitives-traits", - "reth-prune", + "reth-prune-types", "reth-stages", "reth-static-file-types", "reth-storage-api", diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 4b4d912a27b..03f3ab17288 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -16,7 +16,7 @@ reth-storage-api.workspace = true reth-beacon-consensus.workspace = true reth-network-api.workspace = true reth-stages.workspace = true -reth-prune.workspace = true +reth-prune-types.workspace = true reth-static-file-types.workspace = true reth-primitives-traits.workspace = true reth-engine-primitives.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index d7f78ba1f7e..4528bdeaa94 100644 --- a/crates/node/events/src/node.rs +++ 
b/crates/node/events/src/node.rs @@ -9,7 +9,7 @@ use reth_beacon_consensus::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncP use reth_engine_primitives::ForkchoiceStatus; use reth_network_api::{NetworkEvent, PeersInfo}; use reth_primitives_traits::{format_gas, format_gas_throughput}; -use reth_prune::PrunerEvent; +use reth_prune_types::PrunerEvent; use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId}; use reth_static_file_types::StaticFileProducerEvent; use std::{ diff --git a/crates/prune/prune/src/event.rs b/crates/prune/prune/src/event.rs deleted file mode 100644 index 4f5806e592e..00000000000 --- a/crates/prune/prune/src/event.rs +++ /dev/null @@ -1,12 +0,0 @@ -use alloy_primitives::BlockNumber; -use reth_prune_types::PrunedSegmentInfo; -use std::time::Duration; - -/// An event emitted by a [Pruner][crate::Pruner]. -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum PrunerEvent { - /// Emitted when pruner started running. - Started { tip_block_number: BlockNumber }, - /// Emitted when pruner finished running. 
- Finished { tip_block_number: BlockNumber, elapsed: Duration, stats: Vec }, -} diff --git a/crates/prune/prune/src/lib.rs b/crates/prune/prune/src/lib.rs index 5a43afeb502..e6bcbe5e812 100644 --- a/crates/prune/prune/src/lib.rs +++ b/crates/prune/prune/src/lib.rs @@ -12,7 +12,6 @@ mod builder; mod db_ext; mod error; -mod event; mod metrics; mod pruner; pub mod segments; @@ -20,7 +19,6 @@ pub mod segments; use crate::metrics::Metrics; pub use builder::PrunerBuilder; pub use error::PrunerError; -pub use event::PrunerEvent; pub use pruner::{Pruner, PrunerResult, PrunerWithFactory, PrunerWithResult}; // Re-export prune types diff --git a/crates/prune/types/src/event.rs b/crates/prune/types/src/event.rs new file mode 100644 index 00000000000..bac5f0d512c --- /dev/null +++ b/crates/prune/types/src/event.rs @@ -0,0 +1,22 @@ +use crate::PrunedSegmentInfo; +use alloy_primitives::BlockNumber; +use std::time::Duration; + +/// An event emitted by a pruner. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum PrunerEvent { + /// Emitted when pruner started running. + Started { + /// The tip block number before pruning. + tip_block_number: BlockNumber, + }, + /// Emitted when pruner finished running. + Finished { + /// The tip block number before pruning. + tip_block_number: BlockNumber, + /// The elapsed time for the pruning process. + elapsed: Duration, + /// Collected pruning stats. 
+ stats: Vec, + }, +} diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 0722e760faf..82a41f0c2b1 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -9,6 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod checkpoint; +mod event; mod limiter; mod mode; mod pruner; @@ -16,6 +17,7 @@ mod segment; mod target; pub use checkpoint::PruneCheckpoint; +pub use event::PrunerEvent; pub use limiter::PruneLimiter; pub use mode::PruneMode; pub use pruner::{ From d00920c42102635b27276b1b767449f74af61975 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 12:08:07 +0100 Subject: [PATCH 595/970] chore: relax tryfrom error (#12735) --- crates/net/eth-wire-types/src/transactions.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index 8db96c10042..26f62b7f76a 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -5,7 +5,7 @@ use alloy_primitives::B256; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use derive_more::{Constructor, Deref, IntoIterator}; use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{transaction::TransactionConversionError, PooledTransactionsElement}; +use reth_primitives::PooledTransactionsElement; /// A list of transaction hashes that the peer would like transaction bodies for. 
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -60,9 +60,9 @@ impl PooledTransactions { impl TryFrom> for PooledTransactions where - T: TryFrom, + T: TryFrom, { - type Error = TransactionConversionError; + type Error = T::Error; fn try_from(txs: Vec) -> Result { txs.into_iter().map(T::try_from).collect() From ac1867b63113f98ff6c392efd465c4ae6e13998a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 12:22:35 +0100 Subject: [PATCH 596/970] feat: add a way to convert consensus to pooled variant (#12734) --- crates/transaction-pool/src/test_utils/mock.rs | 6 ++++++ crates/transaction-pool/src/traits.rs | 11 +++++++++++ 2 files changed, 17 insertions(+) diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 72304910e15..344781b1f58 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -608,6 +608,12 @@ impl PoolTransaction for MockTransaction { pooled.into() } + fn try_consensus_into_pooled( + tx: Self::Consensus, + ) -> Result { + Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) + } + fn hash(&self) -> &TxHash { self.get_hash() } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 23f28cc3fa7..bcde571b07b 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -946,6 +946,11 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { pooled.into() } + /// Tries to convert the `Consensus` type into the `Pooled` type. + fn try_consensus_into_pooled( + tx: Self::Consensus, + ) -> Result; + /// Hash of the transaction. 
fn hash(&self) -> &TxHash; @@ -1207,6 +1212,12 @@ impl PoolTransaction for EthPooledTransaction { type Pooled = PooledTransactionsElementEcRecovered; + fn try_consensus_into_pooled( + tx: Self::Consensus, + ) -> Result { + Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) + } + /// Returns hash of the transaction. fn hash(&self) -> &TxHash { self.transaction.hash_ref() From 42aea7b9f688c29da1012fa0c3738df65c840f4c Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 21 Nov 2024 11:49:42 +0000 Subject: [PATCH 597/970] feat(trie): retain branch nodes in sparse trie (#12291) --- Cargo.lock | 8 +- Cargo.toml | 4 +- crates/trie/sparse/Cargo.toml | 2 + crates/trie/sparse/src/trie.rs | 646 ++++++++++++++++++++++++--------- 4 files changed, 493 insertions(+), 167 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 02302e14bf7..10b7f2fbda5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4590,7 +4590,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -9441,7 +9441,9 @@ dependencies = [ "itertools 0.13.0", "pretty_assertions", "proptest", + "proptest-arbitrary-interop", "rand 0.8.5", + "reth-primitives-traits", "reth-testing-utils", "reth-tracing", "reth-trie", @@ -11181,7 +11183,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3637e734239e12ab152cd269302500bd063f37624ee210cd04b4936ed671f3b1" dependencies = [ "cc", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -11672,7 +11674,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 
e4ca1b7bc28..702bbc3090b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -638,7 +638,7 @@ tracy-client = "0.17.3" #alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } #alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } +#op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } #op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } #op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 1c5bb7d8a33..3301975961e 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -27,6 +27,7 @@ smallvec = { workspace = true, features = ["const_new"] } thiserror.workspace = true [dev-dependencies] +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } @@ -35,6 +36,7 @@ assert_matches.workspace = true criterion.workspace = true itertools.workspace = true pretty_assertions = "1.4" +proptest-arbitrary-interop.workspace = true proptest.workspace = true rand.workspace = true diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 696934d3edb..dff29027175 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,17 +1,21 @@ use crate::{SparseTrieError, SparseTrieResult}; -use alloy_primitives::{hex, keccak256, map::HashMap, B256}; +use alloy_primitives::{ + hex, keccak256, + 
map::{HashMap, HashSet}, + B256, +}; use alloy_rlp::Decodable; use reth_tracing::tracing::debug; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut}, - RlpNode, + BranchNodeCompact, RlpNode, }; use reth_trie_common::{ BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, TrieMask, TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; -use std::fmt; +use std::{borrow::Cow, fmt}; /// Inner representation of the sparse trie. /// Sparse trie is blind by default until nodes are revealed. @@ -21,13 +25,13 @@ pub enum SparseTrie { #[default] Blind, /// The trie nodes have been revealed. - Revealed(RevealedSparseTrie), + Revealed(Box), } impl SparseTrie { /// Creates new revealed empty trie. pub fn revealed_empty() -> Self { - Self::Revealed(RevealedSparseTrie::default()) + Self::Revealed(Box::default()) } /// Returns `true` if the sparse trie has no revealed nodes. @@ -51,7 +55,7 @@ impl SparseTrie { /// Mutable reference to [`RevealedSparseTrie`]. pub fn reveal_root(&mut self, root: TrieNode) -> SparseTrieResult<&mut RevealedSparseTrie> { if self.is_blind() { - *self = Self::Revealed(RevealedSparseTrie::from_root(root)?) + *self = Self::Revealed(Box::new(RevealedSparseTrie::from_root(root)?)) } Ok(self.as_revealed_mut().unwrap()) } @@ -87,6 +91,7 @@ pub struct RevealedSparseTrie { prefix_set: PrefixSetMut, /// Reusable buffer for RLP encoding of nodes. 
rlp_buf: Vec, + updates: Option, } impl fmt::Debug for RevealedSparseTrie { @@ -96,6 +101,7 @@ impl fmt::Debug for RevealedSparseTrie { .field("values", &self.values) .field("prefix_set", &self.prefix_set) .field("rlp_buf", &hex::encode(&self.rlp_buf)) + .field("updates", &self.updates) .finish() } } @@ -107,6 +113,7 @@ impl Default for RevealedSparseTrie { values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), + updates: None, } } } @@ -119,11 +126,30 @@ impl RevealedSparseTrie { values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), + updates: None, }; this.reveal_node(Nibbles::default(), node)?; Ok(this) } + /// Makes the sparse trie to store updated branch nodes. + pub fn with_updates(mut self, retain_updates: bool) -> Self { + if retain_updates { + self.updates = Some(SparseTrieUpdates::default()); + } + self + } + + /// Returns a reference to the retained sparse node updates without taking them. + pub fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { + self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) + } + + /// Takes and returns the retained sparse node updates + pub fn take_updates(&mut self) -> SparseTrieUpdates { + self.updates.take().unwrap_or_default() + } + /// Reveal the trie node only if it was not known already. pub fn reveal_node(&mut self, path: Nibbles, node: TrieNode) -> SparseTrieResult<()> { // TODO: revise all inserts to not overwrite existing entries @@ -146,10 +172,7 @@ impl RevealedSparseTrie { match self.nodes.get(&path) { // Blinded and non-existent nodes can be replaced. Some(SparseNode::Hash(_)) | None => { - self.nodes.insert( - path, - SparseNode::Branch { state_mask: branch.state_mask, hash: None }, - ); + self.nodes.insert(path, SparseNode::new_branch(branch.state_mask)); } // Branch node already exists, or an extension node was placed where a // branch node was before. 
@@ -165,7 +188,7 @@ impl RevealedSparseTrie { let mut child_path = path.clone(); child_path.extend_from_slice_unchecked(&ext.key); self.reveal_node_or_hash(child_path, &ext.child)?; - self.nodes.insert(path, SparseNode::Extension { key: ext.key, hash: None }); + self.nodes.insert(path, SparseNode::new_ext(ext.key)); } // Extension node already exists, or an extension node was placed where a branch // node was before. @@ -390,7 +413,7 @@ impl RevealedSparseTrie { SparseNode::Branch { .. } => removed_node.node, } } - SparseNode::Branch { mut state_mask, hash: _ } => { + SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => { // If the node is a branch node, we need to check the number of children left // after deleting the child at the given nibble. @@ -452,6 +475,10 @@ impl RevealedSparseTrie { self.nodes.remove(&child_path); } + if let Some(updates) = self.updates.as_mut() { + updates.removed_nodes.insert(removed_path.clone()); + } + new_node } // If more than one child is left set in the branch, we just re-insert it @@ -558,11 +585,11 @@ impl RevealedSparseTrie { pub fn root(&mut self) -> B256 { // take the current prefix set. let mut prefix_set = std::mem::take(&mut self.prefix_set).freeze(); - let root_rlp = self.rlp_node_allocate(Nibbles::default(), &mut prefix_set); - if let Some(root_hash) = root_rlp.as_hash() { + let rlp_node = self.rlp_node_allocate(Nibbles::default(), &mut prefix_set); + if let Some(root_hash) = rlp_node.as_hash() { root_hash } else { - keccak256(root_rlp) + keccak256(rlp_node) } } @@ -608,7 +635,7 @@ impl RevealedSparseTrie { paths.push((path, level + 1)); } } - SparseNode::Branch { state_mask, hash } => { + SparseNode::Branch { state_mask, hash, .. 
} => { if hash.is_some() && !prefix_set.contains(&path) { continue } @@ -644,48 +671,70 @@ impl RevealedSparseTrie { let mut prefix_set_contains = |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); - let rlp_node = match self.nodes.get_mut(&path).unwrap() { - SparseNode::Empty => RlpNode::word_rlp(&EMPTY_ROOT_HASH), - SparseNode::Hash(hash) => RlpNode::word_rlp(hash), + let (rlp_node, calculated, node_type) = match self.nodes.get_mut(&path).unwrap() { + SparseNode::Empty => { + (RlpNode::word_rlp(&EMPTY_ROOT_HASH), false, SparseNodeType::Empty) + } + SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), false, SparseNodeType::Hash), SparseNode::Leaf { key, hash } => { self.rlp_buf.clear(); let mut path = path.clone(); path.extend_from_slice_unchecked(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - RlpNode::word_rlp(&hash) + (RlpNode::word_rlp(&hash), false, SparseNodeType::Leaf) } else { let value = self.values.get(&path).unwrap(); let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); - rlp_node + (rlp_node, true, SparseNodeType::Leaf) } } SparseNode::Extension { key, hash } => { let mut child_path = path.clone(); child_path.extend_from_slice_unchecked(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - RlpNode::word_rlp(&hash) + ( + RlpNode::word_rlp(&hash), + false, + SparseNodeType::Extension { store_in_db_trie: true }, + ) } else if buffers.rlp_node_stack.last().is_some_and(|e| e.0 == child_path) { - let (_, child) = buffers.rlp_node_stack.pop().unwrap(); + let (_, child, _, node_type) = buffers.rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); - rlp_node + + ( + rlp_node, + true, + SparseNodeType::Extension { + // Inherit the `store_in_db_trie` flag from the child node, which is + // always the branch node + store_in_db_trie: 
node_type.store_in_db_trie(), + }, + ) } else { // need to get rlp node for child first buffers.path_stack.extend([(path, is_in_prefix_set), (child_path, None)]); continue } } - SparseNode::Branch { state_mask, hash } => { - if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - buffers.rlp_node_stack.push((path, RlpNode::word_rlp(&hash))); + SparseNode::Branch { state_mask, hash, store_in_db_trie } => { + if let Some((hash, store_in_db_trie)) = + hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + buffers.rlp_node_stack.push(( + path, + RlpNode::word_rlp(&hash), + false, + SparseNodeType::Branch { store_in_db_trie }, + )); continue } buffers.branch_child_buf.clear(); // Walk children in a reverse order from `f` to `0`, so we pop the `0` first - // from the stack. + // from the stack and keep walking in the sorted order. for bit in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(bit) { let mut child = path.clone(); @@ -698,11 +747,43 @@ impl RevealedSparseTrie { .branch_value_stack_buf .resize(buffers.branch_child_buf.len(), Default::default()); let mut added_children = false; + + // TODO(alexey): set the `TrieMask` bits directly + let mut tree_mask_values = Vec::new(); + let mut hash_mask_values = Vec::new(); + let mut hashes = Vec::new(); for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { if buffers.rlp_node_stack.last().is_some_and(|e| &e.0 == child_path) { - let (_, child) = buffers.rlp_node_stack.pop().unwrap(); - // Insert children in the resulting buffer in a normal order, because - // initially we iterated in reverse. 
+ let (_, child, calculated, node_type) = + buffers.rlp_node_stack.pop().unwrap(); + + // Update the masks only if we need to retain trie updates + if self.updates.is_some() { + // Set the trie mask + if node_type.store_in_db_trie() { + // A branch or an extension node explicitly set the + // `store_in_db_trie` flag + tree_mask_values.push(true); + } else { + // Set the flag according to whether a child node was + // pre-calculated + // (`calculated = false`), meaning that it wasn't in the + // database + tree_mask_values.push(!calculated); + } + + // Set the hash mask. If a child node has a hash value AND is a + // branch node, set the hash mask + // and save the hash. + let hash = child.as_hash().filter(|_| node_type.is_branch()); + hash_mask_values.push(hash.is_some()); + if let Some(hash) = hash { + hashes.push(hash); + } + } + + // Insert children in the resulting buffer in a normal order, + // because initially we iterated in reverse. buffers.branch_value_stack_buf [buffers.branch_child_buf.len() - i - 1] = child; added_children = true; @@ -717,21 +798,101 @@ impl RevealedSparseTrie { } self.rlp_buf.clear(); - let rlp_node = BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask) - .rlp(&mut self.rlp_buf); + let branch_node_ref = + BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask); + let rlp_node = branch_node_ref.rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); - rlp_node + + let store_in_db_trie_value = if let Some(updates) = self.updates.as_mut() { + let mut tree_mask_values = tree_mask_values.into_iter().rev(); + let mut hash_mask_values = hash_mask_values.into_iter().rev(); + let mut tree_mask = TrieMask::default(); + let mut hash_mask = TrieMask::default(); + for (i, child) in branch_node_ref.children() { + if child.is_some() { + if hash_mask_values.next().unwrap() { + hash_mask.set_bit(i); + } + if tree_mask_values.next().unwrap() { + tree_mask.set_bit(i); + } + } + } + + // Store in DB trie if there are either any children 
that are stored in the + // DB trie, or any children represent hashed values + let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); + if store_in_db_trie { + hashes.reverse(); + let branch_node = BranchNodeCompact::new( + *state_mask, + tree_mask, + hash_mask, + hashes, + hash.filter(|_| path.len() == 0), + ); + updates.updated_nodes.insert(path.clone(), branch_node); + } + + store_in_db_trie + } else { + false + }; + *store_in_db_trie = Some(store_in_db_trie_value); + + ( + rlp_node, + true, + SparseNodeType::Branch { store_in_db_trie: store_in_db_trie_value }, + ) } }; - buffers.rlp_node_stack.push((path, rlp_node)); + buffers.rlp_node_stack.push((path, rlp_node, calculated, node_type)); } + debug_assert_eq!(buffers.rlp_node_stack.len(), 1); buffers.rlp_node_stack.pop().unwrap().1 } } +/// Enum representing sparse trie node type. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum SparseNodeType { + /// Empty trie node. + Empty, + /// The hash of the node that was not revealed. + Hash, + /// Sparse leaf node. + Leaf, + /// Sparse extension node. + Extension { + /// A flag indicating whether the extension node should be stored in the database. + store_in_db_trie: bool, + }, + /// Sparse branch node. + Branch { + /// A flag indicating whether the branch node should be stored in the database. + store_in_db_trie: bool, + }, +} + +impl SparseNodeType { + const fn is_branch(&self) -> bool { + matches!(self, Self::Branch { .. }) + } + + const fn store_in_db_trie(&self) -> bool { + match *self { + Self::Extension { store_in_db_trie } | Self::Branch { store_in_db_trie } => { + store_in_db_trie + } + _ => false, + } + } +} + /// Enum representing trie nodes in sparse trie. -#[derive(PartialEq, Eq, Clone, Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum SparseNode { /// Empty trie node. Empty, @@ -760,6 +921,9 @@ pub enum SparseNode { /// Pre-computed hash of the sparse node. /// Can be reused unless this trie path has been updated. 
hash: Option, + /// Pre-computed flag indicating whether the trie node should be stored in the database. + /// Can be reused unless this trie path has been updated. + store_in_db_trie: Option, }, } @@ -776,7 +940,7 @@ impl SparseNode { /// Create new [`SparseNode::Branch`] from state mask. pub const fn new_branch(state_mask: TrieMask) -> Self { - Self::Branch { state_mask, hash: None } + Self::Branch { state_mask, hash: None, store_in_db_trie: None } } /// Create new [`SparseNode::Branch`] with two bits set. @@ -785,7 +949,7 @@ impl SparseNode { // set bits for both children (1u16 << bit_a) | (1u16 << bit_b), ); - Self::Branch { state_mask, hash: None } + Self::Branch { state_mask, hash: None, store_in_db_trie: None } } /// Create new [`SparseNode::Extension`] from the key slice. @@ -812,7 +976,7 @@ struct RlpNodeBuffers { /// Stack of paths we need rlp nodes for and whether the path is in the prefix set. path_stack: Vec<(Nibbles, Option)>, /// Stack of rlp nodes - rlp_node_stack: Vec<(Nibbles, RlpNode)>, + rlp_node_stack: Vec<(Nibbles, RlpNode, bool, SparseNodeType)>, /// Reusable branch child path branch_child_buf: SmallVec<[Nibbles; 16]>, /// Reusable branch value stack @@ -831,37 +995,101 @@ impl RlpNodeBuffers { } } +/// The aggregation of sparse trie updates. 
+#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct SparseTrieUpdates { + updated_nodes: HashMap, + removed_nodes: HashSet, +} + #[cfg(test)] mod tests { use std::collections::BTreeMap; use super::*; use alloy_primitives::{map::HashSet, U256}; + use alloy_rlp::Encodable; use assert_matches::assert_matches; use itertools::Itertools; use prop::sample::SizeRange; use proptest::prelude::*; + use proptest_arbitrary_interop::arb; use rand::seq::IteratorRandom; - use reth_trie::{BranchNode, ExtensionNode, LeafNode}; + use reth_primitives_traits::Account; + use reth_trie::{ + hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor}, + node_iter::{TrieElement, TrieNodeIter}, + trie_cursor::noop::NoopAccountTrieCursor, + walker::TrieWalker, + BranchNode, ExtensionNode, HashedPostState, LeafNode, TrieAccount, + }; use reth_trie_common::{ proof::{ProofNodes, ProofRetainer}, HashBuilder, }; + /// Pad nibbles to the length of a B256 hash with zeros on the left. + fn pad_nibbles_left(nibbles: Nibbles) -> Nibbles { + let mut base = + Nibbles::from_nibbles_unchecked(vec![0; B256::len_bytes() * 2 - nibbles.len()]); + base.extend_from_slice_unchecked(&nibbles); + base + } + + /// Pad nibbles to the length of a B256 hash with zeros on the right. + fn pad_nibbles_right(mut nibbles: Nibbles) -> Nibbles { + nibbles.extend_from_slice_unchecked(&vec![0; B256::len_bytes() * 2 - nibbles.len()]); + nibbles + } + /// Calculate the state root by feeding the provided state to the hash builder and retaining the /// proofs for the provided targets. /// /// Returns the state root and the retained proof nodes. 
- fn hash_builder_root_with_proofs>( - state: impl IntoIterator, + fn run_hash_builder( + state: impl IntoIterator + Clone, proof_targets: impl IntoIterator, - ) -> (B256, ProofNodes) { - let mut hash_builder = - HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter(proof_targets)); - for (key, value) in state { - hash_builder.add_leaf(key, value.as_ref()); + ) -> HashBuilder { + let mut account_rlp = Vec::new(); + + let mut hash_builder = HashBuilder::default() + .with_updates(true) + .with_proof_retainer(ProofRetainer::from_iter(proof_targets)); + + let mut prefix_set = PrefixSetMut::default(); + prefix_set.extend_keys(state.clone().into_iter().map(|(nibbles, _)| nibbles)); + let walker = TrieWalker::new(NoopAccountTrieCursor::default(), prefix_set.freeze()) + .with_deletions_retained(true); + let hashed_post_state = HashedPostState::default() + .with_accounts(state.into_iter().map(|(nibbles, account)| { + (nibbles.pack().into_inner().unwrap().into(), Some(account)) + })) + .into_sorted(); + let mut node_iter = TrieNodeIter::new( + walker, + HashedPostStateAccountCursor::new( + NoopHashedAccountCursor::default(), + hashed_post_state.accounts(), + ), + ); + + while let Some(node) = node_iter.try_next().unwrap() { + match node { + TrieElement::Branch(branch) => { + hash_builder.add_branch(branch.key, branch.value, branch.children_are_in_trie); + } + TrieElement::Leaf(key, account) => { + let account = TrieAccount::from((account, EMPTY_ROOT_HASH)); + account.encode(&mut account_rlp); + + hash_builder.add_leaf(Nibbles::unpack(key), &account_rlp); + account_rlp.clear(); + } + } } - (hash_builder.root(), hash_builder.take_proof_nodes()) + hash_builder.root(); + + hash_builder } /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal. 
@@ -915,58 +1143,80 @@ mod tests { #[test] fn sparse_trie_empty_update_one() { - let path = Nibbles::unpack(B256::with_last_byte(42)); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let key = Nibbles::unpack(B256::with_last_byte(42)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let (hash_builder_root, hash_builder_proof_nodes) = - hash_builder_root_with_proofs([(path.clone(), &value)], [path.clone()]); + let mut hash_builder = run_hash_builder([(key.clone(), value())], [key.clone()]); - let mut sparse = RevealedSparseTrie::default(); - sparse.update_leaf(path, value.to_vec()).unwrap(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); + sparse.update_leaf(key, value_encoded()).unwrap(); let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); } #[test] fn sparse_trie_empty_update_multiple_lower_nibbles() { + reth_tracing::init_test_tracing(); + let paths = (0..=16).map(|b| Nibbles::unpack(B256::with_last_byte(b))).collect::>(); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - paths.iter().cloned().zip(std::iter::repeat_with(|| value.clone())), + let mut hash_builder = run_hash_builder( + 
paths.iter().cloned().zip(std::iter::repeat_with(value)), paths.clone(), ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), value_encoded()).unwrap(); } let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); } #[test] fn sparse_trie_empty_update_multiple_upper_nibbles() { let paths = (239..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::>(); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - paths.iter().cloned().zip(std::iter::repeat_with(|| value.clone())), + let mut hash_builder = run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(value)), paths.clone(), ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), value_encoded()).unwrap(); } let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, 
hash_builder.updated_branch_nodes.take().unwrap()); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); } #[test] @@ -980,55 +1230,79 @@ mod tests { }) }) .collect::>(); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(|| value.clone())), + let mut hash_builder = run_hash_builder( + paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(value)), paths.clone(), ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), value_encoded()).unwrap(); } let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + pretty_assertions::assert_eq!( + BTreeMap::from_iter(sparse_updates.updated_nodes), + BTreeMap::from_iter(hash_builder.updated_branch_nodes.take().unwrap()) + ); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); } #[test] fn sparse_trie_empty_update_repeated() { let paths = (0..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::>(); - let old_value = alloy_rlp::encode_fixed_size(&U256::from(1)); - let new_value = alloy_rlp::encode_fixed_size(&U256::from(2)); + let old_value = Account { nonce: 1, ..Default::default() }; + let old_value_encoded = { + let mut account_rlp = Vec::new(); + TrieAccount::from((old_value, EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; 
+ let new_value = Account { nonce: 2, ..Default::default() }; + let new_value_encoded = { + let mut account_rlp = Vec::new(); + TrieAccount::from((new_value, EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - paths.iter().cloned().zip(std::iter::repeat_with(|| old_value.clone())), + let mut hash_builder = run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(|| old_value)), paths.clone(), ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), old_value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), old_value_encoded.clone()).unwrap(); } let sparse_root = sparse.root(); + let sparse_updates = sparse.updates_ref(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - paths.iter().cloned().zip(std::iter::repeat_with(|| new_value.clone())), + let mut hash_builder = run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(|| new_value)), paths.clone(), ); for path in &paths { - sparse.update_leaf(path.clone(), new_value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), new_value_encoded.clone()).unwrap(); } let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + 
assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); } #[test] @@ -1311,30 +1585,43 @@ mod tests { // to test the sparse trie updates. const KEY_NIBBLES_LEN: usize = 3; - fn test(updates: Vec<(HashMap>, HashSet)>) { + fn test(updates: Vec<(HashMap, HashSet)>) { { let mut state = BTreeMap::default(); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for (update, keys_to_delete) in updates { // Insert state updates into the sparse trie and calculate the root - for (key, value) in update.clone() { - sparse.update_leaf(key, value).unwrap(); + for (key, account) in update.clone() { + let account = TrieAccount::from((account, EMPTY_ROOT_HASH)); + let mut account_rlp = Vec::new(); + account.encode(&mut account_rlp); + sparse.update_leaf(key, account_rlp).unwrap(); } - let sparse_root = sparse.root(); + // We need to clone the sparse trie, so that all updated branch nodes are + // preserved, and not only those that were changed after the last call to + // `root()`. 
+ let mut updated_sparse = sparse.clone(); + let sparse_root = updated_sparse.root(); + let sparse_updates = updated_sparse.take_updates(); // Insert state updates into the hash builder and calculate the root state.extend(update); - let (hash_builder_root, hash_builder_proof_nodes) = - hash_builder_root_with_proofs( - state.clone(), - state.keys().cloned().collect::>(), - ); + let mut hash_builder = + run_hash_builder(state.clone(), state.keys().cloned().collect::>()); // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_root, hash_builder.root()); + // Assert that the sparse trie updates match the hash builder updates + pretty_assertions::assert_eq!( + sparse_updates.updated_nodes, + hash_builder.updated_branch_nodes.take().unwrap() + ); // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq_sparse_trie_proof_nodes( + &updated_sparse, + hash_builder.take_proof_nodes(), + ); // Delete some keys from both the hash builder and the sparse trie and check // that the sparse trie root still matches the hash builder root @@ -1343,34 +1630,36 @@ mod tests { sparse.remove_leaf(&key).unwrap(); } - let sparse_root = sparse.root(); + // We need to clone the sparse trie, so that all updated branch nodes are + // preserved, and not only those that were changed after the last call to + // `root()`. 
+ let mut updated_sparse = sparse.clone(); + let sparse_root = updated_sparse.root(); + let sparse_updates = updated_sparse.take_updates(); - let (hash_builder_root, hash_builder_proof_nodes) = - hash_builder_root_with_proofs( - state.clone(), - state.keys().cloned().collect::>(), - ); + let mut hash_builder = + run_hash_builder(state.clone(), state.keys().cloned().collect::>()); // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_root, hash_builder.root()); + // Assert that the sparse trie updates match the hash builder updates + pretty_assertions::assert_eq!( + sparse_updates.updated_nodes, + hash_builder.updated_branch_nodes.take().unwrap() + ); // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq_sparse_trie_proof_nodes( + &updated_sparse, + hash_builder.take_proof_nodes(), + ); } } } - /// Pad nibbles of length [`KEY_NIBBLES_LEN`] with zeros to the length of a B256 hash. - fn pad_nibbles(nibbles: Nibbles) -> Nibbles { - let mut base = - Nibbles::from_nibbles_unchecked([0; { B256::len_bytes() / 2 - KEY_NIBBLES_LEN }]); - base.extend_from_slice_unchecked(&nibbles); - base - } - fn transform_updates( - updates: Vec>>, + updates: Vec>, mut rng: impl Rng, - ) -> Vec<(HashMap>, HashSet)> { + ) -> Vec<(HashMap, HashSet)> { let mut keys = HashSet::new(); updates .into_iter() @@ -1393,8 +1682,8 @@ mod tests { proptest!(ProptestConfig::with_cases(10), |( updates in proptest::collection::vec( proptest::collection::hash_map( - any_with::(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles), - any::>(), + any_with::(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles_right), + arb::(), 1..100, ).prop_map(HashMap::from_iter), 1..100, @@ -1417,24 +1706,28 @@ mod tests { /// replacing it. 
#[test] fn sparse_trie_reveal_node_1() { - let key1 = || Nibbles::from_nibbles_unchecked([0x00]); - let key2 = || Nibbles::from_nibbles_unchecked([0x01]); - let key3 = || Nibbles::from_nibbles_unchecked([0x02]); - let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x02])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; // Generate the proof for the root node and initialize the sparse trie with it - let (_, proof_nodes) = hash_builder_root_with_proofs( - [(key1(), value()), (key3(), value())], - [Nibbles::default()], - ); + let proof_nodes = + run_hash_builder([(key1(), value()), (key3(), value())], [Nibbles::default()]) + .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), ) .unwrap(); // Generate the proof for the first key and reveal it in the sparse trie - let (_, proof_nodes) = - hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key1()]); + let proof_nodes = + run_hash_builder([(key1(), value()), (key3(), value())], [key1()]).take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1446,7 +1739,7 @@ mod tests { ); // Insert the leaf for the second key - sparse.update_leaf(key2(), value().to_vec()).unwrap(); + sparse.update_leaf(key2(), value_encoded()).unwrap(); // Check that the branch node was updated and another nibble was set assert_eq!( @@ -1455,8 +1748,8 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let (_, proof_nodes_3) = - 
hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key3()]); + let proof_nodes_3 = + run_hash_builder([(key1(), value()), (key3(), value())], [key3()]).take_proof_nodes(); for (path, node) in proof_nodes_3.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1469,10 +1762,11 @@ mod tests { // Generate the nodes for the full trie with all three key using the hash builder, and // compare them to the sparse trie - let (_, proof_nodes) = hash_builder_root_with_proofs( + let proof_nodes = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], [key1(), key2(), key3()], - ); + ) + .take_proof_nodes(); assert_eq_sparse_trie_proof_nodes(&sparse, proof_nodes); } @@ -1489,16 +1783,17 @@ mod tests { /// into an extension node, so it should ignore this node. #[test] fn sparse_trie_reveal_node_2() { - let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x00]); - let key2 = || Nibbles::from_nibbles_unchecked([0x01, 0x01]); - let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x02]); - let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x02])); + let value = || Account::default(); // Generate the proof for the root node and initialize the sparse trie with it - let (_, proof_nodes) = hash_builder_root_with_proofs( + let proof_nodes = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], [Nibbles::default()], - ); + ) + .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), ) @@ -1506,10 +1801,11 @@ mod tests { // Generate the proof for the children of the root branch node and reveal it in the sparse // trie - let (_, proof_nodes) = 
hash_builder_root_with_proofs( + let proof_nodes = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], [key1(), Nibbles::from_nibbles_unchecked([0x01])], - ); + ) + .take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1530,10 +1826,9 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let (_, proof_nodes) = hash_builder_root_with_proofs( - [(key1(), value()), (key2(), value()), (key3(), value())], - [key2()], - ); + let proof_nodes = + run_hash_builder([(key1(), value()), (key2(), value()), (key3(), value())], [key2()]) + .take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1555,16 +1850,20 @@ mod tests { /// overwritten with the extension node from the proof. #[test] fn sparse_trie_reveal_node_3() { - let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x01]); - let key2 = || Nibbles::from_nibbles_unchecked([0x00, 0x02]); - let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x00]); - let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x01])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x02])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x00])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; // Generate the proof for the root node and initialize the sparse trie with it - let (_, proof_nodes) = hash_builder_root_with_proofs( - [(key1(), value()), (key2(), value())], - [Nibbles::default()], - ); + let proof_nodes = + run_hash_builder([(key1(), value()), (key2(), value())], [Nibbles::default()]) + 
.take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), ) @@ -1577,17 +1876,17 @@ mod tests { ); // Insert the leaf with a different prefix - sparse.update_leaf(key3(), value().to_vec()).unwrap(); + sparse.update_leaf(key3(), value_encoded()).unwrap(); // Check that the extension node was turned into a branch node assert_matches!( sparse.nodes.get(&Nibbles::default()), - Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) ); // Generate the proof for the first key and reveal it in the sparse trie - let (_, proof_nodes) = - hash_builder_root_with_proofs([(key1(), value()), (key2(), value())], [key1()]); + let proof_nodes = + run_hash_builder([(key1(), value()), (key2(), value())], [key1()]).take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1595,7 +1894,7 @@ mod tests { // Check that the branch node wasn't overwritten by the extension node in the proof assert_matches!( sparse.nodes.get(&Nibbles::default()), - Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) ); } @@ -1671,4 +1970,27 @@ mod tests { ] ); } + + #[test] + fn hash_builder_branch_hash_mask() { + let key1 = || pad_nibbles_left(Nibbles::from_nibbles_unchecked([0x00])); + let key2 = || pad_nibbles_left(Nibbles::from_nibbles_unchecked([0x01])); + let value = || Account { bytecode_hash: Some(B256::repeat_byte(1)), ..Default::default() }; + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; + + let mut hash_builder = + 
run_hash_builder([(key1(), value()), (key2(), value())], [Nibbles::default()]); + let mut sparse = RevealedSparseTrie::default(); + sparse.update_leaf(key1(), value_encoded()).unwrap(); + sparse.update_leaf(key2(), value_encoded()).unwrap(); + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); + + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + } } From fa7ad036ea5c23bf4322e9d0e516cf0e99a3af1f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 14:27:02 +0100 Subject: [PATCH 598/970] chore: force disable 4844 for op pool (#12740) --- crates/optimism/node/src/node.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 6cdffd09059..bdc3d0d3a44 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -282,6 +282,7 @@ where let validator = TransactionValidationTaskExecutor::eth_builder(Arc::new( ctx.chain_spec().inner.clone(), )) + .no_eip4844() .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) 
.with_additional_tasks( From 4eca2fa1eea6450bc197554afb03a984095a9aca Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 14:28:08 +0100 Subject: [PATCH 599/970] chore: rm network event handling from node events (#12736) --- bin/reth/src/commands/debug_cmd/execution.rs | 11 +++-------- crates/node/builder/src/launch/engine.rs | 3 +-- crates/node/builder/src/launch/mod.rs | 4 +--- crates/node/events/src/node.rs | 19 +------------------ 4 files changed, 6 insertions(+), 31 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 0210142be71..dd060ac2ab6 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -4,7 +4,7 @@ use crate::{args::NetworkArgs, utils::get_single_header}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use clap::Parser; -use futures::{stream::select as stream_select, StreamExt}; +use futures::StreamExt; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; @@ -19,7 +19,7 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider, NetworkHandle}; +use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; use reth_node_api::NodeTypesWithDBAdapter; @@ -207,17 +207,12 @@ impl> Command { return Ok(()) } - let pipeline_events = pipeline.events(); - let events = stream_select( - network.event_listener().map(Into::into), - pipeline_events.map(Into::into), - ); ctx.task_executor.spawn_critical( "events task", reth_node_events::node::handle_events( Some(Box::new(network)), latest_block_number, - events, + pipeline.events().map(Into::into), ), ); diff --git 
a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 5a8405047b0..f485be2c22d 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -17,7 +17,7 @@ use reth_engine_tree::{ use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; -use reth_network_api::{BlockDownloaderProvider, NetworkEventListenerProvider}; +use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ BuiltPayload, FullNodePrimitives, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, @@ -256,7 +256,6 @@ where info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( - ctx.components().network().event_listener().map(Into::into), beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index c4146f48306..be317e4be31 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -21,7 +21,7 @@ use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; -use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider}; +use reth_network::BlockDownloaderProvider; use reth_node_api::{AddOnsContext, FullNodePrimitives, FullNodeTypes, NodeTypesWithEngine}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -262,8 +262,6 @@ where info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( - ctx.components().network().event_listener().map(Into::into), - beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if 
ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { Either::Left( diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 4528bdeaa94..edd85501ec0 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -7,7 +7,7 @@ use alloy_rpc_types_engine::ForkchoiceState; use futures::Stream; use reth_beacon_consensus::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; use reth_engine_primitives::ForkchoiceStatus; -use reth_network_api::{NetworkEvent, PeersInfo}; +use reth_network_api::PeersInfo; use reth_primitives_traits::{format_gas, format_gas_throughput}; use reth_prune_types::PrunerEvent; use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId}; @@ -211,12 +211,6 @@ impl NodeState { } } - fn handle_network_event(&self, _: NetworkEvent) { - // NOTE(onbjerg): This used to log established/disconnecting sessions, but this is already - // logged in the networking component. I kept this stub in case we want to catch other - // networking events later on. - } - fn handle_consensus_engine_event(&mut self, event: BeaconConsensusEngineEvent) { match event { BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status) => { @@ -358,8 +352,6 @@ struct CurrentStage { /// A node event. #[derive(Debug)] pub enum NodeEvent { - /// A network event. - Network(NetworkEvent), /// A sync pipeline event. Pipeline(PipelineEvent), /// A consensus engine event. 
@@ -375,12 +367,6 @@ pub enum NodeEvent { Other(String), } -impl From for NodeEvent { - fn from(event: NetworkEvent) -> Self { - Self::Network(event) - } -} - impl From for NodeEvent { fn from(event: PipelineEvent) -> Self { Self::Pipeline(event) @@ -527,9 +513,6 @@ where while let Poll::Ready(Some(event)) = this.events.as_mut().poll_next(cx) { match event { - NodeEvent::Network(event) => { - this.state.handle_network_event(event); - } NodeEvent::Pipeline(event) => { this.state.handle_pipeline_event(event); } From 9fbe3468e842e62e3968d3a3e80d87c9db1e755b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 14:29:09 +0100 Subject: [PATCH 600/970] chore: use TransactionSigned trait bound for tx msg building (#12737) --- crates/net/network/src/transactions/mod.rs | 42 ++++++++++++---------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index c3ffea58d01..2a5496deead 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -49,6 +49,7 @@ use reth_network_p2p::{ use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; use reth_primitives::{PooledTransactionsElement, TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives_traits::{SignedTransaction, TransactionExt, TxType}; use reth_tokio_util::EventStream; use reth_transaction_pool::{ error::{PoolError, PoolResult}, @@ -1455,13 +1456,7 @@ struct PropagateTransaction { transaction: Arc, } -// === impl PropagateTransaction === - impl PropagateTransaction { - fn hash(&self) -> TxHash { - self.transaction.hash() - } - /// Create a new instance from a pooled transaction fn new(tx: Arc>) -> Self where @@ -1475,10 +1470,16 @@ impl PropagateTransaction { } } +impl PropagateTransaction { + fn hash(&self) -> TxHash { + *self.transaction.tx_hash() + } +} + /// Helper type to construct the appropriate message to send to the 
peer based on whether the peer /// should receive them in full or as pooled #[derive(Debug, Clone)] -enum PropagateTransactionsBuilder { +enum PropagateTransactionsBuilder { Pooled(PooledTransactionsHashesBuilder), Full(FullTransactionsBuilder), } @@ -1513,16 +1514,16 @@ impl PropagateTransactionsBuilder { } } -impl PropagateTransactionsBuilder { +impl PropagateTransactionsBuilder { /// Appends all transactions - fn extend<'a>(&mut self, txs: impl IntoIterator) { + fn extend<'a>(&mut self, txs: impl IntoIterator>) { for tx in txs { self.push(tx); } } /// Appends a transaction to the list. - fn push(&mut self, transaction: &PropagateTransaction) { + fn push(&mut self, transaction: &PropagateTransaction) { match self { Self::Pooled(builder) => builder.push(transaction), Self::Full(builder) => builder.push(transaction), @@ -1531,7 +1532,7 @@ impl PropagateTransactionsBuilder { } /// Represents how the transactions should be sent to a peer if any. -struct PropagateTransactions { +struct PropagateTransactions { /// The pooled transaction hashes to send. pooled: Option, /// The transactions to send in full. @@ -1543,7 +1544,7 @@ struct PropagateTransactions { /// and enforces other propagation rules for EIP-4844 and tracks those transactions that can't be /// broadcasted in full. #[derive(Debug, Clone)] -struct FullTransactionsBuilder { +struct FullTransactionsBuilder { /// The soft limit to enforce for a single broadcast message of full transactions. total_size: usize, /// All transactions to be broadcasted. @@ -1575,9 +1576,9 @@ impl FullTransactionsBuilder { } } -impl FullTransactionsBuilder { +impl FullTransactionsBuilder { /// Appends all transactions. 
- fn extend(&mut self, txs: impl IntoIterator) { + fn extend(&mut self, txs: impl IntoIterator>) { for tx in txs { self.push(&tx) } @@ -1591,7 +1592,7 @@ impl FullTransactionsBuilder { /// /// If the transaction is unsuitable for broadcast or would exceed the softlimit, it is appended /// to list of pooled transactions, (e.g. 4844 transactions). - fn push(&mut self, transaction: &PropagateTransaction) { + fn push(&mut self, transaction: &PropagateTransaction) { // Do not send full 4844 transaction hashes to peers. // // Nodes MUST NOT automatically broadcast blob transactions to their peers. @@ -1600,7 +1601,7 @@ impl FullTransactionsBuilder { // via `GetPooledTransactions`. // // From: - if transaction.transaction.is_eip4844() { + if transaction.transaction.transaction().tx_type().is_eip4844() { self.pooled.push(transaction); return } @@ -1651,19 +1652,22 @@ impl PooledTransactionsHashesBuilder { } /// Appends all hashes - fn extend(&mut self, txs: impl IntoIterator) { + fn extend( + &mut self, + txs: impl IntoIterator>, + ) { for tx in txs { self.push(&tx); } } - fn push(&mut self, tx: &PropagateTransaction) { + fn push(&mut self, tx: &PropagateTransaction) { match self { Self::Eth66(msg) => msg.0.push(tx.hash()), Self::Eth68(msg) => { msg.hashes.push(tx.hash()); msg.sizes.push(tx.size); - msg.types.push(tx.transaction.tx_type().into()); + msg.types.push(tx.transaction.transaction().tx_type().into()); } } } From 4f946733c356613cb155e9a64abcff8251696823 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 14:30:04 +0100 Subject: [PATCH 601/970] feat: add is_broadcastable_in_full to txtype (#12739) --- crates/primitives-traits/src/transaction/tx_type.rs | 10 ++++++++++ crates/primitives/src/transaction/tx_type.rs | 8 ++++++++ 2 files changed, 18 insertions(+) diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs index dc3dba7fdcf..866242098d3 100644 --- 
a/crates/primitives-traits/src/transaction/tx_type.rs +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -49,4 +49,14 @@ pub trait TxType: /// Returns `true` if this is an eip-7702 transaction. fn is_eip7702(&self) -> bool; + + /// Returns whether this transaction type can be __broadcasted__ as full transaction over the + /// network. + /// + /// Some transactions are not broadcastable as objects and only allowed to be broadcasted as + /// hashes, e.g. because they missing context (e.g. blob sidecar). + fn is_broadcastable_in_full(&self) -> bool { + // EIP-4844 transactions are not broadcastable in full, only hashes are allowed. + !self.is_eip4844() + } } diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 597487564df..0e344374d20 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -257,8 +257,16 @@ mod tests { use super::*; use alloy_primitives::hex; use reth_codecs::{txtype::*, Compact}; + use reth_primitives_traits::TxType as _; use rstest::rstest; + #[test] + fn is_broadcastable() { + assert!(TxType::Legacy.is_broadcastable_in_full()); + assert!(TxType::Eip1559.is_broadcastable_in_full()); + assert!(!TxType::Eip4844.is_broadcastable_in_full()); + } + #[rstest] #[case(U64::from(LEGACY_TX_TYPE_ID), Ok(TxType::Legacy))] #[case(U64::from(EIP2930_TX_TYPE_ID), Ok(TxType::Eip2930))] From 9d3f8cc6a27e59396ede209f3d439eb334e24eed Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 14:31:03 +0100 Subject: [PATCH 602/970] docs: add additional eth validator docs (#12742) --- crates/transaction-pool/src/validate/eth.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 70298487694..ca745222575 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -107,6 +107,19 @@ 
where } /// A [`TransactionValidator`] implementation that validates ethereum transaction. +/// +/// It supports all known ethereum transaction types: +/// - Legacy +/// - EIP-2718 +/// - EIP-1559 +/// - EIP-4844 +/// - EIP-7702 +/// +/// And enforces additional constraints such as: +/// - Maximum transaction size +/// - Maximum gas limit +/// +/// And adheres to the configured [`LocalTransactionConfig`]. #[derive(Debug)] pub(crate) struct EthTransactionValidatorInner { /// Spec of the chain From c73dadacb2c0d324c17fa690807a0f77722e6ed2 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 21 Nov 2024 19:20:29 +0400 Subject: [PATCH 603/970] refactor: unify code paths for trie unwind (#12741) --- bin/reth/src/commands/debug_cmd/execution.rs | 9 +- .../src/providers/database/provider.rs | 241 +++++++----------- 2 files changed, 93 insertions(+), 157 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index dd060ac2ab6..efe4a2f7c22 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -25,8 +25,7 @@ use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; use reth_node_api::NodeTypesWithDBAdapter; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ - providers::ProviderNodeTypes, BlockExecutionWriter, ChainSpecProvider, ProviderFactory, - StageCheckpointReader, + providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; use reth_prune::PruneModes; use reth_stages::{ @@ -230,11 +229,7 @@ impl> Command { trace!(target: "reth::cli", from = next_block, to = target_block, tip = ?target_block_hash, ?result, "Pipeline finished"); // Unwind the pipeline without committing. - { - provider_factory - .provider_rw()? 
- .take_block_and_execution_range(next_block..=target_block)?; - } + provider_factory.provider_rw()?.unwind_trie_state_range(next_block..=target_block)?; // Update latest block current_max_block = target_block; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 66bc4c05341..92cc8df2f5c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -243,6 +243,95 @@ impl AsRef for DatabaseProvider { } } +impl DatabaseProvider { + /// Unwinds trie state for the given range. + /// + /// This includes calculating the resulted state root and comparing it with the parent block + /// state root. + pub fn unwind_trie_state_range( + &self, + range: RangeInclusive, + ) -> ProviderResult<()> { + let changed_accounts = self + .tx + .cursor_read::()? + .walk_range(range.clone())? + .collect::, _>>()?; + + // Unwind account hashes. Add changed accounts to account prefix set. + let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; + let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); + let mut destroyed_accounts = HashSet::default(); + for (hashed_address, account) in hashed_addresses { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + if account.is_none() { + destroyed_accounts.insert(hashed_address); + } + } + + // Unwind account history indices. + self.unwind_account_history_indices(changed_accounts.iter())?; + let storage_range = BlockNumberAddress::range(range.clone()); + + let changed_storages = self + .tx + .cursor_read::()? + .walk_range(storage_range)? + .collect::, _>>()?; + + // Unwind storage hashes. Add changed account and storage keys to corresponding prefix + // sets. 
+ let mut storage_prefix_sets = HashMap::::default(); + let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; + for (hashed_address, hashed_slots) in storage_entries { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); + for slot in hashed_slots { + storage_prefix_set.insert(Nibbles::unpack(slot)); + } + storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); + } + + // Unwind storage history indices. + self.unwind_storage_history_indices(changed_storages.iter().copied())?; + + // Calculate the reverted merkle root. + // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets + // are pre-loaded. + let prefix_sets = TriePrefixSets { + account_prefix_set: account_prefix_set.freeze(), + storage_prefix_sets, + destroyed_accounts, + }; + let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) + .with_prefix_sets(prefix_sets) + .root_with_updates() + .map_err(Into::::into)?; + + let parent_number = range.start().saturating_sub(1); + let parent_state_root = self + .header_by_number(parent_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? + .state_root; + + // state root should be always correct as we are reverting state. + // but for sake of double verification we will check it again. + if new_state_root != parent_state_root { + let parent_hash = self + .block_hash(parent_number)? 
+ .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; + return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { + root: GotExpected { got: new_state_root, expected: parent_state_root }, + block_number: parent_number, + block_hash: parent_hash, + }))) + } + self.write_trie_updates(&trie_updates)?; + + Ok(()) + } +} + impl TryIntoHistoricalStateProvider for DatabaseProvider { fn try_into_history_at_block( self, @@ -2913,81 +3002,7 @@ impl BlockExecutio &self, range: RangeInclusive, ) -> ProviderResult { - let changed_accounts = self - .tx - .cursor_read::()? - .walk_range(range.clone())? - .collect::, _>>()?; - - // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; - let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account.is_none() { - destroyed_accounts.insert(hashed_address); - } - } - - // Unwind account history indices. - self.unwind_account_history_indices(changed_accounts.iter())?; - let storage_range = BlockNumberAddress::range(range.clone()); - - let changed_storages = self - .tx - .cursor_read::()? - .walk_range(storage_range)? - .collect::, _>>()?; - - // Unwind storage hashes. Add changed account and storage keys to corresponding prefix - // sets. 
- let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; - for (hashed_address, hashed_slots) in storage_entries { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); - for slot in hashed_slots { - storage_prefix_set.insert(Nibbles::unpack(slot)); - } - storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); - } - - // Unwind storage history indices. - self.unwind_storage_history_indices(changed_storages.iter().copied())?; - - // Calculate the reverted merkle root. - // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets - // are pre-loaded. - let prefix_sets = TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - }; - let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(Into::::into)?; - - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self - .header_by_number(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? - .state_root; - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. - if new_state_root != parent_state_root { - let parent_hash = self - .block_hash(parent_number)? 
- .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; - return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: new_state_root, expected: parent_state_root }, - block_number: parent_number, - block_hash: parent_hash, - }))) - } - self.write_trie_updates(&trie_updates)?; + self.unwind_trie_state_range(range.clone())?; // get blocks let blocks = self.take_block_range(range.clone())?; @@ -3012,81 +3027,7 @@ impl BlockExecutio &self, range: RangeInclusive, ) -> ProviderResult<()> { - let changed_accounts = self - .tx - .cursor_read::()? - .walk_range(range.clone())? - .collect::, _>>()?; - - // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; - let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account.is_none() { - destroyed_accounts.insert(hashed_address); - } - } - - // Unwind account history indices. - self.unwind_account_history_indices(changed_accounts.iter())?; - - let storage_range = BlockNumberAddress::range(range.clone()); - let changed_storages = self - .tx - .cursor_read::()? - .walk_range(storage_range)? - .collect::, _>>()?; - - // Unwind storage hashes. Add changed account and storage keys to corresponding prefix - // sets. 
- let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; - for (hashed_address, hashed_slots) in storage_entries { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); - for slot in hashed_slots { - storage_prefix_set.insert(Nibbles::unpack(slot)); - } - storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); - } - - // Unwind storage history indices. - self.unwind_storage_history_indices(changed_storages.iter().copied())?; - - // Calculate the reverted merkle root. - // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets - // are pre-loaded. - let prefix_sets = TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - }; - let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(Into::::into)?; - - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self - .header_by_number(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? - .state_root; - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. - if new_state_root != parent_state_root { - let parent_hash = self - .block_hash(parent_number)? 
- .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; - return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: new_state_root, expected: parent_state_root }, - block_number: parent_number, - block_hash: parent_hash, - }))) - } - self.write_trie_updates(&trie_updates)?; + self.unwind_trie_state_range(range.clone())?; // get blocks let blocks = self.take_block_range(range.clone())?; From 54ff4c73498c2b8e774eecd7b5e19fcbf54673c7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 16:35:43 +0100 Subject: [PATCH 604/970] feat: relax more tx manager bounds (#12744) --- crates/net/network/src/transactions/mod.rs | 322 +++++++++++---------- crates/optimism/node/src/txpool.rs | 7 +- crates/transaction-pool/src/traits.rs | 7 +- 3 files changed, 180 insertions(+), 156 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 2a5496deead..a4eef2fa99c 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -48,7 +48,7 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{PooledTransactionsElement, TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{PooledTransactionsElement, TransactionSigned}; use reth_primitives_traits::{SignedTransaction, TransactionExt, TxType}; use reth_tokio_util::EventStream; use reth_transaction_pool::{ @@ -678,40 +678,13 @@ where } } -impl TransactionsManager +impl TransactionsManager where - Pool: TransactionPool + 'static, + Pool: TransactionPool, + N: NetworkPrimitives, + <::Transaction as PoolTransaction>::Consensus: + Into, { - /// Request handler for an incoming request for transactions - fn on_get_pooled_transactions( - &mut self, - peer_id: PeerId, - request: GetPooledTransactions, - response: oneshot::Sender>, - ) { - if let Some(peer) = 
self.peers.get_mut(&peer_id) { - if self.network.tx_gossip_disabled() { - let _ = response.send(Ok(PooledTransactions::default())); - return - } - let transactions = self.pool.get_pooled_transaction_elements( - request.0, - GetPooledTransactionLimit::ResponseSizeSoftLimit( - self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, - ), - ); - - trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| *tx.hash()), "Sending requested transactions to peer"); - - // we sent a response at which point we assume that the peer is aware of the - // transactions - peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.hash())); - - let resp = PooledTransactions(transactions); - let _ = response.send(Ok(resp)); - } - } - /// Invoked when transactions in the local mempool are considered __pending__. /// /// When a transaction in the local mempool is moved to the pending pool, we propagate them to @@ -737,110 +710,6 @@ where self.propagate_all(hashes); } - /// Propagates the given transactions to the peers - /// - /// This fetches all transaction from the pool, including the 4844 blob transactions but - /// __without__ their sidecar, because 4844 transactions are only ever announced as hashes. - fn propagate_all(&mut self, hashes: Vec) { - let propagated = self.propagate_transactions( - self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), - PropagationMode::Basic, - ); - - // notify pool so events get fired - self.pool.on_propagated(propagated); - } - - /// Propagate the transactions to all connected peers either as full objects or hashes. - /// - /// The message for new pooled hashes depends on the negotiated version of the stream. - /// See [`NewPooledTransactionHashes`] - /// - /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . 
- fn propagate_transactions( - &mut self, - to_propagate: Vec, - propagation_mode: PropagationMode, - ) -> PropagatedTransactions { - let mut propagated = PropagatedTransactions::default(); - if self.network.tx_gossip_disabled() { - return propagated - } - - // send full transactions to a set of the connected peers based on the configured mode - let max_num_full = self.config.propagation_mode.full_peer_count(self.peers.len()); - - // Note: Assuming ~random~ order due to random state of the peers map hasher - for (peer_idx, (peer_id, peer)) in self.peers.iter_mut().enumerate() { - // determine whether to send full tx objects or hashes. - let mut builder = if peer_idx > max_num_full { - PropagateTransactionsBuilder::pooled(peer.version) - } else { - PropagateTransactionsBuilder::full(peer.version) - }; - - if propagation_mode.is_forced() { - builder.extend(to_propagate.iter()); - } else { - // Iterate through the transactions to propagate and fill the hashes and full - // transaction lists, before deciding whether or not to send full transactions to - // the peer. 
- for tx in &to_propagate { - // Only proceed if the transaction is not in the peer's list of seen - // transactions - if !peer.seen_transactions.contains(&tx.hash()) { - builder.push(tx); - } - } - } - - if builder.is_empty() { - trace!(target: "net::tx", ?peer_id, "Nothing to propagate to peer; has seen all transactions"); - continue - } - - let PropagateTransactions { pooled, full } = builder.build(); - - // send hashes if any - if let Some(mut new_pooled_hashes) = pooled { - // enforce tx soft limit per message for the (unlikely) event the number of - // hashes exceeds it - new_pooled_hashes - .truncate(SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE); - - for hash in new_pooled_hashes.iter_hashes().copied() { - propagated.0.entry(hash).or_default().push(PropagateKind::Hash(*peer_id)); - // mark transaction as seen by peer - peer.seen_transactions.insert(hash); - } - - trace!(target: "net::tx", ?peer_id, num_txs=?new_pooled_hashes.len(), "Propagating tx hashes to peer"); - - // send hashes of transactions - self.network.send_transactions_hashes(*peer_id, new_pooled_hashes); - } - - // send full transactions, if any - if let Some(new_full_transactions) = full { - for tx in &new_full_transactions { - propagated.0.entry(tx.hash()).or_default().push(PropagateKind::Full(*peer_id)); - // mark transaction as seen by peer - peer.seen_transactions.insert(tx.hash()); - } - - trace!(target: "net::tx", ?peer_id, num_txs=?new_full_transactions.len(), "Propagating full transactions to peer"); - - // send full transactions - self.network.send_transactions(*peer_id, new_full_transactions); - } - } - - // Update propagated transactions metrics - self.metrics.propagated_transactions.increment(propagated.0.len() as u64); - - propagated - } - /// Propagate the full transactions to a specific peer. /// /// Returns the propagated transactions. 
@@ -896,9 +765,9 @@ where // send full transactions, if any if let Some(new_full_transactions) = full { for tx in &new_full_transactions { - propagated.0.entry(tx.hash()).or_default().push(PropagateKind::Full(peer_id)); + propagated.0.entry(*tx.tx_hash()).or_default().push(PropagateKind::Full(peer_id)); // mark transaction as seen by peer - peer.seen_transactions.insert(tx.hash()); + peer.seen_transactions.insert(*tx.tx_hash()); } // send full transactions @@ -930,8 +799,12 @@ where return }; - let to_propagate: Vec = - self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(); + let to_propagate = self + .pool + .get_all(hashes) + .into_iter() + .map(PropagateTransaction::new) + .collect::>(); let mut propagated = PropagatedTransactions::default(); @@ -975,6 +848,150 @@ where self.pool.on_propagated(propagated); } + /// Propagate the transactions to all connected peers either as full objects or hashes. + /// + /// The message for new pooled hashes depends on the negotiated version of the stream. + /// See [`NewPooledTransactionHashes`] + /// + /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . + fn propagate_transactions( + &mut self, + to_propagate: Vec>, + propagation_mode: PropagationMode, + ) -> PropagatedTransactions { + let mut propagated = PropagatedTransactions::default(); + if self.network.tx_gossip_disabled() { + return propagated + } + + // send full transactions to a set of the connected peers based on the configured mode + let max_num_full = self.config.propagation_mode.full_peer_count(self.peers.len()); + + // Note: Assuming ~random~ order due to random state of the peers map hasher + for (peer_idx, (peer_id, peer)) in self.peers.iter_mut().enumerate() { + // determine whether to send full tx objects or hashes. 
+ let mut builder = if peer_idx > max_num_full { + PropagateTransactionsBuilder::pooled(peer.version) + } else { + PropagateTransactionsBuilder::full(peer.version) + }; + + if propagation_mode.is_forced() { + builder.extend(to_propagate.iter()); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction lists, before deciding whether or not to send full transactions to + // the peer. + for tx in &to_propagate { + // Only proceed if the transaction is not in the peer's list of seen + // transactions + if !peer.seen_transactions.contains(&tx.hash()) { + builder.push(tx); + } + } + } + + if builder.is_empty() { + trace!(target: "net::tx", ?peer_id, "Nothing to propagate to peer; has seen all transactions"); + continue + } + + let PropagateTransactions { pooled, full } = builder.build(); + + // send hashes if any + if let Some(mut new_pooled_hashes) = pooled { + // enforce tx soft limit per message for the (unlikely) event the number of + // hashes exceeds it + new_pooled_hashes + .truncate(SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE); + + for hash in new_pooled_hashes.iter_hashes().copied() { + propagated.0.entry(hash).or_default().push(PropagateKind::Hash(*peer_id)); + // mark transaction as seen by peer + peer.seen_transactions.insert(hash); + } + + trace!(target: "net::tx", ?peer_id, num_txs=?new_pooled_hashes.len(), "Propagating tx hashes to peer"); + + // send hashes of transactions + self.network.send_transactions_hashes(*peer_id, new_pooled_hashes); + } + + // send full transactions, if any + if let Some(new_full_transactions) = full { + for tx in &new_full_transactions { + propagated + .0 + .entry(*tx.tx_hash()) + .or_default() + .push(PropagateKind::Full(*peer_id)); + // mark transaction as seen by peer + peer.seen_transactions.insert(*tx.tx_hash()); + } + + trace!(target: "net::tx", ?peer_id, num_txs=?new_full_transactions.len(), "Propagating full transactions to peer"); + + // send 
full transactions + self.network.send_transactions(*peer_id, new_full_transactions); + } + } + + // Update propagated transactions metrics + self.metrics.propagated_transactions.increment(propagated.0.len() as u64); + + propagated + } + + /// Propagates the given transactions to the peers + /// + /// This fetches all transaction from the pool, including the 4844 blob transactions but + /// __without__ their sidecar, because 4844 transactions are only ever announced as hashes. + fn propagate_all(&mut self, hashes: Vec) { + let propagated = self.propagate_transactions( + self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), + PropagationMode::Basic, + ); + + // notify pool so events get fired + self.pool.on_propagated(propagated); + } +} + +impl TransactionsManager +where + Pool: TransactionPool + 'static, + <::Transaction as PoolTransaction>::Consensus: Into, +{ + /// Request handler for an incoming request for transactions + fn on_get_pooled_transactions( + &mut self, + peer_id: PeerId, + request: GetPooledTransactions, + response: oneshot::Sender>, + ) { + if let Some(peer) = self.peers.get_mut(&peer_id) { + if self.network.tx_gossip_disabled() { + let _ = response.send(Ok(PooledTransactions::default())); + return + } + let transactions = self.pool.get_pooled_transaction_elements( + request.0, + GetPooledTransactionLimit::ResponseSizeSoftLimit( + self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, + ), + ); + + trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| *tx.hash()), "Sending requested transactions to peer"); + + // we sent a response at which point we assume that the peer is aware of the + // transactions + peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.hash())); + + let resp = PooledTransactions(transactions); + let _ = response.send(Ok(resp)); + } + } + /// Handles dedicated transaction events related to the `eth` protocol. 
fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { match event { @@ -1273,6 +1290,7 @@ where impl Future for TransactionsManager where Pool: TransactionPool + Unpin + 'static, + <::Transaction as PoolTransaction>::Consensus: Into, { type Output = (); @@ -1456,21 +1474,18 @@ struct PropagateTransaction { transaction: Arc, } -impl PropagateTransaction { +impl PropagateTransaction { /// Create a new instance from a pooled transaction - fn new(tx: Arc>) -> Self + fn new

(tx: Arc>) -> Self where - T: PoolTransaction>, + P: PoolTransaction>, { let size = tx.encoded_length(); - let recovered: TransactionSignedEcRecovered = - tx.transaction.clone().into_consensus().into(); - let transaction = Arc::new(recovered.into_signed()); + let transaction = tx.transaction.clone().into_consensus().into(); + let transaction = Arc::new(transaction); Self { size, transaction } } -} -impl PropagateTransaction { fn hash(&self) -> TxHash { *self.transaction.tx_hash() } @@ -2372,7 +2387,8 @@ mod tests { #[test] fn test_transaction_builder_empty() { - let mut builder = PropagateTransactionsBuilder::pooled(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::pooled(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); @@ -2388,7 +2404,8 @@ mod tests { #[test] fn test_transaction_builder_large() { - let mut builder = PropagateTransactionsBuilder::full(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::full(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); @@ -2416,7 +2433,8 @@ mod tests { #[test] fn test_transaction_builder_eip4844() { - let mut builder = PropagateTransactionsBuilder::full(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::full(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 0edfeec7322..7df5888fb75 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -3,7 +3,9 @@ use alloy_eips::eip2718::Encodable2718; use parking_lot::RwLock; use reth_chainspec::ChainSpec; use reth_optimism_evm::RethL1BlockInfo; -use reth_primitives::{Block, GotExpected, InvalidTransactionError, SealedBlock}; +use reth_primitives::{ + Block, GotExpected, InvalidTransactionError, SealedBlock, TransactionSigned, +}; use 
reth_provider::{BlockReaderIdExt, StateProviderFactory}; use reth_revm::L1BlockInfo; use reth_transaction_pool::{ @@ -140,7 +142,8 @@ where let l1_block_info = self.block_info.l1_block_info.read().clone(); let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); - valid_tx.transaction().clone().into_consensus().into().encode_2718(&mut encoded); + let tx: TransactionSigned = valid_tx.transaction().clone().into_consensus().into(); + tx.encode_2718(&mut encoded); let cost_addition = match l1_block_info.l1_tx_data_fee( &self.chain_spec(), diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index bcde571b07b..68a911f2e2e 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -20,7 +20,8 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, + PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSigned, + TransactionSignedEcRecovered, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -1068,7 +1069,9 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Super trait for transactions that can be converted to and from Eth transactions pub trait EthPoolTransaction: PoolTransaction< - Consensus: From + Into, + Consensus: From + + Into + + Into, Pooled: From + Into, > { From 96f7572404321c718940360cf8b7423f5e5b7615 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 21 Nov 2024 15:52:38 +0000 Subject: [PATCH 605/970] chore(net): downgrade pending sesion timeout log to trace (#12745) --- crates/net/network/src/session/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 
816c540cee2..a020c540e38 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -807,7 +807,7 @@ pub(crate) async fn pending_session_with_timeout( F: Future, { if tokio::time::timeout(timeout, f).await.is_err() { - debug!(target: "net::session", ?remote_addr, ?direction, "pending session timed out"); + trace!(target: "net::session", ?remote_addr, ?direction, "pending session timed out"); let event = PendingSessionEvent::Disconnected { remote_addr, session_id, From ad7885b48cbc8b6270cf10c2615ad9fbeccb28ef Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Nov 2024 17:10:06 +0100 Subject: [PATCH 606/970] chore(sdk): Define helper trait `MaybeCompact` (#12683) --- Cargo.toml | 1 + crates/primitives-traits/Cargo.toml | 26 +++++--- crates/primitives-traits/src/account.rs | 59 ++++++++++++------- crates/primitives-traits/src/block/body.rs | 1 - crates/primitives-traits/src/block/header.rs | 7 +-- crates/primitives-traits/src/block/mod.rs | 8 +-- crates/primitives-traits/src/header/sealed.rs | 17 +++--- crates/primitives-traits/src/integer_list.rs | 27 +++++---- crates/primitives-traits/src/lib.rs | 14 +++++ crates/primitives-traits/src/receipt.rs | 11 ++-- crates/primitives-traits/src/storage.rs | 10 ++-- .../primitives-traits/src/transaction/mod.rs | 7 +-- .../src/transaction/signed.rs | 11 ++-- .../src/transaction/tx_type.rs | 7 +-- crates/primitives/Cargo.toml | 7 ++- crates/storage/db-api/Cargo.toml | 2 +- crates/storage/db-models/Cargo.toml | 2 +- crates/storage/db/Cargo.toml | 2 +- crates/storage/provider/Cargo.toml | 2 +- 19 files changed, 134 insertions(+), 87 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 702bbc3090b..ad17ea4ad0c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -528,6 +528,7 @@ tracing = "0.1.0" tracing-appender = "0.2" url = "2.3" zstd = "0.13" +byteorder = "1" # metrics metrics = "0.24.0" diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 
20430fbc882..b686a2e98ba 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-codecs.workspace = true +reth-codecs = { workspace = true, optional = true } # ethereum alloy-consensus.workspace = true @@ -24,16 +24,16 @@ alloy-rlp.workspace = true revm-primitives.workspace = true # misc -byteorder = "1" +byteorder = { workspace = true, optional = true } +bytes.workspace = true derive_more.workspace = true roaring = "0.10.2" serde_with = { workspace = true, optional = true } auto_impl.workspace = true # required by reth-codecs -bytes.workspace = true -modular-bitfield.workspace = true -serde.workspace = true +modular-bitfield = { workspace = true, optional = true } +serde = { workspace = true, optional = true} # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } @@ -50,6 +50,8 @@ proptest.workspace = true rand.workspace = true serde_json.workspace = true test-fuzz.workspace = true +modular-bitfield.workspace = true +serde.workspace = true [features] default = ["std"] @@ -59,11 +61,11 @@ std = [ "alloy-genesis/std", "alloy-primitives/std", "revm-primitives/std", - "serde/std" + "serde?/std" ] test-utils = [ "arbitrary", - "reth-codecs/test-utils" + "reth-codecs?/test-utils" ] arbitrary = [ "std", @@ -74,7 +76,7 @@ arbitrary = [ "dep:proptest-arbitrary-interop", "alloy-eips/arbitrary", "revm-primitives/arbitrary", - "reth-codecs/arbitrary" + "reth-codecs?/arbitrary" ] serde-bincode-compat = [ "serde", @@ -83,13 +85,19 @@ serde-bincode-compat = [ "alloy-eips/serde-bincode-compat" ] serde = [ + "dep:serde", "alloy-consensus/serde", "alloy-eips/serde", "alloy-primitives/serde", "bytes/serde", "rand/serde", - "reth-codecs/serde", + "reth-codecs?/serde", "revm-primitives/serde", "roaring/serde", "revm-primitives/serde", +] +reth-codec = [ + "dep:reth-codecs", + "dep:modular-bitfield", + "dep:byteorder", ] \ No newline at end of file diff 
--git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index 927e39a52e1..c8504f3b63c 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -1,32 +1,34 @@ use alloy_consensus::constants::KECCAK_EMPTY; use alloy_genesis::GenesisAccount; use alloy_primitives::{keccak256, Bytes, B256, U256}; -use byteorder::{BigEndian, ReadBytesExt}; -use bytes::Buf; use derive_more::Deref; -use reth_codecs::{add_arbitrary_tests, Compact}; -use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, BytecodeDecodeError, JumpTable}; +use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, BytecodeDecodeError}; -/// Identifier for [`LegacyRaw`](RevmBytecode::LegacyRaw). -const LEGACY_RAW_BYTECODE_ID: u8 = 0; +#[cfg(any(test, feature = "reth-codec"))] +/// Identifiers used in [`Compact`](reth_codecs::Compact) encoding of [`Bytecode`]. +pub mod compact_ids { + /// Identifier for [`LegacyRaw`](revm_primitives::Bytecode::LegacyRaw). + pub const LEGACY_RAW_BYTECODE_ID: u8 = 0; -/// Identifier for removed bytecode variant. -const REMOVED_BYTECODE_ID: u8 = 1; + /// Identifier for removed bytecode variant. + pub const REMOVED_BYTECODE_ID: u8 = 1; -/// Identifier for [`LegacyAnalyzed`](RevmBytecode::LegacyAnalyzed). -const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2; + /// Identifier for [`LegacyAnalyzed`](revm_primitives::Bytecode::LegacyAnalyzed). + pub const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2; -/// Identifier for [`Eof`](RevmBytecode::Eof). -const EOF_BYTECODE_ID: u8 = 3; + /// Identifier for [`Eof`](revm_primitives::Bytecode::Eof). + pub const EOF_BYTECODE_ID: u8 = 3; -/// Identifier for [`Eip7702`](RevmBytecode::Eip7702). -const EIP7702_BYTECODE_ID: u8 = 4; + /// Identifier for [`Eip7702`](revm_primitives::Bytecode::Eip7702). + pub const EIP7702_BYTECODE_ID: u8 = 4; +} /// An Ethereum account. 
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[derive(Clone, Copy, Debug, PartialEq, Eq, Default, Compact)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct Account { /// Account nonce. pub nonce: u64, @@ -85,11 +87,17 @@ impl Bytecode { } } -impl Compact for Bytecode { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for Bytecode { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, { + use compact_ids::{ + EIP7702_BYTECODE_ID, EOF_BYTECODE_ID, LEGACY_ANALYZED_BYTECODE_ID, + LEGACY_RAW_BYTECODE_ID, + }; + let bytecode = match &self.0 { RevmBytecode::LegacyRaw(bytes) => bytes, RevmBytecode::LegacyAnalyzed(analyzed) => analyzed.bytecode(), @@ -128,7 +136,12 @@ impl Compact for Bytecode { // A panic will be triggered if a bytecode variant of 1 or greater than 2 is passed from the // database. 
fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { - let len = buf.read_u32::().expect("could not read bytecode length"); + use byteorder::ReadBytesExt; + use bytes::Buf; + + use compact_ids::*; + + let len = buf.read_u32::().expect("could not read bytecode length"); let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); let variant = buf.read_u8().expect("could not read bytecode variant"); let decoded = match variant { @@ -139,8 +152,8 @@ impl Compact for Bytecode { LEGACY_ANALYZED_BYTECODE_ID => Self(unsafe { RevmBytecode::new_analyzed( bytes, - buf.read_u64::().unwrap() as usize, - JumpTable::from_slice(buf), + buf.read_u64::().unwrap() as usize, + revm_primitives::JumpTable::from_slice(buf), ) }), EOF_BYTECODE_ID | EIP7702_BYTECODE_ID => { @@ -187,9 +200,11 @@ impl From for AccountInfo { #[cfg(test)] mod tests { - use super::*; use alloy_primitives::{hex_literal::hex, B256, U256}; - use revm_primitives::LegacyAnalyzedBytecode; + use reth_codecs::Compact; + use revm_primitives::{JumpTable, LegacyAnalyzedBytecode}; + + use super::*; #[test] fn test_account() { diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 11c4dd785dd..ff41536ba3f 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -28,7 +28,6 @@ pub trait BlockBody: + MaybeSerde { /// Ordered list of signed transactions as committed in block. - // todo: requires trait for signed transaction type Transaction: Transaction; /// Returns reference to transactions in block. 
diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 524835879f3..695e63ed10e 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -3,15 +3,14 @@ use core::fmt; use alloy_primitives::Sealable; -use reth_codecs::Compact; -use crate::{InMemorySize, MaybeSerde}; +use crate::{InMemorySize, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by block header to support full node /// operations. -pub trait FullBlockHeader: BlockHeader + Compact {} +pub trait FullBlockHeader: BlockHeader + MaybeCompact {} -impl FullBlockHeader for T where T: BlockHeader + Compact {} +impl FullBlockHeader for T where T: BlockHeader + MaybeCompact {} /// Abstraction of a block header. pub trait BlockHeader: diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 01ed75bd967..3f4fbd343ee 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -5,18 +5,18 @@ pub mod header; use alloc::fmt; -use alloy_rlp::{Decodable, Encodable}; - use crate::{BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeSerde}; /// Helper trait that unifies all behaviour required by block to support full node operations. 
pub trait FullBlock: - Block + Encodable + Decodable + Block + alloy_rlp::Encodable + alloy_rlp::Decodable { } impl FullBlock for T where - T: Block + Encodable + Decodable + T: Block + + alloy_rlp::Encodable + + alloy_rlp::Decodable { } diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index d9931fc95c5..f0a6869ed1e 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,17 +1,19 @@ -use crate::InMemorySize; pub use alloy_consensus::Header; + +use core::mem; + use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; use alloy_primitives::{keccak256, BlockHash, Sealable, B256}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; -use core::mem; use derive_more::{AsRef, Deref}; -use reth_codecs::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; + +use crate::InMemorySize; /// A helper struct to store the block number/hash and its parent hash. -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct BlockWithParent { /// Parent hash. pub parent: B256, @@ -21,8 +23,9 @@ pub struct BlockWithParent { /// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want /// to modify header. -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] -#[add_arbitrary_tests(rlp)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] pub struct SealedHeader { /// Locked Header hash. 
hash: BlockHash, diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs index 682fa0cf822..6fc6d75899c 100644 --- a/crates/primitives-traits/src/integer_list.rs +++ b/crates/primitives-traits/src/integer_list.rs @@ -1,13 +1,9 @@ use alloc::vec::Vec; -use bytes::BufMut; use core::fmt; + +use bytes::BufMut; use derive_more::Deref; use roaring::RoaringTreemap; -use serde::{ - de::{SeqAccess, Visitor}, - ser::SerializeSeq, - Deserialize, Deserializer, Serialize, Serializer, -}; /// A data structure that uses Roaring Bitmaps to efficiently store a list of integers. /// @@ -90,11 +86,14 @@ impl IntegerList { } } -impl Serialize for IntegerList { +#[cfg(feature = "serde")] +impl serde::Serialize for IntegerList { fn serialize(&self, serializer: S) -> Result where - S: Serializer, + S: serde::Serializer, { + use serde::ser::SerializeSeq; + let mut seq = serializer.serialize_seq(Some(self.len() as usize))?; for e in &self.0 { seq.serialize_element(&e)?; @@ -103,8 +102,11 @@ impl Serialize for IntegerList { } } +#[cfg(feature = "serde")] struct IntegerListVisitor; -impl<'de> Visitor<'de> for IntegerListVisitor { + +#[cfg(feature = "serde")] +impl<'de> serde::de::Visitor<'de> for IntegerListVisitor { type Value = IntegerList; fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -113,7 +115,7 @@ impl<'de> Visitor<'de> for IntegerListVisitor { fn visit_seq(self, mut seq: E) -> Result where - E: SeqAccess<'de>, + E: serde::de::SeqAccess<'de>, { let mut list = IntegerList::empty(); while let Some(item) = seq.next_element()? 
{ @@ -123,10 +125,11 @@ impl<'de> Visitor<'de> for IntegerListVisitor { } } -impl<'de> Deserialize<'de> for IntegerList { +#[cfg(feature = "serde")] +impl<'de> serde::Deserialize<'de> for IntegerList { fn deserialize(deserializer: D) -> Result where - D: Deserializer<'de>, + D: serde::Deserializer<'de>, { deserializer.deserialize_byte_buf(IntegerListVisitor) } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 38e83f8ccdf..5c969152d8d 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -103,3 +103,17 @@ pub trait MaybeSerde {} impl MaybeSerde for T where T: serde::Serialize + for<'de> serde::Deserialize<'de> {} #[cfg(not(feature = "serde"))] impl MaybeSerde for T {} + +/// Helper trait that requires database encoding implementation since `reth-codec` feature is +/// enabled. +#[cfg(feature = "reth-codec")] +pub trait MaybeCompact: reth_codecs::Compact {} +/// Noop. Helper trait that would require database encoding implementation if `reth-codec` feature +/// were enabled. +#[cfg(not(feature = "reth-codec"))] +pub trait MaybeCompact {} + +#[cfg(feature = "reth-codec")] +impl MaybeCompact for T where T: reth_codecs::Compact {} +#[cfg(not(feature = "reth-codec"))] +impl MaybeCompact for T {} diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 64839ecb8b4..4370d2ac00f 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,16 +1,17 @@ //! Receipt abstraction -use crate::{InMemorySize, MaybeSerde}; +use core::fmt; + use alloc::vec::Vec; use alloy_consensus::TxReceipt; use alloy_primitives::B256; -use core::fmt; -use reth_codecs::Compact; + +use crate::{InMemorySize, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by receipt to support full node operations. 
-pub trait FullReceipt: Receipt + Compact {} +pub trait FullReceipt: Receipt + MaybeCompact {} -impl FullReceipt for T where T: ReceiptExt + Compact {} +impl FullReceipt for T where T: ReceiptExt + MaybeCompact {} /// Abstraction of a receipt. #[auto_impl::auto_impl(&, Arc)] diff --git a/crates/primitives-traits/src/storage.rs b/crates/primitives-traits/src/storage.rs index 39b6155ee28..c6b9b1e11c7 100644 --- a/crates/primitives-traits/src/storage.rs +++ b/crates/primitives-traits/src/storage.rs @@ -1,13 +1,12 @@ use alloy_primitives::{B256, U256}; -use reth_codecs::{add_arbitrary_tests, Compact}; -use serde::{Deserialize, Serialize}; /// Account storage entry. /// /// `key` is the subkey when used as a value in the `StorageChangeSets` table. -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct StorageEntry { /// Storage key. pub key: B256, @@ -31,7 +30,8 @@ impl From<(B256, U256)> for StorageEntry { // NOTE: Removing reth_codec and manually encode subkey // and compress second part of the value. 
If we have compression // over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey -impl Compact for StorageEntry { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StorageEntry { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 8bd0027a8b2..7647c94496f 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -7,14 +7,13 @@ pub mod tx_type; use core::{fmt, hash::Hash}; use alloy_primitives::B256; -use reth_codecs::Compact; -use crate::{FullTxType, InMemorySize, MaybeArbitrary, MaybeSerde, TxType}; +use crate::{FullTxType, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, TxType}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. -pub trait FullTransaction: Transaction + Compact {} +pub trait FullTransaction: Transaction + MaybeCompact {} -impl FullTransaction for T where T: Transaction + Compact {} +impl FullTransaction for T where T: Transaction + MaybeCompact {} /// Abstraction of a transaction. 
pub trait Transaction: diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 633b0caf7b2..563f3a6f336 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -5,18 +5,19 @@ use core::hash::Hash; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; -use reth_codecs::Compact; -use crate::{FillTxEnv, FullTransaction, InMemorySize, MaybeArbitrary, MaybeSerde, Transaction}; +use crate::{ + FillTxEnv, FullTransaction, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, Transaction, +}; /// Helper trait that unifies all behaviour required by block to support full node operations. pub trait FullSignedTx: - SignedTransaction + FillTxEnv + Compact + SignedTransaction + FillTxEnv + MaybeCompact { } impl FullSignedTx for T where - T: SignedTransaction + FillTxEnv + Compact + T: SignedTransaction + FillTxEnv + MaybeCompact { } @@ -41,7 +42,7 @@ pub trait SignedTransaction: + MaybeArbitrary + InMemorySize { - /// Transaction type that is signed. + /// Unsigned transaction type. type Transaction: Transaction; /// Returns reference to transaction hash. diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs index 866242098d3..931fcb773bf 100644 --- a/crates/primitives-traits/src/transaction/tx_type.rs +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -3,15 +3,14 @@ use core::fmt; use alloy_primitives::{U64, U8}; -use reth_codecs::Compact; -use crate::InMemorySize; +use crate::{InMemorySize, MaybeCompact}; /// Helper trait that unifies all behaviour required by transaction type ID to support full node /// operations. 
-pub trait FullTxType: TxType + Compact {} +pub trait FullTxType: TxType + MaybeCompact {} -impl FullTxType for T where T: TxType + Compact {} +impl FullTxType for T where T: TxType + MaybeCompact {} /// Trait representing the behavior of a transaction type. pub trait TxType: diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 89282c8f93d..ebfa26aef0a 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -105,7 +105,12 @@ std = [ "serde/std", "alloy-trie/std" ] -reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] +reth-codec = [ + "dep:reth-codecs", + "dep:zstd", + "dep:modular-bitfield", "std", + "reth-primitives-traits/reth-codec", +] asm-keccak = ["alloy-primitives/asm-keccak", "revm-primitives/asm-keccak"] arbitrary = [ "dep:arbitrary", diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index bcc3e778984..3aa908a6009 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-codecs.workspace = true reth-db-models.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } -reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 44c0c3d962a..0997c08b784 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } # ethereum alloy-primitives.workspace = true diff --git a/crates/storage/db/Cargo.toml 
b/crates/storage/db/Cargo.toml index 7dca8aa8475..af72bc43f7e 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-db-api.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } -reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } reth-fs-util.workspace = true reth-storage-errors.workspace = true reth-nippy-jar.workspace = true diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 674f02adabc..974de01e004 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -17,7 +17,7 @@ reth-chainspec.workspace = true reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true reth-primitives = { workspace = true, features = ["reth-codec", "secp256k1"] } -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["reth-codec"] } reth-fs-util.workspace = true reth-errors.workspace = true reth-storage-errors.workspace = true From 2c7b404c245dc465fbd956193400afb8df63be60 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 21 Nov 2024 23:46:44 +0700 Subject: [PATCH 607/970] perf(evm-config): return `&Arc` (#12748) --- crates/ethereum/evm/src/lib.rs | 2 +- crates/optimism/evm/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index c8ed58df03b..206230cd00e 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -55,7 +55,7 @@ impl EthEvmConfig { } /// Returns the chain spec associated with this configuration. 
- pub fn chain_spec(&self) -> &ChainSpec { + pub const fn chain_spec(&self) -> &Arc { &self.chain_spec } } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 52b974e6c86..31074627510 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -50,7 +50,7 @@ impl OpEvmConfig { } /// Returns the chain spec associated with this configuration. - pub fn chain_spec(&self) -> &OpChainSpec { + pub const fn chain_spec(&self) -> &Arc { &self.chain_spec } } From 2093d2bd9a72074be19f410116a9fb256eb15eed Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Nov 2024 18:03:05 +0100 Subject: [PATCH 608/970] chore(sdk): Add `NodePrimitives::BlockHeader` and `NodePrimitives::BlockBody` (#12647) --- .github/assets/check_wasm.sh | 1 + Cargo.lock | 1 + crates/optimism/node/Cargo.toml | 5 ++++ crates/optimism/node/src/node.rs | 16 +++-------- crates/optimism/primitives/Cargo.toml | 15 +++++++++-- crates/optimism/primitives/src/lib.rs | 10 ++++--- crates/primitives-traits/Cargo.toml | 2 +- crates/primitives-traits/src/node.rs | 38 ++++++++++++++++++++++++--- crates/primitives/src/lib.rs | 11 ++------ 9 files changed, 68 insertions(+), 31 deletions(-) diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 35f4bdda5b8..11e5b5e00b9 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -47,6 +47,7 @@ exclude_crates=( reth-optimism-node reth-optimism-payload-builder reth-optimism-rpc + reth-optimism-primitives reth-rpc reth-rpc-api reth-rpc-api-testing-util diff --git a/Cargo.lock b/Cargo.lock index 10b7f2fbda5..9ea3283dc91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8328,6 +8328,7 @@ dependencies = [ "reth-optimism-forks", "reth-optimism-node", "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-optimism-rpc", "reth-payload-builder", "reth-payload-util", diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 2e3e9fb4f1d..18ceee8ef8b 
100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -39,6 +39,7 @@ reth-optimism-rpc.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-forks.workspace = true +reth-optimism-primitives.workspace = true # revm with required optimism features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -119,3 +120,7 @@ test-utils = [ "revm/test-utils", "reth-optimism-node/test-utils", ] +reth-codec = [ + "reth-primitives/reth-codec", + "reth-optimism-primitives/reth-codec", +] \ No newline at end of file diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index bdc3d0d3a44..46841a2a5b9 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -9,8 +9,7 @@ use reth_db::transaction::{DbTx, DbTxMut}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, FullNodePrimitives, NodeAddOns, - PayloadBuilder, + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, PayloadBuilder, }; use reth_node_builder::{ components::{ @@ -25,12 +24,13 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; use reth_optimism_payload_builder::builder::OpPayloadTransactions; +use reth_optimism_primitives::OpPrimitives; use reth_optimism_rpc::{ witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, OpEthApi, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, BlockBody, Receipt, TransactionSigned, TxType}; +use reth_primitives::BlockBody; use reth_provider::{ providers::ChainStorage, BlockBodyWriter, CanonStateSubscriptions, DBProvider, EthStorage, ProviderResult, @@ -49,16 +49,6 @@ use crate::{ 
txpool::{OpTransactionPool, OpTransactionValidator}, OpEngineTypes, }; -/// Optimism primitive types. -#[derive(Debug, Default, Clone, PartialEq, Eq)] -pub struct OpPrimitives; - -impl FullNodePrimitives for OpPrimitives { - type Block = Block; - type SignedTx = TransactionSigned; - type TxType = TxType; - type Receipt = Receipt; -} /// Storage implementation for Optimism. #[derive(Debug, Default, Clone)] diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index ade6d4eb6bc..fc368807736 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -39,10 +39,21 @@ reth-codecs = { workspace = true, features = ["test-utils"] } rstest.workspace = true [features] -default = ["reth-codec"] +default = ["std", "reth-codec"] +std = [ + "reth-primitives-traits/std", + "reth-primitives/std", + "reth-node-types/std", + "reth-codecs/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "serde/std", +] reth-codec = [ "dep:reth-codecs", - "reth-primitives/reth-codec" + "reth-primitives/reth-codec", + "reth-primitives-traits/reth-codec", ] serde = [ "dep:serde", diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 5f6b1848e64..26499bb43af 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -6,21 +6,25 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] pub mod bedrock; pub mod tx_type; pub use tx_type::OpTxType; -use reth_node_types::NodePrimitives; -use reth_primitives::{Block, Receipt, TransactionSigned}; +use alloy_consensus::Header; +use reth_node_types::FullNodePrimitives; +use reth_primitives::{Block, BlockBody, Receipt, TransactionSigned}; /// Optimism primitive types. 
#[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct OpPrimitives; -impl NodePrimitives for OpPrimitives { +impl FullNodePrimitives for OpPrimitives { type Block = Block; + type BlockHeader = Header; + type BlockBody = BlockBody; type SignedTx = TransactionSigned; type TxType = OpTxType; type Receipt = Receipt; diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index b686a2e98ba..df4491b2d12 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -100,4 +100,4 @@ reth-codec = [ "dep:reth-codecs", "dep:modular-bitfield", "dep:byteorder", -] \ No newline at end of file +] diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 180920d3934..7cb321e9af3 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,6 +1,8 @@ use core::fmt; -use crate::{BlockBody, FullBlock, FullReceipt, FullSignedTx, FullTxType, MaybeSerde}; +use crate::{ + FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx, FullTxType, MaybeSerde, +}; /// Configures all the primitive types of the node. pub trait NodePrimitives: @@ -17,6 +19,28 @@ pub trait NodePrimitives: + Eq + MaybeSerde + 'static; + /// Block header primitive. + type BlockHeader: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + 'static; + /// Block body primitive. + type BlockBody: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + 'static; /// Signed version of the transaction type. type SignedTx: Send + Sync @@ -45,6 +69,8 @@ pub trait NodePrimitives: impl NodePrimitives for () { type Block = (); + type BlockHeader = (); + type BlockBody = (); type SignedTx = (); type TxType = (); type Receipt = (); @@ -55,7 +81,11 @@ pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static { /// Block primitive. 
- type Block: FullBlock>; + type Block: FullBlock

; + /// Block header primitive. + type BlockHeader: FullBlockHeader + 'static; + /// Block body primitive. + type BlockBody: FullBlockBody + 'static; /// Signed version of the transaction type. type SignedTx: FullSignedTx; /// Transaction envelope type ID. @@ -66,9 +96,11 @@ pub trait FullNodePrimitives: impl NodePrimitives for T where - T: FullNodePrimitives, + T: FullNodePrimitives, { type Block = T::Block; + type BlockHeader = T::BlockHeader; + type BlockBody = T::BlockBody; type SignedTx = T::SignedTx; type TxType = T::TxType; type Receipt = T::Receipt; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 027bf97cfa5..c46c437dd71 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -79,17 +79,10 @@ pub mod serde_bincode_compat { #[derive(Debug, Clone, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct EthPrimitives; -#[cfg(feature = "reth-codec")] impl reth_primitives_traits::FullNodePrimitives for EthPrimitives { type Block = crate::Block; - type SignedTx = crate::TransactionSigned; - type TxType = crate::TxType; - type Receipt = crate::Receipt; -} - -#[cfg(not(feature = "reth-codec"))] -impl NodePrimitives for EthPrimitives { - type Block = crate::Block; + type BlockHeader = alloy_consensus::Header; + type BlockBody = crate::BlockBody; type SignedTx = crate::TransactionSigned; type TxType = crate::TxType; type Receipt = crate::Receipt; From f8d683e80ee08412e19cc212a4c2bf8ebeca245b Mon Sep 17 00:00:00 2001 From: Nils Date: Thu, 21 Nov 2024 18:03:15 +0100 Subject: [PATCH 609/970] Improve metrics hooks setup (fixes #12672) (#12684) Co-authored-by: Matthias Seitz --- Cargo.lock | 3 - crates/cli/commands/src/stage/run.rs | 19 ++++- crates/node/builder/src/launch/common.rs | 17 +++- crates/node/metrics/Cargo.toml | 4 - crates/node/metrics/src/hooks.rs | 98 ++++++++++++++++-------- crates/node/metrics/src/server.rs | 8 +- 6 files changed, 99 insertions(+), 50 deletions(-) diff --git 
a/Cargo.lock b/Cargo.lock index 9ea3283dc91..db7adde0479 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8143,10 +8143,7 @@ dependencies = [ "metrics-util", "procfs 0.16.0", "reqwest", - "reth-db-api", "reth-metrics", - "reth-primitives-traits", - "reth-provider", "reth-tasks", "socket2", "tikv-jemalloc-ctl", diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index f3c3bbef965..c852eea05a7 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -11,6 +11,7 @@ use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::config::{HashingConfig, SenderRecoveryConfig, TransactionLookupConfig}; +use reth_db_api::database_metrics::DatabaseMetrics; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -132,10 +133,20 @@ impl> Command }, ChainSpecInfo { name: provider_factory.chain_spec().chain().to_string() }, ctx.task_executor, - Hooks::new( - provider_factory.db_ref().clone(), - provider_factory.static_file_provider(), - ), + Hooks::builder() + .with_hook({ + let db = provider_factory.db_ref().clone(); + move || db.report_metrics() + }) + .with_hook({ + let sfp = provider_factory.static_file_provider(); + move || { + if let Err(error) = sfp.report_metrics() { + error!(%error, "Failed to report metrics from static file provider"); + } + } + }) + .build(), ); MetricServer::new(config).serve().await?; diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 225f2029c28..9cc841f6fea 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -18,7 +18,7 @@ use reth_blockchain_tree::{ use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_consensus::Consensus; -use reth_db_api::database::Database; +use 
reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitDatabaseError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_engine_local::MiningMode; @@ -536,7 +536,20 @@ where }, ChainSpecInfo { name: self.left().config.chain.chain().to_string() }, self.task_executor().clone(), - Hooks::new(self.database().clone(), self.static_file_provider()), + Hooks::builder() + .with_hook({ + let db = self.database().clone(); + move || db.report_metrics() + }) + .with_hook({ + let sfp = self.static_file_provider(); + move || { + if let Err(error) = sfp.report_metrics() { + error!(%error, "Failed to report metrics for the static file provider"); + } + } + }) + .build(), ); MetricServer::new(config).serve().await?; diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml index 7e271f93ce5..3d79d11db7d 100644 --- a/crates/node/metrics/Cargo.toml +++ b/crates/node/metrics/Cargo.toml @@ -8,9 +8,6 @@ homepage.workspace = true repository.workspace = true [dependencies] -reth-db-api.workspace = true -reth-primitives-traits.workspace = true -reth-provider.workspace = true reth-metrics.workspace = true reth-tasks.workspace = true @@ -37,7 +34,6 @@ procfs = "0.16.0" [dev-dependencies] reqwest.workspace = true socket2 = { version = "0.5", default-features = false } -reth-provider = { workspace = true, features = ["test-utils"] } [lints] workspace = true diff --git a/crates/node/metrics/src/hooks.rs b/crates/node/metrics/src/hooks.rs index 21d12614f62..3b6d23a3900 100644 --- a/crates/node/metrics/src/hooks.rs +++ b/crates/node/metrics/src/hooks.rs @@ -1,20 +1,59 @@ use metrics_process::Collector; -use reth_db_api::database_metrics::DatabaseMetrics; -use reth_primitives_traits::NodePrimitives; -use reth_provider::providers::StaticFileProvider; -use std::{ - fmt::{self}, - sync::Arc, -}; +use std::{fmt, sync::Arc}; -pub(crate) trait Hook: Fn() + Send + Sync {} 
-impl Hook for T {} +/// The simple alias for function types that are `'static`, `Send`, and `Sync`. +pub trait Hook: Fn() + Send + Sync + 'static {} +impl Hook for T {} -impl fmt::Debug for Hooks { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let hooks_len = self.inner.len(); - f.debug_struct("Hooks") - .field("inner", &format!("Arc>>, len: {}", hooks_len)) +/// A builder-like type to create a new [`Hooks`] instance. +pub struct HooksBuilder { + hooks: Vec>>, +} + +impl HooksBuilder { + /// Registers a [`Hook`]. + pub fn with_hook(self, hook: impl Hook) -> Self { + self.with_boxed_hook(Box::new(hook)) + } + + /// Registers a [`Hook`] by calling the provided closure. + pub fn install_hook(self, f: F) -> Self + where + F: FnOnce() -> H, + H: Hook, + { + self.with_hook(f()) + } + + /// Registers a [`Hook`]. + #[inline] + pub fn with_boxed_hook(mut self, hook: Box>) -> Self { + self.hooks.push(hook); + self + } + + /// Builds the [`Hooks`] collection from the registered hooks. 
+ pub fn build(self) -> Hooks { + Hooks { inner: Arc::new(self.hooks) } + } +} + +impl Default for HooksBuilder { + fn default() -> Self { + Self { + hooks: vec![ + Box::new(|| Collector::default().collect()), + Box::new(collect_memory_stats), + Box::new(collect_io_stats), + ], + } + } +} + +impl std::fmt::Debug for HooksBuilder { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("HooksBuilder") + .field("hooks", &format_args!("Vec>, len: {}", self.hooks.len())) .finish() } } @@ -26,24 +65,10 @@ pub struct Hooks { } impl Hooks { - /// Create a new set of hooks - pub fn new(db: Metrics, static_file_provider: StaticFileProvider) -> Self - where - Metrics: DatabaseMetrics + 'static + Send + Sync, - N: NodePrimitives, - { - let hooks: Vec>> = vec![ - Box::new(move || db.report_metrics()), - Box::new(move || { - let _ = static_file_provider.report_metrics().map_err( - |error| tracing::error!(%error, "Failed to report static file provider metrics"), - ); - }), - Box::new(move || Collector::default().collect()), - Box::new(collect_memory_stats), - Box::new(collect_io_stats), - ]; - Self { inner: Arc::new(hooks) } + /// Creates a new [`HooksBuilder`] instance. 
+ #[inline] + pub fn builder() -> HooksBuilder { + HooksBuilder::default() } pub(crate) fn iter(&self) -> impl Iterator>> { @@ -51,6 +76,15 @@ impl Hooks { } } +impl fmt::Debug for Hooks { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let hooks_len = self.inner.len(); + f.debug_struct("Hooks") + .field("inner", &format_args!("Arc>>, len: {}", hooks_len)) + .finish() + } +} + #[cfg(all(feature = "jemalloc", unix))] fn collect_memory_stats() { use metrics::gauge; diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index 313329fb56a..313b578f800 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -206,7 +206,6 @@ const fn describe_io_stats() {} mod tests { use super::*; use reqwest::Client; - use reth_provider::{test_utils::create_test_provider_factory, StaticFileProviderFactory}; use reth_tasks::TaskManager; use socket2::{Domain, Socket, Type}; use std::net::{SocketAddr, TcpListener}; @@ -236,8 +235,7 @@ mod tests { let tasks = TaskManager::current(); let executor = tasks.executor(); - let factory = create_test_provider_factory(); - let hooks = Hooks::new(factory.db_ref().clone(), factory.static_file_provider()); + let hooks = Hooks::builder().build(); let listen_addr = get_random_available_addr(); let config = @@ -252,7 +250,7 @@ mod tests { // Check the response body let body = response.text().await.unwrap(); - assert!(body.contains("reth_db_table_size")); - assert!(body.contains("reth_jemalloc_metadata")); + assert!(body.contains("reth_process_cpu_seconds_total")); + assert!(body.contains("reth_process_start_time_seconds")); } } From 3d477e7d6fde555856529abca3de7b658662e125 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Nov 2024 18:53:57 +0100 Subject: [PATCH 610/970] Replace use of fully qualified syntax (#12751) --- crates/e2e-test-utils/src/lib.rs | 6 ++---- crates/node/builder/src/builder/mod.rs | 6 ++---- crates/node/builder/src/launch/common.rs | 6 ++---- 
crates/node/builder/src/launch/engine.rs | 3 +-- crates/node/builder/src/launch/mod.rs | 3 +-- crates/node/builder/src/setup.rs | 6 ++---- 6 files changed, 10 insertions(+), 20 deletions(-) diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index df459f641b4..73a7e39f1a4 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -59,8 +59,7 @@ where Components: NodeComponents, Network: PeersHandleProvider>, >, N::AddOns: RethRpcAddOns>, - N::Primitives: - FullNodePrimitives>, + N::Primitives: FullNodePrimitives, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -135,8 +134,7 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, - N::Primitives: - FullNodePrimitives>, + N::Primitives: FullNodePrimitives, { let tasks = TaskManager::current(); let exec = tasks.executor(); diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 3ad90a493f1..65ae704fe83 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -365,8 +365,7 @@ where >>::Components, >, >, - N::Primitives: - FullNodePrimitives>, + N::Primitives: FullNodePrimitives, { self.node(node).launch().await } @@ -557,8 +556,7 @@ where T: NodeTypesWithEngine + NodeTypesForProvider, CB: NodeComponentsBuilder>, AO: RethRpcAddOns, CB::Components>>, - T::Primitives: - FullNodePrimitives>, + T::Primitives: FullNodePrimitives, { /// Launches the node with the [`DefaultNodeLauncher`] that sets up engine API consensus and rpc pub async fn launch( diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 9cc841f6fea..47ec68ff0d7 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -408,8 +408,7 @@ where pub async fn create_provider_factory(&self) -> eyre::Result> where N: ProviderNodeTypes, - N::Primitives: - 
FullNodePrimitives>, + N::Primitives: FullNodePrimitives, { let factory = ProviderFactory::new( self.right().clone(), @@ -476,8 +475,7 @@ where ) -> eyre::Result, ProviderFactory>>> where N: ProviderNodeTypes, - N::Primitives: - FullNodePrimitives>, + N::Primitives: FullNodePrimitives, { let factory = self.create_provider_factory().await?; let ctx = LaunchContextWith { diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index f485be2c22d..ef1edc899eb 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -77,8 +77,7 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, - Types::Primitives: - FullNodePrimitives>, + Types::Primitives: FullNodePrimitives, { type Node = NodeHandle, AO>; diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index be317e4be31..a1819948ee4 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -102,8 +102,7 @@ where T: FullNodeTypes, Types = Types>, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, - Types::Primitives: - FullNodePrimitives>, + Types::Primitives: FullNodePrimitives, { type Node = NodeHandle, AO>; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 400e3d84456..71f0ceb56cd 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -41,8 +41,7 @@ where N: ProviderNodeTypes, Client: EthBlockClient + 'static, Executor: BlockExecutorProvider, - N::Primitives: - FullNodePrimitives>, + N::Primitives: FullNodePrimitives, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -92,8 +91,7 @@ where Body = <::Block as reth_node_api::Block>::Body, > + 'static, Executor: BlockExecutorProvider, - N::Primitives: - FullNodePrimitives>, + N::Primitives: 
FullNodePrimitives, { let mut builder = Pipeline::::builder(); From c2323b4e49c2dc11a5f71253c2e0f1fc27ae7df0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Nov 2024 19:17:16 +0100 Subject: [PATCH 611/970] chore(sdk): add adapters for header and body to `NodeTypes` (#12723) --- Cargo.lock | 6 +++--- crates/node/types/src/lib.rs | 6 ++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index db7adde0479..eabde10a0a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4590,7 +4590,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -11181,7 +11181,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3637e734239e12ab152cd269302500bd063f37624ee210cd04b4936ed671f3b1" dependencies = [ "cc", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -11672,7 +11672,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 2da8180a956..2e5558a33bf 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -232,3 +232,9 @@ where { type Engine = E; } + +/// Helper adapter type for accessing [`NodePrimitives::BlockHeader`] on [`NodeTypes`]. +pub type HeaderTy = <::Primitives as NodePrimitives>::BlockHeader; + +/// Helper adapter type for accessing [`NodePrimitives::BlockBody`] on [`NodeTypes`]. 
+pub type BodyTy = <::Primitives as NodePrimitives>::BlockBody; From edeacbecfbf662eeb51a8a28c253df8f4d3123fa Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 19:46:39 +0100 Subject: [PATCH 612/970] fix: bad databaseargs default (#12747) --- crates/storage/db/src/implementation/mdbx/mod.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 10f3b228230..006213e4cb9 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -66,7 +66,7 @@ impl DatabaseEnvKind { } /// Arguments for database initialization. -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug)] pub struct DatabaseArguments { /// Client version that accesses the database. client_version: ClientVersion, @@ -99,6 +99,12 @@ pub struct DatabaseArguments { exclusive: Option, } +impl Default for DatabaseArguments { + fn default() -> Self { + Self::new(ClientVersion::default()) + } +} + impl DatabaseArguments { /// Create new database arguments with given client version. 
pub fn new(client_version: ClientVersion) -> Self { From 0558235b98a8fa16aa745c6f3ecb65bb9bc8e00a Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 21 Nov 2024 23:47:33 +0400 Subject: [PATCH 613/970] refactor: unify logic for blocks removal (#12743) Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- crates/blockchain-tree/src/blockchain_tree.rs | 4 +- crates/cli/commands/src/stage/unwind.rs | 46 +- crates/optimism/node/src/node.rs | 8 + crates/stages/stages/src/stages/bodies.rs | 190 +++---- .../provider/src/providers/database/mod.rs | 15 - .../src/providers/database/provider.rs | 478 ++++-------------- crates/storage/provider/src/traits/block.rs | 35 +- crates/storage/provider/src/writer/mod.rs | 8 +- crates/storage/storage-api/src/chain.rs | 19 + 9 files changed, 267 insertions(+), 536 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 8e192492593..c778e0508da 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -25,7 +25,7 @@ use reth_provider::{ BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, ChainSplitTarget, DBProvider, DisplayBlocksChain, HeaderProvider, ProviderError, - StaticFileProviderFactory, + StaticFileProviderFactory, StorageLocation, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; @@ -1333,7 +1333,7 @@ where info!(target: "blockchain_tree", "REORG: revert canonical from database by unwinding chain blocks {:?}", revert_range); // read block and execution result from database. and remove traces of block from tables. 
let blocks_and_execution = provider_rw - .take_block_and_execution_range(revert_range) + .take_block_and_execution_above(revert_until, StorageLocation::Database) .map_err(|e| CanonicalError::CanonicalRevert(e.to_string()))?; provider_rw.commit()?; diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index 4f47a70b02d..2d29121d069 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -2,7 +2,7 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::B256; use clap::{Parser, Subcommand}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -17,6 +17,7 @@ use reth_node_core::args::NetworkArgs; use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainSpecProvider, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, + StorageLocation, }; use reth_prune::PruneModes; use reth_stages::{ @@ -25,7 +26,7 @@ use reth_stages::{ ExecutionStageThresholds, Pipeline, StageSet, }; use reth_static_file::StaticFileProducer; -use std::{ops::RangeInclusive, sync::Arc}; +use std::sync::Arc; use tokio::sync::watch; use tracing::info; @@ -52,16 +53,13 @@ impl> Command pub async fn execute>(self) -> eyre::Result<()> { let Environment { provider_factory, config, .. 
} = self.env.init::(AccessRights::RW)?; - let range = self.command.unwind_range(provider_factory.clone())?; - if *range.start() == 0 { - eyre::bail!("Cannot unwind genesis block") - } + let target = self.command.unwind_target(provider_factory.clone())?; let highest_static_file_block = provider_factory .static_file_provider() .get_highest_static_files() .max() - .filter(|highest_static_file_block| highest_static_file_block >= range.start()); + .filter(|highest_static_file_block| *highest_static_file_block > target); // Execute a pipeline unwind if the start of the range overlaps the existing static // files. If that's the case, then copy all available data from MDBX to static files, and @@ -75,9 +73,9 @@ impl> Command } if let Some(highest_static_file_block) = highest_static_file_block { - info!(target: "reth::cli", ?range, ?highest_static_file_block, "Executing a pipeline unwind."); + info!(target: "reth::cli", ?target, ?highest_static_file_block, "Executing a pipeline unwind."); } else { - info!(target: "reth::cli", ?range, "Executing a pipeline unwind."); + info!(target: "reth::cli", ?target, "Executing a pipeline unwind."); } // This will build an offline-only pipeline if the `offline` flag is enabled @@ -86,29 +84,25 @@ impl> Command // Move all applicable data from database to static files. 
pipeline.move_to_static_files()?; - pipeline.unwind((*range.start()).saturating_sub(1), None)?; + pipeline.unwind(target, None)?; } else { - info!(target: "reth::cli", ?range, "Executing a database unwind."); + info!(target: "reth::cli", ?target, "Executing a database unwind."); let provider = provider_factory.provider_rw()?; - let _ = provider - .take_block_and_execution_range(range.clone()) + provider + .remove_block_and_execution_above(target, StorageLocation::Both) .map_err(|err| eyre::eyre!("Transaction error on unwind: {err}"))?; // update finalized block if needed let last_saved_finalized_block_number = provider.last_finalized_block_number()?; - let range_min = - range.clone().min().ok_or(eyre::eyre!("Could not fetch lower range end"))?; - if last_saved_finalized_block_number.is_none() || - Some(range_min) < last_saved_finalized_block_number - { - provider.save_finalized_block_number(BlockNumber::from(range_min))?; + if last_saved_finalized_block_number.is_none_or(|f| f > target) { + provider.save_finalized_block_number(target)?; } provider.commit()?; } - info!(target: "reth::cli", range=?range.clone(), count=range.count(), "Unwound blocks"); + info!(target: "reth::cli", ?target, "Unwound blocks"); Ok(()) } @@ -183,13 +177,11 @@ enum Subcommands { } impl Subcommands { - /// Returns the block range to unwind. - /// - /// This returns an inclusive range: [target..=latest] - fn unwind_range>>( + /// Returns the block to unwind to. The returned block will stay in database. 
+ fn unwind_target>>( &self, factory: ProviderFactory, - ) -> eyre::Result> { + ) -> eyre::Result { let provider = factory.provider()?; let last = provider.last_block_number()?; let target = match self { @@ -200,11 +192,11 @@ impl Subcommands { BlockHashOrNumber::Number(num) => *num, }, Self::NumBlocks { amount } => last.saturating_sub(*amount), - } + 1; + }; if target > last { eyre::bail!("Target block number is higher than the latest block number") } - Ok(target..=last) + Ok(target) } } diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 46841a2a5b9..82b2ce2ebc2 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -62,6 +62,14 @@ impl> BlockBodyWriter for ) -> ProviderResult<()> { self.0.write_block_bodies(provider, bodies) } + + fn remove_block_bodies_above( + &self, + provider: &Provider, + block: alloy_primitives::BlockNumber, + ) -> ProviderResult<()> { + self.0.remove_block_bodies_above(provider, block) + } } impl ChainStorage for OpStorage { diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index b6eab349e16..e541b908104 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -10,10 +10,7 @@ use tracing::*; use alloy_primitives::TxNumber; use reth_db::{tables, transaction::DbTx}; -use reth_db_api::{ - cursor::{DbCursorRO, DbCursorRW}, - transaction::DbTxMut, -}; +use reth_db_api::{cursor::DbCursorRO, transaction::DbTxMut}; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; use reth_provider::{ @@ -70,6 +67,82 @@ impl BodyStage { pub const fn new(downloader: D) -> Self { Self { downloader, buffer: None } } + + /// Ensures that static files and database are in sync. 
+ fn ensure_consistency( + &self, + provider: &Provider, + unwind_block: Option, + ) -> Result<(), StageError> + where + Provider: DBProvider + BlockReader + StaticFileProviderFactory, + { + // Get id for the next tx_num of zero if there are no transactions. + let next_tx_num = provider + .tx_ref() + .cursor_read::()? + .last()? + .map(|(id, _)| id + 1) + .unwrap_or_default(); + + let static_file_provider = provider.static_file_provider(); + + // Make sure Transactions static file is at the same height. If it's further, this + // input execution was interrupted previously and we need to unwind the static file. + let next_static_file_tx_num = static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Transactions) + .map(|id| id + 1) + .unwrap_or_default(); + + match next_static_file_tx_num.cmp(&next_tx_num) { + // If static files are ahead, we are currently unwinding the stage or we didn't reach + // the database commit in a previous stage run. So, our only solution is to unwind the + // static files and proceed from the database expected height. + Ordering::Greater => { + let highest_db_block = + provider.tx_ref().entries::()? as u64; + let mut static_file_producer = + static_file_provider.latest_writer(StaticFileSegment::Transactions)?; + static_file_producer + .prune_transactions(next_static_file_tx_num - next_tx_num, highest_db_block)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()?; + } + // If static files are behind, then there was some corruption or loss of files. This + // error will trigger an unwind, that will bring the database to the same height as the + // static files. + Ordering::Less => { + // If we are already in the process of unwind, this might be fine because we will + // fix the inconsistency right away. + if let Some(unwind_to) = unwind_block { + let next_tx_num_after_unwind = provider + .tx_ref() + .get::(unwind_to)? 
+ .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?; + + // This means we need a deeper unwind. + if next_tx_num_after_unwind > next_static_file_tx_num { + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + &static_file_provider, + provider, + )?) + } + } else { + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + &static_file_provider, + provider, + )?) + } + } + Ordering::Equal => {} + } + + Ok(()) + } } impl Stage for BodyStage @@ -122,50 +195,9 @@ where } let (from_block, to_block) = input.next_block_range().into_inner(); - // Get id for the next tx_num of zero if there are no transactions. - let next_tx_num = provider - .tx_ref() - .cursor_read::()? - .last()? - .map(|(id, _)| id + 1) - .unwrap_or_default(); - - let static_file_provider = provider.static_file_provider(); - - // Make sure Transactions static file is at the same height. If it's further, this - // input execution was interrupted previously and we need to unwind the static file. - let next_static_file_tx_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Transactions) - .map(|id| id + 1) - .unwrap_or_default(); - - match next_static_file_tx_num.cmp(&next_tx_num) { - // If static files are ahead, then we didn't reach the database commit in a previous - // stage run. So, our only solution is to unwind the static files and proceed from the - // database expected height. - Ordering::Greater => { - let mut static_file_producer = - static_file_provider.get_writer(from_block, StaticFileSegment::Transactions)?; - static_file_producer - .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?; - // Since this is a database <-> static file inconsistency, we commit the change - // straight away. - static_file_producer.commit()?; - } - // If static files are behind, then there was some corruption or loss of files. 
This - // error will trigger an unwind, that will bring the database to the same height as the - // static files. - Ordering::Less => { - return Err(missing_static_data_error( - next_static_file_tx_num.saturating_sub(1), - &static_file_provider, - provider, - )?) - } - Ordering::Equal => {} - } + self.ensure_consistency(provider, None)?; - debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, start_tx_id = next_tx_num, "Commencing sync"); + debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, "Commencing sync"); let buffer = self.buffer.take().ok_or(StageError::MissingDownloadBuffer)?; trace!(target: "sync::stages::bodies", bodies_len = buffer.len(), "Writing blocks"); @@ -200,66 +232,8 @@ where ) -> Result { self.buffer.take(); - let static_file_provider = provider.static_file_provider(); - let tx = provider.tx_ref(); - // Cursors to unwind bodies, ommers - let mut body_cursor = tx.cursor_write::()?; - let mut ommers_cursor = tx.cursor_write::()?; - let mut withdrawals_cursor = tx.cursor_write::()?; - // Cursors to unwind transitions - let mut tx_block_cursor = tx.cursor_write::()?; - - let mut rev_walker = body_cursor.walk_back(None)?; - while let Some((number, block_meta)) = rev_walker.next().transpose()? { - if number <= input.unwind_to { - break - } - - // Delete the ommers entry if any - if ommers_cursor.seek_exact(number)?.is_some() { - ommers_cursor.delete_current()?; - } - - // Delete the withdrawals entry if any - if withdrawals_cursor.seek_exact(number)?.is_some() { - withdrawals_cursor.delete_current()?; - } - - // Delete all transaction to block values. 
- if !block_meta.is_empty() && - tx_block_cursor.seek_exact(block_meta.last_tx_num())?.is_some() - { - tx_block_cursor.delete_current()?; - } - - // Delete the current body value - rev_walker.delete_current()?; - } - - let mut static_file_producer = - static_file_provider.latest_writer(StaticFileSegment::Transactions)?; - - // Unwind from static files. Get the current last expected transaction from DB, and match it - // on static file - let db_tx_num = - body_cursor.last()?.map(|(_, block_meta)| block_meta.last_tx_num()).unwrap_or_default(); - let static_file_tx_num: u64 = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Transactions) - .unwrap_or_default(); - - // If there are more transactions on database, then we are missing static file data and we - // need to unwind further. - if db_tx_num > static_file_tx_num { - return Err(missing_static_data_error( - static_file_tx_num, - &static_file_provider, - provider, - )?) - } - - // Unwinds static file - static_file_producer - .prune_transactions(static_file_tx_num.saturating_sub(db_tx_num), input.unwind_to)?; + self.ensure_consistency(provider, Some(input.unwind_to))?; + provider.remove_bodies_above(input.unwind_to, StorageLocation::Both)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) @@ -268,6 +242,8 @@ where } } +/// Called when database is ahead of static files. Attempts to find the first block we are missing +/// transactions for. 
fn missing_static_data_error( last_tx_num: TxNumber, static_file_provider: &StaticFileProvider, diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index cc50aa35145..354eb10c103 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -781,21 +781,6 @@ mod tests { let db_senders = provider.senders_by_tx_range(range); assert_eq!(db_senders, Ok(vec![])); - - let result = provider.take_block_transaction_range(0..=0); - assert_eq!( - result, - Ok(vec![( - 0, - block - .body - .transactions - .iter() - .cloned() - .map(|tx| tx.into_ecrecovered().unwrap()) - .collect() - )]) - ) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 92cc8df2f5c..8c390b06c08 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -26,7 +26,7 @@ use alloy_eips::{ BlockHashOrNumber, }; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; -use itertools::{izip, Itertools}; +use itertools::Itertools; use rayon::slice::ParallelSliceMut; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_db::{ @@ -41,7 +41,7 @@ use reth_db_api::{ }, table::Table, transaction::{DbTx, DbTxMut}, - DatabaseError, DbTxUnwindExt, + DatabaseError, }; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; @@ -50,7 +50,7 @@ use reth_node_types::NodeTypes; use reth_primitives::{ Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, - TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, + TransactionSigned, TransactionSignedNoHash, }; use 
reth_primitives_traits::{BlockBody as _, FullNodePrimitives, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; @@ -75,7 +75,7 @@ use std::{ sync::{mpsc, Arc}, }; use tokio::sync::watch; -use tracing::{debug, error, trace}; +use tracing::{debug, trace}; /// A [`DatabaseProvider`] that holds a read-only database transaction. pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; @@ -881,276 +881,6 @@ impl DatabaseProvider { Ok(self.tx.commit()?) } - /// Remove requested block transactions, without returning them. - /// - /// This will remove block data for the given range from the following tables: - /// * [`BlockBodyIndices`](tables::BlockBodyIndices) - /// * [`Transactions`](tables::Transactions) - /// * [`TransactionSenders`](tables::TransactionSenders) - /// * [`TransactionHashNumbers`](tables::TransactionHashNumbers) - /// * [`TransactionBlocks`](tables::TransactionBlocks) - pub fn remove_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult<()> { - // Raad range of block bodies to get all transactions id's of this range. - let block_bodies = self.take::(range)?; - - if block_bodies.is_empty() { - return Ok(()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(()) - } - - // Get transactions so we can then remove - let transactions = self - .take::(first_transaction..=last_transaction)? 
- .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - // remove senders - self.remove::(first_transaction..=last_transaction)?; - - // Remove TransactionHashNumbers - let mut tx_hash_cursor = self.tx.cursor_write::()?; - for (_, tx) in &transactions { - if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { - tx_hash_cursor.delete_current()?; - } - } - - // Remove TransactionBlocks index if there are transaction present - if !transactions.is_empty() { - let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; - self.remove::(tx_id_range)?; - } - - Ok(()) - } - - /// Get requested blocks transaction with senders, also removing them from the database - /// - /// This will remove block data for the given range from the following tables: - /// * [`BlockBodyIndices`](tables::BlockBodyIndices) - /// * [`Transactions`](tables::Transactions) - /// * [`TransactionSenders`](tables::TransactionSenders) - /// * [`TransactionHashNumbers`](tables::TransactionHashNumbers) - /// * [`TransactionBlocks`](tables::TransactionBlocks) - pub fn take_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult)>> { - // Raad range of block bodies to get all transactions id's of this range. - let block_bodies = self.get::(range)?; - - if block_bodies.is_empty() { - return Ok(Vec::new()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(block_bodies.into_iter().map(|(n, _)| (n, Vec::new())).collect()) - } - - // Get transactions and senders - let transactions = self - .take::(first_transaction..=last_transaction)? 
- .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - let mut senders = - self.take::(first_transaction..=last_transaction)?; - - recover_block_senders(&mut senders, &transactions, first_transaction, last_transaction)?; - - // Remove TransactionHashNumbers - let mut tx_hash_cursor = self.tx.cursor_write::()?; - for (_, tx) in &transactions { - if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { - tx_hash_cursor.delete_current()?; - } - } - - // Remove TransactionBlocks index if there are transaction present - if !transactions.is_empty() { - let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; - self.remove::(tx_id_range)?; - } - - // Merge transaction into blocks - let mut block_tx = Vec::with_capacity(block_bodies.len()); - let mut senders = senders.into_iter(); - let mut transactions = transactions.into_iter(); - for (block_number, block_body) in block_bodies { - let mut one_block_tx = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - let tx = transactions.next(); - let sender = senders.next(); - - let recovered = match (tx, sender) { - (Some((tx_id, tx)), Some((sender_tx_id, sender))) => { - if tx_id == sender_tx_id { - Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender)) - } else { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - } - (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - (None, None) => Err(ProviderError::BlockBodyTransactionCount), - }?; - one_block_tx.push(recovered) - } - block_tx.push((block_number, one_block_tx)); - } - - Ok(block_tx) - } - - /// Remove the given range of blocks, without returning any of the blocks. 
- /// - /// This will remove block data for the given range from the following tables: - /// * [`HeaderNumbers`](tables::HeaderNumbers) - /// * [`CanonicalHeaders`](tables::CanonicalHeaders) - /// * [`BlockOmmers`](tables::BlockOmmers) - /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) - /// - /// This will also remove transaction data according to - /// [`remove_block_transaction_range`](Self::remove_block_transaction_range). - pub fn remove_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult<()> { - let block_headers = self.remove::(range.clone())?; - if block_headers == 0 { - return Ok(()) - } - - self.tx.unwind_table_by_walker::( - range.clone(), - )?; - self.remove::(range.clone())?; - self.remove::(range.clone())?; - self.remove::(range.clone())?; - self.remove_block_transaction_range(range.clone())?; - self.remove::(range)?; - - Ok(()) - } - - /// Remove the given range of blocks, and return them. - /// - /// This will remove block data for the given range from the following tables: - /// * [`HeaderNumbers`](tables::HeaderNumbers) - /// * [`CanonicalHeaders`](tables::CanonicalHeaders) - /// * [`BlockOmmers`](tables::BlockOmmers) - /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) - /// - /// This will also remove transaction data according to - /// [`take_block_transaction_range`](Self::take_block_transaction_range). 
- pub fn take_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult> - where - N::ChainSpec: EthereumHardforks, - { - // For blocks we need: - // - // - Headers - // - Bodies (transactions) - // - Uncles/ommers - // - Withdrawals - // - Signers - - let block_headers = self.take::(range.clone())?; - if block_headers.is_empty() { - return Ok(Vec::new()) - } - - self.tx.unwind_table_by_walker::( - range.clone(), - )?; - let block_header_hashes = self.take::(range.clone())?; - let block_ommers = self.take::(range.clone())?; - let block_withdrawals = self.take::(range.clone())?; - let block_tx = self.take_block_transaction_range(range.clone())?; - - let mut blocks = Vec::with_capacity(block_headers.len()); - - // rm HeaderTerminalDifficulties - self.remove::(range)?; - - // merge all into block - let block_header_iter = block_headers.into_iter(); - let block_header_hashes_iter = block_header_hashes.into_iter(); - let block_tx_iter = block_tx.into_iter(); - - // Ommers can be empty for some blocks - let mut block_ommers_iter = block_ommers.into_iter(); - let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_ommers = block_ommers_iter.next(); - let mut block_withdrawals = block_withdrawals_iter.next(); - - for ((main_block_number, header), (_, header_hash), (_, tx)) in - izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) - { - let header = SealedHeader::new(header, header_hash); - - let (transactions, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip(); - - // Ommers can be missing - let mut ommers = Vec::new(); - if let Some((block_number, _)) = block_ommers.as_ref() { - if *block_number == main_block_number { - ommers = block_ommers.take().unwrap().1.ommers; - block_ommers = block_ommers_iter.next(); - } - }; - - // withdrawal can be missing - let shanghai_is_active = - self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp); - let mut withdrawals = Some(Withdrawals::default()); - if 
shanghai_is_active { - if let Some((block_number, _)) = block_withdrawals.as_ref() { - if *block_number == main_block_number { - withdrawals = Some(block_withdrawals.take().unwrap().1.withdrawals); - block_withdrawals = block_withdrawals_iter.next(); - } - } - } else { - withdrawals = None - } - - blocks.push(SealedBlockWithSenders { - block: SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals }, - }, - senders, - }) - } - - Ok(blocks) - } - /// Load shard and remove it. If list is empty, last shard was full or /// there are no shards at all. fn take_shard(&self, key: T::Key) -> ProviderResult> @@ -2998,52 +2728,48 @@ impl StateReader for DatabaseProvider { impl BlockExecutionWriter for DatabaseProvider { - fn take_block_and_execution_range( + fn take_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_transactions_from: StorageLocation, ) -> ProviderResult { - self.unwind_trie_state_range(range.clone())?; + let range = block + 1..=self.last_block_number()?; - // get blocks - let blocks = self.take_block_range(range.clone())?; - let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); + self.unwind_trie_state_range(range.clone())?; // get execution res let execution_state = self.take_state(range.clone())?; + let blocks = self.sealed_block_with_senders_range(range)?; + // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. 
- self.remove::(range)?; + self.remove_blocks_above(block, remove_transactions_from)?; // Update pipeline progress - if let Some(fork_number) = unwind_to { - self.update_pipeline_stages(fork_number, true)?; - } + self.update_pipeline_stages(block, true)?; Ok(Chain::new(blocks, execution_state, None)) } - fn remove_block_and_execution_range( + fn remove_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_transactions_from: StorageLocation, ) -> ProviderResult<()> { - self.unwind_trie_state_range(range.clone())?; + let range = block + 1..=self.last_block_number()?; - // get blocks - let blocks = self.take_block_range(range.clone())?; - let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); + self.unwind_trie_state_range(range.clone())?; // remove execution res - self.remove_state(range.clone())?; + self.remove_state(range)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. - self.remove::(range)?; + self.remove_blocks_above(block, remove_transactions_from)?; // Update pipeline progress - if let Some(block_number) = unwind_to { - self.update_pipeline_stages(block_number, true)?; - } + self.update_pipeline_stages(block, true)?; Ok(()) } @@ -3230,6 +2956,92 @@ impl BlockWriter Ok(()) } + fn remove_blocks_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()> { + let mut canonical_headers_cursor = self.tx.cursor_write::()?; + let mut rev_headers = canonical_headers_cursor.walk_back(None)?; + + while let Some(Ok((number, hash))) = rev_headers.next() { + if number <= block { + break + } + self.tx.delete::(hash, None)?; + rev_headers.delete_current()?; + } + self.remove::(block + 1..)?; + self.remove::(block + 1..)?; + + // First transaction to be removed + let unwind_tx_from = self + .tx + .get::(block)? 
+ .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; + + // Last transaction to be removed + let unwind_tx_to = self + .tx + .cursor_read::()? + .last()? + // shouldn't happen because this was OK above + .ok_or(ProviderError::BlockBodyIndicesNotFound(block))? + .1 + .last_tx_num(); + + if unwind_tx_from < unwind_tx_to { + for (hash, _) in self.transaction_hashes_by_range(unwind_tx_from..(unwind_tx_to + 1))? { + self.tx.delete::(hash, None)?; + } + } + + self.remove::(unwind_tx_from..)?; + + self.remove_bodies_above(block, remove_transactions_from)?; + + Ok(()) + } + + fn remove_bodies_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()> { + self.storage.writer().remove_block_bodies_above(self, block)?; + + // First transaction to be removed + let unwind_tx_from = self + .tx + .get::(block)? + .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; + + self.remove::(block + 1..)?; + self.remove::(unwind_tx_from..)?; + + if remove_transactions_from.database() { + self.remove::(unwind_tx_from..)?; + } + + if remove_transactions_from.static_files() { + let static_file_tx_num = self + .static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Transactions); + + if let Some(static_tx) = static_file_tx_num { + if static_tx >= unwind_tx_from { + self.static_file_provider + .latest_writer(StaticFileSegment::Transactions)? + .prune_transactions(static_tx - unwind_tx_from + 1, block)?; + } + } + } + + Ok(()) + } + /// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually fn append_blocks_with_state( &self, @@ -3381,79 +3193,3 @@ impl DBProvider for DatabaseProvider self.prune_modes_ref() } } - -/// Helper method to recover senders for any blocks in the db which do not have senders. 
This -/// compares the length of the input senders [`Vec`], with the length of given transactions [`Vec`], -/// and will add to the input senders vec if there are more transactions. -/// -/// NOTE: This will modify the input senders list, which is why a mutable reference is required. -fn recover_block_senders( - senders: &mut Vec<(u64, Address)>, - transactions: &[(u64, TransactionSigned)], - first_transaction: u64, - last_transaction: u64, -) -> ProviderResult<()> { - // Recover senders manually if not found in db - // NOTE: Transactions are always guaranteed to be in the database whereas - // senders might be pruned. - if senders.len() != transactions.len() { - if senders.len() > transactions.len() { - error!(target: "providers::db", senders=%senders.len(), transactions=%transactions.len(), - first_tx=%first_transaction, last_tx=%last_transaction, - "unexpected senders and transactions mismatch"); - } - let missing = transactions.len().saturating_sub(senders.len()); - senders.reserve(missing); - // Find all missing senders, their corresponding tx numbers and indexes to the original - // `senders` vector at which the recovered senders will be inserted. - let mut missing_senders = Vec::with_capacity(missing); - { - let mut senders = senders.iter().peekable(); - - // `transactions` contain all entries. `senders` contain _some_ of the senders for - // these transactions. Both are sorted and indexed by `TxNumber`. - // - // The general idea is to iterate on both `transactions` and `senders`, and advance - // the `senders` iteration only if it matches the current `transactions` entry's - // `TxNumber`. Otherwise, add the transaction to the list of missing senders. - for (i, (tx_number, transaction)) in transactions.iter().enumerate() { - if let Some((sender_tx_number, _)) = senders.peek() { - if sender_tx_number == tx_number { - // If current sender's `TxNumber` matches current transaction's - // `TxNumber`, advance the senders iterator. 
- senders.next(); - } else { - // If current sender's `TxNumber` doesn't match current transaction's - // `TxNumber`, add it to missing senders. - missing_senders.push((i, tx_number, transaction)); - } - } else { - // If there's no more senders left, but we're still iterating over - // transactions, add them to missing senders - missing_senders.push((i, tx_number, transaction)); - } - } - } - - // Recover senders - let recovered_senders = TransactionSigned::recover_signers( - missing_senders.iter().map(|(_, _, tx)| *tx).collect::>(), - missing_senders.len(), - ) - .ok_or(ProviderError::SenderRecoveryError)?; - - // Insert recovered senders along with tx numbers at the corresponding indexes to the - // original `senders` vector - for ((i, tx_number, _), sender) in missing_senders.into_iter().zip(recovered_senders) { - // Insert will put recovered senders at necessary positions and shift the rest - senders.insert(i, (*tx_number, sender)); - } - - // Debug assertions which are triggered during the test to ensure that all senders are - // present and sorted - debug_assert_eq!(senders.len(), transactions.len(), "missing one or more senders"); - debug_assert!(senders.iter().tuple_windows().all(|(a, b)| a.0 < b.0), "senders not sorted"); - } - - Ok(()) -} diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index c84534e7a5d..c2ce477051d 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -5,7 +5,6 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::SealedBlockWithSenders; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; -use std::ops::RangeInclusive; /// An enum that represents the storage location for a piece of data. 
#[derive(Debug, Copy, Clone, PartialEq, Eq)] @@ -33,16 +32,22 @@ impl StorageLocation { /// BlockExecution Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockExecutionWriter: BlockWriter + Send + Sync { - /// Take range of blocks and its execution result - fn take_block_and_execution_range( + /// Take all of the blocks above the provided number and their execution result + /// + /// The passed block number will stay in the database. + fn take_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_transactions_from: StorageLocation, ) -> ProviderResult; - /// Remove range of blocks and its execution result - fn remove_block_and_execution_range( + /// Remove all of the blocks above the provided number and their execution result + /// + /// The passed block number will stay in the database. + fn remove_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_transactions_from: StorageLocation, ) -> ProviderResult<()>; } @@ -81,6 +86,22 @@ pub trait BlockWriter: Send + Sync { write_transactions_to: StorageLocation, ) -> ProviderResult<()>; + /// Removes all blocks above the given block number from the database. + /// + /// Note: This does not remove state or execution data. + fn remove_blocks_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()>; + + /// Removes all block bodies above the given block number from the database. + fn remove_bodies_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()>; + /// Appends a batch of sealed blocks to the blockchain, including sender information, and /// updates the post-state. 
/// diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 3878cf2a9e3..30c5f0d5291 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -273,9 +273,7 @@ where // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number"); - self.database().remove_block_and_execution_range( - block_number + 1..=self.database().last_block_number()?, - )?; + self.database().remove_block_and_execution_above(block_number, StorageLocation::Both)?; // IMPORTANT: we use `highest_static_file_block.saturating_sub(block_number)` to make sure // we remove only what is ABOVE the block. @@ -287,10 +285,6 @@ where .get_writer(block_number, StaticFileSegment::Headers)? .prune_headers(highest_static_file_block.saturating_sub(block_number))?; - self.static_file() - .get_writer(block_number, StaticFileSegment::Transactions)? - .prune_transactions(total_txs, block_number)?; - if !self.database().prune_modes_ref().has_receipts_pruning() { self.static_file() .get_writer(block_number, StaticFileSegment::Receipts)? diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index 099f61f1bcb..d5228bdddf7 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -5,6 +5,7 @@ use reth_db::{ models::{StoredBlockOmmers, StoredBlockWithdrawals}, tables, transaction::DbTxMut, + DbTxUnwindExt, }; use reth_primitives_traits::{Block, BlockBody, FullNodePrimitives}; use reth_storage_errors::provider::ProviderResult; @@ -21,6 +22,13 @@ pub trait BlockBodyWriter { provider: &Provider, bodies: Vec<(BlockNumber, Option)>, ) -> ProviderResult<()>; + + /// Removes all block bodies above the given block number from the database. 
+ fn remove_block_bodies_above( + &self, + provider: &Provider, + block: BlockNumber, + ) -> ProviderResult<()>; } /// Trait that implements how chain-specific types are written to the storage. @@ -69,4 +77,15 @@ where Ok(()) } + + fn remove_block_bodies_above( + &self, + provider: &Provider, + block: BlockNumber, + ) -> ProviderResult<()> { + provider.tx_ref().unwind_table_by_num::(block)?; + provider.tx_ref().unwind_table_by_num::(block)?; + + Ok(()) + } } From 367478c6f1334876d1aed1c50319e9d714bb18e3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 21:21:28 +0100 Subject: [PATCH 614/970] chore: use hash ref (#12756) --- crates/net/network/src/transactions/mod.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index a4eef2fa99c..30c75f63e5d 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -736,7 +736,7 @@ where // Iterate through the transactions to propagate and fill the hashes and full // transaction for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { + if !peer.seen_transactions.contains(tx.tx_hash()) { // Only include if the peer hasn't seen the transaction full_transactions.push(&tx); } @@ -815,7 +815,7 @@ where hashes.extend(to_propagate) } else { for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { + if !peer.seen_transactions.contains(tx.tx_hash()) { // Include if the peer hasn't seen it hashes.push(&tx); } @@ -885,7 +885,7 @@ where for tx in &to_propagate { // Only proceed if the transaction is not in the peer's list of seen // transactions - if !peer.seen_transactions.contains(&tx.hash()) { + if !peer.seen_transactions.contains(tx.tx_hash()) { builder.push(tx); } } @@ -1486,8 +1486,8 @@ impl PropagateTransaction { Self { size, transaction } } - fn hash(&self) -> TxHash { - *self.transaction.tx_hash() + fn 
tx_hash(&self) -> &TxHash { + self.transaction.tx_hash() } } @@ -1678,9 +1678,9 @@ impl PooledTransactionsHashesBuilder { fn push(&mut self, tx: &PropagateTransaction) { match self { - Self::Eth66(msg) => msg.0.push(tx.hash()), + Self::Eth66(msg) => msg.0.push(*tx.tx_hash()), Self::Eth68(msg) => { - msg.hashes.push(tx.hash()); + msg.hashes.push(*tx.tx_hash()); msg.sizes.push(tx.size); msg.types.push(tx.transaction.transaction().tx_type().into()); } From 1061e46816e3a91212d40c6469adb81ea6d8c86c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 21:44:50 +0100 Subject: [PATCH 615/970] chore: use new is_broadcastable_in_full (#12757) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- crates/net/network/src/transactions/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 30c75f63e5d..20525325ec1 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1607,6 +1607,7 @@ impl FullTransactionsBuilder { /// /// If the transaction is unsuitable for broadcast or would exceed the softlimit, it is appended /// to list of pooled transactions, (e.g. 4844 transactions). + /// See also [`TxType::is_broadcastable_in_full`]. fn push(&mut self, transaction: &PropagateTransaction) { // Do not send full 4844 transaction hashes to peers. // @@ -1616,7 +1617,7 @@ impl FullTransactionsBuilder { // via `GetPooledTransactions`. 
// // From: - if transaction.transaction.transaction().tx_type().is_eip4844() { + if !transaction.transaction.transaction().tx_type().is_broadcastable_in_full() { self.pooled.push(transaction); return } From 6f6fb005ab681f97a80a0d8ce8e8a364f363d114 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 21 Nov 2024 23:56:18 +0100 Subject: [PATCH 616/970] chore: remove feature gated import (#12761) --- crates/primitives/src/transaction/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index d50aea14c46..ea436a92cb5 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,7 +1,5 @@ //! Transaction types. -#[cfg(any(test, feature = "reth-codec"))] -use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; use alloy_consensus::{ transaction::RlpEcdsaTx, SignableTransaction, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, @@ -606,11 +604,11 @@ impl reth_codecs::Compact for Transaction { // reading the full 8 bits (single byte) and match on this transaction type. 
let identifier = buf.get_u8(); match identifier { - EIP4844_TX_TYPE_ID => { + alloy_consensus::constants::EIP4844_TX_TYPE_ID => { let (tx, buf) = TxEip4844::from_compact(buf, buf.len()); (Self::Eip4844(tx), buf) } - EIP7702_TX_TYPE_ID => { + alloy_consensus::constants::EIP7702_TX_TYPE_ID => { let (tx, buf) = TxEip7702::from_compact(buf, buf.len()); (Self::Eip7702(tx), buf) } From 4442b5d6fa4b5e8f09b5467143e9429de9378e64 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 21 Nov 2024 16:40:29 -0600 Subject: [PATCH 617/970] feat: convert hash field to `OnceLock` on `TransactionSigned` (#12596) Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- .../src/commands/debug_cmd/build_block.rs | 2 +- crates/chain-state/src/notifications.rs | 20 ++++-- crates/engine/util/src/reorg.rs | 2 +- crates/ethereum/payload/src/lib.rs | 2 +- crates/evm/execution-types/src/chain.rs | 2 +- crates/net/network/src/transactions/mod.rs | 2 +- crates/net/network/tests/it/txgossip.rs | 2 +- crates/optimism/rpc/src/eth/block.rs | 2 +- crates/optimism/rpc/src/eth/transaction.rs | 3 +- crates/primitives/src/alloy_compat.rs | 2 +- crates/primitives/src/transaction/mod.rs | 61 ++++++++++++------- crates/primitives/src/transaction/pooled.rs | 34 ++++++----- crates/primitives/src/transaction/sidecar.rs | 7 ++- crates/primitives/src/transaction/variant.rs | 4 +- .../src/segments/user/transaction_lookup.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 2 +- crates/rpc/rpc/src/debug.rs | 6 +- crates/rpc/rpc/src/eth/helpers/block.rs | 2 +- crates/rpc/rpc/src/eth/helpers/types.rs | 3 +- crates/stages/stages/src/stages/tx_lookup.rs | 2 +- .../provider/src/providers/database/mod.rs | 7 ++- .../src/providers/database/provider.rs | 6 +- .../storage/provider/src/test_utils/blocks.rs | 2 +- .../transaction-pool/src/blobstore/tracker.rs | 12 ++-- crates/transaction-pool/src/maintain.rs | 
4 +- .../transaction-pool/src/test_utils/mock.rs | 2 +- docs/crates/network.md | 4 +- examples/db-access/src/main.rs | 13 ++-- 29 files changed, 127 insertions(+), 87 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index adb2c83b1b2..aa89b4112c3 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -201,7 +201,7 @@ impl> Command { let encoded_length = pooled.encode_2718_len(); // insert the blob into the store - blob_store.insert(transaction.hash, sidecar)?; + blob_store.insert(transaction.hash(), sidecar)?; encoded_length } diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 865f2bd6584..03d740d3d13 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -196,7 +196,7 @@ impl Stream for ForkChoiceStream { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::B256; + use alloy_primitives::{b256, B256}; use reth_execution_types::ExecutionOutcome; use reth_primitives::{Receipt, Receipts, TransactionSigned, TxType}; @@ -332,7 +332,11 @@ mod tests { block_receipts[0].0, BlockReceipts { block: block1.num_hash(), - tx_receipts: vec![(B256::default(), receipt1)] + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + receipt1 + )] } ); @@ -403,7 +407,11 @@ mod tests { block_receipts[0].0, BlockReceipts { block: old_block1.num_hash(), - tx_receipts: vec![(B256::default(), old_receipt)] + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + old_receipt + )] } ); // Confirm this is from the reverted segment. 
@@ -415,7 +423,11 @@ mod tests { block_receipts[1].0, BlockReceipts { block: new_block1.num_hash(), - tx_receipts: vec![(B256::default(), new_receipt)] + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + new_receipt + )] } ); // Confirm this is from the committed segment. diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index ec69bbd0024..fd80fa9e165 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -339,7 +339,7 @@ where // Treat error as fatal Err(error) => { return Err(RethError::Execution(BlockExecutionError::Validation( - BlockValidationError::EVM { hash: tx.hash, error: Box::new(error) }, + BlockValidationError::EVM { hash: tx.hash(), error: Box::new(error) }, ))) } }; diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 80f6786c404..ac6427caf36 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -399,7 +399,7 @@ where // grab the blob sidecars from the executed txs blob_sidecars = pool .get_all_blobs_exact( - executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), + executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash()).collect(), ) .map_err(PayloadBuilderError::other)?; diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 2c672884d60..200a37423cf 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -441,7 +441,7 @@ impl ChainBlocks<'_> { /// Returns an iterator over all transaction hashes in the block #[inline] pub fn transaction_hashes(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.transactions().map(|tx| tx.hash)) + self.blocks.values().flat_map(|block| block.transactions().map(|tx| tx.hash())) } } diff --git a/crates/net/network/src/transactions/mod.rs 
b/crates/net/network/src/transactions/mod.rs index 20525325ec1..1c93ae54971 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -2178,7 +2178,7 @@ mod tests { .await; assert!(!pool.is_empty()); - assert!(pool.get(&signed_tx.hash).is_some()); + assert!(pool.get(signed_tx.hash_ref()).is_some()); handle.terminate().await; } diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index 2e2ee4a031a..98624c4c609 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -95,7 +95,7 @@ async fn test_4844_tx_gossip_penalization() { let peer0_reputation_after = peer1.peer_handle().peer_by_id(*peer0.peer_id()).await.unwrap().reputation(); assert_ne!(peer0_reputation_before, peer0_reputation_after); - assert_eq!(received, txs[1].transaction().hash); + assert_eq!(received, txs[1].transaction().hash()); // this will return an [`Empty`] error because blob txs are disallowed to be broadcasted assert!(peer1_tx_listener.try_recv().is_err()); diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 6678fbe5df4..22d26e824b3 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -48,7 +48,7 @@ where .enumerate() .map(|(idx, (ref tx, receipt))| -> Result<_, _> { let meta = TransactionMeta { - tx_hash: tx.hash, + tx_hash: tx.hash(), index: idx as u64, block_hash, block_number, diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 11e33817229..dad151c41c4 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -84,7 +84,8 @@ where tx_info: TransactionInfo, ) -> Result { let from = tx.signer(); - let TransactionSigned { transaction, signature, hash } = tx.into_signed(); + let hash = tx.hash(); + let TransactionSigned { transaction, signature, .. 
} = tx.into_signed(); let mut deposit_receipt_version = None; let mut deposit_nonce = None; diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 462b27f9c73..a72c83996c0 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -156,7 +156,7 @@ impl TryFrom for TransactionSigned { _ => return Err(ConversionError::Custom("unknown transaction type".to_string())), }; - Ok(Self { transaction, signature, hash }) + Ok(Self { transaction, signature, hash: hash.into() }) } } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index ea436a92cb5..5900abb42c9 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -14,11 +14,14 @@ use alloy_primitives::{ keccak256, Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, }; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; -use core::mem; +use core::{ + hash::{Hash, Hasher}, + mem, +}; use derive_more::{AsRef, Deref}; use once_cell as _; #[cfg(not(feature = "std"))] -use once_cell::sync::Lazy as LazyLock; +use once_cell::sync::{Lazy as LazyLock, OnceCell as OnceLock}; #[cfg(feature = "optimism")] use op_alloy_consensus::DepositTransaction; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; @@ -26,7 +29,7 @@ use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; use signature::decode_with_eip155_chain_id; #[cfg(feature = "std")] -use std::sync::LazyLock; +use std::sync::{LazyLock, OnceLock}; pub use error::{ InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, @@ -1078,10 +1081,11 @@ impl From for TransactionSignedNoHash { /// Signed transaction. 
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] +#[derive(Debug, Clone, Eq, AsRef, Deref, Serialize, Deserialize)] pub struct TransactionSigned { /// Transaction hash - pub hash: TxHash, + #[serde(skip)] + pub hash: OnceLock, /// The transaction signature values pub signature: Signature, /// Raw transaction info @@ -1106,6 +1110,21 @@ impl AsRef for TransactionSigned { } } +impl Hash for TransactionSigned { + fn hash(&self, state: &mut H) { + self.signature.hash(state); + self.transaction.hash(state); + } +} + +impl PartialEq for TransactionSigned { + fn eq(&self, other: &Self) -> bool { + self.signature == other.signature && + self.transaction == other.transaction && + self.hash_ref() == other.hash_ref() + } +} + // === impl TransactionSigned === impl TransactionSigned { @@ -1120,13 +1139,13 @@ impl TransactionSigned { } /// Transaction hash. Used to identify transaction. - pub const fn hash(&self) -> TxHash { - self.hash + pub fn hash(&self) -> TxHash { + *self.hash_ref() } /// Reference to transaction hash. Used to identify transaction. - pub const fn hash_ref(&self) -> &TxHash { - &self.hash + pub fn hash_ref(&self) -> &TxHash { + self.hash.get_or_init(|| self.recalculate_hash()) } /// Recover signer from signature and hash. @@ -1259,9 +1278,7 @@ impl TransactionSigned { /// /// This will also calculate the transaction hash using its encoding. pub fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { - let mut initial_tx = Self { transaction, hash: Default::default(), signature }; - initial_tx.hash = initial_tx.recalculate_hash(); - initial_tx + Self { transaction, signature, hash: Default::default() } } /// Decodes legacy transaction from the data buffer into a tuple. 
@@ -1321,7 +1338,8 @@ impl TransactionSigned { // so decoding methods do not need to manually advance the buffer pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result { let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?; - let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature }; + let signed = + Self { transaction: Transaction::Legacy(transaction), hash: hash.into(), signature }; Ok(signed) } } @@ -1330,7 +1348,7 @@ impl SignedTransaction for TransactionSigned { type Transaction = Transaction; fn tx_hash(&self) -> &TxHash { - &self.hash + self.hash_ref() } fn transaction(&self) -> &Self::Transaction { @@ -1608,19 +1626,19 @@ impl Decodable2718 for TransactionSigned { TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), TxType::Eip2930 => { let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash: hash.into() }) } TxType::Eip1559 => { let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash: hash.into() }) } TxType::Eip7702 => { let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash: hash.into() }) } TxType::Eip4844 => { let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash: hash.into() }) } #[cfg(feature = "optimism")] TxType::Deposit => Ok(Self::from_transaction_and_signature( @@ -1661,7 +1679,6 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { 
#[cfg(feature = "optimism")] let signature = if transaction.is_deposit() { TxDeposit::signature() } else { signature }; - Ok(Self::from_transaction_and_signature(transaction, signature)) } } @@ -1900,7 +1917,7 @@ pub mod serde_bincode_compat { impl<'a> From<&'a super::TransactionSigned> for TransactionSigned<'a> { fn from(value: &'a super::TransactionSigned) -> Self { Self { - hash: value.hash, + hash: value.hash(), signature: value.signature, transaction: Transaction::from(&value.transaction), } @@ -1910,7 +1927,7 @@ pub mod serde_bincode_compat { impl<'a> From> for super::TransactionSigned { fn from(value: TransactionSigned<'a>) -> Self { Self { - hash: value.hash, + hash: value.hash.into(), signature: value.signature, transaction: value.transaction.into(), } @@ -2203,7 +2220,7 @@ mod tests { ) { let expected = TransactionSigned::from_transaction_and_signature(transaction, signature); if let Some(hash) = hash { - assert_eq!(hash, expected.hash); + assert_eq!(hash, expected.hash()); } assert_eq!(bytes.len(), expected.length()); diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 86cd40a8fe6..05ad4afa87f 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -69,17 +69,18 @@ impl PooledTransactionsElement { /// [`PooledTransactionsElement`]. Since [`BlobTransaction`] is disallowed to be broadcasted on /// p2p, return an err if `tx` is [`Transaction::Eip4844`]. pub fn try_from_broadcast(tx: TransactionSigned) -> Result { + let hash = tx.hash(); match tx { - TransactionSigned { transaction: Transaction::Legacy(tx), signature, hash } => { + TransactionSigned { transaction: Transaction::Legacy(tx), signature, .. } => { Ok(Self::Legacy { transaction: tx, signature, hash }) } - TransactionSigned { transaction: Transaction::Eip2930(tx), signature, hash } => { + TransactionSigned { transaction: Transaction::Eip2930(tx), signature, .. 
} => { Ok(Self::Eip2930 { transaction: tx, signature, hash }) } - TransactionSigned { transaction: Transaction::Eip1559(tx), signature, hash } => { + TransactionSigned { transaction: Transaction::Eip1559(tx), signature, .. } => { Ok(Self::Eip1559 { transaction: tx, signature, hash }) } - TransactionSigned { transaction: Transaction::Eip7702(tx), signature, hash } => { + TransactionSigned { transaction: Transaction::Eip7702(tx), signature, .. } => { Ok(Self::Eip7702 { transaction: tx, signature, hash }) } // Not supported because missing blob sidecar @@ -99,9 +100,10 @@ impl PooledTransactionsElement { tx: TransactionSigned, sidecar: BlobTransactionSidecar, ) -> Result { + let hash = tx.hash(); Ok(match tx { // If the transaction is an EIP-4844 transaction... - TransactionSigned { transaction: Transaction::Eip4844(tx), signature, hash } => { + TransactionSigned { transaction: Transaction::Eip4844(tx), signature, .. } => { // Construct a `PooledTransactionsElement::BlobTransaction` with provided sidecar. Self::BlobTransaction(BlobTransaction { signature, @@ -187,23 +189,25 @@ impl PooledTransactionsElement { /// Returns the inner [`TransactionSigned`]. 
pub fn into_transaction(self) -> TransactionSigned { match self { - Self::Legacy { transaction, signature, hash } => { - TransactionSigned { transaction: Transaction::Legacy(transaction), signature, hash } - } + Self::Legacy { transaction, signature, hash } => TransactionSigned { + transaction: Transaction::Legacy(transaction), + signature, + hash: hash.into(), + }, Self::Eip2930 { transaction, signature, hash } => TransactionSigned { transaction: Transaction::Eip2930(transaction), signature, - hash, + hash: hash.into(), }, Self::Eip1559 { transaction, signature, hash } => TransactionSigned { transaction: Transaction::Eip1559(transaction), signature, - hash, + hash: hash.into(), }, Self::Eip7702 { transaction, signature, hash } => TransactionSigned { transaction: Transaction::Eip7702(transaction), signature, - hash, + hash: hash.into(), }, Self::BlobTransaction(blob_tx) => blob_tx.into_parts().0, } @@ -460,7 +464,7 @@ impl Decodable2718 for PooledTransactionsElement { } tx_type => { let typed_tx = TransactionSigned::typed_decode(tx_type, buf)?; - + let hash = typed_tx.hash(); match typed_tx.transaction { Transaction::Legacy(_) => Err(RlpError::Custom( "legacy transactions should not be a result of typed decoding", @@ -473,17 +477,17 @@ impl Decodable2718 for PooledTransactionsElement { Transaction::Eip2930(tx) => Ok(Self::Eip2930 { transaction: tx, signature: typed_tx.signature, - hash: typed_tx.hash, + hash }), Transaction::Eip1559(tx) => Ok(Self::Eip1559 { transaction: tx, signature: typed_tx.signature, - hash: typed_tx.hash, + hash }), Transaction::Eip7702(tx) => Ok(Self::Eip7702 { transaction: tx, signature: typed_tx.signature, - hash: typed_tx.hash, + hash }), #[cfg(feature = "optimism")] Transaction::Deposit(_) => Err(RlpError::Custom("Optimism deposit transaction cannot be decoded to PooledTransactionsElement").into()) diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 48a02f4e740..ec8c9b7f0eb 
100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -31,7 +31,8 @@ impl BlobTransaction { tx: TransactionSigned, sidecar: BlobTransactionSidecar, ) -> Result { - let TransactionSigned { transaction, signature, hash } = tx; + let hash = tx.hash(); + let TransactionSigned { transaction, signature, .. } = tx; match transaction { Transaction::Eip4844(transaction) => Ok(Self { hash, @@ -39,7 +40,7 @@ impl BlobTransaction { signature, }), transaction => { - let tx = TransactionSigned { transaction, signature, hash }; + let tx = TransactionSigned { transaction, signature, hash: hash.into() }; Err((tx, sidecar)) } } @@ -61,7 +62,7 @@ impl BlobTransaction { pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) { let transaction = TransactionSigned { transaction: Transaction::Eip4844(self.transaction.tx), - hash: self.hash, + hash: self.hash.into(), signature: self.signature, }; diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs index 888c83946ca..dd47df9a869 100644 --- a/crates/primitives/src/transaction/variant.rs +++ b/crates/primitives/src/transaction/variant.rs @@ -36,8 +36,8 @@ impl TransactionSignedVariant { pub fn hash(&self) -> B256 { match self { Self::SignedNoHash(tx) => tx.hash(), - Self::Signed(tx) => tx.hash, - Self::SignedEcRecovered(tx) => tx.hash, + Self::Signed(tx) => tx.hash(), + Self::SignedEcRecovered(tx) => tx.hash(), } } diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index 2df8cccf305..ada4019302e 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -142,7 +142,7 @@ mod tests { for block in &blocks { tx_hash_numbers.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { - tx_hash_numbers.push((transaction.hash, 
tx_hash_numbers.len() as u64)); + tx_hash_numbers.push((transaction.hash(), tx_hash_numbers.len() as u64)); } } let tx_hash_numbers_len = tx_hash_numbers.len(); diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 1eade554fc1..d7e74c37b56 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -628,7 +628,7 @@ pub trait Call: LoadState> + SpawnBlocking { cfg.clone(), block_env.clone(), block_txs, - tx.hash, + tx.hash(), )?; let env = EnvWithHandlerCfg::new_with_cfg_env( diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 104042d17a2..a1e6084da55 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -204,7 +204,7 @@ pub trait Trace: LoadState> { cfg.clone(), block_env.clone(), block_txs, - tx.hash, + tx.hash(), )?; let env = EnvWithHandlerCfg::new_with_cfg_env( diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 78040b48c5f..dd6bf9bbc24 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -107,7 +107,7 @@ where let mut transactions = block.transactions_with_sender().enumerate().peekable(); let mut inspector = None; while let Some((index, (signer, tx))) = transactions.next() { - let tx_hash = tx.hash; + let tx_hash = tx.hash(); let env = EnvWithHandlerCfg { env: Env::boxed( @@ -255,7 +255,7 @@ where cfg.clone(), block_env.clone(), block_txs, - tx.hash, + tx.hash(), )?; let env = EnvWithHandlerCfg { @@ -274,7 +274,7 @@ where Some(TransactionContext { block_hash: Some(block_hash), tx_index: Some(index), - tx_hash: Some(tx.hash), + tx_hash: Some(tx.hash()), }), &mut None, ) diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index fd3b9db9da2..bc1e9344799 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -42,7 
+42,7 @@ where .enumerate() .map(|(idx, (tx, receipt))| { let meta = TransactionMeta { - tx_hash: tx.hash, + tx_hash: tx.hash(), index: idx as u64, block_hash, block_number, diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 8f135a9103b..157213b54e6 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -41,7 +41,8 @@ where tx_info: TransactionInfo, ) -> Result { let from = tx.signer(); - let TransactionSigned { transaction, signature, hash } = tx.into_signed(); + let hash = tx.hash(); + let TransactionSigned { transaction, signature, .. } = tx.into_signed(); let inner: TxEnvelope = match transaction { reth_primitives::Transaction::Legacy(tx) => { diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 3fdcbd0da64..5208cc936ce 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -383,7 +383,7 @@ mod tests { for block in &blocks[..=max_processed_block] { for transaction in &block.body.transactions { if block.number > max_pruned_block { - tx_hash_numbers.push((transaction.hash, tx_hash_number)); + tx_hash_numbers.push((transaction.hash(), tx_hash_number)); } tx_hash_number += 1; } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 354eb10c103..7d94fb98a80 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -725,7 +725,10 @@ mod tests { provider.transaction_sender(0), Ok(Some(sender)) if sender == block.body.transactions[0].recover_signer().unwrap() ); - assert_matches!(provider.transaction_id(block.body.transactions[0].hash), Ok(Some(0))); + assert_matches!( + provider.transaction_id(block.body.transactions[0].hash()), + Ok(Some(0)) + ); } { @@ -743,7 +746,7 @@ mod tests { Ok(_) ); 
assert_matches!(provider.transaction_sender(0), Ok(None)); - assert_matches!(provider.transaction_id(block.body.transactions[0].hash), Ok(None)); + assert_matches!(provider.transaction_id(block.body.transactions[0].hash()), Ok(None)); } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8c390b06c08..d35e0a971a3 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -551,7 +551,7 @@ impl DatabaseProvider { .map(|tx| match transaction_kind { TransactionVariant::NoHash => TransactionSigned { // Caller explicitly asked for no hash, so we don't calculate it - hash: B256::ZERO, + hash: Default::default(), signature: tx.signature, transaction: tx.transaction, }, @@ -1500,7 +1500,7 @@ impl> Transaction fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { Ok(self.transaction_by_id_no_hash(id)?.map(|tx| TransactionSigned { - hash, + hash: hash.into(), signature: tx.signature, transaction: tx.transaction, })) @@ -1518,7 +1518,7 @@ impl> Transaction if let Some(transaction_id) = self.transaction_id(tx_hash)? { if let Some(tx) = self.transaction_by_id_no_hash(transaction_id)? 
{ let transaction = TransactionSigned { - hash: tx_hash, + hash: tx_hash.into(), signature: tx.signature, transaction: tx.transaction, }; diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 3259eee2bfb..2b8dc0f85ca 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -89,7 +89,7 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo ), body: BlockBody { transactions: vec![TransactionSigned { - hash: hex!("3541dd1d17e76adeb25dcf2b0a9b60a1669219502e58dcf26a2beafbfb550397").into(), + hash: b256!("3541dd1d17e76adeb25dcf2b0a9b60a1669219502e58dcf26a2beafbfb550397").into(), signature: Signature::new( U256::from_str( "51983300959770368863831494747186777928121405155922056726144551509338672451120", diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index 63d6e30eea0..d58abe9b462 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -43,7 +43,7 @@ impl BlobStoreCanonTracker { .body .transactions() .filter(|tx| tx.transaction.is_eip4844()) - .map(|tx| tx.hash); + .map(|tx| tx.hash()); (*num, iter) }); self.add_blocks(blob_txs); @@ -128,18 +128,18 @@ mod tests { body: BlockBody { transactions: vec![ TransactionSigned { - hash: tx1_hash, + hash: tx1_hash.into(), transaction: Transaction::Eip4844(Default::default()), ..Default::default() }, TransactionSigned { - hash: tx2_hash, + hash: tx2_hash.into(), transaction: Transaction::Eip4844(Default::default()), ..Default::default() }, // Another transaction that is not EIP-4844 TransactionSigned { - hash: B256::random(), + hash: B256::random().into(), transaction: Transaction::Eip7702(Default::default()), ..Default::default() }, @@ -161,12 +161,12 @@ mod tests { body: BlockBody { transactions: vec![ TransactionSigned { - hash: tx3_hash, + hash: tx3_hash.into(), 
transaction: Transaction::Eip1559(Default::default()), ..Default::default() }, TransactionSigned { - hash: tx2_hash, + hash: tx2_hash.into(), transaction: Transaction::Eip2930(Default::default()), ..Default::default() }, diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 271c63a388a..47e70e91433 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -317,7 +317,7 @@ pub async fn maintain_transaction_pool( // find all transactions that were mined in the old chain but not in the new chain let pruned_old_transactions = old_blocks .transactions_ecrecovered() - .filter(|tx| !new_mined_transactions.contains(&tx.hash)) + .filter(|tx| !new_mined_transactions.contains(tx.hash_ref())) .filter_map(|tx| { if tx.is_eip4844() { // reorged blobs no longer include the blob, which is necessary for @@ -325,7 +325,7 @@ pub async fn maintain_transaction_pool( // been validated previously, we still need the blob in order to // accurately set the transaction's // encoded-length which is propagated over the network. 
- pool.get_blob(tx.hash) + pool.get_blob(TransactionSigned::hash(&tx)) .ok() .flatten() .map(Arc::unwrap_or_clone) diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 344781b1f58..009543642ff 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -911,7 +911,7 @@ impl From for MockTransaction { impl From for TransactionSignedEcRecovered { fn from(tx: MockTransaction) -> Self { let signed_tx = TransactionSigned { - hash: *tx.hash(), + hash: (*tx.hash()).into(), signature: Signature::test_signature(), transaction: tx.clone().into(), }; diff --git a/docs/crates/network.md b/docs/crates/network.md index a6ac2430565..be2c7cb3b14 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -991,9 +991,9 @@ fn import_transactions(&mut self, peer_id: PeerId, transactions: Vec { // transaction was already inserted entry.get_mut().push(peer_id); diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 0f7d1a269f3..f3b7fdf5842 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -92,16 +92,17 @@ fn txs_provider_example(provider: T) -> eyre::Result<() // Can query the tx by hash let tx_by_hash = - provider.transaction_by_hash(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; + provider.transaction_by_hash(tx.hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(tx, tx_by_hash); // Can query the tx by hash with info about the block it was included in - let (tx, meta) = - provider.transaction_by_hash_with_meta(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; - assert_eq!(tx.hash, meta.tx_hash); + let (tx, meta) = provider + .transaction_by_hash_with_meta(tx.hash())? 
+ .ok_or(eyre::eyre!("txhash not found"))?; + assert_eq!(tx.hash(), meta.tx_hash); // Can reverse lookup the key too - let id = provider.transaction_id(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; + let id = provider.transaction_id(tx.hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(id, txid); // Can find the block of a transaction given its key @@ -171,7 +172,7 @@ fn receipts_provider_example Date: Fri, 22 Nov 2024 00:29:08 +0100 Subject: [PATCH 618/970] chore: remove txext trait (#12760) --- crates/net/network/src/transactions/mod.rs | 6 +-- crates/primitives-traits/src/lib.rs | 2 +- .../primitives-traits/src/transaction/mod.rs | 29 +++---------- .../src/transaction/signed.rs | 42 +++++-------------- crates/primitives/src/transaction/mod.rs | 37 ++++------------ 5 files changed, 27 insertions(+), 89 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 1c93ae54971..9628dbb4f1b 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -49,7 +49,7 @@ use reth_network_p2p::{ use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; use reth_primitives::{PooledTransactionsElement, TransactionSigned}; -use reth_primitives_traits::{SignedTransaction, TransactionExt, TxType}; +use reth_primitives_traits::{SignedTransaction, TxType}; use reth_tokio_util::EventStream; use reth_transaction_pool::{ error::{PoolError, PoolResult}, @@ -1617,7 +1617,7 @@ impl FullTransactionsBuilder { // via `GetPooledTransactions`. 
// // From: - if !transaction.transaction.transaction().tx_type().is_broadcastable_in_full() { + if !transaction.transaction.tx_type().is_broadcastable_in_full() { self.pooled.push(transaction); return } @@ -1683,7 +1683,7 @@ impl PooledTransactionsHashesBuilder { Self::Eth68(msg) => { msg.hashes.push(*tx.tx_hash()); msg.sizes.push(tx.size); - msg.types.push(tx.transaction.transaction().tx_type().into()); + msg.types.push(tx.transaction.tx_type().into()); } } } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 5c969152d8d..c149c6cd7e4 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -29,7 +29,7 @@ pub use transaction::{ execute::FillTxEnv, signed::{FullSignedTx, SignedTransaction}, tx_type::{FullTxType, TxType}, - FullTransaction, Transaction, TransactionExt, + FullTransaction, Transaction, }; mod integer_list; diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 7647c94496f..f176382146b 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -4,16 +4,13 @@ pub mod execute; pub mod signed; pub mod tx_type; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; use core::{fmt, hash::Hash}; -use alloy_primitives::B256; - -use crate::{FullTxType, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, TxType}; - /// Helper trait that unifies all behaviour required by transaction to support full node operations. -pub trait FullTransaction: Transaction + MaybeCompact {} +pub trait FullTransaction: Transaction + MaybeCompact {} -impl FullTransaction for T where T: Transaction + MaybeCompact {} +impl FullTransaction for T where T: Transaction + MaybeCompact {} /// Abstraction of a transaction. 
pub trait Transaction: @@ -26,7 +23,7 @@ pub trait Transaction: + Eq + PartialEq + Hash - + TransactionExt + + alloy_consensus::Transaction + InMemorySize + MaybeSerde + MaybeArbitrary @@ -43,25 +40,9 @@ impl Transaction for T where + Eq + PartialEq + Hash - + TransactionExt + + alloy_consensus::Transaction + InMemorySize + MaybeSerde + MaybeArbitrary { } - -/// Extension trait of [`alloy_consensus::Transaction`]. -#[auto_impl::auto_impl(&, Arc)] -pub trait TransactionExt: alloy_consensus::Transaction { - /// Transaction envelope type ID. - type Type: TxType; - - /// Heavy operation that return signature hash over rlp encoded transaction. - /// It is only for signature signing or signer recovery. - fn signature_hash(&self) -> B256; - - /// Returns the transaction type. - fn tx_type(&self) -> Self::Type { - Self::Type::try_from(self.ty()).expect("should decode tx type id") - } -} diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 563f3a6f336..64acbd3415c 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -1,25 +1,15 @@ //! API of a signed transaction. +use crate::{FillTxEnv, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, TxType}; use alloc::fmt; -use core::hash::Hash; - use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; - -use crate::{ - FillTxEnv, FullTransaction, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, Transaction, -}; +use core::hash::Hash; /// Helper trait that unifies all behaviour required by block to support full node operations. 
-pub trait FullSignedTx: - SignedTransaction + FillTxEnv + MaybeCompact -{ -} +pub trait FullSignedTx: SignedTransaction + FillTxEnv + MaybeCompact {} -impl FullSignedTx for T where - T: SignedTransaction + FillTxEnv + MaybeCompact -{ -} +impl FullSignedTx for T where T: SignedTransaction + FillTxEnv + MaybeCompact {} /// A signed transaction. #[auto_impl::auto_impl(&, Arc)] @@ -42,15 +32,17 @@ pub trait SignedTransaction: + MaybeArbitrary + InMemorySize { - /// Unsigned transaction type. - type Transaction: Transaction; + /// Transaction envelope type ID. + type Type: TxType; + + /// Returns the transaction type. + fn tx_type(&self) -> Self::Type { + Self::Type::try_from(self.ty()).expect("should decode tx type id") + } /// Returns reference to transaction hash. fn tx_hash(&self) -> &TxHash; - /// Returns reference to transaction. - fn transaction(&self) -> &Self::Transaction; - /// Returns reference to signature. fn signature(&self) -> &PrimitiveSignature; @@ -78,15 +70,3 @@ pub trait SignedTransaction: keccak256(self.encoded_2718()) } } - -/// Helper trait used in testing. -#[cfg(feature = "test-utils")] -pub trait SignedTransactionTesting: SignedTransaction { - /// Create a new signed transaction from a transaction and its signature. - /// - /// This will also calculate the transaction hash using its encoding. - fn from_transaction_and_signature( - transaction: Self::Transaction, - signature: PrimitiveSignature, - ) -> Self; -} diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 5900abb42c9..1e313ca8b2b 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,5 +1,6 @@ //! Transaction types. 
+use alloc::vec::Vec; use alloy_consensus::{ transaction::RlpEcdsaTx, SignableTransaction, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, @@ -24,21 +25,23 @@ use once_cell as _; use once_cell::sync::{Lazy as LazyLock, OnceCell as OnceLock}; #[cfg(feature = "optimism")] use op_alloy_consensus::DepositTransaction; +#[cfg(feature = "optimism")] +use op_alloy_consensus::TxDeposit; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; -use reth_primitives_traits::InMemorySize; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use revm_primitives::{AuthorizationList, TxEnv}; use serde::{Deserialize, Serialize}; use signature::decode_with_eip155_chain_id; #[cfg(feature = "std")] use std::sync::{LazyLock, OnceLock}; +pub use compat::FillTxEnv; pub use error::{ InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, }; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; pub use sidecar::BlobTransaction; - -pub use compat::FillTxEnv; pub use signature::{recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; pub use variant::TransactionSignedVariant; @@ -58,12 +61,6 @@ pub mod signature; pub(crate) mod util; mod variant; -use alloc::vec::Vec; -#[cfg(feature = "optimism")] -use op_alloy_consensus::TxDeposit; -use reth_primitives_traits::{transaction::TransactionExt, SignedTransaction}; -use revm_primitives::{AuthorizationList, TxEnv}; - /// Either a transaction hash or number. 
pub type TxHashOrNumber = BlockHashOrNumber; @@ -839,22 +836,6 @@ impl alloy_consensus::Transaction for Transaction { } } -impl TransactionExt for Transaction { - type Type = TxType; - - fn signature_hash(&self) -> B256 { - match self { - Self::Legacy(tx) => tx.signature_hash(), - Self::Eip2930(tx) => tx.signature_hash(), - Self::Eip1559(tx) => tx.signature_hash(), - Self::Eip4844(tx) => tx.signature_hash(), - Self::Eip7702(tx) => tx.signature_hash(), - #[cfg(feature = "optimism")] - _ => todo!("use op type for op"), - } - } -} - /// Signed transaction without its Hash. Used type for inserting into the DB. /// /// This can by converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`]. @@ -1345,16 +1326,12 @@ impl TransactionSigned { } impl SignedTransaction for TransactionSigned { - type Transaction = Transaction; + type Type = TxType; fn tx_hash(&self) -> &TxHash { self.hash_ref() } - fn transaction(&self) -> &Self::Transaction { - &self.transaction - } - fn signature(&self) -> &Signature { &self.signature } From 0eaef1f1dcbe229b5964cf1622b3018b4a1f80b9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 00:48:38 +0100 Subject: [PATCH 619/970] chore: rm unused error variants (#12763) --- crates/storage/errors/src/provider.rs | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index b6fcee545d5..152427a128b 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,6 +1,6 @@ use crate::{db::DatabaseError, lockfile::StorageLockError, writer::UnifiedStorageWriterError}; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; use derive_more::Display; use reth_primitives::{GotExpected, StaticFileSegment, TxHashOrNumber}; @@ -81,15 +81,6 @@ pub enum 
ProviderError { /// Unable to find the safe block. #[display("safe block does not exist")] SafeBlockNotFound, - /// Mismatch of sender and transaction. - #[display("mismatch of sender and transaction id {tx_id}")] - MismatchOfTransactionAndSenderId { - /// The transaction ID. - tx_id: TxNumber, - }, - /// Block body wrong transaction count. - #[display("stored block indices does not match transaction count")] - BlockBodyTransactionCount, /// Thrown when the cache service task dropped. #[display("cache service task stopped")] CacheServiceUnavailable, @@ -139,9 +130,6 @@ pub enum ProviderError { /// Static File Provider was initialized as read-only. #[display("cannot get a writer on a read-only environment.")] ReadOnlyStaticFileAccess, - /// Error encountered when the block number conversion from U256 to u64 causes an overflow. - #[display("failed to convert block number U256 to u64: {_0}")] - BlockNumberOverflow(U256), /// Consistent view error. #[display("failed to initialize consistent view: {_0}")] ConsistentView(Box), From f211aacf551afbf707b7fb6a40d17c8d2264110e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 01:07:04 +0100 Subject: [PATCH 620/970] chore: rm tx alias re-export (#12762) --- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/transaction/mod.rs | 4 ---- crates/storage/errors/src/provider.rs | 20 ++++++++------------ 3 files changed, 9 insertions(+), 17 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index c46c437dd71..7999588e49d 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -48,7 +48,7 @@ pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, BlobTransaction, InvalidTransactionError, PooledTransactionsElement, PooledTransactionsElementEcRecovered, Transaction, TransactionMeta, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxHashOrNumber, TxType, + 
TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, }; // Re-exports diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 1e313ca8b2b..1ac7b4394e0 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -6,7 +6,6 @@ use alloy_consensus::{ TxEip4844, TxEip7702, TxLegacy, }; use alloy_eips::{ - eip1898::BlockHashOrNumber, eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, eip2930::AccessList, eip7702::SignedAuthorization, @@ -61,9 +60,6 @@ pub mod signature; pub(crate) mod util; mod variant; -/// Either a transaction hash or number. -pub type TxHashOrNumber = BlockHashOrNumber; - /// Expected number of transactions where we can expect a speed-up by recovering the senders in /// parallel. pub static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 152427a128b..9e6720b8440 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,13 +1,9 @@ use crate::{db::DatabaseError, lockfile::StorageLockError, writer::UnifiedStorageWriterError}; -use alloy_eips::BlockHashOrNumber; +use alloc::{boxed::Box, string::String}; +use alloy_eips::{BlockHashOrNumber, HashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; use derive_more::Display; -use reth_primitives::{GotExpected, StaticFileSegment, TxHashOrNumber}; - -#[cfg(feature = "std")] -use std::path::PathBuf; - -use alloc::{boxed::Box, string::String}; +use reth_primitives::{GotExpected, StaticFileSegment}; /// Provider result type. pub type ProviderResult = Result; @@ -66,12 +62,12 @@ pub enum ProviderError { /// when required header related data was not found but was required. #[display("no header found for {_0:?}")] HeaderNotFound(BlockHashOrNumber), - /// The specific transaction is missing. 
+ /// The specific transaction identified by hash or id is missing. #[display("no transaction found for {_0:?}")] - TransactionNotFound(TxHashOrNumber), - /// The specific receipt is missing + TransactionNotFound(HashOrNumber), + /// The specific receipt for a transaction identified by hash or id is missing #[display("no receipt found for {_0:?}")] - ReceiptNotFound(TxHashOrNumber), + ReceiptNotFound(HashOrNumber), /// Unable to find the best block. #[display("best block does not exist")] BestBlockNotFound, @@ -111,7 +107,7 @@ pub enum ProviderError { /// Static File is not found at specified path. #[cfg(feature = "std")] #[display("not able to find {_0} static file at {_1:?}")] - MissingStaticFilePath(StaticFileSegment, PathBuf), + MissingStaticFilePath(StaticFileSegment, std::path::PathBuf), /// Static File is not found for requested block. #[display("not able to find {_0} static file for block number {_1}")] MissingStaticFileBlock(StaticFileSegment, BlockNumber), From d3b68656c2118e7ae6f76d8a132579068f74246f Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 22 Nov 2024 12:30:56 +0400 Subject: [PATCH 621/970] fix: always truncate static files (#12765) --- .../provider/src/providers/database/provider.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index d35e0a971a3..ff80213fdf3 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -3030,13 +3030,13 @@ impl BlockWriter .static_file_provider .get_highest_static_file_tx(StaticFileSegment::Transactions); - if let Some(static_tx) = static_file_tx_num { - if static_tx >= unwind_tx_from { - self.static_file_provider - .latest_writer(StaticFileSegment::Transactions)? 
- .prune_transactions(static_tx - unwind_tx_from + 1, block)?; - } - } + let to_delete = static_file_tx_num + .map(|static_tx| (static_tx + 1).saturating_sub(unwind_tx_from)) + .unwrap_or_default(); + + self.static_file_provider + .latest_writer(StaticFileSegment::Transactions)? + .prune_transactions(to_delete, block)?; } Ok(()) From 7d24aa40e82dab393cff77dffaf976f059153f83 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Fri, 22 Nov 2024 09:44:56 +0100 Subject: [PATCH 622/970] chore(trie): log proof result send error (#12749) --- crates/trie/parallel/src/proof.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 88321c821a8..dcb1a0231dd 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -22,7 +22,7 @@ use reth_trie::{ use reth_trie_common::proof::ProofRetainer; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::sync::Arc; -use tracing::debug; +use tracing::{debug, error}; #[cfg(feature = "metrics")] use crate::metrics::ParallelStateRootMetrics; @@ -126,7 +126,9 @@ where )) }) })(); - let _ = tx.send(result); + if let Err(err) = tx.send(result) { + error!(target: "trie::parallel", ?hashed_address, err_content = ?err.0, "Failed to send proof result"); + } }); storage_proofs.insert(hashed_address, rx); } From 3765ae244408ed8bf8243e2b7f48e46d7f872f2b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 11:37:34 +0100 Subject: [PATCH 623/970] feat: add TransactionSigned::new fns (#12768) --- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/chain-state/src/test_utils.rs | 3 +- crates/consensus/common/src/validation.rs | 2 +- crates/net/eth-wire-types/src/blocks.rs | 10 +++---- crates/net/eth-wire-types/src/transactions.rs | 28 +++++++++--------- crates/net/network/tests/it/requests.rs | 2 +- crates/net/network/tests/it/txgossip.rs | 5 +--- 
crates/optimism/evm/src/execute.rs | 8 ++--- crates/optimism/node/src/txpool.rs | 2 +- crates/optimism/node/tests/it/priority.rs | 5 +--- crates/primitives/src/transaction/mod.rs | 29 +++++++++++-------- crates/transaction-pool/src/test_utils/gen.rs | 2 +- crates/transaction-pool/src/traits.rs | 10 +++---- testing/testing-utils/src/generators.rs | 4 +-- 14 files changed, 55 insertions(+), 57 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index c778e0508da..67b200e6484 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1570,7 +1570,7 @@ mod tests { let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce, diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 63689f07f03..af0c363fe48 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -102,8 +102,7 @@ impl TestBlockBuilder { let signature_hash = tx.signature_hash(); let signature = self.signer_pk.sign_hash_sync(&signature_hash).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) - .with_signer(self.signer) + TransactionSigned::new_unhashed(tx, signature).with_signer(self.signer) }; let num_txs = rng.gen_range(0..5); diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 62357b4b9b1..6042f16bf50 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -450,7 +450,7 @@ mod tests { let signature = Signature::new(U256::default(), U256::default(), true); - TransactionSigned::from_transaction_and_signature(request, signature) + 
TransactionSigned::new_unhashed(request, signature) } /// got test block diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 06549e769e6..97bbe36b3d6 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -342,7 +342,7 @@ mod tests { message: BlockBodies(vec![ BlockBody { transactions: vec![ - TransactionSigned::from_transaction_and_signature(Transaction::Legacy(TxLegacy { + TransactionSigned::new_unhashed(Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, gas_price: 0x4a817c808, @@ -356,7 +356,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature(Transaction::Legacy(TxLegacy { + TransactionSigned::new_unhashed(Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x9u64, gas_price: 0x4a817c809, @@ -413,7 +413,7 @@ mod tests { message: BlockBodies(vec![ BlockBody { transactions: vec![ - TransactionSigned::from_transaction_and_signature(Transaction::Legacy( + TransactionSigned::new_unhashed(Transaction::Legacy( TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -423,13 +423,13 @@ mod tests { value: U256::from(0x200u64), input: Default::default(), }), - Signature::new( + Signature::new( U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(), U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(), false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x9u64, diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index 26f62b7f76a..ca76f0a8c7e 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -130,7 +130,7 @@ mod tests { let expected = 
hex!("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"); let mut data = vec![]; let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -152,7 +152,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x09u64, @@ -196,7 +196,7 @@ mod tests { fn decode_pooled_transactions() { let data = hex!("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -218,7 +218,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x09u64, @@ -260,7 +260,7 @@ mod tests { let decoded_transactions = RequestPair::::decode(&mut &data[..]).unwrap(); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 15u64, 
@@ -282,7 +282,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: 4, nonce: 26u64, @@ -306,7 +306,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 3u64, @@ -328,7 +328,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 1u64, @@ -350,7 +350,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 2u64, @@ -397,7 +397,7 @@ mod tests { fn encode_pooled_transactions_network() { let expected = hex!("f9022980f90225f8650f84832156008287fb94cf7f9e66af820a19257a2108375b180b0ec491678204d2802ca035b7bfeb9ad9ece2cbafaaf8e202e706b4cfaeb233f46198f00b44d4a566a981a0612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860b87502f872041a8459682f008459682f0d8252089461815774383099e24810ab832a5b2a5425c154d58829a2241af62c000080c001a059e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafda0016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469f86b0384773594008398968094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba0ce6834447c0a4193c40382e6c57ae33b241379c5418caac9cdc18d786fd12071a03ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88f86b01843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac3960468702769bb01b2a00802ba0e24d8bd32ad906d6f8b8d7741e08d1959df021698b19ee232feba15361587d0aa05406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631daf86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18"); let txs = 
vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 15u64, @@ -419,7 +419,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: 4, nonce: 26u64, @@ -443,7 +443,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 3u64, @@ -465,7 +465,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 1u64, @@ -487,7 +487,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 2u64, diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 54e1f4e12b4..0dd38c959de 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -33,7 +33,7 @@ pub fn rng_transaction(rng: &mut impl rand::RngCore) -> TransactionSigned { }); let signature = Signature::new(U256::default(), U256::default(), true); - TransactionSigned::from_transaction_and_signature(request, signature) + TransactionSigned::new_unhashed(request, signature) } #[tokio::test(flavor = "multi_thread")] diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index 98624c4c609..ebde61ef8ea 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -132,10 +132,7 @@ async fn test_sending_invalid_transactions() { value: Default::default(), input: Default::default(), }; - let tx = TransactionSigned::from_transaction_and_signature( - tx.into(), - Signature::test_signature(), - ); + let tx = 
TransactionSigned::new_unhashed(tx.into(), Signature::test_signature()); peer0.network().send_transactions(*peer1.peer_id(), vec![Arc::new(tx)]); } diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index a9a4b301573..042b8e29193 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -377,7 +377,7 @@ mod tests { let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().regolith_activated().build()); - let tx = TransactionSigned::from_transaction_and_signature( + let tx = TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce: 0, @@ -388,7 +388,7 @@ mod tests { Signature::test_signature(), ); - let tx_deposit = TransactionSigned::from_transaction_and_signature( + let tx_deposit = TransactionSigned::new_unhashed( Transaction::Deposit(op_alloy_consensus::TxDeposit { from: addr, to: addr.into(), @@ -461,7 +461,7 @@ mod tests { let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().canyon_activated().build()); - let tx = TransactionSigned::from_transaction_and_signature( + let tx = TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce: 0, @@ -472,7 +472,7 @@ mod tests { Signature::test_signature(), ); - let tx_deposit = TransactionSigned::from_transaction_and_signature( + let tx_deposit = TransactionSigned::new_unhashed( Transaction::Deposit(op_alloy_consensus::TxDeposit { from: addr, to: addr.into(), diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 7df5888fb75..a5616569c86 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -265,7 +265,7 @@ mod tests { input: Default::default(), }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(deposit_tx, signature); + let signed_tx = TransactionSigned::new_unhashed(deposit_tx, signature); let signed_recovered 
= TransactionSignedEcRecovered::from_signed_transaction(signed_tx, signer); let len = signed_recovered.encode_2718_len(); diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index f1260d2da01..c1df9180ce3 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -63,10 +63,7 @@ impl OpPayloadTransactions for CustomTxPriority { }; let signature = sender.sign_transaction_sync(&mut end_of_block_tx).unwrap(); let end_of_block_tx = TransactionSignedEcRecovered::from_signed_transaction( - TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(end_of_block_tx), - signature, - ), + TransactionSigned::new_unhashed(Transaction::Eip1559(end_of_block_tx), signature), sender.address(), ); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 1ac7b4394e0..ba0d3f31617 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -914,7 +914,7 @@ impl TransactionSignedNoHash { #[inline] pub fn with_hash(self) -> TransactionSigned { let Self { signature, transaction } = self; - TransactionSigned::from_transaction_and_signature(transaction, signature) + TransactionSigned::new_unhashed(transaction, signature) } /// Recovers a list of signers from a transaction list iterator @@ -1105,6 +1105,18 @@ impl PartialEq for TransactionSigned { // === impl TransactionSigned === impl TransactionSigned { + /// Creates a new signed transaction from the given parts. + pub fn new(transaction: Transaction, signature: Signature, hash: B256) -> Self { + Self { hash: hash.into(), signature, transaction } + } + + /// Creates a new signed transaction from the given transaction and signature without the hash. + /// + /// Note: this only calculates the hash on the first [`TransactionSigned::hash`] call. 
+ pub fn new_unhashed(transaction: Transaction, signature: Signature) -> Self { + Self { hash: Default::default(), signature, transaction } + } + /// Transaction signature. pub const fn signature(&self) -> &Signature { &self.signature @@ -1251,13 +1263,6 @@ impl TransactionSigned { keccak256(self.encoded_2718()) } - /// Create a new signed transaction from a transaction and its signature. - /// - /// This will also calculate the transaction hash using its encoding. - pub fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { - Self { transaction, signature, hash: Default::default() } - } - /// Decodes legacy transaction from the data buffer into a tuple. /// /// This expects `rlp(legacy_tx)` @@ -1614,7 +1619,7 @@ impl Decodable2718 for TransactionSigned { Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash: hash.into() }) } #[cfg(feature = "optimism")] - TxType::Deposit => Ok(Self::from_transaction_and_signature( + TxType::Deposit => Ok(Self::new_unhashed( Transaction::Deposit(TxDeposit::rlp_decode(buf)?), TxDeposit::signature(), )), @@ -1652,7 +1657,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { #[cfg(feature = "optimism")] let signature = if transaction.is_deposit() { TxDeposit::signature() } else { signature }; - Ok(Self::from_transaction_and_signature(transaction, signature)) + Ok(Self::new_unhashed(transaction, signature)) } } @@ -2191,7 +2196,7 @@ mod tests { signature: Signature, hash: Option, ) { - let expected = TransactionSigned::from_transaction_and_signature(transaction, signature); + let expected = TransactionSigned::new_unhashed(transaction, signature); if let Some(hash) = hash { assert_eq!(hash, expected.hash()); } @@ -2288,7 +2293,7 @@ mod tests { let signature = crate::sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) + TransactionSigned::new_unhashed(tx, signature) 
}).collect(); let parallel_senders = TransactionSigned::recover_signers(&txes, txes.len()).unwrap(); diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 858098ec91a..95a179aec81 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -199,7 +199,7 @@ impl TransactionBuilder { /// Signs the provided transaction using the specified signer and returns a signed transaction. fn signed(transaction: Transaction, signer: B256) -> TransactionSigned { let signature = sign_message(signer, transaction.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(transaction, signature) + TransactionSigned::new_unhashed(transaction, signature) } /// Sets the signer for the transaction builder. diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 68a911f2e2e..cfdfcc07dd9 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1584,7 +1584,7 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); let transaction = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); @@ -1606,7 +1606,7 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); let transaction = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); @@ -1628,7 +1628,7 @@ mod tests { ..Default::default() }); let signature = 
Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); let transaction = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); @@ -1652,7 +1652,7 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); let transaction = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 300); @@ -1676,7 +1676,7 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); let transaction = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 582298feab9..d8f3a29790b 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -149,7 +149,7 @@ pub fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionS let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) + TransactionSigned::new_unhashed(tx, signature) } /// Generates a set of [Keypair]s based on the desired count. 
@@ -479,7 +479,7 @@ mod tests { sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), signature_hash) .unwrap(); - let signed = TransactionSigned::from_transaction_and_signature(tx.clone(), signature); + let signed = TransactionSigned::new_unhashed(tx.clone(), signature); let recovered = signed.recover_signer().unwrap(); let expected = public_key_to_address(key_pair.public_key()); From a163929724670e7ef785d2d1c8d2572c4ce1299c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 11:58:20 +0100 Subject: [PATCH 624/970] feat: add signed conversions (#12772) --- crates/primitives/src/transaction/mod.rs | 37 +++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index ba0d3f31617..6a085ac8d24 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -2,7 +2,7 @@ use alloc::vec::Vec; use alloy_consensus::{ - transaction::RlpEcdsaTx, SignableTransaction, Transaction as _, TxEip1559, TxEip2930, + transaction::RlpEcdsaTx, SignableTransaction, Signed, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, }; use alloy_eips::{ @@ -1263,6 +1263,12 @@ impl TransactionSigned { keccak256(self.encoded_2718()) } + /// Splits the transaction into parts. + pub fn into_parts(self) -> (Transaction, Signature, B256) { + let hash = self.hash(); + (self.transaction, self.signature, hash) + } + /// Decodes legacy transaction from the data buffer into a tuple. /// /// This expects `rlp(legacy_tx)` @@ -1631,6 +1637,35 @@ impl Decodable2718 for TransactionSigned { } } +macro_rules! 
impl_from_signed { + ($($tx:ident),*) => { + $( + impl From> for TransactionSigned { + fn from(value: Signed<$tx>) -> Self { + let(tx,sig,hash) = value.into_parts(); + Self::new(tx.into(), sig, hash) + } + } + )* + }; +} + +impl_from_signed!(TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844); + +impl From> for TransactionSigned { + fn from(value: Signed) -> Self { + let (tx, sig, hash) = value.into_parts(); + Self::new(tx, sig, hash) + } +} + +impl From for Signed { + fn from(value: TransactionSigned) -> Self { + let (tx, sig, hash) = value.into_parts(); + Self::new_unchecked(tx, sig, hash) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { From ef3e0b360ff972ba76e5ffbcf3c29f7d753ab2bf Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Fri, 22 Nov 2024 17:31:06 +0700 Subject: [PATCH 625/970] perf(op-payload): remove unneeded clone (#12771) --- crates/optimism/payload/src/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 1050a55eb6e..5926cfd34c5 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -350,7 +350,7 @@ where let block_number = ctx.block_number(); let execution_outcome = ExecutionOutcome::new( state.take_bundle(), - vec![info.receipts.clone()].into(), + vec![info.receipts].into(), block_number, Vec::new(), ); From f2126f2c0562e4bdf896343f45e803b88ac9396b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 12:06:42 +0100 Subject: [PATCH 626/970] chore: move withencoded struct (#12770) --- crates/primitives-traits/src/encoded.rs | 55 ++++++++++++++++++++++++ crates/primitives-traits/src/lib.rs | 3 +- crates/primitives/src/transaction/mod.rs | 55 +----------------------- 3 files changed, 58 insertions(+), 55 
deletions(-) create mode 100644 crates/primitives-traits/src/encoded.rs diff --git a/crates/primitives-traits/src/encoded.rs b/crates/primitives-traits/src/encoded.rs new file mode 100644 index 00000000000..b162fc93343 --- /dev/null +++ b/crates/primitives-traits/src/encoded.rs @@ -0,0 +1,55 @@ +use alloy_primitives::Bytes; + +/// Generic wrapper with encoded Bytes, such as transaction data. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct WithEncoded(Bytes, pub T); + +impl From<(Bytes, T)> for WithEncoded { + fn from(value: (Bytes, T)) -> Self { + Self(value.0, value.1) + } +} + +impl WithEncoded { + /// Wraps the value with the bytes. + pub const fn new(bytes: Bytes, value: T) -> Self { + Self(bytes, value) + } + + /// Get the encoded bytes + pub fn encoded_bytes(&self) -> Bytes { + self.0.clone() + } + + /// Get the underlying value + pub const fn value(&self) -> &T { + &self.1 + } + + /// Returns ownership of the underlying value. + pub fn into_value(self) -> T { + self.1 + } + + /// Transform the value + pub fn transform>(self) -> WithEncoded { + WithEncoded(self.0, self.1.into()) + } + + /// Split the wrapper into [`Bytes`] and value tuple + pub fn split(self) -> (Bytes, T) { + (self.0, self.1) + } + + /// Maps the inner value to a new value using the given function. + pub fn map U>(self, op: F) -> WithEncoded { + WithEncoded(self.0, op(self.1)) + } +} + +impl WithEncoded> { + /// returns `None` if the inner value is `None`, otherwise returns `Some(WithEncoded)`. + pub fn transpose(self) -> Option> { + self.1.map(|v| WithEncoded(self.0, v)) + } +} diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index c149c6cd7e4..4d068b2ff4d 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -14,7 +14,6 @@ extern crate alloc; /// Common constants. 
pub mod constants; - pub use constants::gas_units::{format_gas, format_gas_throughput}; /// Minimal account @@ -42,7 +41,9 @@ pub use block::{ Block, FullBlock, }; +mod encoded; mod withdrawal; +pub use encoded::WithEncoded; mod error; pub use error::{GotExpected, GotExpectedBoxed}; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 6a085ac8d24..2e274311a03 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -40,6 +40,7 @@ pub use error::{ }; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; +pub use reth_primitives_traits::WithEncoded; pub use sidecar::BlobTransaction; pub use signature::{recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; @@ -1764,60 +1765,6 @@ impl Decodable for TransactionSignedEcRecovered { } } -/// Generic wrapper with encoded Bytes, such as transaction data. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct WithEncoded(Bytes, pub T); - -impl From<(Bytes, T)> for WithEncoded { - fn from(value: (Bytes, T)) -> Self { - Self(value.0, value.1) - } -} - -impl WithEncoded { - /// Wraps the value with the bytes. - pub const fn new(bytes: Bytes, value: T) -> Self { - Self(bytes, value) - } - - /// Get the encoded bytes - pub fn encoded_bytes(&self) -> Bytes { - self.0.clone() - } - - /// Get the underlying value - pub const fn value(&self) -> &T { - &self.1 - } - - /// Returns ownership of the underlying value. - pub fn into_value(self) -> T { - self.1 - } - - /// Transform the value - pub fn transform>(self) -> WithEncoded { - WithEncoded(self.0, self.1.into()) - } - - /// Split the wrapper into [`Bytes`] and value tuple - pub fn split(self) -> (Bytes, T) { - (self.0, self.1) - } - - /// Maps the inner value to a new value using the given function. 
- pub fn map U>(self, op: F) -> WithEncoded { - WithEncoded(self.0, op(self.1)) - } -} - -impl WithEncoded> { - /// returns `None` if the inner value is `None`, otherwise returns `Some(WithEncoded)`. - pub fn transpose(self) -> Option> { - self.1.map(|v| WithEncoded(self.0, v)) - } -} - /// Bincode-compatible transaction type serde implementations. #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { From 3d93b81a7ed4496d64a719ad5808d1abd0d29ca0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 12:19:03 +0100 Subject: [PATCH 627/970] chore: replace pooled elements with signed (#12773) --- crates/primitives/src/transaction/pooled.rs | 205 +++++++------------- 1 file changed, 68 insertions(+), 137 deletions(-) diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 05ad4afa87f..00f62c24372 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -3,14 +3,15 @@ use super::{error::TransactionConversionError, signature::recover_signer, TxEip7702}; use crate::{BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered}; -use alloy_eips::eip4844::BlobTransactionSidecar; - use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, transaction::{RlpEcdsaTx, TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - SignableTransaction, TxEip4844WithSidecar, + SignableTransaction, Signed, Transaction as _, TxEip4844WithSidecar, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Result, Encodable2718}, + eip4844::BlobTransactionSidecar, }; -use alloy_eips::eip2718::{Decodable2718, Eip2718Result, Encodable2718}; use alloy_primitives::{Address, PrimitiveSignature as Signature, TxHash, B256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; @@ -22,42 +23,14 @@ use serde::{Deserialize, Serialize}; #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests)] #[derive(Clone, Debug, 
PartialEq, Eq, Serialize, Deserialize)] pub enum PooledTransactionsElement { - /// A legacy transaction - Legacy { - /// The inner transaction - transaction: TxLegacy, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-2930 typed transaction - Eip2930 { - /// The inner transaction - transaction: TxEip2930, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-1559 typed transaction - Eip1559 { - /// The inner transaction - transaction: TxEip1559, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-7702 typed transaction - Eip7702 { - /// The inner transaction - transaction: TxEip7702, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, + /// An untagged [`TxLegacy`]. + Legacy(Signed), + /// A [`TxEip2930`] tagged with type 1. + Eip2930(Signed), + /// A [`TxEip1559`] tagged with type 2. + Eip1559(Signed), + /// A [`TxEip7702`] tagged with type 4. + Eip7702(Signed), /// A blob transaction, which includes the transaction, blob data, commitments, and proofs. BlobTransaction(BlobTransaction), } @@ -72,16 +45,16 @@ impl PooledTransactionsElement { let hash = tx.hash(); match tx { TransactionSigned { transaction: Transaction::Legacy(tx), signature, .. } => { - Ok(Self::Legacy { transaction: tx, signature, hash }) + Ok(Self::Legacy(Signed::new_unchecked(tx, signature, hash))) } TransactionSigned { transaction: Transaction::Eip2930(tx), signature, .. } => { - Ok(Self::Eip2930 { transaction: tx, signature, hash }) + Ok(Self::Eip2930(Signed::new_unchecked(tx, signature, hash))) } TransactionSigned { transaction: Transaction::Eip1559(tx), signature, .. } => { - Ok(Self::Eip1559 { transaction: tx, signature, hash }) + Ok(Self::Eip1559(Signed::new_unchecked(tx, signature, hash))) } TransactionSigned { transaction: Transaction::Eip7702(tx), signature, .. 
} => { - Ok(Self::Eip7702 { transaction: tx, signature, hash }) + Ok(Self::Eip7702(Signed::new_unchecked(tx, signature, hash))) } // Not supported because missing blob sidecar tx @ TransactionSigned { transaction: Transaction::Eip4844(_), .. } => Err(tx), @@ -121,10 +94,10 @@ impl PooledTransactionsElement { /// It is only for signature signing or signer recovery. pub fn signature_hash(&self) -> B256 { match self { - Self::Legacy { transaction, .. } => transaction.signature_hash(), - Self::Eip2930 { transaction, .. } => transaction.signature_hash(), - Self::Eip1559 { transaction, .. } => transaction.signature_hash(), - Self::Eip7702 { transaction, .. } => transaction.signature_hash(), + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), Self::BlobTransaction(blob_tx) => blob_tx.transaction.signature_hash(), } } @@ -132,10 +105,10 @@ impl PooledTransactionsElement { /// Reference to transaction hash. Used to identify transaction. pub const fn hash(&self) -> &TxHash { match self { - Self::Legacy { hash, .. } | - Self::Eip2930 { hash, .. } | - Self::Eip1559 { hash, .. } | - Self::Eip7702 { hash, .. } => hash, + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), Self::BlobTransaction(tx) => &tx.hash, } } @@ -143,21 +116,21 @@ impl PooledTransactionsElement { /// Returns the signature of the transaction. pub const fn signature(&self) -> &Signature { match self { - Self::Legacy { signature, .. } | - Self::Eip2930 { signature, .. } | - Self::Eip1559 { signature, .. } | - Self::Eip7702 { signature, .. } => signature, + Self::Legacy(tx) => tx.signature(), + Self::Eip2930(tx) => tx.signature(), + Self::Eip1559(tx) => tx.signature(), + Self::Eip7702(tx) => tx.signature(), Self::BlobTransaction(blob_tx) => &blob_tx.signature, } } /// Returns the transaction nonce. 
- pub const fn nonce(&self) -> u64 { + pub fn nonce(&self) -> u64 { match self { - Self::Legacy { transaction, .. } => transaction.nonce, - Self::Eip2930 { transaction, .. } => transaction.nonce, - Self::Eip1559 { transaction, .. } => transaction.nonce, - Self::Eip7702 { transaction, .. } => transaction.nonce, + Self::Legacy(tx) => tx.tx().nonce(), + Self::Eip2930(tx) => tx.tx().nonce(), + Self::Eip1559(tx) => tx.tx().nonce(), + Self::Eip7702(tx) => tx.tx().nonce(), Self::BlobTransaction(blob_tx) => blob_tx.transaction.tx.nonce, } } @@ -189,26 +162,10 @@ impl PooledTransactionsElement { /// Returns the inner [`TransactionSigned`]. pub fn into_transaction(self) -> TransactionSigned { match self { - Self::Legacy { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Legacy(transaction), - signature, - hash: hash.into(), - }, - Self::Eip2930 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip2930(transaction), - signature, - hash: hash.into(), - }, - Self::Eip1559 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip1559(transaction), - signature, - hash: hash.into(), - }, - Self::Eip7702 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip7702(transaction), - signature, - hash: hash.into(), - }, + Self::Legacy(tx) => tx.into(), + Self::Eip2930(tx) => tx.into(), + Self::Eip1559(tx) => tx.into(), + Self::Eip7702(tx) => tx.into(), Self::BlobTransaction(blob_tx) => blob_tx.into_parts().0, } } @@ -222,7 +179,7 @@ impl PooledTransactionsElement { /// Returns the [`TxLegacy`] variant if the transaction is a legacy transaction. pub const fn as_legacy(&self) -> Option<&TxLegacy> { match self { - Self::Legacy { transaction, .. } => Some(transaction), + Self::Legacy(tx) => Some(tx.tx()), _ => None, } } @@ -230,7 +187,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip2930`] variant if the transaction is an EIP-2930 transaction. 
pub const fn as_eip2930(&self) -> Option<&TxEip2930> { match self { - Self::Eip2930 { transaction, .. } => Some(transaction), + Self::Eip2930(tx) => Some(tx.tx()), _ => None, } } @@ -238,7 +195,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip1559`] variant if the transaction is an EIP-1559 transaction. pub const fn as_eip1559(&self) -> Option<&TxEip1559> { match self { - Self::Eip1559 { transaction, .. } => Some(transaction), + Self::Eip1559(tx) => Some(tx.tx()), _ => None, } } @@ -254,7 +211,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip7702`] variant if the transaction is an EIP-7702 transaction. pub const fn as_eip7702(&self) -> Option<&TxEip7702> { match self { - Self::Eip7702 { transaction, .. } => Some(transaction), + Self::Eip7702(tx) => Some(tx.tx()), _ => None, } } @@ -286,9 +243,9 @@ impl PooledTransactionsElement { /// This is also commonly referred to as the "Gas Tip Cap" (`GasTipCap`). pub const fn max_priority_fee_per_gas(&self) -> Option { match self { - Self::Legacy { .. } | Self::Eip2930 { .. } => None, - Self::Eip1559 { transaction, .. } => Some(transaction.max_priority_fee_per_gas), - Self::Eip7702 { transaction, .. } => Some(transaction.max_priority_fee_per_gas), + Self::Legacy(_) | Self::Eip2930(_) => None, + Self::Eip1559(tx) => Some(tx.tx().max_priority_fee_per_gas), + Self::Eip7702(tx) => Some(tx.tx().max_priority_fee_per_gas), Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_priority_fee_per_gas), } } @@ -298,10 +255,10 @@ impl PooledTransactionsElement { /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). pub const fn max_fee_per_gas(&self) -> u128 { match self { - Self::Legacy { transaction, .. } => transaction.gas_price, - Self::Eip2930 { transaction, .. } => transaction.gas_price, - Self::Eip1559 { transaction, .. } => transaction.max_fee_per_gas, - Self::Eip7702 { transaction, .. 
} => transaction.max_fee_per_gas, + Self::Legacy(tx) => tx.tx().gas_price, + Self::Eip2930(tx) => tx.tx().gas_price, + Self::Eip1559(tx) => tx.tx().max_fee_per_gas, + Self::Eip7702(tx) => tx.tx().max_fee_per_gas, Self::BlobTransaction(tx) => tx.transaction.tx.max_fee_per_gas, } } @@ -391,28 +348,20 @@ impl Decodable for PooledTransactionsElement { impl Encodable2718 for PooledTransactionsElement { fn type_flag(&self) -> Option { match self { - Self::Legacy { .. } => None, - Self::Eip2930 { .. } => Some(0x01), - Self::Eip1559 { .. } => Some(0x02), - Self::BlobTransaction { .. } => Some(0x03), - Self::Eip7702 { .. } => Some(0x04), + Self::Legacy(_) => None, + Self::Eip2930(_) => Some(0x01), + Self::Eip1559(_) => Some(0x02), + Self::BlobTransaction(_) => Some(0x03), + Self::Eip7702(_) => Some(0x04), } } fn encode_2718_len(&self) -> usize { match self { - Self::Legacy { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::Eip2930 { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::Eip1559 { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::Eip7702 { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } + Self::Legacy(tx) => tx.eip2718_encoded_length(), + Self::Eip2930(tx) => tx.eip2718_encoded_length(), + Self::Eip1559(tx) => tx.eip2718_encoded_length(), + Self::Eip7702(tx) => tx.eip2718_encoded_length(), Self::BlobTransaction(BlobTransaction { transaction, signature, .. }) => { transaction.eip2718_encoded_length(signature) } @@ -421,18 +370,10 @@ impl Encodable2718 for PooledTransactionsElement { fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { match self { - Self::Legacy { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } - Self::Eip2930 { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } - Self::Eip1559 { transaction, signature, .. 
} => { - transaction.eip2718_encode(signature, out) - } - Self::Eip7702 { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } + Self::Legacy(tx) => tx.eip2718_encode(out), + Self::Eip2930(tx) => tx.eip2718_encode(out), + Self::Eip1559(tx) => tx.eip2718_encode(out), + Self::Eip7702(tx) => tx.eip2718_encode(out), Self::BlobTransaction(BlobTransaction { transaction, signature, .. }) => { transaction.eip2718_encode(signature, out) } @@ -474,21 +415,11 @@ impl Decodable2718 for PooledTransactionsElement { Transaction::Eip4844(_) => Err(RlpError::Custom( "EIP-4844 transactions can only be decoded with transaction type 0x03", ).into()), - Transaction::Eip2930(tx) => Ok(Self::Eip2930 { - transaction: tx, - signature: typed_tx.signature, - hash - }), - Transaction::Eip1559(tx) => Ok(Self::Eip1559 { - transaction: tx, - signature: typed_tx.signature, - hash - }), - Transaction::Eip7702(tx) => Ok(Self::Eip7702 { - transaction: tx, - signature: typed_tx.signature, - hash - }), + Transaction::Eip2930(tx) => Ok(Self::Eip2930 ( + Signed::new_unchecked(tx, typed_tx.signature, hash) + )), + Transaction::Eip1559(tx) => Ok(Self::Eip1559( Signed::new_unchecked(tx, typed_tx.signature, hash))), + Transaction::Eip7702(tx) => Ok(Self::Eip7702( Signed::new_unchecked(tx, typed_tx.signature, hash))), #[cfg(feature = "optimism")] Transaction::Deposit(_) => Err(RlpError::Custom("Optimism deposit transaction cannot be decoded to PooledTransactionsElement").into()) } @@ -501,7 +432,7 @@ impl Decodable2718 for PooledTransactionsElement { let (transaction, hash, signature) = TransactionSigned::decode_rlp_legacy_transaction_tuple(buf)?; - Ok(Self::Legacy { transaction, signature, hash }) + Ok(Self::Legacy(Signed::new_unchecked(transaction, signature, hash))) } } From 7f5fd80cb7d69d1e5d14abf6851111adae762970 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 22 Nov 2024 15:49:25 +0400 Subject: [PATCH 628/970] feat: integrate `SignedTx` AT into 
`StaticFileProviderRW` (#12764) --- Cargo.lock | 3 + crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/externals.rs | 6 +- crates/cli/commands/src/common.rs | 12 +--- crates/consensus/beacon/Cargo.toml | 3 + .../beacon/src/engine/hooks/static_file.rs | 14 +++-- crates/consensus/beacon/src/engine/mod.rs | 2 +- crates/engine/tree/src/persistence.rs | 6 +- crates/exex/exex/src/backfill/test_utils.rs | 6 +- crates/node/builder/src/setup.rs | 4 +- crates/optimism/primitives/src/lib.rs | 4 +- crates/primitives-traits/src/node.rs | 58 +++++++++++-------- crates/primitives/src/lib.rs | 2 +- .../stages/src/stages/hashing_account.rs | 3 +- crates/static-file/static-file/Cargo.toml | 2 + .../static-file/src/segments/transactions.rs | 16 +++-- .../static-file/src/static_file_producer.rs | 8 ++- .../src/providers/blockchain_provider.rs | 3 +- .../src/providers/database/provider.rs | 11 ++-- .../provider/src/providers/static_file/mod.rs | 10 ++-- .../src/providers/static_file/writer.rs | 5 +- 21 files changed, 105 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eabde10a0a7..e4a6687defb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6456,6 +6456,7 @@ dependencies = [ "reth-blockchain-tree", "reth-blockchain-tree-api", "reth-chainspec", + "reth-codecs", "reth-config", "reth-consensus", "reth-db", @@ -9159,8 +9160,10 @@ dependencies = [ "assert_matches", "parking_lot", "rayon", + "reth-codecs", "reth-db", "reth-db-api", + "reth-primitives-traits", "reth-provider", "reth-prune-types", "reth-stages", diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 67b200e6484..d2ff0f5c844 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1424,7 +1424,7 @@ mod tests { } fn setup_genesis< - N: ProviderNodeTypes>, + N: ProviderNodeTypes>, >( factory: &ProviderFactory, mut genesis: SealedBlock, diff --git 
a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 76b65824854..bf5a243a5a5 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -4,7 +4,7 @@ use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::Consensus; use reth_db::{static_file::HeaderMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; -use reth_node_types::{Block, FullNodePrimitives, NodeTypesWithDB}; +use reth_node_types::{FullNodePrimitives, NodeTypesWithDB}; use reth_primitives::{BlockBody, StaticFileSegment}; use reth_provider::{ providers::ProviderNodeTypes, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, @@ -15,11 +15,11 @@ use std::{collections::BTreeMap, sync::Arc}; /// A helper trait with requirements for [`ProviderNodeTypes`] to be used within [`TreeExternals`]. pub trait TreeNodeTypes: - ProviderNodeTypes>> + ProviderNodeTypes> { } impl TreeNodeTypes for T where - T: ProviderNodeTypes>> + T: ProviderNodeTypes> { } diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index e557f15da6b..251e01a105a 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -197,19 +197,11 @@ impl AccessRights { /// [`NodeTypes`](reth_node_builder::NodeTypes) in CLI. 
pub trait CliNodeTypes: NodeTypesWithEngine - + NodeTypesForProvider< - Primitives: FullNodePrimitives< - Block: reth_node_api::Block, - >, - > + + NodeTypesForProvider> { } impl CliNodeTypes for N where N: NodeTypesWithEngine - + NodeTypesForProvider< - Primitives: FullNodePrimitives< - Block: reth_node_api::Block, - >, - > + + NodeTypesForProvider> { } diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 245ebe8541e..65994557c06 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -14,6 +14,8 @@ workspace = true # reth reth-ethereum-consensus.workspace = true reth-blockchain-tree-api.workspace = true +reth-codecs.workspace = true +reth-db-api.workspace = true reth-primitives.workspace = true reth-stages-api.workspace = true reth-errors.workspace = true @@ -80,6 +82,7 @@ assert_matches.workspace = true [features] optimism = [ "reth-blockchain-tree/optimism", + "reth-codecs/optimism", "reth-chainspec", "reth-db-api/optimism", "reth-db/optimism", diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 99854209cb3..7cd286f659c 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -6,8 +6,10 @@ use crate::{ }; use alloy_primitives::BlockNumber; use futures::FutureExt; +use reth_codecs::Compact; +use reth_db_api::table::Value; use reth_errors::RethResult; -use reth_primitives::static_file::HighestStaticFiles; +use reth_primitives::{static_file::HighestStaticFiles, NodePrimitives}; use reth_provider::{ BlockReader, ChainStateBlockReader, DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, @@ -33,8 +35,9 @@ impl StaticFileHook where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StaticFileProviderFactory - + StageCheckpointReader + Provider: StaticFileProviderFactory< + Primitives: 
NodePrimitives, + > + StageCheckpointReader + BlockReader + ChainStateBlockReader, > + 'static, @@ -148,8 +151,9 @@ impl EngineHook for StaticFileHook where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StaticFileProviderFactory - + StageCheckpointReader + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StageCheckpointReader + BlockReader + ChainStateBlockReader, > + 'static, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 2ad06e68b67..0fedbdd452d 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -2172,7 +2172,7 @@ mod tests { fn insert_blocks< 'a, - N: ProviderNodeTypes>, + N: ProviderNodeTypes>, >( provider_factory: ProviderFactory, mut blocks: impl Iterator, diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 0199ae3f461..86d18ceb48c 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -3,7 +3,7 @@ use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; use reth_primitives::BlockBody; -use reth_primitives_traits::{Block, FullNodePrimitives}; +use reth_primitives_traits::FullNodePrimitives; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory, @@ -21,11 +21,11 @@ use tracing::{debug, error}; /// A helper trait with requirements for [`ProviderNodeTypes`] to be used within /// [`PersistenceService`]. pub trait PersistenceNodeTypes: - ProviderNodeTypes>> + ProviderNodeTypes> { } impl PersistenceNodeTypes for T where - T: ProviderNodeTypes>> + T: ProviderNodeTypes> { } /// Writes parts of reth's in memory tree state to the database and static files. 
diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 5d0f88f517d..169d2d758de 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -58,7 +58,7 @@ pub(crate) fn execute_block_and_commit_to_database( block: &BlockWithSenders, ) -> eyre::Result> where - N: ProviderNodeTypes>, + N: ProviderNodeTypes>, { let provider = provider_factory.provider()?; @@ -162,7 +162,7 @@ pub(crate) fn blocks_and_execution_outputs( key_pair: Keypair, ) -> eyre::Result)>> where - N: ProviderNodeTypes>, + N: ProviderNodeTypes>, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; @@ -184,7 +184,7 @@ pub(crate) fn blocks_and_execution_outcome( ) -> eyre::Result<(Vec, ExecutionOutcome)> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives, + N::Primitives: FullNodePrimitives, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 71f0ceb56cd..3258ba8fe54 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -14,7 +14,7 @@ use reth_exex::ExExManagerHandle; use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, EthBlockClient, }; -use reth_node_api::FullNodePrimitives; +use reth_node_api::{FullNodePrimitives, NodePrimitives}; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -88,7 +88,7 @@ where N: ProviderNodeTypes, H: HeaderDownloader
+ 'static, B: BodyDownloader< - Body = <::Block as reth_node_api::Block>::Body, + Body = <::Block as reth_node_api::Block>::Body, > + 'static, Executor: BlockExecutorProvider, N::Primitives: FullNodePrimitives, diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 26499bb43af..334440ea106 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -14,14 +14,14 @@ pub mod tx_type; pub use tx_type::OpTxType; use alloy_consensus::Header; -use reth_node_types::FullNodePrimitives; +use reth_node_types::NodePrimitives; use reth_primitives::{Block, BlockBody, Receipt, TransactionSigned}; /// Optimism primitive types. #[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct OpPrimitives; -impl FullNodePrimitives for OpPrimitives { +impl NodePrimitives for OpPrimitives { type Block = Block; type BlockHeader = Header; type BlockBody = BlockBody; diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 7cb321e9af3..19f6bd8456a 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -77,33 +77,45 @@ impl NodePrimitives for () { } /// Helper trait that sets trait bounds on [`NodePrimitives`]. -pub trait FullNodePrimitives: - Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static +pub trait FullNodePrimitives +where + Self: NodePrimitives< + Block: FullBlock
, + BlockHeader: FullBlockHeader, + BlockBody: FullBlockBody, + SignedTx: FullSignedTx, + TxType: FullTxType, + Receipt: FullReceipt, + > + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + 'static, { - /// Block primitive. - type Block: FullBlock
; - /// Block header primitive. - type BlockHeader: FullBlockHeader + 'static; - /// Block body primitive. - type BlockBody: FullBlockBody + 'static; - /// Signed version of the transaction type. - type SignedTx: FullSignedTx; - /// Transaction envelope type ID. - type TxType: FullTxType; - /// A receipt. - type Receipt: FullReceipt; } -impl NodePrimitives for T -where - T: FullNodePrimitives, +impl FullNodePrimitives for T where + T: NodePrimitives< + Block: FullBlock
, + BlockHeader: FullBlockHeader, + BlockBody: FullBlockBody, + SignedTx: FullSignedTx, + TxType: FullTxType, + Receipt: FullReceipt, + > + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + 'static { - type Block = T::Block; - type BlockHeader = T::BlockHeader; - type BlockBody = T::BlockBody; - type SignedTx = T::SignedTx; - type TxType = T::TxType; - type Receipt = T::Receipt; } /// Helper adapter type for accessing [`NodePrimitives`] receipt type. diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 7999588e49d..203880209a2 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -79,7 +79,7 @@ pub mod serde_bincode_compat { #[derive(Debug, Clone, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct EthPrimitives; -impl reth_primitives_traits::FullNodePrimitives for EthPrimitives { +impl reth_primitives_traits::NodePrimitives for EthPrimitives { type Block = crate::Block; type BlockHeader = alloy_consensus::Header; type BlockBody = crate::BlockBody; diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index ecca1e0716c..e6b1e548455 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -63,7 +63,8 @@ impl AccountHashingStage { opts: SeedOpts, ) -> Result, StageError> where - N::Primitives: reth_primitives_traits::FullNodePrimitives, + N::Primitives: + reth_primitives_traits::FullNodePrimitives, { use alloy_primitives::U256; use reth_db_api::models::AccountBeforeTx; diff --git a/crates/static-file/static-file/Cargo.toml b/crates/static-file/static-file/Cargo.toml index d22b116cdc5..89f60687895 100644 --- a/crates/static-file/static-file/Cargo.toml +++ b/crates/static-file/static-file/Cargo.toml @@ -13,12 +13,14 @@ workspace = true [dependencies] # reth +reth-codecs.workspace = true reth-db.workspace = true reth-db-api.workspace 
= true reth-provider.workspace = true reth-storage-errors.workspace = true reth-tokio-util.workspace = true reth-prune-types.workspace = true +reth-primitives-traits.workspace = true reth-static-file-types.workspace = true reth-stages-types.workspace = true diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index 59ec94be9e4..168ae94817b 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -1,7 +1,9 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; -use reth_db::tables; +use reth_codecs::Compact; +use reth_db::{table::Value, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, }; @@ -13,8 +15,11 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Transactions; -impl Segment - for Transactions +impl Segment for Transactions +where + Provider: StaticFileProviderFactory> + + DBProvider + + BlockReader, { fn segment(&self) -> StaticFileSegment { StaticFileSegment::Transactions @@ -38,8 +43,9 @@ impl Segment()?; + let mut transactions_cursor = provider.tx_ref().cursor_read::::Primitives as NodePrimitives>::SignedTx, + >>()?; let transactions_walker = transactions_cursor.walk_range(block_body_indices.tx_num_range())?; diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 8959819e821..371a344d872 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -4,6 +4,9 @@ use crate::{segments, segments::Segment, StaticFileProducerEvent}; use alloy_primitives::BlockNumber; use parking_lot::Mutex; use rayon::prelude::*; +use reth_codecs::Compact; +use 
reth_db::table::Value; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileWriter, BlockReader, ChainStateBlockReader, DBProvider, DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, @@ -86,7 +89,10 @@ impl StaticFileProducerInner where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StaticFileProviderFactory + StageCheckpointReader + BlockReader, + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StageCheckpointReader + + BlockReader, >, { /// Listen for events on the `static_file_producer`. diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 083e7fb596b..74009ffff59 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -889,8 +889,7 @@ mod tests { static_file_provider.latest_writer(StaticFileSegment::Transactions)?; transactions_writer.increment_block(block.number)?; for tx in block.body.transactions() { - let tx: TransactionSignedNoHash = tx.clone().into(); - transactions_writer.append_transaction(tx_num, &tx)?; + transactions_writer.append_transaction(tx_num, tx)?; tx_num += 1; } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index ff80213fdf3..e6c3842976f 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -48,11 +48,11 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; use reth_node_types::NodeTypes; use reth_primitives::{ - Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, - TransactionSigned, 
TransactionSignedNoHash, + Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, NodePrimitives, Receipt, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, + TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; -use reth_primitives_traits::{BlockBody as _, FullNodePrimitives, SignedTransaction}; +use reth_primitives_traits::{BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider}; @@ -2778,8 +2778,7 @@ impl BlockExecutio impl BlockWriter for DatabaseProvider { - type Body = - <::Block as reth_primitives_traits::Block>::Body; + type Body = <::Block as reth_primitives_traits::Block>::Body; /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 30b8d0344da..58a9e3bb378 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -68,7 +68,7 @@ mod tests { use reth_db_api::transaction::DbTxMut; use reth_primitives::{ static_file::{find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE}, - Receipt, TransactionSignedNoHash, + EthPrimitives, Receipt, TransactionSigned, }; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; use reth_testing_utils::generators::{self, random_header_range}; @@ -304,20 +304,20 @@ mod tests { /// * `10..=19`: no txs/receipts /// * `20..=29`: only one tx/receipt fn setup_tx_based_scenario( - sf_rw: &StaticFileProvider<()>, + sf_rw: &StaticFileProvider, segment: StaticFileSegment, blocks_per_file: u64, ) { fn setup_block_ranges( - writer: &mut StaticFileProviderRWRefMut<'_, ()>, - sf_rw: 
&StaticFileProvider<()>, + writer: &mut StaticFileProviderRWRefMut<'_, EthPrimitives>, + sf_rw: &StaticFileProvider, segment: StaticFileSegment, block_range: &Range, mut tx_count: u64, next_tx_num: &mut u64, ) { let mut receipt = Receipt::default(); - let mut tx = TransactionSignedNoHash::default(); + let mut tx = TransactionSigned::default(); for block in block_range.clone() { writer.increment_block(block).unwrap(); diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 5951dbb751f..83954bde352 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -558,7 +558,10 @@ impl StaticFileProviderRW { /// empty blocks and this function wouldn't be called. /// /// Returns the current [`TxNumber`] as seen in the static file. - pub fn append_transaction(&mut self, tx_num: TxNumber, tx: impl Compact) -> ProviderResult<()> { + pub fn append_transaction(&mut self, tx_num: TxNumber, tx: &N::SignedTx) -> ProviderResult<()> + where + N::SignedTx: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; From 73cd92f5f51fed011e3b340c881ff9f404534090 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Fri, 22 Nov 2024 20:31:44 +0700 Subject: [PATCH 629/970] chore(pending-pool): remove unused `all` txs (#12777) --- crates/transaction-pool/src/pool/pending.rs | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index ee2bcd96e84..89e673aad99 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -8,7 +8,7 @@ use crate::{ }; use std::{ cmp::Ordering, - collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap}, + collections::{hash_map::Entry, BTreeMap, HashMap}, ops::Bound::Unbounded, 
sync::Arc, }; @@ -34,8 +34,6 @@ pub struct PendingPool { submission_id: u64, /// _All_ Transactions that are currently inside the pool grouped by their identifier. by_id: BTreeMap>, - /// _All_ transactions sorted by priority - all: BTreeSet>, /// The highest nonce transactions for each sender - like the `independent` set, but the /// highest instead of lowest nonce. highest_nonces: HashMap>, @@ -61,7 +59,6 @@ impl PendingPool { ordering, submission_id: 0, by_id: Default::default(), - all: Default::default(), independent_transactions: Default::default(), highest_nonces: Default::default(), size_of: Default::default(), @@ -78,7 +75,6 @@ impl PendingPool { fn clear_transactions(&mut self) -> BTreeMap> { self.independent_transactions.clear(); self.highest_nonces.clear(); - self.all.clear(); self.size_of.reset(); std::mem::take(&mut self.by_id) } @@ -194,7 +190,6 @@ impl PendingPool { } else { self.size_of += tx.transaction.size(); self.update_independents_and_highest_nonces(&tx); - self.all.insert(tx.clone()); self.by_id.insert(id, tx); } } @@ -240,7 +235,6 @@ impl PendingPool { self.size_of += tx.transaction.size(); self.update_independents_and_highest_nonces(&tx); - self.all.insert(tx.clone()); self.by_id.insert(id, tx); } } @@ -307,7 +301,6 @@ impl PendingPool { let tx = PendingTransaction { submission_id, transaction: tx, priority }; self.update_independents_and_highest_nonces(&tx); - self.all.insert(tx.clone()); // send the new transaction to any existing pendingpool static file iterators if self.new_transaction_notifier.receiver_count() > 0 { @@ -337,7 +330,6 @@ impl PendingPool { let tx = self.by_id.remove(id)?; self.size_of -= tx.transaction.size(); - self.all.remove(&tx); if let Some(highest) = self.highest_nonces.get(&id.sender) { if highest.transaction.nonce() == id.nonce { @@ -538,13 +530,12 @@ impl PendingPool { /// Asserts that the bijection between `by_id` and `all` is valid. 
#[cfg(any(test, feature = "test-utils"))] pub(crate) fn assert_invariants(&self) { - assert_eq!(self.by_id.len(), self.all.len(), "by_id.len() != all.len()"); assert!( - self.independent_transactions.len() <= self.all.len(), + self.independent_transactions.len() <= self.by_id.len(), "independent.len() > all.len()" ); assert!( - self.highest_nonces.len() <= self.all.len(), + self.highest_nonces.len() <= self.by_id.len(), "independent_descendants.len() > all.len()" ); assert_eq!( From 26ecda223f21fc36c5043a4673da5fdebae428f2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 14:33:43 +0100 Subject: [PATCH 630/970] chore: some additional pool docs (#12776) --- crates/transaction-pool/src/traits.rs | 70 ++++++++++++++++----------- 1 file changed, 41 insertions(+), 29 deletions(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index cfdfcc07dd9..c45584c50a7 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -921,7 +921,15 @@ impl BestTransactionsAttributes { } } -/// Trait for transaction types used inside the pool +/// Trait for transaction types used inside the pool. +/// +/// This supports two transaction formats +/// - Consensus format: the form the transaction takes when it is included in a block. +/// - Pooled format: the form the transaction takes when it is gossiping around the network. +/// +/// This distinction is necessary for the EIP-4844 blob transactions, which require an additional +/// sidecar when they are gossiped around the network. It is expected that the `Consensus` format is +/// a subset of the `Pooled` format. pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Associated error type for the `try_from_consensus` method. 
type TryFromConsensusError; @@ -1066,7 +1074,11 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { } } -/// Super trait for transactions that can be converted to and from Eth transactions +/// Super trait for transactions that can be converted to and from Eth transactions intended for the +/// ethereum style pool. +/// +/// This extends the [`PoolTransaction`] trait with additional methods that are specific to the +/// Ethereum pool. pub trait EthPoolTransaction: PoolTransaction< Consensus: From @@ -1097,9 +1109,9 @@ pub trait EthPoolTransaction: /// This type is essentially a wrapper around [`TransactionSignedEcRecovered`] with additional /// fields derived from the transaction that are frequently used by the pools for ordering. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct EthPooledTransaction { - /// `EcRecovered` transaction info - pub(crate) transaction: TransactionSignedEcRecovered, +pub struct EthPooledTransaction { + /// `EcRecovered` transaction, the consensus format. + pub(crate) transaction: T, /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. @@ -1115,30 +1127,6 @@ pub struct EthPooledTransaction { pub(crate) blob_sidecar: EthBlobTransactionSidecar, } -/// Represents the blob sidecar of the [`EthPooledTransaction`]. 
-#[derive(Debug, Clone, PartialEq, Eq)] -pub enum EthBlobTransactionSidecar { - /// This transaction does not have a blob sidecar - None, - /// This transaction has a blob sidecar (EIP-4844) but it is missing - /// - /// It was either extracted after being inserted into the pool or re-injected after reorg - /// without the blob sidecar - Missing, - /// The eip-4844 transaction was pulled from the network and still has its blob sidecar - Present(BlobTransactionSidecar), -} - -impl EthBlobTransactionSidecar { - /// Returns the blob sidecar if it is present - pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> { - match self { - Self::Present(sidecar) => Some(sidecar), - _ => None, - } - } -} - impl EthPooledTransaction { /// Create new instance of [Self]. /// @@ -1403,6 +1391,30 @@ impl From for TransactionSignedEcRecovered { } } +/// Represents the blob sidecar of the [`EthPooledTransaction`]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum EthBlobTransactionSidecar { + /// This transaction does not have a blob sidecar + None, + /// This transaction has a blob sidecar (EIP-4844) but it is missing + /// + /// It was either extracted after being inserted into the pool or re-injected after reorg + /// without the blob sidecar + Missing, + /// The eip-4844 transaction was pulled from the network and still has its blob sidecar + Present(BlobTransactionSidecar), +} + +impl EthBlobTransactionSidecar { + /// Returns the blob sidecar if it is present + pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> { + match self { + Self::Present(sidecar) => Some(sidecar), + _ => None, + } + } +} + /// Represents the current status of the pool. 
#[derive(Debug, Clone, Copy, Default)] pub struct PoolSize { From 852fba65248b0eb77f6b72a16197df613bfff963 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 14:34:09 +0100 Subject: [PATCH 631/970] chore: replace blobtx fields with signed (#12774) --- crates/primitives/src/transaction/pooled.rs | 57 ++++++++++---------- crates/primitives/src/transaction/sidecar.rs | 40 +++++--------- crates/rpc/rpc/src/eth/bundle.rs | 2 +- 3 files changed, 45 insertions(+), 54 deletions(-) diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 00f62c24372..c6ab623829b 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -5,8 +5,8 @@ use super::{error::TransactionConversionError, signature::recover_signer, TxEip7 use crate::{BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered}; use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, - transaction::{RlpEcdsaTx, TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - SignableTransaction, Signed, Transaction as _, TxEip4844WithSidecar, + transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, + Signed, Transaction as _, TxEip4844WithSidecar, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Result, Encodable2718}, @@ -78,11 +78,11 @@ impl PooledTransactionsElement { // If the transaction is an EIP-4844 transaction... TransactionSigned { transaction: Transaction::Eip4844(tx), signature, .. } => { // Construct a `PooledTransactionsElement::BlobTransaction` with provided sidecar. - Self::BlobTransaction(BlobTransaction { + Self::BlobTransaction(BlobTransaction(Signed::new_unchecked( + TxEip4844WithSidecar { tx, sidecar }, signature, hash, - transaction: TxEip4844WithSidecar { tx, sidecar }, - }) + ))) } // If the transaction is not EIP-4844, return an error with the original // transaction. 
@@ -98,7 +98,7 @@ impl PooledTransactionsElement { Self::Eip2930(tx) => tx.signature_hash(), Self::Eip1559(tx) => tx.signature_hash(), Self::Eip7702(tx) => tx.signature_hash(), - Self::BlobTransaction(blob_tx) => blob_tx.transaction.signature_hash(), + Self::BlobTransaction(tx) => tx.signature_hash(), } } @@ -109,7 +109,7 @@ impl PooledTransactionsElement { Self::Eip2930(tx) => tx.hash(), Self::Eip1559(tx) => tx.hash(), Self::Eip7702(tx) => tx.hash(), - Self::BlobTransaction(tx) => &tx.hash, + Self::BlobTransaction(tx) => tx.0.hash(), } } @@ -120,7 +120,7 @@ impl PooledTransactionsElement { Self::Eip2930(tx) => tx.signature(), Self::Eip1559(tx) => tx.signature(), Self::Eip7702(tx) => tx.signature(), - Self::BlobTransaction(blob_tx) => &blob_tx.signature, + Self::BlobTransaction(tx) => tx.0.signature(), } } @@ -131,7 +131,7 @@ impl PooledTransactionsElement { Self::Eip2930(tx) => tx.tx().nonce(), Self::Eip1559(tx) => tx.tx().nonce(), Self::Eip7702(tx) => tx.tx().nonce(), - Self::BlobTransaction(blob_tx) => blob_tx.transaction.tx.nonce, + Self::BlobTransaction(tx) => tx.tx().nonce(), } } @@ -203,7 +203,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip4844`] variant if the transaction is an EIP-4844 transaction. pub const fn as_eip4844(&self) -> Option<&TxEip4844> { match self { - Self::BlobTransaction(tx) => Some(&tx.transaction.tx), + Self::BlobTransaction(tx) => Some(tx.0.tx().tx()), _ => None, } } @@ -232,7 +232,7 @@ impl PooledTransactionsElement { /// This is also commonly referred to as the "Blob Gas Fee Cap" (`BlobGasFeeCap`). 
pub const fn max_fee_per_blob_gas(&self) -> Option { match self { - Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_fee_per_blob_gas), + Self::BlobTransaction(tx) => Some(tx.0.tx().tx.max_fee_per_blob_gas), _ => None, } } @@ -246,7 +246,7 @@ impl PooledTransactionsElement { Self::Legacy(_) | Self::Eip2930(_) => None, Self::Eip1559(tx) => Some(tx.tx().max_priority_fee_per_gas), Self::Eip7702(tx) => Some(tx.tx().max_priority_fee_per_gas), - Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_priority_fee_per_gas), + Self::BlobTransaction(tx) => Some(tx.0.tx().tx.max_priority_fee_per_gas), } } @@ -259,7 +259,7 @@ impl PooledTransactionsElement { Self::Eip2930(tx) => tx.tx().gas_price, Self::Eip1559(tx) => tx.tx().max_fee_per_gas, Self::Eip7702(tx) => tx.tx().max_fee_per_gas, - Self::BlobTransaction(tx) => tx.transaction.tx.max_fee_per_gas, + Self::BlobTransaction(tx) => tx.0.tx().tx.max_fee_per_gas, } } } @@ -362,9 +362,7 @@ impl Encodable2718 for PooledTransactionsElement { Self::Eip2930(tx) => tx.eip2718_encoded_length(), Self::Eip1559(tx) => tx.eip2718_encoded_length(), Self::Eip7702(tx) => tx.eip2718_encoded_length(), - Self::BlobTransaction(BlobTransaction { transaction, signature, .. }) => { - transaction.eip2718_encoded_length(signature) - } + Self::BlobTransaction(tx) => tx.eip2718_encoded_length(), } } @@ -374,9 +372,7 @@ impl Encodable2718 for PooledTransactionsElement { Self::Eip2930(tx) => tx.eip2718_encode(out), Self::Eip1559(tx) => tx.eip2718_encode(out), Self::Eip7702(tx) => tx.eip2718_encode(out), - Self::BlobTransaction(BlobTransaction { transaction, signature, .. }) => { - transaction.eip2718_encode(signature, out) - } + Self::BlobTransaction(tx) => tx.eip2718_encode(out), } } @@ -457,15 +453,22 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { // Attempt to create a `TransactionSigned` with arbitrary data. 
let tx_signed = TransactionSigned::arbitrary(u)?; // Attempt to create a `PooledTransactionsElement` with arbitrary data, handling the Result. - match Self::try_from(tx_signed) { - Ok(Self::BlobTransaction(mut tx)) => { - // Successfully converted to a BlobTransaction, now generate a sidecar. - tx.transaction.sidecar = alloy_eips::eip4844::BlobTransactionSidecar::arbitrary(u)?; - Ok(Self::BlobTransaction(tx)) + match Self::try_from_broadcast(tx_signed) { + Ok(tx) => Ok(tx), + Err(tx) => { + let (tx, sig, hash) = tx.into_parts(); + match tx { + Transaction::Eip4844(tx) => { + let sidecar = BlobTransactionSidecar::arbitrary(u)?; + Ok(Self::BlobTransaction(BlobTransaction(Signed::new_unchecked( + TxEip4844WithSidecar { tx, sidecar }, + sig, + hash, + )))) + } + _ => Err(arbitrary::Error::IncorrectFormat), + } } - Ok(tx) => Ok(tx), // Successfully converted, but not a BlobTransaction. - Err(_) => Err(arbitrary::Error::IncorrectFormat), /* Conversion failed, return an - * arbitrary error. */ } } } diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index ec8c9b7f0eb..5eeeef09fc3 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,9 +1,9 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] use crate::{Transaction, TransactionSigned}; -use alloy_consensus::{transaction::RlpEcdsaTx, TxEip4844WithSidecar}; +use alloy_consensus::{transaction::RlpEcdsaTx, Signed, TxEip4844WithSidecar}; use alloy_eips::eip4844::BlobTransactionSidecar; -use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; +use derive_more::Deref; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their @@ -11,16 +11,8 @@ use serde::{Deserialize, Serialize}; /// /// This is defined in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#networking) as an element /// of a `PooledTransactions` response. 
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlobTransaction { - /// The transaction hash. - pub hash: TxHash, - /// The transaction signature. - pub signature: Signature, - /// The transaction payload with the sidecar. - #[serde(flatten)] - pub transaction: TxEip4844WithSidecar, -} +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Deref)] +pub struct BlobTransaction(pub Signed); impl BlobTransaction { /// Constructs a new [`BlobTransaction`] from a [`TransactionSigned`] and a @@ -34,11 +26,11 @@ impl BlobTransaction { let hash = tx.hash(); let TransactionSigned { transaction, signature, .. } = tx; match transaction { - Transaction::Eip4844(transaction) => Ok(Self { - hash, - transaction: TxEip4844WithSidecar { tx: transaction, sidecar }, + Transaction::Eip4844(transaction) => Ok(Self(Signed::new_unchecked( + TxEip4844WithSidecar { tx: transaction, sidecar }, signature, - }), + hash, + ))), transaction => { let tx = TransactionSigned { transaction, signature, hash: hash.into() }; Err((tx, sidecar)) @@ -54,19 +46,16 @@ impl BlobTransaction { &self, proof_settings: &c_kzg::KzgSettings, ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { - self.transaction.validate_blob(proof_settings) + self.tx().validate_blob(proof_settings) } /// Splits the [`BlobTransaction`] into its [`TransactionSigned`] and [`BlobTransactionSidecar`] /// components. pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) { - let transaction = TransactionSigned { - transaction: Transaction::Eip4844(self.transaction.tx), - hash: self.hash.into(), - signature: self.signature, - }; - - (transaction, self.transaction.sidecar) + let (transaction, signature, hash) = self.0.into_parts(); + let (transaction, sidecar) = transaction.into_parts(); + let transaction = TransactionSigned::new(transaction.into(), signature, hash); + (transaction, sidecar) } /// Decodes a [`BlobTransaction`] from RLP. 
This expects the encoding to be: @@ -80,8 +69,7 @@ impl BlobTransaction { pub(crate) fn decode_inner(data: &mut &[u8]) -> alloy_rlp::Result { let (transaction, signature, hash) = TxEip4844WithSidecar::rlp_decode_signed(data)?.into_parts(); - - Ok(Self { transaction, hash, signature }) + Ok(Self(Signed::new_unchecked(transaction, signature, hash))) } } diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index f92bd075a3b..ee2b3ed5e7c 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -87,7 +87,7 @@ where .iter() .filter_map(|(tx, _)| { if let PooledTransactionsElement::BlobTransaction(tx) = tx { - Some(tx.transaction.tx.blob_gas()) + Some(tx.tx().tx().blob_gas()) } else { None } From 3384c84f6fec12d71d8714326f93a2a250d9e82a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 14:47:40 +0100 Subject: [PATCH 632/970] chore: more useful tx conversions (#12778) --- crates/primitives/src/transaction/mod.rs | 25 ++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 2e274311a03..4091e51f3ed 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -3,7 +3,7 @@ use alloc::vec::Vec; use alloy_consensus::{ transaction::RlpEcdsaTx, SignableTransaction, Signed, Transaction as _, TxEip1559, TxEip2930, - TxEip4844, TxEip7702, TxLegacy, + TxEip4844, TxEip4844Variant, TxEip7702, TxLegacy, TypedTransaction, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, @@ -833,6 +833,27 @@ impl alloy_consensus::Transaction for Transaction { } } +impl From for Transaction { + fn from(value: TxEip4844Variant) -> Self { + match value { + TxEip4844Variant::TxEip4844(tx) => tx.into(), + TxEip4844Variant::TxEip4844WithSidecar(tx) => tx.tx.into(), + } + } +} + +impl From for Transaction { + fn from(value: 
TypedTransaction) -> Self { + match value { + TypedTransaction::Legacy(tx) => tx.into(), + TypedTransaction::Eip2930(tx) => tx.into(), + TypedTransaction::Eip1559(tx) => tx.into(), + TypedTransaction::Eip4844(tx) => tx.into(), + TypedTransaction::Eip7702(tx) => tx.into(), + } + } +} + /// Signed transaction without its Hash. Used type for inserting into the DB. /// /// This can by converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`]. @@ -1651,7 +1672,7 @@ macro_rules! impl_from_signed { }; } -impl_from_signed!(TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844); +impl_from_signed!(TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844, TypedTransaction); impl From> for TransactionSigned { fn from(value: Signed) -> Self { From 87ecb434135294565b51a0ddc36d9aab146a9e23 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Fri, 22 Nov 2024 07:52:08 -0600 Subject: [PATCH 633/970] replace BlockWithSenders with fn (#12695) --- crates/chain-state/src/in_memory.rs | 2 +- crates/primitives/src/block.rs | 11 ++++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index e07eaeaa5d9..24f394a761f 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -619,7 +619,7 @@ impl BlockState { pub fn block_with_senders(&self) -> BlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); - BlockWithSenders { block: block.unseal(), senders } + BlockWithSenders::new_unchecked(block.unseal(), senders) } /// Returns the sealed block with senders for the state. 
diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 57c63d53a43..a93b1cf538a 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -73,7 +73,7 @@ impl Block { senders }; - Ok(BlockWithSenders { block: self, senders }) + Ok(BlockWithSenders::new_unchecked(self, senders)) } /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained @@ -82,7 +82,7 @@ impl Block { /// Returns `None` if a transaction is invalid. pub fn with_recovered_senders(self) -> Option { let senders = self.senders()?; - Some(BlockWithSenders { block: self, senders }) + Some(BlockWithSenders::new_unchecked(self, senders)) } } @@ -214,6 +214,11 @@ pub struct BlockWithSenders { } impl BlockWithSenders { + /// New block with senders + pub const fn new_unchecked(block: Block, senders: Vec
) -> Self { + Self { block, senders } + } + /// New block with senders. Return none if len of tx and senders does not match pub fn new(block: Block, senders: Vec
) -> Option { (block.body.transactions.len() == senders.len()).then_some(Self { block, senders }) @@ -527,7 +532,7 @@ impl SealedBlockWithSenders { #[inline] pub fn unseal(self) -> BlockWithSenders { let Self { block, senders } = self; - BlockWithSenders { block: block.unseal(), senders } + BlockWithSenders::new_unchecked(block.unseal(), senders) } /// Returns an iterator over all transactions in the block. From 64728e0856dc4267ac7f5a65567f3fde9fc559b8 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Fri, 22 Nov 2024 21:15:43 +0700 Subject: [PATCH 634/970] refactor: simplify withdrawals outcome (#12780) --- crates/ethereum/payload/src/lib.rs | 12 ++++--- crates/optimism/payload/src/builder.rs | 24 ++++++-------- crates/payload/basic/src/lib.rs | 43 ++++---------------------- 3 files changed, 23 insertions(+), 56 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index ac6427caf36..f38c93613dc 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -14,7 +14,7 @@ use alloy_eips::{eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::Requests, merge::BEAC use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, - PayloadConfig, WithdrawalsOutcome, + PayloadConfig, }; use reth_chain_state::ExecutedBlock; use reth_chainspec::ChainSpec; @@ -356,8 +356,8 @@ where None }; - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals)?; + let withdrawals_root = + commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, &attributes.withdrawals)?; // merge all transitions into bundle state, this would apply the withdrawal balance changes // and 4788 contract call @@ -443,7 +443,11 @@ where // seal the block let block = Block { header, - body: BlockBody { transactions: executed_txs, 
ommers: vec![], withdrawals }, + body: BlockBody { + transactions: executed_txs, + ommers: vec![], + withdrawals: Some(attributes.withdrawals.clone()), + }, }; let sealed_block = Arc::new(block.seal_slow()); diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 5926cfd34c5..eaf9e86e773 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -4,7 +4,7 @@ use std::{fmt::Display, sync::Arc}; use alloy_consensus::{Header, Transaction, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::merge::BEACON_NONCE; -use alloy_primitives::{Address, Bytes, U256}; +use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; @@ -318,13 +318,13 @@ where } } - let withdrawals_outcome = ctx.commit_withdrawals(state)?; + let withdrawals_root = ctx.commit_withdrawals(state)?; // merge all transitions into bundle state, this would apply the withdrawal balance changes // and 4788 contract call state.merge_transitions(BundleRetention::Reverts); - Ok(BuildOutcomeKind::Better { payload: ExecutedPayload { info, withdrawals_outcome } }) + Ok(BuildOutcomeKind::Better { payload: ExecutedPayload { info, withdrawals_root } }) } /// Builds the payload on top of the state. @@ -338,10 +338,7 @@ where DB: Database + AsRef

, P: StateRootProvider, { - let ExecutedPayload { - info, - withdrawals_outcome: WithdrawalsOutcome { withdrawals, withdrawals_root }, - } = match self.execute(&mut state, &ctx)? { + let ExecutedPayload { info, withdrawals_root } = match self.execute(&mut state, &ctx)? { BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, BuildOutcomeKind::Cancelled => return Ok(BuildOutcomeKind::Cancelled), BuildOutcomeKind::Aborted { fees } => return Ok(BuildOutcomeKind::Aborted { fees }), @@ -419,7 +416,7 @@ where body: BlockBody { transactions: info.executed_transactions, ommers: vec![], - withdrawals, + withdrawals: Some(ctx.attributes().payload_attributes.withdrawals.clone()), }, }; @@ -501,8 +498,8 @@ impl OpPayloadTransactions for () { pub struct ExecutedPayload { /// Tracked execution info pub info: ExecutionInfo, - /// Outcome after committing withdrawals. - pub withdrawals_outcome: WithdrawalsOutcome, + /// Withdrawal hash. + pub withdrawals_root: Option, } /// This acts as the container for executed transactions and its byproducts (receipts, gas used) @@ -652,10 +649,7 @@ impl OpPayloadBuilderCtx { } /// Commits the withdrawals from the payload attributes to the state. - pub fn commit_withdrawals( - &self, - db: &mut State, - ) -> Result + pub fn commit_withdrawals(&self, db: &mut State) -> Result, ProviderError> where DB: Database, { @@ -663,7 +657,7 @@ impl OpPayloadBuilderCtx { db, &self.chain_spec, self.attributes().payload_attributes.timestamp, - self.attributes().payload_attributes.withdrawals.clone(), + &self.attributes().payload_attributes.withdrawals, ) } diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index e3193ec6deb..0ab411d3e60 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -979,31 +979,6 @@ impl Default for MissingPayloadBehaviour { } } -/// Represents the outcome of committing withdrawals to the runtime database and post state. 
-/// Pre-shanghai these are `None` values. -#[derive(Default, Debug)] -pub struct WithdrawalsOutcome { - /// committed withdrawals, if any. - pub withdrawals: Option, - /// withdrawals root if any. - pub withdrawals_root: Option, -} - -impl WithdrawalsOutcome { - /// No withdrawals pre shanghai - pub const fn pre_shanghai() -> Self { - Self { withdrawals: None, withdrawals_root: None } - } - - /// No withdrawals - pub fn empty() -> Self { - Self { - withdrawals: Some(Withdrawals::default()), - withdrawals_root: Some(EMPTY_WITHDRAWALS), - } - } -} - /// Executes the withdrawals and commits them to the _runtime_ Database and `BundleState`. /// /// Returns the withdrawals root. @@ -1013,32 +988,26 @@ pub fn commit_withdrawals( db: &mut State, chain_spec: &ChainSpec, timestamp: u64, - withdrawals: Withdrawals, -) -> Result + withdrawals: &Withdrawals, +) -> Result, DB::Error> where DB: Database, ChainSpec: EthereumHardforks, { if !chain_spec.is_shanghai_active_at_timestamp(timestamp) { - return Ok(WithdrawalsOutcome::pre_shanghai()) + return Ok(None) } if withdrawals.is_empty() { - return Ok(WithdrawalsOutcome::empty()) + return Ok(Some(EMPTY_WITHDRAWALS)) } let balance_increments = - post_block_withdrawals_balance_increments(chain_spec, timestamp, &withdrawals); + post_block_withdrawals_balance_increments(chain_spec, timestamp, withdrawals); db.increment_balances(balance_increments)?; - let withdrawals_root = proofs::calculate_withdrawals_root(&withdrawals); - - // calculate withdrawals root - Ok(WithdrawalsOutcome { - withdrawals: Some(withdrawals), - withdrawals_root: Some(withdrawals_root), - }) + Ok(Some(proofs::calculate_withdrawals_root(withdrawals))) } /// Checks if the new payload is better than the current best. 
From f2860006f7692e833700b1e95a7a579659cf2243 Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Fri, 22 Nov 2024 21:28:59 +0700 Subject: [PATCH 635/970] chore: replace TransactionSigned struct inits with new functions (#12779) --- .../network/tests/it/big_pooled_txs_req.rs | 7 +-- crates/primitives/src/transaction/sidecar.rs | 2 +- crates/rpc/rpc/src/eth/core.rs | 28 +++++----- .../stages/src/stages/sender_recovery.rs | 6 +-- .../src/providers/database/provider.rs | 23 +++------ .../storage/provider/src/test_utils/blocks.rs | 20 ++++---- .../transaction-pool/src/blobstore/tracker.rs | 51 ++++++++++--------- .../transaction-pool/src/test_utils/mock.rs | 7 +-- 8 files changed, 63 insertions(+), 81 deletions(-) diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 9e0f69160b6..328229e87e1 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -26,11 +26,8 @@ async fn test_large_tx_req() { // replace rng txhash with real txhash let mut tx = MockTransaction::eip1559(); - let ts = TransactionSigned { - hash: Default::default(), - signature: Signature::test_signature(), - transaction: tx.clone().into(), - }; + let ts = + TransactionSigned::new_unhashed(tx.clone().into(), Signature::test_signature()); tx.set_hash(ts.recalculate_hash()); tx }) diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 5eeeef09fc3..c1b1b029afc 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -32,7 +32,7 @@ impl BlobTransaction { hash, ))), transaction => { - let tx = TransactionSigned { transaction, signature, hash: hash.into() }; + let tx = TransactionSigned::new(transaction, signature, hash); Err((tx, sidecar)) } } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 
d6c8f522cda..dac37152942 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -407,7 +407,7 @@ impl EthApiInner DatabaseProvider { let body = transactions .into_iter() .map(|tx| match transaction_kind { - TransactionVariant::NoHash => TransactionSigned { - // Caller explicitly asked for no hash, so we don't calculate it - hash: Default::default(), - signature: tx.signature, - transaction: tx.transaction, - }, + TransactionVariant::NoHash => { + TransactionSigned::new_unhashed(tx.transaction, tx.signature) + } TransactionVariant::WithHash => tx.with_hash(), }) .collect(); @@ -1499,11 +1496,9 @@ impl> Transaction fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { - Ok(self.transaction_by_id_no_hash(id)?.map(|tx| TransactionSigned { - hash: hash.into(), - signature: tx.signature, - transaction: tx.transaction, - })) + Ok(self + .transaction_by_id_no_hash(id)? + .map(|tx| TransactionSigned::new(tx.transaction, tx.signature, hash))) } else { Ok(None) } @@ -1517,11 +1512,7 @@ impl> Transaction let mut transaction_cursor = self.tx.cursor_read::()?; if let Some(transaction_id) = self.transaction_id(tx_hash)? { if let Some(tx) = self.transaction_by_id_no_hash(transaction_id)? { - let transaction = TransactionSigned { - hash: tx_hash.into(), - signature: tx.signature, - transaction: tx.transaction, - }; + let transaction = TransactionSigned::new(tx.transaction, tx.signature, tx_hash); if let Some(block_number) = transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))? 
{ diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 2b8dc0f85ca..fdded2807aa 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -88,9 +88,14 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo hex!("cf7b274520720b50e6a4c3e5c4d553101f44945396827705518ce17cb7219a42").into(), ), body: BlockBody { - transactions: vec![TransactionSigned { - hash: b256!("3541dd1d17e76adeb25dcf2b0a9b60a1669219502e58dcf26a2beafbfb550397").into(), - signature: Signature::new( + transactions: vec![TransactionSigned::new( + Transaction::Legacy(TxLegacy { + gas_price: 10, + gas_limit: 400_000, + to: TxKind::Call(hex!("095e7baea6a6c7c4c2dfeb977efac326af552d87").into()), + ..Default::default() + }), + Signature::new( U256::from_str( "51983300959770368863831494747186777928121405155922056726144551509338672451120", ) @@ -101,13 +106,8 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo .unwrap(), false, ), - transaction: Transaction::Legacy(TxLegacy { - gas_price: 10, - gas_limit: 400_000, - to: TxKind::Call(hex!("095e7baea6a6c7c4c2dfeb977efac326af552d87").into()), - ..Default::default() - }), - }], + b256!("3541dd1d17e76adeb25dcf2b0a9b60a1669219502e58dcf26a2beafbfb550397"), + )], ..Default::default() }, }); diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index d58abe9b462..0f48c89a499 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -82,6 +82,7 @@ pub enum BlobStoreUpdates { #[cfg(test)] mod tests { use alloy_consensus::Header; + use alloy_primitives::PrimitiveSignature as Signature; use reth_execution_types::Chain; use reth_primitives::{ BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, @@ -127,22 +128,22 @@ mod tests { ), body: BlockBody { transactions: vec![ - 
TransactionSigned { - hash: tx1_hash.into(), - transaction: Transaction::Eip4844(Default::default()), - ..Default::default() - }, - TransactionSigned { - hash: tx2_hash.into(), - transaction: Transaction::Eip4844(Default::default()), - ..Default::default() - }, + TransactionSigned::new( + Transaction::Eip4844(Default::default()), + Signature::test_signature(), + tx1_hash, + ), + TransactionSigned::new( + Transaction::Eip4844(Default::default()), + Signature::test_signature(), + tx2_hash, + ), // Another transaction that is not EIP-4844 - TransactionSigned { - hash: B256::random().into(), - transaction: Transaction::Eip7702(Default::default()), - ..Default::default() - }, + TransactionSigned::new( + Transaction::Eip7702(Default::default()), + Signature::test_signature(), + B256::random(), + ), ], ..Default::default() }, @@ -160,16 +161,16 @@ mod tests { ), body: BlockBody { transactions: vec![ - TransactionSigned { - hash: tx3_hash.into(), - transaction: Transaction::Eip1559(Default::default()), - ..Default::default() - }, - TransactionSigned { - hash: tx2_hash.into(), - transaction: Transaction::Eip2930(Default::default()), - ..Default::default() - }, + TransactionSigned::new( + Transaction::Eip1559(Default::default()), + Signature::test_signature(), + tx3_hash, + ), + TransactionSigned::new( + Transaction::Eip2930(Default::default()), + Signature::test_signature(), + tx2_hash, + ), ], ..Default::default() }, diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 009543642ff..849bde26548 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -910,11 +910,8 @@ impl From for MockTransaction { impl From for TransactionSignedEcRecovered { fn from(tx: MockTransaction) -> Self { - let signed_tx = TransactionSigned { - hash: (*tx.hash()).into(), - signature: Signature::test_signature(), - transaction: tx.clone().into(), - }; + let signed_tx = + 
TransactionSigned::new(tx.clone().into(), Signature::test_signature(), *tx.hash()); Self::from_signed_transaction(signed_tx, tx.sender()) } From 9a2eacdb9c929c4e25fe7e2b657303124928cce1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 15:53:14 +0100 Subject: [PATCH 636/970] chore: move traits used by legacy engine to separate mod (#12784) --- crates/storage/storage-api/src/legacy.rs | 83 ++++++++++++++++++++++++ crates/storage/storage-api/src/lib.rs | 3 + crates/storage/storage-api/src/state.rs | 79 +--------------------- 3 files changed, 88 insertions(+), 77 deletions(-) create mode 100644 crates/storage/storage-api/src/legacy.rs diff --git a/crates/storage/storage-api/src/legacy.rs b/crates/storage/storage-api/src/legacy.rs new file mode 100644 index 00000000000..e53a5d8bfa2 --- /dev/null +++ b/crates/storage/storage-api/src/legacy.rs @@ -0,0 +1,83 @@ +//! Traits used by the legacy execution engine. +//! +//! This module is scheduled for removal in the future. + +use alloy_eips::BlockNumHash; +use alloy_primitives::{BlockHash, BlockNumber}; +use auto_impl::auto_impl; +use reth_execution_types::ExecutionOutcome; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; + +/// Blockchain trait provider that gives access to the blockchain state that is not yet committed +/// (pending). +pub trait BlockchainTreePendingStateProvider: Send + Sync { + /// Returns a state provider that includes all state changes of the given (pending) block hash. + /// + /// In other words, the state provider will return the state after all transactions of the given + /// hash have been executed. + fn pending_state_provider( + &self, + block_hash: BlockHash, + ) -> ProviderResult> { + self.find_pending_state_provider(block_hash) + .ok_or(ProviderError::StateForHashNotFound(block_hash)) + } + + /// Returns state provider if a matching block exists. 
+ fn find_pending_state_provider( + &self, + block_hash: BlockHash, + ) -> Option>; +} + +/// Provides data required for post-block execution. +/// +/// This trait offers methods to access essential post-execution data, including the state changes +/// in accounts and storage, as well as block hashes for both the pending and canonical chains. +/// +/// The trait includes: +/// * [`ExecutionOutcome`] - Captures all account and storage changes in the pending chain. +/// * Block hashes - Provides access to the block hashes of both the pending chain and canonical +/// blocks. +#[auto_impl(&, Box)] +pub trait ExecutionDataProvider: Send + Sync { + /// Return the execution outcome. + fn execution_outcome(&self) -> &ExecutionOutcome; + /// Return block hash by block number of pending or canonical chain. + fn block_hash(&self, block_number: BlockNumber) -> Option; +} + +impl ExecutionDataProvider for ExecutionOutcome { + fn execution_outcome(&self) -> &ExecutionOutcome { + self + } + + /// Always returns [None] because we don't have any information about the block header. + fn block_hash(&self, _block_number: BlockNumber) -> Option { + None + } +} + +/// Fork data needed for execution on it. +/// +/// It contains a canonical fork, the block on what pending chain was forked from. +#[auto_impl(&, Box)] +pub trait BlockExecutionForkProvider { + /// Return canonical fork, the block on what post state was forked from. + /// + /// Needed to create state provider. + fn canonical_fork(&self) -> BlockNumHash; +} + +/// Provides comprehensive post-execution state data required for further execution. +/// +/// This trait is used to create a state provider over the pending state and is a combination of +/// [`ExecutionDataProvider`] and [`BlockExecutionForkProvider`]. +/// +/// The pending state includes: +/// * `ExecutionOutcome`: Contains all changes to accounts and storage within the pending chain. 
+/// * Block hashes: Represents hashes of both the pending chain and canonical blocks. +/// * Canonical fork: Denotes the block from which the pending chain forked. +pub trait FullExecutionDataProvider: ExecutionDataProvider + BlockExecutionForkProvider {} + +impl FullExecutionDataProvider for T where T: ExecutionDataProvider + BlockExecutionForkProvider {} diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index be52a817e93..de09e66f128 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -67,3 +67,6 @@ mod hashing; pub use hashing::*; mod stats; pub use stats::*; + +mod legacy; +pub use legacy::*; diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index d37940f0478..3174489fc4a 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -3,12 +3,11 @@ use super::{ StorageRootProvider, }; use alloy_consensus::constants::KECCAK_EMPTY; -use alloy_eips::{BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue, B256, U256}; use auto_impl::auto_impl; -use reth_execution_types::ExecutionOutcome; use reth_primitives::Bytecode; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_storage_errors::provider::ProviderResult; /// Type alias of boxed [`StateProvider`]. pub type StateProviderBox = Box; @@ -167,77 +166,3 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { /// If the block couldn't be found, returns `None`. fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult>; } - -/// Blockchain trait provider that gives access to the blockchain state that is not yet committed -/// (pending). 
-pub trait BlockchainTreePendingStateProvider: Send + Sync { - /// Returns a state provider that includes all state changes of the given (pending) block hash. - /// - /// In other words, the state provider will return the state after all transactions of the given - /// hash have been executed. - fn pending_state_provider( - &self, - block_hash: BlockHash, - ) -> ProviderResult> { - self.find_pending_state_provider(block_hash) - .ok_or(ProviderError::StateForHashNotFound(block_hash)) - } - - /// Returns state provider if a matching block exists. - fn find_pending_state_provider( - &self, - block_hash: BlockHash, - ) -> Option>; -} - -/// Provides data required for post-block execution. -/// -/// This trait offers methods to access essential post-execution data, including the state changes -/// in accounts and storage, as well as block hashes for both the pending and canonical chains. -/// -/// The trait includes: -/// * [`ExecutionOutcome`] - Captures all account and storage changes in the pending chain. -/// * Block hashes - Provides access to the block hashes of both the pending chain and canonical -/// blocks. -#[auto_impl(&, Box)] -pub trait ExecutionDataProvider: Send + Sync { - /// Return the execution outcome. - fn execution_outcome(&self) -> &ExecutionOutcome; - /// Return block hash by block number of pending or canonical chain. - fn block_hash(&self, block_number: BlockNumber) -> Option; -} - -impl ExecutionDataProvider for ExecutionOutcome { - fn execution_outcome(&self) -> &ExecutionOutcome { - self - } - - /// Always returns [None] because we don't have any information about the block header. - fn block_hash(&self, _block_number: BlockNumber) -> Option { - None - } -} - -/// Fork data needed for execution on it. -/// -/// It contains a canonical fork, the block on what pending chain was forked from. -#[auto_impl(&, Box)] -pub trait BlockExecutionForkProvider { - /// Return canonical fork, the block on what post state was forked from. 
- /// - /// Needed to create state provider. - fn canonical_fork(&self) -> BlockNumHash; -} - -/// Provides comprehensive post-execution state data required for further execution. -/// -/// This trait is used to create a state provider over the pending state and is a combination of -/// [`ExecutionDataProvider`] and [`BlockExecutionForkProvider`]. -/// -/// The pending state includes: -/// * `ExecutionOutcome`: Contains all changes to accounts and storage within the pending chain. -/// * Block hashes: Represents hashes of both the pending chain and canonical blocks. -/// * Canonical fork: Denotes the block from which the pending chain forked. -pub trait FullExecutionDataProvider: ExecutionDataProvider + BlockExecutionForkProvider {} - -impl FullExecutionDataProvider for T where T: ExecutionDataProvider + BlockExecutionForkProvider {} From cbd9d6dc05f4c79d871f76852533a217f8ffc137 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 16:11:03 +0100 Subject: [PATCH 637/970] chore: remove no hash usage in tests (#12782) --- crates/primitives/src/transaction/mod.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 4091e51f3ed..2c639c7ffeb 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1995,7 +1995,7 @@ pub mod serde_bincode_compat { mod tests { use crate::{ transaction::{TxEip1559, TxKind, TxLegacy}, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, + Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use alloy_consensus::Transaction as _; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; @@ -2378,17 +2378,17 @@ mod tests { input: Bytes::from(input), }); - let tx_signed_no_hash = TransactionSignedNoHash { signature, transaction }; - test_transaction_signed_to_from_compact(tx_signed_no_hash); + let tx = 
TransactionSigned::new_unhashed(transaction, signature); + test_transaction_signed_to_from_compact(tx); } } - fn test_transaction_signed_to_from_compact(tx_signed_no_hash: TransactionSignedNoHash) { + fn test_transaction_signed_to_from_compact(tx: TransactionSigned) { // zstd aware `to_compact` let mut buff: Vec = Vec::new(); - let written_bytes = tx_signed_no_hash.to_compact(&mut buff); - let (decoded, _) = TransactionSignedNoHash::from_compact(&buff, written_bytes); - assert_eq!(tx_signed_no_hash, decoded); + let written_bytes = tx.to_compact(&mut buff); + let (decoded, _) = TransactionSigned::from_compact(&buff, written_bytes); + assert_eq!(tx, decoded); } #[test] From ba1a1687b02902799a8486170ec15974a6be44ba Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 16:12:36 +0100 Subject: [PATCH 638/970] chore: simplify typed to signed tx (#12781) --- crates/rpc/rpc-eth-types/src/simulate.rs | 33 +++--------------------- 1 file changed, 3 insertions(+), 30 deletions(-) diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index b24ec9e86bc..5a0daa1b42f 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,6 +1,6 @@ //! 
Utilities for serving `eth_simulateV1` -use alloy_consensus::{Transaction as _, TxEip4844Variant, TxType, TypedTransaction}; +use alloy_consensus::{Transaction as _, TxType}; use alloy_primitives::PrimitiveSignature as Signature; use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, @@ -10,7 +10,7 @@ use alloy_rpc_types_eth::{ use jsonrpsee_types::ErrorObject; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, - BlockBody, BlockWithSenders, Receipt, Transaction, TransactionSigned, TransactionSignedNoHash, + BlockBody, BlockWithSenders, Receipt, TransactionSigned, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_server_types::result::rpc_err; @@ -135,34 +135,7 @@ where // Create an empty signature for the transaction. let signature = Signature::new(Default::default(), Default::default(), false); - - let tx = match tx { - TypedTransaction::Legacy(tx) => { - TransactionSignedNoHash { transaction: Transaction::Legacy(tx), signature } - .with_hash() - } - TypedTransaction::Eip2930(tx) => { - TransactionSignedNoHash { transaction: Transaction::Eip2930(tx), signature } - .with_hash() - } - TypedTransaction::Eip1559(tx) => { - TransactionSignedNoHash { transaction: Transaction::Eip1559(tx), signature } - .with_hash() - } - TypedTransaction::Eip4844(tx) => { - let tx = match tx { - TxEip4844Variant::TxEip4844(tx) => tx, - TxEip4844Variant::TxEip4844WithSidecar(tx) => tx.tx, - }; - TransactionSignedNoHash { transaction: Transaction::Eip4844(tx), signature } - .with_hash() - } - TypedTransaction::Eip7702(tx) => { - TransactionSignedNoHash { transaction: Transaction::Eip7702(tx), signature } - .with_hash() - } - }; - + let tx = TransactionSigned::new_unhashed(tx.into(), signature); transactions.push(tx); } From 7b156f058cf2b32659a85fd9c9955c9cee94214c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 16:29:04 +0100 Subject: [PATCH 639/970] feat: impl alloy transaction for pooled 
tx (#12787) --- crates/primitives/src/transaction/pooled.rs | 229 +++++++++++++++----- 1 file changed, 178 insertions(+), 51 deletions(-) diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index c6ab623829b..bb840614703 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -6,13 +6,17 @@ use crate::{BlobTransaction, Transaction, TransactionSigned, TransactionSignedEc use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - Signed, Transaction as _, TxEip4844WithSidecar, + Signed, TxEip4844WithSidecar, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Result, Encodable2718}, + eip2930::AccessList, eip4844::BlobTransactionSidecar, + eip7702::SignedAuthorization, +}; +use alloy_primitives::{ + Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, }; -use alloy_primitives::{Address, PrimitiveSignature as Signature, TxHash, B256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; use derive_more::{AsRef, Deref}; @@ -124,17 +128,6 @@ impl PooledTransactionsElement { } } - /// Returns the transaction nonce. - pub fn nonce(&self) -> u64 { - match self { - Self::Legacy(tx) => tx.tx().nonce(), - Self::Eip2930(tx) => tx.tx().nonce(), - Self::Eip1559(tx) => tx.tx().nonce(), - Self::Eip7702(tx) => tx.tx().nonce(), - Self::BlobTransaction(tx) => tx.tx().nonce(), - } - } - /// Recover signer from signature and hash. /// /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`]. @@ -224,44 +217,6 @@ impl PooledTransactionsElement { pub fn blob_gas_used(&self) -> Option { self.as_eip4844().map(TxEip4844::blob_gas) } - - /// Max fee per blob gas for eip4844 transaction [`TxEip4844`]. - /// - /// Returns `None` for non-eip4844 transactions. 
- /// - /// This is also commonly referred to as the "Blob Gas Fee Cap" (`BlobGasFeeCap`). - pub const fn max_fee_per_blob_gas(&self) -> Option { - match self { - Self::BlobTransaction(tx) => Some(tx.0.tx().tx.max_fee_per_blob_gas), - _ => None, - } - } - - /// Max priority fee per gas for eip1559 transaction, for legacy and eip2930 transactions this - /// is `None` - /// - /// This is also commonly referred to as the "Gas Tip Cap" (`GasTipCap`). - pub const fn max_priority_fee_per_gas(&self) -> Option { - match self { - Self::Legacy(_) | Self::Eip2930(_) => None, - Self::Eip1559(tx) => Some(tx.tx().max_priority_fee_per_gas), - Self::Eip7702(tx) => Some(tx.tx().max_priority_fee_per_gas), - Self::BlobTransaction(tx) => Some(tx.0.tx().tx.max_priority_fee_per_gas), - } - } - - /// Max fee per gas for eip1559 transaction, for legacy transactions this is `gas_price`. - /// - /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). - pub const fn max_fee_per_gas(&self) -> u128 { - match self { - Self::Legacy(tx) => tx.tx().gas_price, - Self::Eip2930(tx) => tx.tx().gas_price, - Self::Eip1559(tx) => tx.tx().max_fee_per_gas, - Self::Eip7702(tx) => tx.tx().max_fee_per_gas, - Self::BlobTransaction(tx) => tx.0.tx().tx.max_fee_per_gas, - } - } } impl Encodable for PooledTransactionsElement { @@ -432,6 +387,178 @@ impl Decodable2718 for PooledTransactionsElement { } } +impl alloy_consensus::Transaction for PooledTransactionsElement { + fn chain_id(&self) -> Option { + match self { + Self::Legacy(tx) => tx.tx().chain_id(), + Self::Eip2930(tx) => tx.tx().chain_id(), + Self::Eip1559(tx) => tx.tx().chain_id(), + Self::Eip7702(tx) => tx.tx().chain_id(), + Self::BlobTransaction(tx) => tx.tx().chain_id(), + } + } + + fn nonce(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().nonce(), + Self::Eip2930(tx) => tx.tx().nonce(), + Self::Eip1559(tx) => tx.tx().nonce(), + Self::Eip7702(tx) => tx.tx().nonce(), + Self::BlobTransaction(tx) => tx.tx().nonce(), + } + } + 
+ fn gas_limit(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().gas_limit(), + Self::Eip2930(tx) => tx.tx().gas_limit(), + Self::Eip1559(tx) => tx.tx().gas_limit(), + Self::Eip7702(tx) => tx.tx().gas_limit(), + Self::BlobTransaction(tx) => tx.tx().gas_limit(), + } + } + + fn gas_price(&self) -> Option { + match self { + Self::Legacy(tx) => tx.tx().gas_price(), + Self::Eip2930(tx) => tx.tx().gas_price(), + Self::Eip1559(tx) => tx.tx().gas_price(), + Self::Eip7702(tx) => tx.tx().gas_price(), + Self::BlobTransaction(tx) => tx.tx().gas_price(), + } + } + + fn max_fee_per_gas(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_gas(), + Self::BlobTransaction(tx) => tx.tx().max_fee_per_gas(), + } + } + + fn max_priority_fee_per_gas(&self) -> Option { + match self { + Self::Legacy(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_priority_fee_per_gas(), + Self::BlobTransaction(tx) => tx.tx().max_priority_fee_per_gas(), + } + } + + fn max_fee_per_blob_gas(&self) -> Option { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_blob_gas(), + Self::BlobTransaction(tx) => tx.tx().max_fee_per_blob_gas(), + } + } + + fn priority_fee_or_price(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().priority_fee_or_price(), + Self::Eip2930(tx) => tx.tx().priority_fee_or_price(), + Self::Eip1559(tx) => tx.tx().priority_fee_or_price(), + Self::Eip7702(tx) => tx.tx().priority_fee_or_price(), + Self::BlobTransaction(tx) => tx.tx().priority_fee_or_price(), + } + } + + fn 
effective_gas_price(&self, base_fee: Option) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.tx().effective_gas_price(base_fee), + Self::BlobTransaction(tx) => tx.tx().effective_gas_price(base_fee), + } + } + + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(tx) => tx.tx().is_dynamic_fee(), + Self::Eip2930(tx) => tx.tx().is_dynamic_fee(), + Self::Eip1559(tx) => tx.tx().is_dynamic_fee(), + Self::Eip7702(tx) => tx.tx().is_dynamic_fee(), + Self::BlobTransaction(tx) => tx.tx().is_dynamic_fee(), + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.tx().kind(), + Self::Eip2930(tx) => tx.tx().kind(), + Self::Eip1559(tx) => tx.tx().kind(), + Self::Eip7702(tx) => tx.tx().kind(), + Self::BlobTransaction(tx) => tx.tx().kind(), + } + } + + fn value(&self) -> U256 { + match self { + Self::Legacy(tx) => tx.tx().value(), + Self::Eip2930(tx) => tx.tx().value(), + Self::Eip1559(tx) => tx.tx().value(), + Self::Eip7702(tx) => tx.tx().value(), + Self::BlobTransaction(tx) => tx.tx().value(), + } + } + + fn input(&self) -> &Bytes { + match self { + Self::Legacy(tx) => tx.tx().input(), + Self::Eip2930(tx) => tx.tx().input(), + Self::Eip1559(tx) => tx.tx().input(), + Self::Eip7702(tx) => tx.tx().input(), + Self::BlobTransaction(tx) => tx.tx().input(), + } + } + + fn ty(&self) -> u8 { + match self { + Self::Legacy(tx) => tx.tx().ty(), + Self::Eip2930(tx) => tx.tx().ty(), + Self::Eip1559(tx) => tx.tx().ty(), + Self::Eip7702(tx) => tx.tx().ty(), + Self::BlobTransaction(tx) => tx.tx().ty(), + } + } + + fn access_list(&self) -> Option<&AccessList> { + match self { + Self::Legacy(tx) => tx.tx().access_list(), + Self::Eip2930(tx) => tx.tx().access_list(), + Self::Eip1559(tx) => tx.tx().access_list(), + Self::Eip7702(tx) => tx.tx().access_list(), + 
Self::BlobTransaction(tx) => tx.tx().access_list(), + } + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + match self { + Self::Legacy(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip2930(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip1559(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip7702(tx) => tx.tx().blob_versioned_hashes(), + Self::BlobTransaction(tx) => tx.tx().blob_versioned_hashes(), + } + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + match self { + Self::Legacy(tx) => tx.tx().authorization_list(), + Self::Eip2930(tx) => tx.tx().authorization_list(), + Self::Eip1559(tx) => tx.tx().authorization_list(), + Self::Eip7702(tx) => tx.tx().authorization_list(), + Self::BlobTransaction(tx) => tx.tx().authorization_list(), + } + } +} + impl TryFrom for PooledTransactionsElement { type Error = TransactionConversionError; From 362e2ed0af251b4c90246cc78b0d0e8349d6c758 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 16:35:50 +0100 Subject: [PATCH 640/970] chore: rename transaction_by_id_no_hash fn (#12783) --- crates/stages/stages/src/stages/sender_recovery.rs | 2 +- .../storage/provider/src/providers/blockchain_provider.rs | 6 +++--- crates/storage/provider/src/providers/consistent.rs | 4 ++-- crates/storage/provider/src/providers/database/mod.rs | 6 +++--- .../storage/provider/src/providers/database/provider.rs | 8 ++++---- crates/storage/provider/src/providers/mod.rs | 4 ++-- crates/storage/provider/src/providers/static_file/jar.rs | 2 +- .../storage/provider/src/providers/static_file/manager.rs | 6 +++--- crates/storage/provider/src/test_utils/mock.rs | 2 +- crates/storage/provider/src/test_utils/noop.rs | 2 +- crates/storage/storage-api/src/transactions.rs | 2 +- 11 files changed, 22 insertions(+), 22 deletions(-) diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index 0b8e2faaebd..d611062b565 100644 --- 
a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -667,7 +667,7 @@ mod tests { while let Some((_, body)) = body_cursor.next()? { for tx_id in body.tx_num_range() { let transaction: TransactionSigned = provider - .transaction_by_id_no_hash(tx_id)? + .transaction_by_id_unhashed(tx_id)? .map(|tx| { TransactionSigned::new_unhashed(tx.transaction, tx.signature) }) diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 74009ffff59..744120dd0c0 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -339,11 +339,11 @@ impl TransactionsProvider for BlockchainProvider2 { self.consistent_provider()?.transaction_by_id(id) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, ) -> ProviderResult> { - self.consistent_provider()?.transaction_by_id_no_hash(id) + self.consistent_provider()?.transaction_by_id_unhashed(id) } fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { @@ -2588,7 +2588,7 @@ mod tests { ), ( ONE, - transaction_by_id_no_hash, + transaction_by_id_unhashed, |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( tx_num, Some(Into::::into( diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 3b2599f4999..7d52dfcc4bb 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -945,13 +945,13 @@ impl TransactionsProvider for ConsistentProvider { ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, ) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( id.into(), - |provider| provider.transaction_by_id_no_hash(id), + |provider| provider.transaction_by_id_unhashed(id), |tx_index, _, block_state| { 
Ok(block_state .block_ref() diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 7d94fb98a80..491c79d7aa6 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -433,15 +433,15 @@ impl TransactionsProvider for ProviderFactory { ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, ) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, - |static_file| static_file.transaction_by_id_no_hash(id), - || self.provider()?.transaction_by_id_no_hash(id), + |static_file| static_file.transaction_by_id_unhashed(id), + || self.provider()?.transaction_by_id_unhashed(id), ) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index bcb9fa415ec..279637abd84 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1482,14 +1482,14 @@ impl> Transaction ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, ) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, - |static_file| static_file.transaction_by_id_no_hash(id), + |static_file| static_file.transaction_by_id_unhashed(id), || Ok(self.tx.get::(id)?), ) } @@ -1497,7 +1497,7 @@ impl> Transaction fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { Ok(self - .transaction_by_id_no_hash(id)? + .transaction_by_id_unhashed(id)? 
.map(|tx| TransactionSigned::new(tx.transaction, tx.signature, hash))) } else { Ok(None) @@ -1511,7 +1511,7 @@ impl> Transaction ) -> ProviderResult> { let mut transaction_cursor = self.tx.cursor_read::()?; if let Some(transaction_id) = self.transaction_id(tx_hash)? { - if let Some(tx) = self.transaction_by_id_no_hash(transaction_id)? { + if let Some(tx) = self.transaction_by_id_unhashed(transaction_id)? { let transaction = TransactionSigned::new(tx.transaction, tx.signature, tx_hash); if let Some(block_number) = transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))? diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index d530917909c..4d641bb290e 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -425,11 +425,11 @@ impl TransactionsProvider for BlockchainProvider { self.database.transaction_by_id(id) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, ) -> ProviderResult> { - self.database.transaction_by_id_no_hash(id) + self.database.transaction_by_id_unhashed(id) } fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index e87829b1133..b3ff20d9197 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -221,7 +221,7 @@ impl TransactionsProvider for StaticFileJarProvider<'_, N> { .map(|tx| tx.with_hash())) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, num: TxNumber, ) -> ProviderResult> { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index bee42fdac83..7bf0c49893e 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ 
b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1463,12 +1463,12 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, num: TxNumber, ) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) - .and_then(|provider| provider.transaction_by_id_no_hash(num)) + .and_then(|provider| provider.transaction_by_id_unhashed(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { Ok(None) @@ -1541,7 +1541,7 @@ impl TransactionsProvider for StaticFileProvider { } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - Ok(self.transaction_by_id_no_hash(id)?.and_then(|tx| tx.recover_signer())) + Ok(self.transaction_by_id_unhashed(id)?.and_then(|tx| tx.recover_signer())) } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 43bb1e80942..77a4b75a0e2 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -263,7 +263,7 @@ impl TransactionsProvider for MockEthProvider { Ok(transaction) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, ) -> ProviderResult> { diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index d12539a2c27..966bab5944c 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -200,7 +200,7 @@ impl TransactionsProvider for NoopProvider { Ok(None) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, _id: TxNumber, ) -> ProviderResult> { diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index f2c44e9e140..a639fcedde5 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -31,7 
+31,7 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; /// Get transaction by id without computing the hash. - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, ) -> ProviderResult>; From 7f95f1bf072a6d7a4804db7f4067fd397df86267 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 22 Nov 2024 19:52:51 +0400 Subject: [PATCH 641/970] feat: generic static file masks (#12785) --- crates/blockchain-tree/src/externals.rs | 4 +- crates/cli/commands/src/db/get.rs | 6 +- crates/stages/stages/src/stages/bodies.rs | 6 +- crates/storage/db/Cargo.toml | 1 - crates/storage/db/src/static_file/mask.rs | 69 ++++++++----------- crates/storage/db/src/static_file/masks.rs | 45 ++++++++---- crates/storage/db/src/static_file/mod.rs | 1 + .../provider/src/providers/static_file/jar.rs | 22 +++--- .../src/providers/static_file/manager.rs | 18 ++--- 9 files changed, 94 insertions(+), 78 deletions(-) diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index bf5a243a5a5..2b9dae9a3df 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -2,7 +2,7 @@ use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::Consensus; -use reth_db::{static_file::HeaderMask, tables}; +use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_node_types::{FullNodePrimitives, NodeTypesWithDB}; use reth_primitives::{BlockBody, StaticFileSegment}; @@ -85,7 +85,7 @@ impl TreeExternals { hashes.extend(range.clone().zip(static_file_provider.fetch_range_with_predicate( StaticFileSegment::Headers, range, - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::(number.into()), |_| true, )?)); } diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index e9fc034519f..8f9a5f1d322 100644 --- 
a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -2,7 +2,9 @@ use alloy_consensus::Header; use alloy_primitives::{hex, BlockHash}; use clap::Parser; use reth_db::{ - static_file::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, TransactionMask}, + static_file::{ + ColumnSelectorOne, ColumnSelectorTwo, HeaderWithHashMask, ReceiptMask, TransactionMask, + }, tables, RawKey, RawTable, Receipts, TableViewer, Transactions, }; use reth_db_api::table::{Decompress, DupSort, Table}; @@ -61,7 +63,7 @@ impl Command { Subcommand::StaticFile { segment, key, raw } => { let (key, mask): (u64, _) = match segment { StaticFileSegment::Headers => { - (table_key::(&key)?, >::MASK) + (table_key::(&key)?, >::MASK) } StaticFileSegment::Transactions => ( table_key::(&key)?, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index e541b908104..b90729c7131 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -523,9 +523,9 @@ mod tests { }, }; use alloy_consensus::Header; - use alloy_primitives::{BlockHash, BlockNumber, TxNumber, B256}; + use alloy_primitives::{BlockNumber, TxNumber, B256}; use futures_util::Stream; - use reth_db::{static_file::HeaderMask, tables}; + use reth_db::{static_file::HeaderWithHashMask, tables}; use reth_db_api::{ cursor::DbCursorRO, models::{StoredBlockBodyIndices, StoredBlockOmmers}, @@ -813,7 +813,7 @@ mod tests { for header in static_file_provider.fetch_range_iter( StaticFileSegment::Headers, *range.start()..*range.end() + 1, - |cursor, number| cursor.get_two::>(number.into()), + |cursor, number| cursor.get_two::>(number.into()), )? 
{ let (header, hash) = header?; self.headers.push_back(SealedHeader::new(header, hash)); diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index af72bc43f7e..5ff9fb43a3d 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -48,7 +48,6 @@ page_size = { version = "0.6.0", optional = true } thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true -paste.workspace = true rustc-hash = { workspace = true, optional = true } sysinfo = { version = "0.31", default-features = false, features = ["system"] } parking_lot = { workspace = true, optional = true } diff --git a/crates/storage/db/src/static_file/mask.rs b/crates/storage/db/src/static_file/mask.rs index f5d35a193d7..38831ea34ca 100644 --- a/crates/storage/db/src/static_file/mask.rs +++ b/crates/storage/db/src/static_file/mask.rs @@ -1,38 +1,5 @@ use reth_db_api::table::Decompress; -/// Generic Mask helper struct for selecting specific column values to read and decompress. -/// -/// #### Explanation: -/// -/// A `NippyJar` static file row can contain multiple column values. To specify the column values -/// to be read, a mask is utilized. -/// -/// For example, a static file with three columns, if the first and last columns are queried, the -/// mask `0b101` would be passed. To select only the second column, the mask `0b010` would be used. -/// -/// Since each static file has its own column distribution, different wrapper types are necessary. -/// For instance, `B256` might be the third column in the `Header` segment, while being the second -/// column in another segment. Hence, `Mask` would only be applicable to one of these -/// scenarios. -/// -/// Alongside, the column selector traits (eg. [`ColumnSelectorOne`]) this provides a structured way -/// to tie the types to be decoded to the mask necessary to query them. -#[derive(Debug)] -pub struct Mask(std::marker::PhantomData<(FIRST, SECOND, THIRD)>); - -macro_rules! 
add_segments { - ($($segment:tt),+) => { - paste::paste! { - $( - #[doc = concat!("Mask for ", stringify!($segment), " static file segment. See [`Mask`] for more.")] - #[derive(Debug)] - pub struct [<$segment Mask>](Mask); - )+ - } - }; -} -add_segments!(Header, Receipt, Transaction); - /// Trait for specifying a mask to select one column value. pub trait ColumnSelectorOne { /// First desired column value @@ -66,21 +33,45 @@ pub trait ColumnSelectorThree { #[macro_export] /// Add mask to select `N` column values from a specific static file segment row. macro_rules! add_static_file_mask { - ($mask_struct:tt, $type1:ty, $mask:expr) => { - impl ColumnSelectorOne for $mask_struct<$type1> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorOne for $mask_struct$(<$generic>)? + where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; const MASK: usize = $mask; } }; - ($mask_struct:tt, $type1:ty, $type2:ty, $mask:expr) => { - impl ColumnSelectorTwo for $mask_struct<$type1, $type2> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $type2:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorTwo for $mask_struct$(<$generic>)? 
+ where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type2: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; type SECOND = $type2; const MASK: usize = $mask; } }; - ($mask_struct:tt, $type1:ty, $type2:ty, $type3:ty, $mask:expr) => { - impl ColumnSelectorThree for $mask_struct<$type1, $type2, $type3> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $type2:ty, $type3:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorThree for $mask_struct$(<$generic>)? + where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type2: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type3: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; type SECOND = $type2; type THIRD = $type3; diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index 405606389ba..17833e7ee29 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -1,23 +1,44 @@ -use super::{ReceiptMask, TransactionMask}; use crate::{ add_static_file_mask, - static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask}, - HeaderTerminalDifficulties, RawValue, Receipts, Transactions, + static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo}, + HeaderTerminalDifficulties, }; -use alloy_consensus::Header; use alloy_primitives::BlockHash; use reth_db_api::table::Table; // HEADER MASKS -add_static_file_mask!(HeaderMask, Header, 0b001); -add_static_file_mask!(HeaderMask, ::Value, 0b010); -add_static_file_mask!(HeaderMask, BlockHash, 0b100); -add_static_file_mask!(HeaderMask, Header, BlockHash, 0b101); -add_static_file_mask!(HeaderMask, ::Value, BlockHash, 0b110); +add_static_file_mask! 
{ + #[doc = "Mask for selecting a single header from Headers static file segment"] + HeaderMask, H, 0b001 +} +add_static_file_mask! { + #[doc = "Mask for selecting a total difficulty value from Headers static file segment"] + TotalDifficultyMask, ::Value, 0b010 +} +add_static_file_mask! { + #[doc = "Mask for selecting a block hash value from Headers static file segment"] + BlockHashMask, BlockHash, 0b100 +} +add_static_file_mask! { + #[doc = "Mask for selecting a header along with block hash from Headers static file segment"] + HeaderWithHashMask, H, BlockHash, 0b101 +} +add_static_file_mask! { + #[doc = "Mask for selecting a total difficulty along with block hash from Headers static file segment"] + TDWithHashMask, + ::Value, + BlockHash, + 0b110 +} // RECEIPT MASKS -add_static_file_mask!(ReceiptMask, ::Value, 0b1); +add_static_file_mask! { + #[doc = "Mask for selecting a single receipt from Receipts static file segment"] + ReceiptMask, R, 0b1 +} // TRANSACTION MASKS -add_static_file_mask!(TransactionMask, ::Value, 0b1); -add_static_file_mask!(TransactionMask, RawValue<::Value>, 0b1); +add_static_file_mask! { + #[doc = "Mask for selecting a single transaction from Transactions static file segment"] + TransactionMask, T, 0b1 +} diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index 071835f566b..8491bd6ed77 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -17,6 +17,7 @@ use reth_primitives::{ }; mod masks; +pub use masks::*; /// Alias type for a map of [`StaticFileSegment`] and sorted lists of existing static file ranges. 
type SortedStaticFiles = diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index b3ff20d9197..9bde4a5f760 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -10,8 +10,10 @@ use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; -use reth_db::static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}; -use reth_db_api::models::CompactU256; +use reth_db::static_file::{ + BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, TDWithHashMask, + TotalDifficultyMask, TransactionMask, +}; use reth_node_types::NodePrimitives; use reth_primitives::{ Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, @@ -90,7 +92,7 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? .filter(|(_, hash)| hash == block_hash) .map(|(header, _)| header)) } @@ -102,13 +104,13 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { fn header_td(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(block_hash.into())? + .get_two::(block_hash.into())? .filter(|(_, hash)| hash == block_hash) .map(|(td, _)| td.into())) } fn header_td_by_number(&self, num: BlockNumber) -> ProviderResult> { - Ok(self.cursor()?.get_one::>(num.into())?.map(Into::into)) + Ok(self.cursor()?.get_one::(num.into())?.map(Into::into)) } fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { @@ -129,7 +131,7 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { Ok(self .cursor()? 
- .get_two::>(number.into())? + .get_two::>(number.into())? .map(|(header, hash)| SealedHeader::new(header, hash))) } @@ -145,7 +147,7 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { for number in range { if let Some((header, hash)) = - cursor.get_two::>(number.into())? + cursor.get_two::>(number.into())? { let sealed = SealedHeader::new(header, hash); if !predicate(&sealed) { @@ -160,7 +162,7 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { impl BlockHashReader for StaticFileJarProvider<'_, N> { fn block_hash(&self, number: u64) -> ProviderResult> { - self.cursor()?.get_one::>(number.into()) + self.cursor()?.get_one::(number.into()) } fn canonical_hashes_range( @@ -172,7 +174,7 @@ impl BlockHashReader for StaticFileJarProvider<'_, N> { let mut hashes = Vec::with_capacity((end - start) as usize); for number in start..end { - if let Some(hash) = cursor.get_one::>(number.into())? { + if let Some(hash) = cursor.get_one::(number.into())? { hashes.push(hash) } } @@ -200,7 +202,7 @@ impl BlockNumReader for StaticFileJarProvider<'_, N> { let mut cursor = self.cursor()?; Ok(cursor - .get_one::>((&hash).into())? + .get_one::((&hash).into())? 
.and_then(|res| (res == hash).then(|| cursor.number()).flatten())) } } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 7bf0c49893e..8ecc33240b4 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -19,14 +19,14 @@ use parking_lot::RwLock; use reth_chainspec::{ChainInfo, ChainSpecProvider}; use reth_db::{ lockfile::StorageLock, - static_file::{iter_static_files, HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}, + static_file::{ + iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, + StaticFileCursor, TDWithHashMask, TransactionMask, + }, tables, }; use reth_db_api::{ - cursor::DbCursorRO, - models::{CompactU256, StoredBlockBodyIndices}, - table::Table, - transaction::DbTx, + cursor::DbCursorRO, models::StoredBlockBodyIndices, table::Table, transaction::DbTx, }; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::NodePrimitives; @@ -1236,7 +1236,7 @@ impl HeaderProvider for StaticFileProvider { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? .and_then(|(header, hash)| { if &hash == block_hash { return Some(header) @@ -1262,7 +1262,7 @@ impl HeaderProvider for StaticFileProvider { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? - .get_two::>(block_hash.into())? + .get_two::(block_hash.into())? .and_then(|(td, hash)| (&hash == block_hash).then_some(td.0))) }) } @@ -1310,7 +1310,7 @@ impl HeaderProvider for StaticFileProvider { to_range(range), |cursor, number| { Ok(cursor - .get_two::>(number.into())? + .get_two::>(number.into())? 
.map(|(header, hash)| SealedHeader::new(header, hash))) }, predicate, @@ -1331,7 +1331,7 @@ impl BlockHashReader for StaticFileProvider { self.fetch_range_with_predicate( StaticFileSegment::Headers, start..end, - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::(number.into()), |_| true, ) } From 0416550c5374b511d49546dea76ea84c53a763a8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 16:53:42 +0100 Subject: [PATCH 642/970] chore: move traits for safe and finalized block read/write (#12786) --- .../provider/src/traits/finalized_block.rs | 23 ------------------- crates/storage/provider/src/traits/mod.rs | 3 --- crates/storage/storage-api/src/block.rs | 21 +++++++++++++++++ 3 files changed, 21 insertions(+), 26 deletions(-) delete mode 100644 crates/storage/provider/src/traits/finalized_block.rs diff --git a/crates/storage/provider/src/traits/finalized_block.rs b/crates/storage/provider/src/traits/finalized_block.rs deleted file mode 100644 index 98a6d9d0e34..00000000000 --- a/crates/storage/provider/src/traits/finalized_block.rs +++ /dev/null @@ -1,23 +0,0 @@ -use alloy_primitives::BlockNumber; -use reth_errors::ProviderResult; - -/// Functionality to read the last known chain blocks from the database. -pub trait ChainStateBlockReader: Send + Sync { - /// Returns the last finalized block number. - /// - /// If no finalized block has been written yet, this returns `None`. - fn last_finalized_block_number(&self) -> ProviderResult>; - /// Returns the last safe block number. - /// - /// If no safe block has been written yet, this returns `None`. - fn last_safe_block_number(&self) -> ProviderResult>; -} - -/// Functionality to write the last known chain blocks to the database. -pub trait ChainStateBlockWriter: Send + Sync { - /// Saves the given finalized block number in the DB. 
- fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; - - /// Saves the given safe block number in the DB. - fn save_safe_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; -} diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 69f053936bb..a772204d0c1 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -25,6 +25,3 @@ pub use full::{FullProvider, FullRpcProvider}; mod tree_viewer; pub use tree_viewer::TreeViewer; - -mod finalized_block; -pub use finalized_block::{ChainStateBlockReader, ChainStateBlockWriter}; diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 929f7ecca43..37c7857f1c2 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -267,3 +267,24 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns `None` if block is not found. fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; } + +/// Functionality to read the last known chain blocks from the database. +pub trait ChainStateBlockReader: Send + Sync { + /// Returns the last finalized block number. + /// + /// If no finalized block has been written yet, this returns `None`. + fn last_finalized_block_number(&self) -> ProviderResult>; + /// Returns the last safe block number. + /// + /// If no safe block has been written yet, this returns `None`. + fn last_safe_block_number(&self) -> ProviderResult>; +} + +/// Functionality to write the last known chain blocks to the database. +pub trait ChainStateBlockWriter: Send + Sync { + /// Saves the given finalized block number in the DB. + fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; + + /// Saves the given safe block number in the DB. 
+ fn save_safe_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; +} From 5b5e69b64e9934c8d76f2ba959933dec1f2d098f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 17:24:51 +0100 Subject: [PATCH 643/970] feat: add InMemorySize for more alloy types (#12788) --- crates/primitives-traits/src/size.rs | 41 ++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs index 7d83a8af8c4..da3b39888f4 100644 --- a/crates/primitives-traits/src/size.rs +++ b/crates/primitives-traits/src/size.rs @@ -1,3 +1,6 @@ +use alloy_consensus::{Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; +use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; + /// Trait for calculating a heuristic for the in-memory size of a struct. #[auto_impl::auto_impl(&, Arc, Box)] pub trait InMemorySize { @@ -5,8 +8,42 @@ pub trait InMemorySize { fn size(&self) -> usize; } -impl InMemorySize for alloy_consensus::Header { +impl InMemorySize for alloy_consensus::Signed { fn size(&self) -> usize { - self.size() + T::size(self.tx()) + core::mem::size_of::() + core::mem::size_of::() + } +} + +/// Implement `InMemorySize` for a type with a native `size` method. +macro_rules! impl_in_mem_size { + ($($ty:ty),*) => { + $( + impl InMemorySize for $ty { + fn size(&self) -> usize { + Self::size(self) + } + } + )* + }; +} + +impl_in_mem_size!(Header, TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844); + +#[cfg(test)] +mod tests { + use super::*; + + // ensures we don't have any recursion in the `InMemorySize` impls + #[test] + fn no_in_memory_no_recursion() { + fn assert_no_recursion() { + let _ = T::default().size(); + } + assert_no_recursion::

(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); } } From 55d047fcf608cfcdfdf28c3ca01d1f7d7525516d Mon Sep 17 00:00:00 2001 From: Maks Date: Fri, 22 Nov 2024 19:02:46 +0100 Subject: [PATCH 644/970] Grammar and Typo Fixes in Documentation (#12789) --- crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h | 4 ++-- docs/design/metrics.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h index 43960abfb4c..dfcba66063a 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h @@ -136,7 +136,7 @@ are only a few cases of changing data. | _DELETING_||| |Key is absent → Error since no such key |\ref mdbx_del() or \ref mdbx_replace()|Error \ref MDBX_NOTFOUND| |Key exist → Delete by key |\ref mdbx_del() with the parameter `data = NULL`|Deletion| -|Key exist → Delete by key with with data matching check|\ref mdbx_del() with the parameter `data` filled with the value which should be match for deletion|Deletion or \ref MDBX_NOTFOUND if the value does not match| +|Key exist → Delete by key with data matching check|\ref mdbx_del() with the parameter `data` filled with the value which should be match for deletion|Deletion or \ref MDBX_NOTFOUND if the value does not match| |Delete at the current cursor position |\ref mdbx_cursor_del() with \ref MDBX_CURRENT flag|Deletion| |Extract (read & delete) value by the key |\ref mdbx_replace() with zero flag and parameter `new_data = NULL`|Returning a deleted value| @@ -5264,7 +5264,7 @@ LIBMDBX_API int mdbx_dbi_sequence(MDBX_txn *txn, MDBX_dbi dbi, uint64_t *result, * This returns a comparison as if the two data items were keys in the * specified database. * - * \warning There ss a Undefined behavior if one of arguments is invalid. 
+ * \warning There is a Undefined behavior if one of arguments is invalid. * * \param [in] txn A transaction handle returned by \ref mdbx_txn_begin(). * \param [in] dbi A database handle returned by \ref mdbx_dbi_open(). diff --git a/docs/design/metrics.md b/docs/design/metrics.md index 0ac1f71c90d..cc386a11251 100644 --- a/docs/design/metrics.md +++ b/docs/design/metrics.md @@ -42,7 +42,7 @@ There will only ever exist one description per metric `KeyName`; it is not possi The `metrics` crate provides three macros per metric variant: `register_!`, `!`, and `describe_!`. Prefer to use these where possible, since they generate the code necessary to register and update metrics under various conditions. - The `register_!` macro simply creates the metric and returns a handle to it (e.g. a `Counter`). These metric structs are thread-safe and cheap to clone. -- The `!` macro registers the metric if it does not exist, and updates it's value. +- The `!` macro registers the metric if it does not exist, and updates its value. - The `describe_!` macro adds an end-user description for the metric. How the metrics are exposed to the end-user is determined by the CLI. From 2d5256cb05b4952194ee513d3b7de4c90c883d3a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 20:14:58 +0100 Subject: [PATCH 645/970] chore: use existing transaction fns (#12793) --- crates/transaction-pool/src/traits.rs | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index c45584c50a7..d0ec36cb07e 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1245,15 +1245,7 @@ impl PoolTransaction for EthPooledTransaction { /// /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). 
fn max_fee_per_gas(&self) -> u128 { - #[allow(unreachable_patterns)] - match &self.transaction.transaction { - Transaction::Legacy(tx) => tx.gas_price, - Transaction::Eip2930(tx) => tx.gas_price, - Transaction::Eip1559(tx) => tx.max_fee_per_gas, - Transaction::Eip4844(tx) => tx.max_fee_per_gas, - Transaction::Eip7702(tx) => tx.max_fee_per_gas, - _ => 0, - } + self.transaction.transaction.max_fee_per_gas() } fn access_list(&self) -> Option<&AccessList> { @@ -1264,14 +1256,7 @@ impl PoolTransaction for EthPooledTransaction { /// /// This will return `None` for non-EIP1559 transactions fn max_priority_fee_per_gas(&self) -> Option { - #[allow(unreachable_patterns, clippy::match_same_arms)] - match &self.transaction.transaction { - Transaction::Legacy(_) | Transaction::Eip2930(_) => None, - Transaction::Eip1559(tx) => Some(tx.max_priority_fee_per_gas), - Transaction::Eip4844(tx) => Some(tx.max_priority_fee_per_gas), - Transaction::Eip7702(tx) => Some(tx.max_priority_fee_per_gas), - _ => None, - } + self.transaction.transaction.max_priority_fee_per_gas() } fn max_fee_per_blob_gas(&self) -> Option { From 71fd63d9ac658c19a859cbe30f5ca32628b7b845 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 20:41:11 +0100 Subject: [PATCH 646/970] feat: add try_into_pooled_eip4844 (#12792) --- crates/transaction-pool/src/test_utils/mock.rs | 8 ++++++++ crates/transaction-pool/src/traits.rs | 15 +++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 849bde26548..afa1638c851 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -770,6 +770,14 @@ impl EthPoolTransaction for MockTransaction { } } + fn try_into_pooled_eip4844(self, sidecar: Arc) -> Option { + Self::Pooled::try_from_blob_transaction( + self.into_consensus(), + Arc::unwrap_or_clone(sidecar), + ) + .ok() + } + fn validate_blob( &self, 
_blob: &BlobTransactionSidecar, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index d0ec36cb07e..f4946ed9d16 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1093,6 +1093,13 @@ pub trait EthPoolTransaction: /// Returns the number of blobs this transaction has. fn blob_count(&self) -> usize; + /// A specialization for the EIP-4844 transaction type. + /// Tries to reattach the blob sidecar to the transaction. + /// + /// This returns an option, but callers should ensure that the transaction is an EIP-4844 + /// transaction: [`PoolTransaction::is_eip4844`]. + fn try_into_pooled_eip4844(self, sidecar: Arc) -> Option; + /// Validates the blob sidecar of the transaction with the given settings. fn validate_blob( &self, @@ -1324,6 +1331,14 @@ impl EthPoolTransaction for EthPooledTransaction { } } + fn try_into_pooled_eip4844(self, sidecar: Arc) -> Option { + PooledTransactionsElementEcRecovered::try_from_blob_transaction( + self.into_consensus(), + Arc::unwrap_or_clone(sidecar), + ) + .ok() + } + fn validate_blob( &self, sidecar: &BlobTransactionSidecar, From 9a7a733a087a01b243a12890e6630b4e0312638d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 20:41:46 +0100 Subject: [PATCH 647/970] feat: impl InMemorySize for PooledTx (#12791) --- crates/primitives-traits/src/size.rs | 19 ++++++++++++++++++- crates/primitives/src/transaction/pooled.rs | 13 +++++++++++++ crates/primitives/src/transaction/sidecar.rs | 11 +++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs index da3b39888f4..4d721dd00b3 100644 --- a/crates/primitives-traits/src/size.rs +++ b/crates/primitives-traits/src/size.rs @@ -10,15 +10,32 @@ pub trait InMemorySize { impl InMemorySize for alloy_consensus::Signed { fn size(&self) -> usize { - T::size(self.tx()) + core::mem::size_of::() + 
core::mem::size_of::() + T::size(self.tx()) + self.signature().size() + self.hash().size() } } +/// Implement `InMemorySize` for a type with `size_of` +macro_rules! impl_in_mem_size_size_of { + ($($ty:ty),*) => { + $( + impl InMemorySize for $ty { + #[inline] + fn size(&self) -> usize { + core::mem::size_of::() + } + } + )* + }; +} + +impl_in_mem_size_size_of!(Signature, TxHash); + /// Implement `InMemorySize` for a type with a native `size` method. macro_rules! impl_in_mem_size { ($($ty:ty),*) => { $( impl InMemorySize for $ty { + #[inline] fn size(&self) -> usize { Self::size(self) } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index bb840614703..09111c61a17 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -20,6 +20,7 @@ use alloy_primitives::{ use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; use derive_more::{AsRef, Deref}; +use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions`. 
This can include either a blob transaction, or a @@ -559,6 +560,18 @@ impl alloy_consensus::Transaction for PooledTransactionsElement { } } +impl InMemorySize for PooledTransactionsElement { + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + Self::BlobTransaction(tx) => tx.size(), + } + } +} + impl TryFrom for PooledTransactionsElement { type Error = TransactionConversionError; diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index c1b1b029afc..2cf04bc8e74 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -4,6 +4,7 @@ use crate::{Transaction, TransactionSigned}; use alloy_consensus::{transaction::RlpEcdsaTx, Signed, TxEip4844WithSidecar}; use alloy_eips::eip4844::BlobTransactionSidecar; use derive_more::Deref; +use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their @@ -73,6 +74,16 @@ impl BlobTransaction { } } +impl InMemorySize for BlobTransaction { + fn size(&self) -> usize { + // TODO(mattsse): replace with next alloy bump + self.0.hash().size() + + self.0.signature().size() + + self.0.tx().tx().size() + + self.0.tx().sidecar.size() + } +} + #[cfg(all(test, feature = "c-kzg"))] mod tests { use super::*; From 6a97a6dfe4c01c94c0e412641c46333aa6d2cf91 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 20:53:46 +0100 Subject: [PATCH 648/970] chore: include payload id in debug msg (#12795) --- crates/ethereum/payload/src/lib.rs | 2 +- crates/optimism/payload/src/builder.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index f38c93613dc..24312fecbf4 100644 --- a/crates/ethereum/payload/src/lib.rs 
+++ b/crates/ethereum/payload/src/lib.rs @@ -451,7 +451,7 @@ where }; let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", sealed_block_header = ?sealed_block.header, "sealed built block"); + debug!(target: "payload_builder", id=%attributes.id, sealed_block_header = ?sealed_block.header, "sealed built block"); // create the executed block data let executed = ExecutedBlock { diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index eaf9e86e773..ec766876836 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -421,7 +421,7 @@ where }; let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", sealed_block_header = ?sealed_block.header, "sealed built block"); + debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header, "sealed built block"); // create the executed block data let executed = ExecutedBlock { From 36db1c24077f7a500048c545c4c5cefb6a27224a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 22 Nov 2024 21:13:07 +0100 Subject: [PATCH 649/970] chore: simplify cost calc (#12796) --- crates/transaction-pool/src/traits.rs | 30 +++++++-------------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index f4946ed9d16..27bed950c50 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1142,34 +1142,20 @@ impl EthPooledTransaction { pub fn new(transaction: TransactionSignedEcRecovered, encoded_length: usize) -> Self { let mut blob_sidecar = EthBlobTransactionSidecar::None; - #[allow(unreachable_patterns)] - let gas_cost = match &transaction.transaction { - Transaction::Legacy(t) => { - U256::from(t.gas_price).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip2930(t) => { - 
U256::from(t.gas_price).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip1559(t) => { - U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip4844(t) => { - blob_sidecar = EthBlobTransactionSidecar::Missing; - U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip7702(t) => { - U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit)) - } - _ => U256::ZERO, - }; - let mut cost = transaction.value(); - cost = cost.saturating_add(gas_cost); + let gas_cost = U256::from(transaction.transaction.max_fee_per_gas()) + .saturating_mul(U256::from(transaction.transaction.gas_limit())); + + let mut cost = gas_cost.saturating_add(transaction.value()); if let Some(blob_tx) = transaction.as_eip4844() { // Add max blob cost using saturating math to avoid overflow cost = cost.saturating_add(U256::from( blob_tx.max_fee_per_blob_gas.saturating_mul(blob_tx.blob_gas() as u128), )); + + // because the blob sidecar is not included in this transaction variant, mark it as + // missing + blob_sidecar = EthBlobTransactionSidecar::Missing; } Self { transaction, cost, encoded_length, blob_sidecar } From 5db3ad1a674e0ccef7b3e3d33113df62e1df4d49 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 23 Nov 2024 03:04:42 +0400 Subject: [PATCH 650/970] feat: add Transaction AT to `TransactionsProvider` (#12794) --- Cargo.lock | 3 + crates/node/types/src/lib.rs | 3 + crates/optimism/cli/src/ovm_file_codec.rs | 1 + crates/optimism/rpc/src/eth/receipt.rs | 3 +- crates/optimism/rpc/src/eth/transaction.rs | 1 + crates/primitives/src/transaction/mod.rs | 56 ++++++++--- crates/prune/prune/Cargo.toml | 3 + .../src/segments/user/transaction_lookup.rs | 3 +- crates/rpc/rpc-builder/src/lib.rs | 8 +- crates/rpc/rpc-eth-api/src/helpers/receipt.rs | 9 +- .../rpc-eth-api/src/helpers/transaction.rs | 20 +++- crates/rpc/rpc-eth-api/src/types.rs | 35 ++++--- crates/rpc/rpc-eth-types/Cargo.toml | 1 + 
crates/rpc/rpc-eth-types/src/logs_utils.rs | 4 +- crates/rpc/rpc-eth-types/src/transaction.rs | 31 +++--- crates/rpc/rpc-types-compat/src/block.rs | 8 +- .../rpc/rpc-types-compat/src/transaction.rs | 16 +-- crates/rpc/rpc/src/eth/helpers/receipt.rs | 3 +- crates/stages/stages/Cargo.toml | 1 + crates/stages/stages/src/stages/tx_lookup.rs | 9 +- .../src/providers/blockchain_provider.rs | 26 +++-- .../provider/src/providers/consistent.rs | 58 ++++++++--- .../provider/src/providers/database/mod.rs | 20 ++-- .../src/providers/database/provider.rs | 98 ++++++++----------- crates/storage/provider/src/providers/mod.rs | 28 +++--- .../provider/src/providers/static_file/jar.rs | 65 ++++++------ .../src/providers/static_file/manager.rs | 52 +++++----- .../provider/src/providers/static_file/mod.rs | 2 +- .../storage/provider/src/test_utils/mock.rs | 24 +++-- .../storage/provider/src/test_utils/noop.rs | 18 ++-- crates/storage/provider/src/traits/full.rs | 10 +- .../storage/storage-api/src/transactions.rs | 31 +++--- examples/db-access/src/main.rs | 10 +- 33 files changed, 389 insertions(+), 271 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e4a6687defb..0f3d34cfba5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8655,6 +8655,7 @@ dependencies = [ name = "reth-prune" version = "1.1.2" dependencies = [ + "alloy-eips", "alloy-primitives", "assert_matches", "itertools 0.13.0", @@ -8988,6 +8989,7 @@ dependencies = [ "reth-execution-types", "reth-metrics", "reth-primitives", + "reth-primitives-traits", "reth-revm", "reth-rpc-server-types", "reth-rpc-types-compat", @@ -9062,6 +9064,7 @@ name = "reth-stages" version = "1.1.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "assert_matches", diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 2e5558a33bf..a23b9bfe414 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -238,3 +238,6 @@ pub type HeaderTy = <::Primitives as 
NodePrimitives>::BlockHe /// Helper adapter type for accessing [`NodePrimitives::BlockBody`] on [`NodeTypes`]. pub type BodyTy = <::Primitives as NodePrimitives>::BlockBody; + +/// Helper adapter type for accessing [`NodePrimitives::SignedTx`] on [`NodeTypes`]. +pub type TxTy = <::Primitives as NodePrimitives>::SignedTx; diff --git a/crates/optimism/cli/src/ovm_file_codec.rs b/crates/optimism/cli/src/ovm_file_codec.rs index b29d30093ec..3d746d6d1e0 100644 --- a/crates/optimism/cli/src/ovm_file_codec.rs +++ b/crates/optimism/cli/src/ovm_file_codec.rs @@ -250,6 +250,7 @@ impl Encodable2718 for TransactionSigned { Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), } } + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { self.transaction.eip2718_encode(&self.signature, out) } diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 2cc771d0e44..5064c9ed5cf 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -11,7 +11,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OpHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; -use reth_provider::ChainSpecProvider; +use reth_provider::{ChainSpecProvider, TransactionsProvider}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; @@ -21,6 +21,7 @@ impl LoadReceipt for OpEthApi where Self: Send + Sync, N: FullNodeComponents>, + Self::Provider: TransactionsProvider, { async fn build_transaction_receipt( &self, diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index dad151c41c4..19bcd31dacc 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -58,6 +58,7 @@ impl LoadTransaction for OpEthApi where Self: SpawnBlocking + 
FullEthApiTypes, N: RpcNodeCore, + Self::Pool: TransactionPool, { } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 2c639c7ffeb..cc966154c09 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1624,6 +1624,10 @@ impl Encodable2718 for TransactionSigned { fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { self.transaction.eip2718_encode(&self.signature, out) } + + fn trie_hash(&self) -> B256 { + self.hash() + } } impl Decodable2718 for TransactionSigned { @@ -1720,50 +1724,47 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { /// Signed transaction with recovered signer. #[derive(Debug, Clone, PartialEq, Hash, Eq, AsRef, Deref)] -pub struct TransactionSignedEcRecovered { +pub struct TransactionSignedEcRecovered { /// Signer of the transaction signer: Address, /// Signed transaction #[deref] #[as_ref] - signed_transaction: TransactionSigned, + signed_transaction: T, } // === impl TransactionSignedEcRecovered === -impl TransactionSignedEcRecovered { +impl TransactionSignedEcRecovered { /// Signer of transaction recovered from signature pub const fn signer(&self) -> Address { self.signer } /// Returns a reference to [`TransactionSigned`] - pub const fn as_signed(&self) -> &TransactionSigned { + pub const fn as_signed(&self) -> &T { &self.signed_transaction } /// Transform back to [`TransactionSigned`] - pub fn into_signed(self) -> TransactionSigned { + pub fn into_signed(self) -> T { self.signed_transaction } /// Dissolve Self to its component - pub fn to_components(self) -> (TransactionSigned, Address) { + pub fn to_components(self) -> (T, Address) { (self.signed_transaction, self.signer) } /// Create [`TransactionSignedEcRecovered`] from [`TransactionSigned`] and [`Address`] of the /// signer. 
#[inline] - pub const fn from_signed_transaction( - signed_transaction: TransactionSigned, - signer: Address, - ) -> Self { + pub const fn from_signed_transaction(signed_transaction: T, signer: Address) -> Self { Self { signed_transaction, signer } } } -impl Encodable for TransactionSignedEcRecovered { +impl Encodable for TransactionSignedEcRecovered { /// This encodes the transaction _with_ the signature, and an rlp header. /// /// Refer to docs for [`TransactionSigned::encode`] for details on the exact format. @@ -1776,9 +1777,9 @@ impl Encodable for TransactionSignedEcRecovered { } } -impl Decodable for TransactionSignedEcRecovered { +impl Decodable for TransactionSignedEcRecovered { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let signed_transaction = TransactionSigned::decode(buf)?; + let signed_transaction = T::decode(buf)?; let signer = signed_transaction .recover_signer() .ok_or(RlpError::Custom("Unable to recover decoded transaction signer."))?; @@ -1786,6 +1787,20 @@ impl Decodable for TransactionSignedEcRecovered { } } +/// Extension trait for [`SignedTransaction`] to convert it into [`TransactionSignedEcRecovered`]. +pub trait SignedTransactionIntoRecoveredExt: SignedTransaction { + /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] _without + /// ensuring that the signature has a low `s` value_ (EIP-2). + /// + /// Returns `None` if the transaction's signature is invalid. + fn into_ecrecovered_unchecked(self) -> Option> { + let signer = self.recover_signer_unchecked()?; + Some(TransactionSignedEcRecovered::from_signed_transaction(self, signer)) + } +} + +impl SignedTransactionIntoRecoveredExt for T where T: SignedTransaction {} + /// Bincode-compatible transaction type serde implementations. #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { @@ -1991,6 +2006,21 @@ pub mod serde_bincode_compat { } } +/// Recovers a list of signers from a transaction list iterator. 
+/// +/// Returns `None`, if some transaction's signature is invalid +pub fn recover_signers<'a, I, T>(txes: I, num_txes: usize) -> Option> +where + T: SignedTransaction, + I: IntoParallelIterator + IntoIterator + Send, +{ + if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { + txes.into_iter().map(|tx| tx.recover_signer()).collect() + } else { + txes.into_par_iter().map(|tx| tx.recover_signer()).collect() + } +} + #[cfg(test)] mod tests { use crate::{ diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 4df9ace8133..41156d3e56b 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -24,6 +24,9 @@ reth-config.workspace = true reth-prune-types.workspace = true reth-static-file-types.workspace = true +# ethereum +alloy-eips.workspace = true + # metrics reth-metrics.workspace = true metrics.workspace = true diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index ada4019302e..ce9d90c291b 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -3,6 +3,7 @@ use crate::{ segments::{PruneInput, Segment, SegmentOutput}, PrunerError, }; +use alloy_eips::eip2718::Encodable2718; use rayon::prelude::*; use reth_db::{tables, transaction::DbTxMut}; use reth_provider::{BlockReader, DBProvider, TransactionsProvider}; @@ -58,7 +59,7 @@ where let hashes = provider .transactions_by_tx_range(tx_range.clone())? .into_par_iter() - .map(|transaction| transaction.hash()) + .map(|transaction| transaction.trie_hash()) .collect::>(); // Number of transactions retrieved from the database should match the tx range count diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 207bc9ec5be..ccf19ed1a0b 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -19,6 +19,7 @@ //! use alloy_consensus::Header; //! 
use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; +//! use reth_primitives::TransactionSigned; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ @@ -36,7 +37,8 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, +//! Provider: +//! FullRpcProvider + AccountReader + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, @@ -77,6 +79,7 @@ //! use reth_engine_primitives::EngineTypes; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; +//! use reth_primitives::TransactionSigned; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -109,7 +112,8 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, +//! Provider: +//! FullRpcProvider + AccountReader + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 48394f1cd6b..7e1992017d8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -2,18 +2,21 @@ //! loads receipt data w.r.t. network. 
use futures::Future; -use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use reth_primitives::{Receipt, TransactionMeta}; +use reth_provider::TransactionsProvider; use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. -pub trait LoadReceipt: EthApiTypes + RpcNodeCoreExt + Send + Sync { +pub trait LoadReceipt: + EthApiTypes + RpcNodeCoreExt + Send + Sync +{ /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. fn build_transaction_receipt( &self, - tx: TransactionSigned, + tx: ::Transaction, meta: TransactionMeta, receipt: Receipt, ) -> impl Future, Self::Error>> + Send; diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index afe1c513b69..d87a4855b1d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -9,7 +9,9 @@ use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned}; -use reth_provider::{BlockNumReader, BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; +use reth_provider::{ + BlockNumReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, +}; use reth_rpc_eth_types::{ utils::{binary_search, recover_raw_transaction}, EthApiError, SignError, TransactionSource, @@ -60,10 +62,13 @@ pub trait EthTransactions: LoadTransaction { /// Checks the pool and state. /// /// Returns `Ok(None)` if no matching transaction was found. 
+ #[expect(clippy::complexity)] fn transaction_by_hash( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future< + Output = Result>>, Self::Error>, + > + Send { LoadTransaction::transaction_by_hash(self, hash) } @@ -148,11 +153,15 @@ pub trait EthTransactions: LoadTransaction { } /// Helper method that loads a transaction and its receipt. + #[expect(clippy::complexity)] fn load_transaction_and_receipt( &self, hash: TxHash, ) -> impl Future< - Output = Result, Self::Error>, + Output = Result< + Option<(ProviderTx, TransactionMeta, Receipt)>, + Self::Error, + >, > + Send where Self: 'static, @@ -477,10 +486,13 @@ pub trait LoadTransaction: /// Checks the pool and state. /// /// Returns `Ok(None)` if no matching transaction was found. + #[expect(clippy::complexity)] fn transaction_by_hash( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future< + Output = Result>>, Self::Error>, + > + Send { async move { // Try to find the transaction on disk let mut resp = self diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 12ff090d37c..994f9ac884d 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -7,9 +7,11 @@ use std::{ use alloy_network::Network; use alloy_rpc_types_eth::Block; +use reth_primitives::TransactionSigned; +use reth_provider::TransactionsProvider; use reth_rpc_types_compat::TransactionCompat; -use crate::{AsEthApiError, FromEthApiError, FromEvmError}; +use crate::{AsEthApiError, FromEthApiError, FromEvmError, RpcNodeCore}; /// Network specific `eth` API types. pub trait EthApiTypes: Send + Sync + Clone { @@ -43,22 +45,27 @@ pub type RpcReceipt = ::ReceiptResponse; pub type RpcError = ::Error; /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. 
-pub trait FullEthApiTypes: - EthApiTypes< - TransactionCompat: TransactionCompat< - Transaction = RpcTransaction, - Error = RpcError, - >, -> +pub trait FullEthApiTypes +where + Self: RpcNodeCore> + + EthApiTypes< + TransactionCompat: TransactionCompat< + ::Transaction, + Transaction = RpcTransaction, + Error = RpcError, + >, + >, { } impl FullEthApiTypes for T where - T: EthApiTypes< - TransactionCompat: TransactionCompat< - Transaction = RpcTransaction, - Error = RpcError, - >, - > + T: RpcNodeCore> + + EthApiTypes< + TransactionCompat: TransactionCompat< + ::Transaction, + Transaction = RpcTransaction, + Error = RpcError, + >, + > { } diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 9b38ed89724..98b9530d63c 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -19,6 +19,7 @@ reth-evm.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-revm.workspace = true reth-rpc-server-types.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 3e7c9db6d68..5ead11b7115 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -2,7 +2,7 @@ //! //! Log parsing for building filter. 
-use alloy_eips::BlockNumHash; +use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::TxHash; use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; @@ -110,7 +110,7 @@ pub fn append_matching_block_logs( ProviderError::TransactionNotFound(transaction_id.into()) })?; - Some(transaction.hash()) + Some(transaction.trie_hash()) } }; } diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index a4ede0a1a4e..83ef97807de 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -4,7 +4,8 @@ use alloy_primitives::B256; use alloy_rpc_types_eth::TransactionInfo; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives_traits::SignedTransaction; use reth_rpc_types_compat::{ transaction::{from_recovered, from_recovered_with_block_context}, TransactionCompat, @@ -12,15 +13,15 @@ use reth_rpc_types_compat::{ /// Represents from where a transaction was fetched. #[derive(Debug, Clone, Eq, PartialEq)] -pub enum TransactionSource { +pub enum TransactionSource { /// Transaction exists in the pool (Pending) - Pool(TransactionSignedEcRecovered), + Pool(TransactionSignedEcRecovered), /// Transaction already included in a block /// /// This can be a historical block or a pending block (received from the CL) Block { /// Transaction fetched via provider - transaction: TransactionSignedEcRecovered, + transaction: TransactionSignedEcRecovered, /// Index of the transaction in the block index: u64, /// Hash of the block. @@ -34,22 +35,22 @@ pub enum TransactionSource { // === impl TransactionSource === -impl TransactionSource { +impl TransactionSource { /// Consumes the type and returns the wrapped transaction. 
- pub fn into_recovered(self) -> TransactionSignedEcRecovered { + pub fn into_recovered(self) -> TransactionSignedEcRecovered { self.into() } /// Conversion into network specific transaction type. - pub fn into_transaction( + pub fn into_transaction>( self, - resp_builder: &T, - ) -> Result { + resp_builder: &Builder, + ) -> Result { match self { Self::Pool(tx) => from_recovered(tx, resp_builder), Self::Block { transaction, index, block_hash, block_number, base_fee } => { let tx_info = TransactionInfo { - hash: Some(transaction.hash()), + hash: Some(transaction.trie_hash()), index: Some(index), block_hash: Some(block_hash), block_number: Some(block_number), @@ -62,14 +63,14 @@ impl TransactionSource { } /// Returns the transaction and block related info, if not pending - pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { + pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { match self { Self::Pool(tx) => { - let hash = tx.hash(); + let hash = tx.trie_hash(); (tx, TransactionInfo { hash: Some(hash), ..Default::default() }) } Self::Block { transaction, index, block_hash, block_number, base_fee } => { - let hash = transaction.hash(); + let hash = transaction.trie_hash(); ( transaction, TransactionInfo { @@ -85,8 +86,8 @@ impl TransactionSource { } } -impl From for TransactionSignedEcRecovered { - fn from(value: TransactionSource) -> Self { +impl From> for TransactionSignedEcRecovered { + fn from(value: TransactionSource) -> Self { match value { TransactionSource::Pool(tx) => tx, TransactionSource::Block { transaction, .. 
} => transaction, diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 43086b311bd..f2b1d93be83 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -7,7 +7,7 @@ use alloy_rlp::Encodable; use alloy_rpc_types_eth::{ Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; -use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders}; +use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders, TransactionSigned}; use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; @@ -87,7 +87,11 @@ pub fn from_block_full( index: Some(idx as u64), }; - from_recovered_with_block_context::(signed_tx_ec_recovered, tx_info, tx_resp_builder) + from_recovered_with_block_context::( + signed_tx_ec_recovered, + tx_info, + tx_resp_builder, + ) }) .collect::, T::Error>>()?; diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs index 9e8fae67096..31c9d967cd1 100644 --- a/crates/rpc/rpc-types-compat/src/transaction.rs +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_eth::{ request::{TransactionInput, TransactionRequest}, TransactionInfo, }; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; use serde::{Deserialize, Serialize}; /// Create a new rpc transaction result for a mined transaction, using the given block hash, @@ -16,8 +16,8 @@ use serde::{Deserialize, Serialize}; /// /// The block hash, number, and tx index fields should be from the original block where the /// transaction was mined. 
-pub fn from_recovered_with_block_context( - tx: TransactionSignedEcRecovered, +pub fn from_recovered_with_block_context>( + tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, resp_builder: &T, ) -> Result { @@ -26,15 +26,17 @@ pub fn from_recovered_with_block_context( /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. -pub fn from_recovered( - tx: TransactionSignedEcRecovered, +pub fn from_recovered>( + tx: TransactionSignedEcRecovered, resp_builder: &T, ) -> Result { resp_builder.fill(tx, TransactionInfo::default()) } /// Builds RPC transaction w.r.t. network. -pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { +pub trait TransactionCompat: + Send + Sync + Unpin + Clone + fmt::Debug +{ /// RPC transaction response type. type Transaction: Serialize + for<'de> Deserialize<'de> @@ -51,7 +53,7 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { /// environment related fields to `None`. fn fill( &self, - tx: TransactionSignedEcRecovered, + tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo, ) -> Result; diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 594cffd09f2..13b0dab2593 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,6 +1,7 @@ //! Builds an RPC receipt response w.r.t. data layout of network. 
use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use reth_provider::TransactionsProvider; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; @@ -8,7 +9,7 @@ use crate::EthApi; impl LoadReceipt for EthApi where - Self: RpcNodeCoreExt, + Self: RpcNodeCoreExt>, { async fn build_transaction_receipt( &self, diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index eedd5f9ca41..f97214f4643 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -39,6 +39,7 @@ reth-trie-db = { workspace = true, features = ["metrics"] } reth-testing-utils = { workspace = true, optional = true } +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 5208cc936ce..fab10b0f953 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -1,12 +1,15 @@ +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{TxHash, TxNumber}; use num_traits::Zero; use reth_config::config::{EtlConfig, TransactionLookupConfig}; -use reth_db::{tables, RawKey, RawValue}; +use reth_db::{table::Value, tables, RawKey, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; +use reth_primitives::NodePrimitives; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, StatsReader, TransactionsProvider, TransactionsProviderExt, @@ -60,7 +63,7 @@ where + BlockReader + PruneCheckpointReader + StatsReader - + StaticFileProviderFactory + + StaticFileProviderFactory> + TransactionsProviderExt, { /// Return the id of the stage @@ -206,7 +209,7 @@ where for tx_id in 
body.tx_num_range() { // First delete the transaction and hash to id mapping if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? { - if tx_hash_number_cursor.seek_exact(transaction.hash())?.is_some() { + if tx_hash_number_cursor.seek_exact(transaction.trie_hash())?.is_some() { tx_hash_number_cursor.delete_current()?; } } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 744120dd0c0..967ac785b47 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -25,7 +25,7 @@ use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{NodeTypesWithDB, TxTy}; use reth_primitives::{ Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, @@ -331,29 +331,31 @@ impl BlockReader for BlockchainProvider2 { } impl TransactionsProvider for BlockchainProvider2 { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.consistent_provider()?.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.consistent_provider()?.transaction_by_id(id) } fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.consistent_provider()?.transaction_by_id_unhashed(id) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.consistent_provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( 
&self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.consistent_provider()?.transaction_by_hash_with_meta(tx_hash) } @@ -364,21 +366,21 @@ impl TransactionsProvider for BlockchainProvider2 { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.consistent_provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.consistent_provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.consistent_provider()?.transactions_by_tx_range(range) } @@ -2244,9 +2246,7 @@ mod tests { (transactions_by_tx_range, |block: &SealedBlock, _: &Vec>| block .body .transactions - .iter() - .map(|tx| Into::::into(tx.clone())) - .collect::>()), + .clone()), (receipts_by_tx_range, |block: &SealedBlock, receipts: &Vec>| receipts [block.number as usize] .clone()) @@ -2591,9 +2591,7 @@ mod tests { transaction_by_id_unhashed, |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( tx_num, - Some(Into::::into( - block.body.transactions[test_tx_index].clone() - )) + Some(block.body.transactions[test_tx_index].clone()) ), u64::MAX ), diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 7d52dfcc4bb..fc9d739b0fe 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -18,9 +18,10 @@ use reth_db::models::BlockNumberAddress; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; +use reth_node_types::TxTy; use reth_primitives::{ Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StorageEntry, TransactionMeta, 
TransactionSigned, TransactionSignedNoHash, + StorageEntry, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -927,6 +928,8 @@ impl BlockReader for ConsistentProvider { } impl TransactionsProvider for ConsistentProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( tx_hash.into(), @@ -935,12 +938,19 @@ impl TransactionsProvider for ConsistentProvider { ) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.transaction_by_id(id), |tx_index, _, block_state| { - Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) + Ok(block_state + .block_ref() + .block() + .body + .transactions + .get(tx_index) + .cloned() + .map(Into::into)) }, ) } @@ -948,7 +958,7 @@ impl TransactionsProvider for ConsistentProvider { fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.transaction_by_id_unhashed(id), @@ -965,9 +975,9 @@ impl TransactionsProvider for ConsistentProvider { ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(tx) = self.head_block.as_ref().and_then(|b| b.transaction_on_chain(hash)) { - return Ok(Some(tx)) + return Ok(Some(tx.into())) } self.storage_provider.transaction_by_hash(hash) @@ -976,11 +986,11 @@ impl TransactionsProvider for ConsistentProvider { fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { if let Some((tx, meta)) = self.head_block.as_ref().and_then(|b| b.transaction_meta_on_chain(tx_hash)) { - return Ok(Some((tx, meta))) + return Ok(Some((tx.into(), meta))) } 
self.storage_provider.transaction_by_hash_with_meta(tx_hash) @@ -997,22 +1007,44 @@ impl TransactionsProvider for ConsistentProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( id, |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), + |block_state| { + Ok(Some( + block_state + .block_ref() + .block() + .body + .transactions + .iter() + .map(|tx| tx.clone().into()) + .collect(), + )) + }, ) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), + |block_state, _| { + Some( + block_state + .block_ref() + .block() + .body + .transactions + .iter() + .map(|tx| tx.clone().into()) + .collect(), + ) + }, |_| true, ) } @@ -1020,7 +1052,7 @@ impl TransactionsProvider for ConsistentProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_in_memory_or_storage_by_tx_range( range, |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 491c79d7aa6..57f09e72306 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -19,10 +19,10 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{NodeTypesWithDB, TxTy}; use 
reth_primitives::{ Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + StaticFileSegment, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -420,11 +420,13 @@ impl BlockReader for ProviderFactory { } impl TransactionsProvider for ProviderFactory { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.provider()?.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, @@ -436,7 +438,7 @@ impl TransactionsProvider for ProviderFactory { fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, @@ -445,14 +447,14 @@ impl TransactionsProvider for ProviderFactory { ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.provider()?.transaction_by_hash_with_meta(tx_hash) } @@ -463,21 +465,21 @@ impl TransactionsProvider for ProviderFactory { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> 
ProviderResult> { self.provider()?.transactions_by_tx_range(range) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 279637abd84..bf976203726 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -3,7 +3,7 @@ use crate::{ providers::{ database::{chain::ChainStorage, metrics}, static_file::StaticFileWriter, - ProviderNodeTypes, StaticFileProvider, + NodeTypesForProvider, StaticFileProvider, }, to_range, traits::{ @@ -46,7 +46,7 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; -use reth_node_types::NodeTypes; +use reth_node_types::{NodeTypes, TxTy}; use reth_primitives::{ Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, NodePrimitives, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, @@ -243,7 +243,7 @@ impl AsRef for DatabaseProvider { } } -impl DatabaseProvider { +impl DatabaseProvider { /// Unwinds trie state for the given range. /// /// This includes calculating the resulted state root and comparing it with the parent block @@ -374,7 +374,7 @@ impl TryIntoHistoricalStateProvider for Databa } } -impl DatabaseProvider { +impl DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] /// Inserts an historical block. 
**Used for setting up test environments** @@ -486,14 +486,16 @@ impl DatabaseProvider { pub fn chain_spec(&self) -> &N::ChainSpec { &self.chain_spec } +} +impl DatabaseProvider { fn transactions_by_tx_range_with_cursor( &self, range: impl RangeBounds, cursor: &mut C, - ) -> ProviderResult> + ) -> ProviderResult>> where - C: DbCursorRO, + C: DbCursorRO>>, { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Transactions, @@ -507,7 +509,7 @@ impl DatabaseProvider { fn block_with_senders( &self, id: BlockHashOrNumber, - transaction_kind: TransactionVariant, + _transaction_kind: TransactionVariant, header_by_number: HF, construct_block: BF, ) -> ProviderResult> @@ -546,15 +548,7 @@ impl DatabaseProvider { (self.transactions_by_tx_range(tx_range.clone())?, self.senders_by_tx_range(tx_range)?) }; - let body = transactions - .into_iter() - .map(|tx| match transaction_kind { - TransactionVariant::NoHash => { - TransactionSigned::new_unhashed(tx.transaction, tx.signature) - } - TransactionVariant::WithHash => tx.with_hash(), - }) - .collect(); + let body = transactions.into_iter().map(Into::into).collect(); construct_block(header, body, senders, ommers, withdrawals) } @@ -663,7 +657,7 @@ impl DatabaseProvider { Vec
, ) -> ProviderResult, { - let mut tx_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; let mut senders_cursor = self.tx.cursor_read::()?; self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals| { @@ -1219,9 +1213,7 @@ impl BlockNumReader for DatabaseProvider> BlockReader - for DatabaseProvider -{ +impl BlockReader for DatabaseProvider { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) @@ -1245,7 +1237,7 @@ impl> BlockReader // If they exist but are not indexed, we don't have enough // information to return the block anyways, so we return `None`. let transactions = match self.transactions_by_block(number.into())? { - Some(transactions) => transactions, + Some(transactions) => transactions.into_iter().map(Into::into).collect(), None => return Ok(None), }; @@ -1345,7 +1337,7 @@ impl> BlockReader } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - let mut tx_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; self.block_range( range, |range| self.headers_range(range), @@ -1396,7 +1388,7 @@ impl> BlockReader } } -impl> TransactionsProviderExt +impl TransactionsProviderExt for DatabaseProvider { /// Recovers transaction hashes by walking through `Transactions` table and @@ -1466,53 +1458,49 @@ impl> Transaction } // Calculates the hash of the given transaction -impl> TransactionsProvider - for DatabaseProvider -{ +impl TransactionsProvider for DatabaseProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { Ok(self.tx.get::(tx_hash)?) 
} - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, |static_file| static_file.transaction_by_id(id), - || Ok(self.tx.get::(id)?.map(Into::into)), + || Ok(self.tx.get::>(id)?), ) } fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, |static_file| static_file.transaction_by_id_unhashed(id), - || Ok(self.tx.get::(id)?), + || Ok(self.tx.get::>(id)?), ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { - Ok(self - .transaction_by_id_unhashed(id)? - .map(|tx| TransactionSigned::new(tx.transaction, tx.signature, hash))) + Ok(self.transaction_by_id_unhashed(id)?) } else { Ok(None) } - .map(|tx| tx.map(Into::into)) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { let mut transaction_cursor = self.tx.cursor_read::()?; if let Some(transaction_id) = self.transaction_id(tx_hash)? { - if let Some(tx) = self.transaction_by_id_unhashed(transaction_id)? { - let transaction = TransactionSigned::new(tx.transaction, tx.signature, tx_hash); + if let Some(transaction) = self.transaction_by_id_unhashed(transaction_id)? { if let Some(block_number) = transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))? { @@ -1553,8 +1541,8 @@ impl> Transaction fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { - let mut tx_cursor = self.tx.cursor_read::()?; + ) -> ProviderResult>> { + let mut tx_cursor = self.tx.cursor_read::>()?; if let Some(block_number) = self.convert_hash_or_number(id)? { if let Some(body) = self.block_body_indices(block_number)? 
{ @@ -1562,12 +1550,7 @@ impl> Transaction return if tx_range.is_empty() { Ok(Some(Vec::new())) } else { - Ok(Some( - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect(), - )) + Ok(Some(self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)?)) } } } @@ -1577,8 +1560,8 @@ impl> Transaction fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { - let mut tx_cursor = self.tx.cursor_read::()?; + ) -> ProviderResult>> { + let mut tx_cursor = self.tx.cursor_read::>()?; let mut results = Vec::new(); let mut body_cursor = self.tx.cursor_read::()?; for entry in body_cursor.walk_range(range)? { @@ -1590,7 +1573,6 @@ impl> Transaction results.push( self.transactions_by_tx_range_with_cursor(tx_num_range, &mut tx_cursor)? .into_iter() - .map(Into::into) .collect(), ); } @@ -1601,10 +1583,10 @@ impl> Transaction fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.transactions_by_tx_range_with_cursor( range, - &mut self.tx.cursor_read::()?, + &mut self.tx.cursor_read::>()?, ) } @@ -1620,9 +1602,7 @@ impl> Transaction } } -impl> ReceiptProvider - for DatabaseProvider -{ +impl ReceiptProvider for DatabaseProvider { fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, @@ -1887,7 +1867,9 @@ impl StorageReader for DatabaseProvider } } -impl StateChangeWriter for DatabaseProvider { +impl StateChangeWriter + for DatabaseProvider +{ fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2710,13 +2692,13 @@ impl HistoryWriter for DatabaseProvi } } -impl StateReader for DatabaseProvider { +impl StateReader for DatabaseProvider { fn get_state(&self, block: BlockNumber) -> ProviderResult> { self.get_state(block..=block) } } -impl BlockExecutionWriter +impl BlockExecutionWriter for DatabaseProvider { fn 
take_block_and_execution_above( @@ -2766,7 +2748,7 @@ impl BlockExecutio } } -impl BlockWriter +impl BlockWriter for DatabaseProvider { type Body = <::Block as reth_primitives_traits::Block>::Body; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 4d641bb290e..68d1a168f15 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -23,10 +23,10 @@ use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB}; +use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB, TxTy}; use reth_primitives::{ Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - TransactionMeta, TransactionSigned, TransactionSignedNoHash, + TransactionMeta, TransactionSigned, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -76,7 +76,9 @@ where Self: NodeTypes< ChainSpec: EthereumHardforks, Storage: ChainStorage, - Primitives: FullNodePrimitives, + Primitives: FullNodePrimitives< + SignedTx: Value + From + Into, + >, >, { } @@ -85,7 +87,9 @@ impl NodeTypesForProvider for T where T: NodeTypes< ChainSpec: EthereumHardforks, Storage: ChainStorage, - Primitives: FullNodePrimitives, + Primitives: FullNodePrimitives< + SignedTx: Value + From + Into, + >, > { } @@ -417,29 +421,31 @@ impl BlockReader for BlockchainProvider { } impl TransactionsProvider for BlockchainProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.database.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.database.transaction_by_id(id) } fn 
transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.transaction_by_id_unhashed(id) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.database.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.transaction_by_hash_with_meta(tx_hash) } @@ -450,21 +456,21 @@ impl TransactionsProvider for BlockchainProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.database.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.database.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.transactions_by_tx_range(range) } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 9bde4a5f760..e04d46312f6 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -7,17 +7,19 @@ use crate::{ TransactionsProvider, }; use alloy_consensus::Header; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; -use reth_db::static_file::{ - BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, TDWithHashMask, - TotalDifficultyMask, TransactionMask, +use reth_db::{ + static_file::{ + BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, + TDWithHashMask, TotalDifficultyMask, TransactionMask, + }, + table::Decompress, }; use 
reth_node_types::NodePrimitives; -use reth_primitives::{ - Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, -}; +use reth_primitives::{transaction::recover_signers, Receipt, SealedHeader, TransactionMeta}; +use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ fmt::Debug, @@ -207,40 +209,38 @@ impl BlockNumReader for StaticFileJarProvider<'_, N> { } } -impl TransactionsProvider for StaticFileJarProvider<'_, N> { +impl> TransactionsProvider + for StaticFileJarProvider<'_, N> +{ + type Transaction = N::SignedTx; + fn transaction_id(&self, hash: TxHash) -> ProviderResult> { let mut cursor = self.cursor()?; Ok(cursor - .get_one::>((&hash).into())? - .and_then(|res| (res.hash() == hash).then(|| cursor.number()).flatten())) + .get_one::>((&hash).into())? + .and_then(|res| (res.trie_hash() == hash).then(|| cursor.number()).flatten())) } - fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { - Ok(self - .cursor()? - .get_one::>(num.into())? - .map(|tx| tx.with_hash())) + fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } fn transaction_by_id_unhashed( &self, num: TxNumber, - ) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) + ) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - Ok(self - .cursor()? - .get_one::>((&hash).into())? 
- .map(|tx| tx.with_hash())) + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + self.cursor()?.get_one::>((&hash).into()) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { // Information required on indexing table [`tables::TransactionBlocks`] Err(ProviderError::UnsupportedProvider) } @@ -253,7 +253,7 @@ impl TransactionsProvider for StaticFileJarProvider<'_, N> { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Related to indexing tables. Live database should get the tx_range and call static file // provider with `transactions_by_tx_range` instead. Err(ProviderError::UnsupportedProvider) @@ -262,7 +262,7 @@ impl TransactionsProvider for StaticFileJarProvider<'_, N> { fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Related to indexing tables. Live database should get the tx_range and call static file // provider with `transactions_by_tx_range` instead. Err(ProviderError::UnsupportedProvider) @@ -271,15 +271,13 @@ impl TransactionsProvider for StaticFileJarProvider<'_, N> { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut txes = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(tx) = - cursor.get_one::>(num.into())? - { + if let Some(tx) = cursor.get_one::>(num.into())? 
{ txes.push(tx) } } @@ -291,19 +289,20 @@ impl TransactionsProvider for StaticFileJarProvider<'_, N> { range: impl RangeBounds, ) -> ProviderResult> { let txs = self.transactions_by_tx_range(range)?; - TransactionSignedNoHash::recover_signers(&txs, txs.len()) - .ok_or(ProviderError::SenderRecoveryError) + recover_signers(&txs, txs.len()).ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, num: TxNumber) -> ProviderResult> { Ok(self .cursor()? - .get_one::>(num.into())? + .get_one::>(num.into())? .and_then(|tx| tx.recover_signer())) } } -impl ReceiptProvider for StaticFileJarProvider<'_, N> { +impl> ReceiptProvider + for StaticFileJarProvider<'_, N> +{ fn receipt(&self, num: TxNumber) -> ProviderResult> { self.cursor()?.get_one::>(num.into()) } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 8ecc33240b4..14821fde547 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -9,6 +9,7 @@ use crate::{ }; use alloy_consensus::Header; use alloy_eips::{ + eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, }; @@ -23,6 +24,7 @@ use reth_db::{ iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, TDWithHashMask, TransactionMask, }, + table::{Decompress, Value}, tables, }; use reth_db_api::{ @@ -35,9 +37,11 @@ use reth_primitives::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, + transaction::recover_signers, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + StaticFileSegment, TransactionMeta, TransactionSignedNoHash, }; +use reth_primitives_traits::SignedTransaction; use reth_stages_types::{PipelineTarget, StageId}; use 
reth_storage_api::DBProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -1337,7 +1341,9 @@ impl BlockHashReader for StaticFileProvider { } } -impl ReceiptProvider for StaticFileProvider { +impl> ReceiptProvider + for StaticFileProvider +{ fn receipt(&self, num: TxNumber) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Receipts, num, None) .and_then(|provider| provider.receipt(num)) @@ -1374,7 +1380,9 @@ impl ReceiptProvider for StaticFileProvider { } } -impl TransactionsProviderExt for StaticFileProvider { +impl> TransactionsProviderExt + for StaticFileProvider +{ fn transaction_hashes_by_range( &self, tx_range: Range, @@ -1435,13 +1443,17 @@ impl TransactionsProviderExt for StaticFileProvider { } } -impl TransactionsProvider for StaticFileProvider { +impl> TransactionsProvider + for StaticFileProvider +{ + type Transaction = N::SignedTx; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Transactions, |jar_provider| { let mut cursor = jar_provider.cursor()?; if cursor - .get_one::>((&tx_hash).into())? - .and_then(|tx| (tx.hash() == tx_hash).then_some(tx)) + .get_one::>((&tx_hash).into())? 
+ .and_then(|tx| (tx.trie_hash() == tx_hash).then_some(tx)) .is_some() { Ok(cursor.number()) @@ -1451,7 +1463,7 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id(num)) .or_else(|err| { @@ -1466,7 +1478,7 @@ impl TransactionsProvider for StaticFileProvider { fn transaction_by_id_unhashed( &self, num: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id_unhashed(num)) .or_else(|err| { @@ -1478,20 +1490,19 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Transactions, |jar_provider| { Ok(jar_provider .cursor()? - .get_one::>((&hash).into())? - .map(|tx| tx.with_hash()) - .and_then(|tx| (tx.hash_ref() == &hash).then_some(tx))) + .get_one::>((&hash).into())? 
+ .and_then(|tx| (tx.trie_hash() == hash).then_some(tx))) }) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1504,7 +1515,7 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1512,7 +1523,7 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1520,13 +1531,11 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Transactions, to_range(range), - |cursor, number| { - cursor.get_one::>(number.into()) - }, + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } @@ -1536,8 +1545,7 @@ impl TransactionsProvider for StaticFileProvider { range: impl RangeBounds, ) -> ProviderResult> { let txes = self.transactions_by_tx_range(range)?; - TransactionSignedNoHash::recover_signers(&txes, txes.len()) - .ok_or(ProviderError::SenderRecoveryError) + recover_signers(&txes, txes.len()).ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { @@ -1569,7 +1577,7 @@ impl BlockNumReader for StaticFileProvider { } } -impl BlockReader for StaticFileProvider { +impl> BlockReader for StaticFileProvider { fn find_block_by_hash( &self, _hash: B256, diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 58a9e3bb378..673451de65f 100644 --- 
a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -415,7 +415,7 @@ mod tests { #[allow(clippy::too_many_arguments)] fn prune_and_validate( - sf_rw: &StaticFileProvider<()>, + sf_rw: &StaticFileProvider, static_dir: impl AsRef, segment: StaticFileSegment, prune_count: u64, diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 77a4b75a0e2..a0ecb7256cb 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -25,7 +25,6 @@ use reth_node_types::NodeTypes; use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, EthPrimitives, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -244,6 +243,8 @@ impl ChainSpecProvider for MockEthProvider { } impl TransactionsProvider for MockEthProvider { + type Transaction = TransactionSigned; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { let lock = self.blocks.lock(); let tx_number = lock @@ -255,7 +256,7 @@ impl TransactionsProvider for MockEthProvider { Ok(tx_number) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { let lock = self.blocks.lock(); let transaction = lock.values().flat_map(|block| &block.body.transactions).nth(id as usize).cloned(); @@ -266,13 +267,10 @@ impl TransactionsProvider for MockEthProvider { fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); - let transaction = lock - .values() - .flat_map(|block| &block.body.transactions) - .nth(id as usize) - .map(|tx| Into::::into(tx.clone())); + let transaction = + lock.values().flat_map(|block| &block.body.transactions).nth(id as 
usize).cloned(); Ok(transaction) } @@ -286,7 +284,7 @@ impl TransactionsProvider for MockEthProvider { fn transaction_by_hash_with_meta( &self, hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); for (block_hash, block) in lock.iter() { for (index, tx) in block.body.transactions.iter().enumerate() { @@ -322,14 +320,14 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(self.block(id)?.map(|b| b.body.transactions)) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // init btreemap so we can return in order let mut map = BTreeMap::new(); for (_, block) in self.blocks.lock().iter() { @@ -344,14 +342,14 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); let transactions = lock .values() .flat_map(|block| &block.body.transactions) .enumerate() .filter(|&(tx_number, _)| range.contains(&(tx_number as TxNumber))) - .map(|(_, tx)| tx.clone().into()) + .map(|(_, tx)| tx.clone()) .collect(); Ok(transactions) diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 966bab5944c..9a88c8c9ab7 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -23,7 +23,7 @@ use reth_errors::ProviderError; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + SealedHeader, TransactionMeta, TransactionSigned, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -192,29 +192,31 @@ impl BlockIdReader 
for NoopProvider { } impl TransactionsProvider for NoopProvider { + type Transaction = TransactionSigned; + fn transaction_id(&self, _tx_hash: TxHash) -> ProviderResult> { Ok(None) } - fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } fn transaction_by_id_unhashed( &self, _id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(None) } - fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { Ok(None) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(None) } @@ -225,21 +227,21 @@ impl TransactionsProvider for NoopProvider { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(None) } fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(Vec::default()) } fn transactions_by_tx_range( &self, _range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(Vec::default()) } diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 4998e974165..9bb357e33a3 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -7,13 +7,13 @@ use crate::{ }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_chainspec::EthereumHardforks; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{NodeTypesWithDB, TxTy}; /// Helper trait to unify all provider traits for simplicity. 
pub trait FullProvider: DatabaseProviderFactory - + StaticFileProviderFactory - + BlockReaderIdExt + + StaticFileProviderFactory + + BlockReaderIdExt> + AccountReader + StateProviderFactory + EvmEnvProvider @@ -30,8 +30,8 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory - + StaticFileProviderFactory - + BlockReaderIdExt + + StaticFileProviderFactory + + BlockReaderIdExt> + AccountReader + StateProviderFactory + EvmEnvProvider diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index a639fcedde5..ca2bcaeb469 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,7 +1,8 @@ use crate::{BlockNumReader, BlockReader}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; -use reth_primitives::{TransactionMeta, TransactionSigned, TransactionSignedNoHash}; +use reth_primitives::TransactionMeta; +use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::ops::{Range, RangeBounds, RangeInclusive}; @@ -18,9 +19,12 @@ pub enum TransactionVariant { WithHash, } -/// Client trait for fetching [TransactionSigned] related data. +/// Client trait for fetching transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProvider: BlockNumReader + Send + Sync { + /// The transaction type this provider reads. + type Transaction: Send + Sync + SignedTransaction; + /// Get internal transaction identifier by transaction hash. /// /// This is the inverse of [TransactionsProvider::transaction_by_id]. @@ -28,23 +32,21 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult>; /// Get transaction by id, computes hash every time so more expensive. 
- fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; /// Get transaction by id without computing the hash. - fn transaction_by_id_unhashed( - &self, - id: TxNumber, - ) -> ProviderResult>; + fn transaction_by_id_unhashed(&self, id: TxNumber) + -> ProviderResult>; /// Get transaction by transaction hash. - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult>; + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult>; /// Get transaction by transaction hash and additional metadata of the block the transaction was /// mined in fn transaction_by_hash_with_meta( &self, hash: TxHash, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Get transaction block number fn transaction_block(&self, id: TxNumber) -> ProviderResult>; @@ -53,19 +55,19 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transactions_by_block( &self, block: BlockHashOrNumber, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Get transactions by block range. fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Get transactions by tx range. fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Get Senders from a tx range. fn senders_by_tx_range( @@ -79,7 +81,10 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transaction_sender(&self, id: TxNumber) -> ProviderResult>; } -/// Client trait for fetching additional [TransactionSigned] related data. +/// A helper type alias to access [`TransactionsProvider::Transaction`]. +pub type ProviderTx

=

::Transaction; + +/// Client trait for fetching additional transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProviderExt: BlockReader + Send + Sync { /// Get transactions range by block range. diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index f3b7fdf5842..179d1216053 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -4,7 +4,7 @@ use reth_chainspec::ChainSpecBuilder; use reth_db::{open_db_read_only, DatabaseEnv}; use reth_node_ethereum::EthereumNode; use reth_node_types::NodeTypesWithDBAdapter; -use reth_primitives::SealedHeader; +use reth_primitives::{SealedHeader, TransactionSigned}; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, @@ -83,7 +83,9 @@ fn header_provider_example(provider: T, number: u64) -> eyre: } /// The `TransactionsProvider` allows querying transaction-related information -fn txs_provider_example(provider: T) -> eyre::Result<()> { +fn txs_provider_example>( + provider: T, +) -> eyre::Result<()> { // Try the 5th tx let txid = 5; @@ -160,7 +162,9 @@ fn block_provider_example(provider: T, number: u64) -> eyre::Res } /// The `ReceiptProvider` allows querying the receipts tables. 
-fn receipts_provider_example( +fn receipts_provider_example< + T: ReceiptProvider + TransactionsProvider + HeaderProvider, +>( provider: T, ) -> eyre::Result<()> { let txid = 5; From 047bf8630aebe79a5f79a0edfe98f42345194b56 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Sat, 23 Nov 2024 12:05:03 +0700 Subject: [PATCH 651/970] perf(rpc-tx-helpers): simplify clones (#12800) --- .../rpc/rpc-eth-api/src/helpers/transaction.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index d87a4855b1d..2223ecdc9f7 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -166,10 +166,9 @@ pub trait EthTransactions: LoadTransaction { where Self: 'static, { - let this = self.clone(); + let provider = self.provider().clone(); self.spawn_blocking_io(move |_| { - let (tx, meta) = match this - .provider() + let (tx, meta) = match provider .transaction_by_hash_with_meta(hash) .map_err(Self::Error::from_eth_err)? { @@ -177,11 +176,10 @@ pub trait EthTransactions: LoadTransaction { None => return Ok(None), }; - let receipt = - match this.provider().receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? { - Some(recpt) => recpt, - None => return Ok(None), - }; + let receipt = match provider.receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? 
{ + Some(recpt) => recpt, + None => return Ok(None), + }; Ok(Some((tx, meta, receipt))) }) @@ -334,7 +332,7 @@ pub trait EthTransactions: LoadTransaction { tx: Bytes, ) -> impl Future> + Send { async move { - let recovered = recover_raw_transaction(tx.clone())?; + let recovered = recover_raw_transaction(tx)?; let pool_transaction = ::Transaction::from_pooled(recovered.into()); From f8a88c50d19d3749df94eb52dd009738552e31d6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 23 Nov 2024 06:21:20 +0100 Subject: [PATCH 652/970] chore: add missing from impl (#12801) --- crates/primitives/src/transaction/pooled.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 09111c61a17..e526eb3894f 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -572,6 +572,12 @@ impl InMemorySize for PooledTransactionsElement { } } +impl From for PooledTransactionsElement { + fn from(recovered: PooledTransactionsElementEcRecovered) -> Self { + recovered.into_transaction() + } +} + impl TryFrom for PooledTransactionsElement { type Error = TransactionConversionError; From 13786c76d440e9db157d0bf50722ae6ed13e950c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 23 Nov 2024 06:21:29 +0100 Subject: [PATCH 653/970] chore: use inmemory size functions for tx impl (#12802) --- crates/primitives/src/transaction/mod.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index cc966154c09..abbf4d40248 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -14,10 +14,7 @@ use alloy_primitives::{ keccak256, Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, }; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; -use core::{ - hash::{Hash, 
Hasher}, - mem, -}; +use core::hash::{Hash, Hasher}; use derive_more::{AsRef, Deref}; use once_cell as _; #[cfg(not(feature = "std"))] @@ -1461,7 +1458,7 @@ impl InMemorySize for TransactionSigned { /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. #[inline] fn size(&self) -> usize { - mem::size_of::() + self.transaction.size() + mem::size_of::() + self.hash().size() + self.transaction.size() + self.signature().size() } } From 0d17f14e3d5ffdc6bfb602a6cb06b24b398fd0d8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 23 Nov 2024 08:08:41 +0100 Subject: [PATCH 654/970] perf: inline pooled transactions max (#12805) --- crates/transaction-pool/src/lib.rs | 2 +- crates/transaction-pool/src/pool/mod.rs | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 8d11d7595b1..3194ebba6f8 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -409,7 +409,7 @@ where &self, max: usize, ) -> Vec>> { - self.pooled_transactions().into_iter().take(max).collect() + self.pool.pooled_transactions_max(max) } fn get_pooled_transaction_elements( diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 3f7ecfa7836..8c17da783ac 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -304,6 +304,14 @@ where self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).collect() } + /// Returns only the first `max` transactions in the pool. + pub(crate) fn pooled_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).take(max).collect() + } + /// Returns the [`BlobTransaction`] for the given transaction if the sidecar exists. 
/// /// Caution: this assumes the given transaction is eip-4844 From ebb4fc2bb65aaa3ede9155bdd273cb7fcc773e3b Mon Sep 17 00:00:00 2001 From: Z <12710516+zitup@users.noreply.github.com> Date: Sat, 23 Nov 2024 19:43:14 +0800 Subject: [PATCH 655/970] chore(sdk): Add MaybeArbitrary as super trait (#12661) Co-authored-by: Emilia Hane --- Cargo.lock | 242 +++++++++--------- crates/optimism/node/Cargo.toml | 1 + crates/optimism/primitives/Cargo.toml | 14 + crates/optimism/primitives/src/tx_type.rs | 7 +- crates/primitives-traits/src/block/body.rs | 3 +- crates/primitives-traits/src/block/header.rs | 4 +- crates/primitives-traits/src/block/mod.rs | 16 +- crates/primitives-traits/src/header/sealed.rs | 7 +- crates/primitives-traits/src/receipt.rs | 5 +- .../src/transaction/tx_type.rs | 3 +- 10 files changed, 171 insertions(+), 131 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f3d34cfba5..e0cfc839029 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -418,7 +418,7 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -643,7 +643,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -659,7 +659,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "syn-solidity", "tiny-keccak", ] @@ -675,7 +675,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "syn-solidity", ] @@ -881,7 +881,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1104,7 +1104,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1115,7 +1115,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.87", + "syn 2.0.89", ] [[package]] @@ -1153,7 +1153,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1259,7 +1259,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1441,7 +1441,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -1548,9 +1548,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" +checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" dependencies = [ "bytemuck_derive", ] @@ -1563,7 +1563,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1781,7 +1781,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2225,7 +2225,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2249,7 +2249,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2260,7 +2260,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2382,7 +2382,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2393,7 +2393,7 @@ checksum = 
"5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2414,7 +2414,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "unicode-xid", ] @@ -2528,7 +2528,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2678,7 +2678,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2689,7 +2689,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2746,7 +2746,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3306,7 +3306,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3485,9 +3485,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", @@ -3750,9 +3750,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", @@ -3832,7 +3832,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3982,7 +3982,7 @@ checksum = 
"1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4043,13 +4043,13 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] @@ -4171,7 +4171,7 @@ dependencies = [ "pretty_assertions", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4272,9 +4272,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" [[package]] name = "jni" @@ -4419,7 +4419,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4579,9 +4579,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.162" +version = "0.2.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" +checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" [[package]] name = "libloading" @@ -4601,9 +4601,9 @@ checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libp2p-identity" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" +checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" dependencies = [ "asn1_der", "bs58", @@ 
-4837,7 +4837,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4964,9 +4964,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" dependencies = [ "cfg-if", "downcast", @@ -4978,14 +4978,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5233,7 +5233,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5286,9 +5286,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72da577a88d35b893fae6467112651f26ef023434c196b2a0b3dc75bc853e0e4" +checksum = "fce158d886815d419222daa67fcdf949a34f7950653a4498ebeb4963331f70ed" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5304,9 +5304,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818180672dd14ca6642fb57942e1cbd602669f42b6e0222b7ea9bbcae065d67e" +checksum = "2734e9a65efb90fe4520303f984c124766b7d2f2e5dd51cbe54d6269c85a3c91" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5319,9 +5319,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.6.7" +version = 
"0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f82e805bad171ceae2af45efaecf8d0b50622cff3473e3c998ff1dd340de35" +checksum = "87e4aef8ed017004a176ab1de49df419f59c0fb4a6ce3b693a10fe099fe1afe7" dependencies = [ "alloy-consensus", "alloy-network", @@ -5334,9 +5334,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1803a1ac96203b8f713b1fa9b7509c46c645ca7bc22b582761a7495e999d4301" +checksum = "6c68a3e2770890da3ad2fd20d7fe0c8e15672707577b4168a60e388c8eceaca0" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5357,9 +5357,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a838c125256e02e2f9da88c51e263b02a06cda7e60382fe2551a3385b516f5bb" +checksum = "060ebeaea8c772e396215f69bb86d231ec8b7f36aca0dd6ce367ceaa9a8c33e6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5376,9 +5376,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c227fcc7d81d4023363ba12406e57ebcc1c7cbb1075c38ea471ae32138d4706d" +checksum = "864dbd5511ef4ef00b6c2c980739259b25b24048007b7751ca0069b30b1e3fee" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5470,9 +5470,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" dependencies = [ "arbitrary", "arrayvec", @@ -5481,19 +5481,20 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", + "rustversion", "serde", ] [[package]] name = 
"parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] @@ -5608,7 +5609,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5637,7 +5638,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5809,7 +5810,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5860,14 +5861,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "307e3004becf10f5a6e0d59d20f3cd28231b0e0827a96cd3e0ce6d14bc1e4bb3" dependencies = [ "unicode-ident", ] @@ -5958,7 +5959,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -6315,7 +6316,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", "tokio-rustls", "tokio-util", @@ -6770,7 +6771,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -8388,6 +8389,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", + "arbitrary", 
"bytes", "derive_more 1.0.0", "op-alloy-consensus", @@ -9682,7 +9684,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.87", + "syn 2.0.89", "unicode-ident", ] @@ -9764,9 +9766,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.40" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -9777,9 +9779,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.16" +version = "0.23.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" dependencies = [ "log", "once_cell", @@ -9922,9 +9924,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -10036,9 +10038,9 @@ dependencies = [ [[package]] name = "semver-parser" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" dependencies = [ "pest", ] @@ -10072,14 +10074,14 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "indexmap 2.6.0", "itoa", @@ -10107,7 +10109,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10158,7 +10160,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10181,7 +10183,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10464,7 +10466,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10522,9 +10524,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.87" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -10540,7 +10542,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10551,9 +10553,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] @@ -10566,7 +10568,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10643,7 +10645,7 @@ dependencies = [ "prettyplease", 
"proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10691,7 +10693,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10702,7 +10704,7 @@ checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10879,7 +10881,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11007,9 +11009,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", "base64 0.22.1", @@ -11080,7 +11082,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11329,9 +11331,9 @@ checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-normalization" @@ -11478,7 +11480,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11549,7 +11551,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-shared", ] @@ -11583,7 
+11585,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11609,9 +11611,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb4f099acbc1043cc752b91615b24b02d7f6fcd975bd781fed9f50b3c3e15bf7" +checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" dependencies = [ "futures", "js-sys", @@ -11749,7 +11751,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11760,7 +11762,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11771,7 +11773,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11782,7 +11784,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -12057,7 +12059,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -12079,7 +12081,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -12099,7 +12101,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -12120,7 +12122,7 @@ checksum = 
"ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -12142,7 +12144,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 18ceee8ef8b..fbc055a82e9 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -119,6 +119,7 @@ test-utils = [ "reth-trie-db/test-utils", "revm/test-utils", "reth-optimism-node/test-utils", + "reth-optimism-primitives/arbitrary", ] reth-codec = [ "reth-primitives/reth-codec", diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index fc368807736..33f936b2fd1 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -34,9 +34,13 @@ serde = { workspace = true, optional = true } # misc derive_more.workspace = true +# test-utils +arbitrary = { workspace = true, features = ["derive"], optional = true } + [dev-dependencies] reth-codecs = { workspace = true, features = ["test-utils"] } rstest.workspace = true +arbitrary.workspace = true [features] default = ["std", "reth-codec"] @@ -65,3 +69,13 @@ serde = [ "reth-codecs/serde", "op-alloy-consensus/serde", ] +arbitrary = [ + "dep:arbitrary", + "reth-primitives-traits/arbitrary", + "reth-primitives/arbitrary", + "reth-codecs?/arbitrary", + "op-alloy-consensus/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", +] \ No newline at end of file diff --git a/crates/optimism/primitives/src/tx_type.rs b/crates/optimism/primitives/src/tx_type.rs index c6e7fcc0a80..9976221b424 100644 --- a/crates/optimism/primitives/src/tx_type.rs +++ b/crates/optimism/primitives/src/tx_type.rs @@ -2,10 +2,11 @@ //! `OpTxType` implements `reth_primitives_traits::TxType`. 
//! This type is required because a `Compact` impl is needed on the deposit tx type. +use core::fmt::Debug; + use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable, Error}; use bytes::BufMut; -use core::fmt::Debug; use derive_more::{ derive::{From, Into}, Display, @@ -13,8 +14,10 @@ use derive_more::{ use op_alloy_consensus::OpTxType as AlloyOpTxType; use reth_primitives_traits::{InMemorySize, TxType}; -/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement [`TxType`] trait. +/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement +/// [`TxType`] trait. #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[into(u8)] pub struct OpTxType(AlloyOpTxType); diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index ff41536ba3f..fd7f7f1c631 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -4,7 +4,7 @@ use alloc::fmt; use alloy_consensus::Transaction; -use crate::{FullSignedTx, InMemorySize, MaybeSerde}; +use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullBlockBody: BlockBody {} @@ -26,6 +26,7 @@ pub trait BlockBody: + alloy_rlp::Decodable + InMemorySize + MaybeSerde + + MaybeArbitrary { /// Ordered list of signed transactions as committed in block. 
type Transaction: Transaction; diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 695e63ed10e..26806808532 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -4,7 +4,7 @@ use core::fmt; use alloy_primitives::Sealable; -use crate::{InMemorySize, MaybeCompact, MaybeSerde}; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by block header to support full node /// operations. @@ -28,6 +28,7 @@ pub trait BlockHeader: + Sealable + InMemorySize + MaybeSerde + + MaybeArbitrary { } @@ -46,5 +47,6 @@ impl BlockHeader for T where + Sealable + InMemorySize + MaybeSerde + + MaybeArbitrary { } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 3f4fbd343ee..c0f5a1ffc63 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -5,7 +5,9 @@ pub mod header; use alloc::fmt; -use crate::{BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeSerde}; +use crate::{ + BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, MaybeSerde, +}; /// Helper trait that unifies all behaviour required by block to support full node operations. pub trait FullBlock: @@ -26,7 +28,17 @@ impl FullBlock for T where // senders #[auto_impl::auto_impl(&, Arc)] pub trait Block: - Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + InMemorySize + MaybeSerde + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + InMemorySize + + MaybeSerde + + MaybeArbitrary { /// Header part of the block. 
type Header: BlockHeader + 'static; diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index f0a6869ed1e..08add0ac3c1 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -159,9 +159,12 @@ impl From> for Sealed { } #[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { +impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader +where + H: for<'b> arbitrary::Arbitrary<'b> + Sealable, +{ fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let header = Header::arbitrary(u)?; + let header = H::arbitrary(u)?; Ok(Self::seal(header)) } diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 4370d2ac00f..e2af40c447e 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,12 +1,12 @@ //! Receipt abstraction +use alloc::vec::Vec; use core::fmt; -use alloc::vec::Vec; use alloy_consensus::TxReceipt; use alloy_primitives::B256; -use crate::{InMemorySize, MaybeCompact, MaybeSerde}; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by receipt to support full node operations. pub trait FullReceipt: Receipt + MaybeCompact {} @@ -27,6 +27,7 @@ pub trait Receipt: + alloy_rlp::Decodable + MaybeSerde + InMemorySize + + MaybeArbitrary { /// Returns transaction type. 
fn tx_type(&self) -> u8; diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs index 931fcb773bf..d2caebe4c9f 100644 --- a/crates/primitives-traits/src/transaction/tx_type.rs +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -4,7 +4,7 @@ use core::fmt; use alloy_primitives::{U64, U8}; -use crate::{InMemorySize, MaybeCompact}; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact}; /// Helper trait that unifies all behaviour required by transaction type ID to support full node /// operations. @@ -33,6 +33,7 @@ pub trait TxType: + alloy_rlp::Encodable + alloy_rlp::Decodable + InMemorySize + + MaybeArbitrary { /// Returns `true` if this is a legacy transaction. fn is_legacy(&self) -> bool; From e3ffb3f43b3717990a94e21ab5c3fc732a4234a4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 23 Nov 2024 15:22:49 +0100 Subject: [PATCH 656/970] fix: add arbitrary feature for op cli dev (#12807) --- crates/optimism/cli/Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index d090075927a..ba36568efe8 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -111,7 +111,8 @@ jemalloc = [ dev = [ "dep:proptest", - "reth-cli-commands/arbitrary" + "reth-cli-commands/arbitrary", + "reth-optimism-primitives/arbitrary" ] serde = [ "alloy-consensus?/serde", From 69e54da04983bd8b4f5c3c17d8d3bc63ab0fe181 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 23 Nov 2024 15:24:17 +0100 Subject: [PATCH 657/970] chore: remove duplicated functions (#12804) --- Cargo.lock | 1 + crates/optimism/evm/src/l1.rs | 1 + crates/primitives/src/transaction/mod.rs | 102 ++---------------- crates/primitives/src/transaction/pooled.rs | 1 + .../rpc/rpc-types-compat/src/transaction.rs | 2 +- crates/rpc/rpc/src/eth/bundle.rs | 8 +- crates/rpc/rpc/src/eth/helpers/signer.rs | 1 + 
.../beacon-api-sidecar-fetcher/Cargo.toml | 1 + .../src/mined_sidecar.rs | 1 + 9 files changed, 20 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e0cfc839029..9ae0574414e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2759,6 +2759,7 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" name = "example-beacon-api-sidecar-fetcher" version = "0.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-beacon", "clap", diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 9d3e76fb442..ef8c3f3b3db 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -2,6 +2,7 @@ use crate::OpBlockExecutionError; use alloc::{string::ToString, sync::Arc}; +use alloy_consensus::Transaction; use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256}; use reth_chainspec::ChainSpec; use reth_execution_errors::BlockExecutionError; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index abbf4d40248..af0529132b9 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -219,29 +219,6 @@ impl Transaction { } } - /// Gets the transaction's [`TxKind`], which is the address of the recipient or - /// [`TxKind::Create`] if the transaction is a contract creation. - pub const fn kind(&self) -> TxKind { - match self { - Self::Legacy(TxLegacy { to, .. }) | - Self::Eip2930(TxEip2930 { to, .. }) | - Self::Eip1559(TxEip1559 { to, .. }) => *to, - Self::Eip4844(TxEip4844 { to, .. }) | Self::Eip7702(TxEip7702 { to, .. }) => { - TxKind::Call(*to) - } - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { to, .. }) => *to, - } - } - - /// Get the transaction's address of the contract that will be called, or the address that will - /// receive the transfer. - /// - /// Returns `None` if this is a `CREATE` transaction. - pub fn to(&self) -> Option

{ - self.kind().to().copied() - } - /// Get the transaction's type pub const fn tx_type(&self) -> TxType { match self { @@ -255,56 +232,6 @@ impl Transaction { } } - /// Returns the [`AccessList`] of the transaction. - /// - /// Returns `None` for legacy transactions. - pub const fn access_list(&self) -> Option<&AccessList> { - match self { - Self::Legacy(_) => None, - Self::Eip2930(tx) => Some(&tx.access_list), - Self::Eip1559(tx) => Some(&tx.access_list), - Self::Eip4844(tx) => Some(&tx.access_list), - Self::Eip7702(tx) => Some(&tx.access_list), - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - - /// Returns the [`SignedAuthorization`] list of the transaction. - /// - /// Returns `None` if this transaction is not EIP-7702. - pub fn authorization_list(&self) -> Option<&[SignedAuthorization]> { - match self { - Self::Eip7702(tx) => Some(&tx.authorization_list), - _ => None, - } - } - - /// Returns true if the tx supports dynamic fees - pub const fn is_dynamic_fee(&self) -> bool { - match self { - Self::Legacy(_) | Self::Eip2930(_) => false, - Self::Eip1559(_) | Self::Eip4844(_) | Self::Eip7702(_) => true, - #[cfg(feature = "optimism")] - Self::Deposit(_) => false, - } - } - - /// Blob versioned hashes for eip4844 transaction, for legacy, eip1559, eip2930 and eip7702 - /// transactions this is `None` - /// - /// This is also commonly referred to as the "blob versioned hashes" (`BlobVersionedHashes`). - pub fn blob_versioned_hashes(&self) -> Option> { - match self { - Self::Legacy(_) | Self::Eip2930(_) | Self::Eip1559(_) | Self::Eip7702(_) => None, - Self::Eip4844(TxEip4844 { blob_versioned_hashes, .. }) => { - Some(blob_versioned_hashes.clone()) - } - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - /// Returns the blob gas used for all blobs of the EIP-4844 transaction if it is an EIP-4844 /// transaction. /// @@ -345,19 +272,6 @@ impl Transaction { } } - /// Get the transaction's input field. 
- pub const fn input(&self) -> &Bytes { - match self { - Self::Legacy(TxLegacy { input, .. }) | - Self::Eip2930(TxEip2930 { input, .. }) | - Self::Eip1559(TxEip1559 { input, .. }) | - Self::Eip4844(TxEip4844 { input, .. }) | - Self::Eip7702(TxEip7702 { input, .. }) => input, - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { input, .. }) => input, - } - } - /// This encodes the transaction _without_ the signature, and is only suitable for creating a /// hash intended for signing. pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { @@ -2097,13 +2011,15 @@ mod tests { assert_eq!( tx.blob_versioned_hashes(), - Some(vec![ - b256!("012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921a"), - b256!("0152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4"), - b256!("013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7"), - b256!("01148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1"), - b256!("011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e6549"), - ]) + Some( + &[ + b256!("012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921a"), + b256!("0152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4"), + b256!("013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7"), + b256!("01148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1"), + b256!("011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e6549"), + ][..] 
+ ) ); } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index e526eb3894f..dff6d090096 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -696,6 +696,7 @@ impl TryFrom for PooledTransactionsElementEcRecove #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Transaction as _; use alloy_primitives::{address, hex}; use assert_matches::assert_matches; use bytes::Bytes; diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs index 31c9d967cd1..b439b61d44e 100644 --- a/crates/rpc/rpc-types-compat/src/transaction.rs +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -75,7 +75,7 @@ pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> Transact let access_list = tx.transaction.access_list().cloned(); let max_fee_per_blob_gas = tx.transaction.max_fee_per_blob_gas(); let authorization_list = tx.transaction.authorization_list().map(|l| l.to_vec()); - let blob_versioned_hashes = tx.transaction.blob_versioned_hashes(); + let blob_versioned_hashes = tx.transaction.blob_versioned_hashes().map(Vec::from); let tx_type = tx.transaction.tx_type(); // fees depending on the transaction type diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index ee2b3ed5e7c..10eec4dbf97 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -1,11 +1,12 @@ //! `Eth` bundle implementation and helpers. 
+use alloy_consensus::Transaction as _; use alloy_primitives::{Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::PooledTransactionsElement; +use reth_primitives::{PooledTransactionsElement, Transaction}; use reth_provider::{ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_api::{ @@ -19,7 +20,7 @@ use revm::{ primitives::{ResultAndState, TxEnv}, }; use revm_primitives::{EnvKzgSettings, EnvWithHandlerCfg, SpecId, MAX_BLOB_GAS_PER_BLOCK}; -use std::sync::Arc; +use std::{ops::Deref, sync::Arc}; /// `Eth` bundle implementation. pub struct EthBundle { @@ -179,8 +180,7 @@ where let tx = tx.into_transaction(); hasher.update(tx.hash()); - let gas_price = tx - .effective_tip_per_gas(basefee) + let gas_price = Transaction::effective_tip_per_gas(tx.deref(), basefee) .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) .map_err(Eth::Error::from_eth_err)?; eth_api.evm_config().fill_tx_env(evm.tx_mut(), &tx, signer); diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index e7e9c64447b..32645ba08d6 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -109,6 +109,7 @@ impl EthSigner for DevSigner { #[cfg(test)] mod tests { + use alloy_consensus::Transaction; use alloy_primitives::{Bytes, U256}; use alloy_rpc_types_eth::TransactionInput; use revm_primitives::TxKind; diff --git a/examples/beacon-api-sidecar-fetcher/Cargo.toml b/examples/beacon-api-sidecar-fetcher/Cargo.toml index 47a2a181f7e..d9590f87e07 100644 --- a/examples/beacon-api-sidecar-fetcher/Cargo.toml +++ b/examples/beacon-api-sidecar-fetcher/Cargo.toml @@ -11,6 +11,7 @@ reth-node-ethereum.workspace = true alloy-rpc-types-beacon.workspace = true alloy-primitives.workspace = 
true +alloy-consensus.workspace = true clap.workspace = true eyre.workspace = true diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 2436ee0210e..d2077edafff 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -1,4 +1,5 @@ use crate::BeaconSidecarConfig; +use alloy_consensus::Transaction as _; use alloy_primitives::B256; use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator}; use eyre::Result; From c96118346a218f2845d9047895cd9e38205a7d50 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 23 Nov 2024 15:25:18 +0100 Subject: [PATCH 658/970] chore: rm unused variant type (#12798) --- crates/primitives/src/transaction/mod.rs | 2 - crates/primitives/src/transaction/variant.rs | 145 ------------------- 2 files changed, 147 deletions(-) delete mode 100644 crates/primitives/src/transaction/variant.rs diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index af0529132b9..ca03bfe4f7c 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -41,7 +41,6 @@ pub use reth_primitives_traits::WithEncoded; pub use sidecar::BlobTransaction; pub use signature::{recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; -pub use variant::TransactionSignedVariant; pub(crate) mod access_list; mod compat; @@ -56,7 +55,6 @@ mod tx_type; pub mod signature; pub(crate) mod util; -mod variant; /// Expected number of transactions where we can expect a speed-up by recovering the senders in /// parallel. diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs deleted file mode 100644 index dd47df9a869..00000000000 --- a/crates/primitives/src/transaction/variant.rs +++ /dev/null @@ -1,145 +0,0 @@ -//! 
Helper enum functions for `Transaction`, `TransactionSigned` and -//! `TransactionSignedEcRecovered` - -use crate::{ - Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, -}; -use alloy_primitives::{Address, B256}; -use core::ops::Deref; - -/// Represents various different transaction formats used in reth. -/// -/// All variants are based on a the raw [Transaction] data and can contain additional information -/// extracted (expensive) from that transaction, like the hash and the signer. -#[derive(Debug, Clone, PartialEq, Eq, Hash, derive_more::From)] -pub enum TransactionSignedVariant { - /// A signed transaction without a hash. - SignedNoHash(TransactionSignedNoHash), - /// Contains the plain transaction data its signature and hash. - Signed(TransactionSigned), - /// Contains the plain transaction data its signature and hash and the successfully recovered - /// signer. - SignedEcRecovered(TransactionSignedEcRecovered), -} - -impl TransactionSignedVariant { - /// Returns the raw transaction object - pub const fn as_raw(&self) -> &Transaction { - match self { - Self::SignedNoHash(tx) => &tx.transaction, - Self::Signed(tx) => &tx.transaction, - Self::SignedEcRecovered(tx) => &tx.signed_transaction.transaction, - } - } - - /// Returns the hash of the transaction - pub fn hash(&self) -> B256 { - match self { - Self::SignedNoHash(tx) => tx.hash(), - Self::Signed(tx) => tx.hash(), - Self::SignedEcRecovered(tx) => tx.hash(), - } - } - - /// Returns the signer of the transaction. - /// - /// If the transaction is of not of [`TransactionSignedEcRecovered`] it will be recovered. - pub fn signer(&self) -> Option
{ - match self { - Self::SignedNoHash(tx) => tx.recover_signer(), - Self::Signed(tx) => tx.recover_signer(), - Self::SignedEcRecovered(tx) => Some(tx.signer), - } - } - - /// Returns [`TransactionSigned`] type - /// else None - pub const fn as_signed(&self) -> Option<&TransactionSigned> { - match self { - Self::Signed(tx) => Some(tx), - _ => None, - } - } - - /// Returns `TransactionSignedEcRecovered` type - /// else None - pub const fn as_signed_ec_recovered(&self) -> Option<&TransactionSignedEcRecovered> { - match self { - Self::SignedEcRecovered(tx) => Some(tx), - _ => None, - } - } - - /// Returns true if the transaction is of [`TransactionSigned`] variant - pub const fn is_signed(&self) -> bool { - matches!(self, Self::Signed(_)) - } - - /// Returns true if the transaction is of [`TransactionSignedNoHash`] variant - pub const fn is_signed_no_hash(&self) -> bool { - matches!(self, Self::SignedNoHash(_)) - } - - /// Returns true if the transaction is of [`TransactionSignedEcRecovered`] variant - pub const fn is_signed_ec_recovered(&self) -> bool { - matches!(self, Self::SignedEcRecovered(_)) - } - - /// Consumes the [`TransactionSignedVariant`] and returns the consumed [Transaction] - pub fn into_raw(self) -> Transaction { - match self { - Self::SignedNoHash(tx) => tx.transaction, - Self::Signed(tx) => tx.transaction, - Self::SignedEcRecovered(tx) => tx.signed_transaction.transaction, - } - } - - /// Consumes the [`TransactionSignedVariant`] and returns the consumed [`TransactionSigned`] - pub fn into_signed(self) -> TransactionSigned { - match self { - Self::SignedNoHash(tx) => tx.with_hash(), - Self::Signed(tx) => tx, - Self::SignedEcRecovered(tx) => tx.signed_transaction, - } - } - - /// Consumes the [`TransactionSignedVariant`] and converts it into a - /// [`TransactionSignedEcRecovered`] - /// - /// If the variants is not a [`TransactionSignedEcRecovered`] it will recover the sender. 
- /// - /// Returns `None` if the transaction's signature is invalid - pub fn into_signed_ec_recovered(self) -> Option { - self.try_into_signed_ec_recovered().ok() - } - - /// Consumes the [`TransactionSignedVariant`] and converts it into a - /// [`TransactionSignedEcRecovered`] - /// - /// If the variants is not a [`TransactionSignedEcRecovered`] it will recover the sender. - /// - /// Returns an error if the transaction's signature is invalid. - pub fn try_into_signed_ec_recovered( - self, - ) -> Result { - match self { - Self::SignedEcRecovered(tx) => Ok(tx), - Self::Signed(tx) => tx.try_into_ecrecovered(), - Self::SignedNoHash(tx) => tx.with_hash().try_into_ecrecovered(), - } - } -} - -impl AsRef for TransactionSignedVariant { - fn as_ref(&self) -> &Transaction { - self.as_raw() - } -} - -impl Deref for TransactionSignedVariant { - type Target = Transaction; - - fn deref(&self) -> &Self::Target { - self.as_raw() - } -} From c869c7118365d65251dfd7a0c9fc703ec4485495 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 23 Nov 2024 15:42:36 +0100 Subject: [PATCH 659/970] Revert "fix: add arbitrary feature for op cli dev" (#12808) --- crates/optimism/cli/Cargo.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index ba36568efe8..d090075927a 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -111,8 +111,7 @@ jemalloc = [ dev = [ "dep:proptest", - "reth-cli-commands/arbitrary", - "reth-optimism-primitives/arbitrary" + "reth-cli-commands/arbitrary" ] serde = [ "alloy-consensus?/serde", From 795e29cb5b88f674e8d873b86009b3956b3c96f6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 23 Nov 2024 15:54:39 +0100 Subject: [PATCH 660/970] Revert "chore(sdk): Add MaybeArbitrary as super trait" (#12809) --- Cargo.lock | 242 +++++++++--------- crates/optimism/node/Cargo.toml | 1 - crates/optimism/primitives/Cargo.toml | 14 - 
crates/optimism/primitives/src/tx_type.rs | 7 +- crates/primitives-traits/src/block/body.rs | 3 +- crates/primitives-traits/src/block/header.rs | 4 +- crates/primitives-traits/src/block/mod.rs | 16 +- crates/primitives-traits/src/header/sealed.rs | 7 +- crates/primitives-traits/src/receipt.rs | 5 +- .../src/transaction/tx_type.rs | 3 +- 10 files changed, 131 insertions(+), 171 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ae0574414e..8a92aadda19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -418,7 +418,7 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -643,7 +643,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -659,7 +659,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", "syn-solidity", "tiny-keccak", ] @@ -675,7 +675,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", "syn-solidity", ] @@ -881,7 +881,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -1104,7 +1104,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -1115,7 +1115,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -1153,7 +1153,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -1259,7 +1259,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -1441,7 +1441,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", "synstructure", ] @@ -1548,9 +1548,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.20.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" +checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" dependencies = [ "bytemuck_derive", ] @@ -1563,7 +1563,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -1781,7 +1781,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -2225,7 +2225,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -2249,7 +2249,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -2260,7 +2260,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -2382,7 +2382,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -2393,7 +2393,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -2414,7 +2414,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", "unicode-xid", ] @@ -2528,7 +2528,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] 
[[package]] @@ -2678,7 +2678,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -2689,7 +2689,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -2746,7 +2746,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -3307,7 +3307,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -3486,9 +3486,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.7" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -3751,9 +3751,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -3833,7 +3833,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -3983,7 +3983,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -4044,13 +4044,13 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +checksum = 
"11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 1.0.109", ] [[package]] @@ -4172,7 +4172,7 @@ dependencies = [ "pretty_assertions", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -4273,9 +4273,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.13" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jni" @@ -4420,7 +4420,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -4580,9 +4580,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.164" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" @@ -4602,9 +4602,9 @@ checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libp2p-identity" -version = "0.2.10" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" +checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" dependencies = [ "asn1_der", "bs58", @@ -4838,7 +4838,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -4965,9 +4965,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" +checksum = 
"d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" dependencies = [ "cfg-if", "downcast", @@ -4979,14 +4979,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" +checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -5234,7 +5234,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -5287,9 +5287,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.8" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce158d886815d419222daa67fcdf949a34f7950653a4498ebeb4963331f70ed" +checksum = "72da577a88d35b893fae6467112651f26ef023434c196b2a0b3dc75bc853e0e4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5305,9 +5305,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.6.8" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2734e9a65efb90fe4520303f984c124766b7d2f2e5dd51cbe54d6269c85a3c91" +checksum = "818180672dd14ca6642fb57942e1cbd602669f42b6e0222b7ea9bbcae065d67e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5320,9 +5320,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.6.8" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e4aef8ed017004a176ab1de49df419f59c0fb4a6ce3b693a10fe099fe1afe7" +checksum = "12f82e805bad171ceae2af45efaecf8d0b50622cff3473e3c998ff1dd340de35" dependencies = [ "alloy-consensus", "alloy-network", @@ -5335,9 +5335,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.8" 
+version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c68a3e2770890da3ad2fd20d7fe0c8e15672707577b4168a60e388c8eceaca0" +checksum = "1803a1ac96203b8f713b1fa9b7509c46c645ca7bc22b582761a7495e999d4301" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5358,9 +5358,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.6.8" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060ebeaea8c772e396215f69bb86d231ec8b7f36aca0dd6ce367ceaa9a8c33e6" +checksum = "a838c125256e02e2f9da88c51e263b02a06cda7e60382fe2551a3385b516f5bb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5377,9 +5377,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.8" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864dbd5511ef4ef00b6c2c980739259b25b24048007b7751ca0069b30b1e3fee" +checksum = "c227fcc7d81d4023363ba12406e57ebcc1c7cbb1075c38ea471ae32138d4706d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5471,9 +5471,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arbitrary", "arrayvec", @@ -5482,20 +5482,19 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", - "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 
1.0.109", ] [[package]] @@ -5610,7 +5609,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -5639,7 +5638,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -5811,7 +5810,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -5862,14 +5861,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] name = "proc-macro2" -version = "1.0.91" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307e3004becf10f5a6e0d59d20f3cd28231b0e0827a96cd3e0ce6d14bc1e4bb3" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] @@ -5960,7 +5959,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -6317,7 +6316,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper 1.0.1", "tokio", "tokio-rustls", "tokio-util", @@ -6772,7 +6771,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -8390,7 +8389,6 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "arbitrary", "bytes", "derive_more 1.0.0", "op-alloy-consensus", @@ -9685,7 +9683,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.89", + "syn 2.0.87", "unicode-ident", ] @@ -9767,9 +9765,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.41" +version = "0.38.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" dependencies = [ "bitflags 2.6.0", "errno", @@ -9780,9 +9778,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.17" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", "once_cell", @@ -9925,9 +9923,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] @@ -10039,9 +10037,9 @@ dependencies = [ [[package]] name = "semver-parser" -version = "0.10.3" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" dependencies = [ "pest", ] @@ -10075,14 +10073,14 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "indexmap 2.6.0", "itoa", @@ -10110,7 +10108,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] 
[[package]] @@ -10161,7 +10159,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -10184,7 +10182,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -10467,7 +10465,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -10525,9 +10523,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.89" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", @@ -10543,7 +10541,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -10554,9 +10552,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" dependencies = [ "futures-core", ] @@ -10569,7 +10567,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -10646,7 +10644,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -10694,7 +10692,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -10705,7 +10703,7 @@ checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -10882,7 +10880,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -11010,9 +11008,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ "async-compression", "base64 0.22.1", @@ -11083,7 +11081,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -11332,9 +11330,9 @@ checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.14" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" @@ -11481,7 +11479,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -11552,7 +11550,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", "wasm-bindgen-shared", ] @@ -11586,7 +11584,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11612,9 +11610,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.1" +version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +checksum = "bb4f099acbc1043cc752b91615b24b02d7f6fcd975bd781fed9f50b3c3e15bf7" dependencies = [ "futures", "js-sys", @@ -11752,7 +11750,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -11763,7 +11761,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -11774,7 +11772,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -11785,7 +11783,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -12060,7 +12058,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", "synstructure", ] @@ -12082,7 +12080,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -12102,7 +12100,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", "synstructure", ] @@ -12123,7 +12121,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] @@ -12145,7 +12143,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.87", ] [[package]] diff --git 
a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index fbc055a82e9..18ceee8ef8b 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -119,7 +119,6 @@ test-utils = [ "reth-trie-db/test-utils", "revm/test-utils", "reth-optimism-node/test-utils", - "reth-optimism-primitives/arbitrary", ] reth-codec = [ "reth-primitives/reth-codec", diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 33f936b2fd1..fc368807736 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -34,13 +34,9 @@ serde = { workspace = true, optional = true } # misc derive_more.workspace = true -# test-utils -arbitrary = { workspace = true, features = ["derive"], optional = true } - [dev-dependencies] reth-codecs = { workspace = true, features = ["test-utils"] } rstest.workspace = true -arbitrary.workspace = true [features] default = ["std", "reth-codec"] @@ -69,13 +65,3 @@ serde = [ "reth-codecs/serde", "op-alloy-consensus/serde", ] -arbitrary = [ - "dep:arbitrary", - "reth-primitives-traits/arbitrary", - "reth-primitives/arbitrary", - "reth-codecs?/arbitrary", - "op-alloy-consensus/arbitrary", - "alloy-consensus/arbitrary", - "alloy-eips/arbitrary", - "alloy-primitives/arbitrary", -] \ No newline at end of file diff --git a/crates/optimism/primitives/src/tx_type.rs b/crates/optimism/primitives/src/tx_type.rs index 9976221b424..c6e7fcc0a80 100644 --- a/crates/optimism/primitives/src/tx_type.rs +++ b/crates/optimism/primitives/src/tx_type.rs @@ -2,11 +2,10 @@ //! `OpTxType` implements `reth_primitives_traits::TxType`. //! This type is required because a `Compact` impl is needed on the deposit tx type. 
-use core::fmt::Debug; - use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable, Error}; use bytes::BufMut; +use core::fmt::Debug; use derive_more::{ derive::{From, Into}, Display, @@ -14,10 +13,8 @@ use derive_more::{ use op_alloy_consensus::OpTxType as AlloyOpTxType; use reth_primitives_traits::{InMemorySize, TxType}; -/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement -/// [`TxType`] trait. +/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement [`TxType`] trait. #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[into(u8)] pub struct OpTxType(AlloyOpTxType); diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index fd7f7f1c631..ff41536ba3f 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -4,7 +4,7 @@ use alloc::fmt; use alloy_consensus::Transaction; -use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde}; +use crate::{FullSignedTx, InMemorySize, MaybeSerde}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullBlockBody: BlockBody {} @@ -26,7 +26,6 @@ pub trait BlockBody: + alloy_rlp::Decodable + InMemorySize + MaybeSerde - + MaybeArbitrary { /// Ordered list of signed transactions as committed in block. 
type Transaction: Transaction; diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 26806808532..695e63ed10e 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -4,7 +4,7 @@ use core::fmt; use alloy_primitives::Sealable; -use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; +use crate::{InMemorySize, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by block header to support full node /// operations. @@ -28,7 +28,6 @@ pub trait BlockHeader: + Sealable + InMemorySize + MaybeSerde - + MaybeArbitrary { } @@ -47,6 +46,5 @@ impl BlockHeader for T where + Sealable + InMemorySize + MaybeSerde - + MaybeArbitrary { } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index c0f5a1ffc63..3f4fbd343ee 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -5,9 +5,7 @@ pub mod header; use alloc::fmt; -use crate::{ - BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, MaybeSerde, -}; +use crate::{BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeSerde}; /// Helper trait that unifies all behaviour required by block to support full node operations. pub trait FullBlock: @@ -28,17 +26,7 @@ impl FullBlock for T where // senders #[auto_impl::auto_impl(&, Arc)] pub trait Block: - Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + InMemorySize - + MaybeSerde - + MaybeArbitrary + Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + InMemorySize + MaybeSerde { /// Header part of the block. 
type Header: BlockHeader + 'static; diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 08add0ac3c1..f0a6869ed1e 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -159,12 +159,9 @@ impl From> for Sealed { } #[cfg(any(test, feature = "arbitrary"))] -impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader -where - H: for<'b> arbitrary::Arbitrary<'b> + Sealable, -{ +impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let header = H::arbitrary(u)?; + let header = Header::arbitrary(u)?; Ok(Self::seal(header)) } diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index e2af40c447e..4370d2ac00f 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,12 +1,12 @@ //! Receipt abstraction -use alloc::vec::Vec; use core::fmt; +use alloc::vec::Vec; use alloy_consensus::TxReceipt; use alloy_primitives::B256; -use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; +use crate::{InMemorySize, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by receipt to support full node operations. pub trait FullReceipt: Receipt + MaybeCompact {} @@ -27,7 +27,6 @@ pub trait Receipt: + alloy_rlp::Decodable + MaybeSerde + InMemorySize - + MaybeArbitrary { /// Returns transaction type. 
fn tx_type(&self) -> u8; diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs index d2caebe4c9f..931fcb773bf 100644 --- a/crates/primitives-traits/src/transaction/tx_type.rs +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -4,7 +4,7 @@ use core::fmt; use alloy_primitives::{U64, U8}; -use crate::{InMemorySize, MaybeArbitrary, MaybeCompact}; +use crate::{InMemorySize, MaybeCompact}; /// Helper trait that unifies all behaviour required by transaction type ID to support full node /// operations. @@ -33,7 +33,6 @@ pub trait TxType: + alloy_rlp::Encodable + alloy_rlp::Decodable + InMemorySize - + MaybeArbitrary { /// Returns `true` if this is a legacy transaction. fn is_legacy(&self) -> bool; From 7c5cb90e9ae22204af332b4d0851d3ede608732a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 23 Nov 2024 15:27:51 +0100 Subject: [PATCH 661/970] feat: add signedtx for pooled tx (#12799) --- crates/primitives/src/transaction/pooled.rs | 60 +++++++++++++++++++-- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index dff6d090096..cecc995ddba 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,12 +1,17 @@ //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a //! response to `GetPooledTransactions`. 
-use super::{error::TransactionConversionError, signature::recover_signer, TxEip7702}; -use crate::{BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered}; +use super::{ + error::TransactionConversionError, recover_signer_unchecked, signature::recover_signer, + TxEip7702, +}; +use crate::{ + BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, +}; use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - Signed, TxEip4844WithSidecar, + SignableTransaction, Signed, TxEip4844WithSidecar, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Result, Encodable2718}, @@ -19,8 +24,9 @@ use alloy_primitives::{ }; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; +use core::hash::{Hash, Hasher}; use derive_more::{AsRef, Deref}; -use reth_primitives_traits::InMemorySize; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions`. This can include either a blob transaction, or a @@ -220,6 +226,18 @@ impl PooledTransactionsElement { } } +impl Default for PooledTransactionsElement { + fn default() -> Self { + Self::Legacy(TxLegacy::default().into_signed(Signature::test_signature())) + } +} + +impl Hash for PooledTransactionsElement { + fn hash(&self, state: &mut H) { + self.trie_hash().hash(state); + } +} + impl Encodable for PooledTransactionsElement { /// This encodes the transaction _with_ the signature, and an rlp header. 
/// @@ -560,6 +578,40 @@ impl alloy_consensus::Transaction for PooledTransactionsElement { } } +impl SignedTransaction for PooledTransactionsElement { + type Type = TxType; + + fn tx_hash(&self) -> &TxHash { + match self { + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + Self::BlobTransaction(tx) => tx.hash(), + } + } + + fn signature(&self) -> &Signature { + match self { + Self::Legacy(tx) => tx.signature(), + Self::Eip2930(tx) => tx.signature(), + Self::Eip1559(tx) => tx.signature(), + Self::Eip7702(tx) => tx.signature(), + Self::BlobTransaction(tx) => tx.signature(), + } + } + + fn recover_signer(&self) -> Option
{ + let signature_hash = self.signature_hash(); + recover_signer(self.signature(), signature_hash) + } + + fn recover_signer_unchecked(&self) -> Option
{ + let signature_hash = self.signature_hash(); + recover_signer_unchecked(self.signature(), signature_hash) + } +} + impl InMemorySize for PooledTransactionsElement { fn size(&self) -> usize { match self { From 9b289351b6835774b7398afcbcb66e7c2ef6e7f7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 23 Nov 2024 15:33:19 +0100 Subject: [PATCH 662/970] feat: use defined pool type internally (#12803) --- crates/transaction-pool/src/pool/mod.rs | 84 ++++++++++--------------- crates/transaction-pool/src/traits.rs | 11 +++- 2 files changed, 43 insertions(+), 52 deletions(-) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 8c17da783ac..1a23bf3e07c 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -78,7 +78,8 @@ use crate::{ PoolTransaction, PropagatedTransactions, TransactionOrigin, }, validate::{TransactionValidationOutcome, ValidPoolTransaction}, - CanonicalStateUpdate, PoolConfig, TransactionOrdering, TransactionValidator, + CanonicalStateUpdate, EthPoolTransaction, PoolConfig, TransactionOrdering, + TransactionValidator, }; use alloy_primitives::{Address, TxHash, B256}; use best::BestTransactions; @@ -87,9 +88,7 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use alloy_eips::eip4844::BlobTransactionSidecar; -use reth_primitives::{ - BlobTransaction, PooledTransactionsElement, TransactionSigned, TransactionSignedEcRecovered, -}; +use reth_primitives::PooledTransactionsElement; use std::{ collections::{HashMap, HashSet}, fmt, @@ -312,18 +311,33 @@ where self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).take(max).collect() } - /// Returns the [`BlobTransaction`] for the given transaction if the sidecar exists. + /// Converts the internally tracked transaction to the pooled format. 
/// - /// Caution: this assumes the given transaction is eip-4844 - fn get_blob_transaction(&self, transaction: TransactionSigned) -> Option { - if let Ok(Some(sidecar)) = self.blob_store.get(transaction.hash()) { - if let Ok(blob) = - BlobTransaction::try_from_signed(transaction, Arc::unwrap_or_clone(sidecar)) - { - return Some(blob) - } + /// If the transaction is an EIP-4844 transaction, the blob sidecar is fetched from the blob + /// store and attached to the transaction. + fn to_pooled_transaction( + &self, + transaction: Arc>, + ) -> Option<<::Transaction as PoolTransaction>::Pooled> + where + ::Transaction: EthPoolTransaction, + { + if transaction.is_eip4844() { + let sidecar = self.blob_store.get(*transaction.hash()).ok()??; + transaction.transaction.clone().try_into_pooled_eip4844(sidecar) + } else { + transaction + .transaction + .clone() + .try_into_pooled() + .inspect_err(|err| { + debug!( + target: "txpool", %err, + "failed to convert transaction to pooled element; skipping", + ); + }) + .ok() } - None } /// Returns converted [`PooledTransactionsElement`] for the given transaction hashes. 
@@ -333,39 +347,19 @@ where limit: GetPooledTransactionLimit, ) -> Vec where - ::Transaction: - PoolTransaction>, + ::Transaction: EthPoolTransaction, { let transactions = self.get_all(tx_hashes); let mut elements = Vec::with_capacity(transactions.len()); let mut size = 0; for transaction in transactions { let encoded_len = transaction.encoded_length(); - let recovered: TransactionSignedEcRecovered = - transaction.transaction.clone().into_consensus().into(); - let tx = recovered.into_signed(); - let pooled = if tx.is_eip4844() { - // for EIP-4844 transactions, we need to fetch the blob sidecar from the blob store - if let Some(blob) = self.get_blob_transaction(tx) { - PooledTransactionsElement::BlobTransaction(blob) - } else { - continue - } - } else { - match PooledTransactionsElement::try_from(tx) { - Ok(element) => element, - Err(err) => { - debug!( - target: "txpool", %err, - "failed to convert transaction to pooled element; skipping", - ); - continue - } - } + let Some(pooled) = self.to_pooled_transaction(transaction) else { + continue; }; size += encoded_len; - elements.push(pooled); + elements.push(pooled.into()); if limit.exceeds(size) { break @@ -381,19 +375,9 @@ where tx_hash: TxHash, ) -> Option where - ::Transaction: - PoolTransaction>, + ::Transaction: EthPoolTransaction, { - self.get(&tx_hash).and_then(|transaction| { - let recovered: TransactionSignedEcRecovered = - transaction.transaction.clone().into_consensus().into(); - let tx = recovered.into_signed(); - if tx.is_eip4844() { - self.get_blob_transaction(tx).map(PooledTransactionsElement::BlobTransaction) - } else { - PooledTransactionsElement::try_from(tx).ok() - } - }) + self.get(&tx_hash).and_then(|tx| self.to_pooled_transaction(tx).map(Into::into)) } /// Updates the entire pool after a new block was executed. 
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 27bed950c50..9d19105b5da 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -932,7 +932,7 @@ impl BestTransactionsAttributes { /// a subset of the `Pooled` format. pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Associated error type for the `try_from_consensus` method. - type TryFromConsensusError; + type TryFromConsensusError: fmt::Display; /// Associated type representing the raw consensus variant of the transaction. type Consensus: From + TryInto; @@ -955,6 +955,11 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { pooled.into() } + /// Tries to convert the `Consensus` type into the `Pooled` type. + fn try_into_pooled(self) -> Result { + Self::try_consensus_into_pooled(self.into_consensus()) + } + /// Tries to convert the `Consensus` type into the `Pooled` type. fn try_consensus_into_pooled( tx: Self::Consensus, @@ -1084,7 +1089,9 @@ pub trait EthPoolTransaction: Consensus: From + Into + Into, - Pooled: From + Into, + Pooled: From + + Into + + Into, > { /// Extracts the blob sidecar from the transaction. From 6695d07c656e7461efeb8cd5e76d54628b1cce20 Mon Sep 17 00:00:00 2001 From: "0xriazaka.eth" <168359025+0xriazaka@users.noreply.github.com> Date: Sun, 24 Nov 2024 07:53:01 +0100 Subject: [PATCH 663/970] Make PostExectuionInput generic over receipt (#12814) Co-authored-by: Emilia Hane --- crates/consensus/consensus/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index da90439af7f..3ad53456cbd 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -30,16 +30,16 @@ pub mod test_utils; /// Post execution input passed to [`Consensus::validate_block_post_execution`]. 
#[derive(Debug)] -pub struct PostExecutionInput<'a> { +pub struct PostExecutionInput<'a, R = Receipt> { /// Receipts of the block. - pub receipts: &'a [Receipt], + pub receipts: &'a [R], /// EIP-7685 requests of the block. pub requests: &'a Requests, } -impl<'a> PostExecutionInput<'a> { +impl<'a, R> PostExecutionInput<'a, R> { /// Creates a new instance of `PostExecutionInput`. - pub const fn new(receipts: &'a [Receipt], requests: &'a Requests) -> Self { + pub const fn new(receipts: &'a [R], requests: &'a Requests) -> Self { Self { receipts, requests } } } From 0d6ebec5746838127b0e67d45e3f9faf6b67ee71 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 24 Nov 2024 08:46:16 +0100 Subject: [PATCH 664/970] Revert "Revert "chore(sdk): Add MaybeArbitrary as super trait"" (#12810) Co-authored-by: Emilia Hane --- Cargo.lock | 242 +++++++++--------- crates/optimism/bin/Cargo.toml | 3 +- crates/optimism/evm/Cargo.toml | 2 +- crates/optimism/node/Cargo.toml | 1 + crates/optimism/primitives/Cargo.toml | 14 + crates/optimism/primitives/src/tx_type.rs | 7 +- crates/primitives-traits/src/block/body.rs | 3 +- crates/primitives-traits/src/block/header.rs | 4 +- crates/primitives-traits/src/block/mod.rs | 16 +- crates/primitives-traits/src/header/sealed.rs | 7 +- crates/primitives-traits/src/receipt.rs | 5 +- .../src/transaction/tx_type.rs | 3 +- crates/storage/provider/Cargo.toml | 1 + 13 files changed, 175 insertions(+), 133 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a92aadda19..9ae0574414e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -418,7 +418,7 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -643,7 +643,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -659,7 +659,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", 
"syn-solidity", "tiny-keccak", ] @@ -675,7 +675,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "syn-solidity", ] @@ -881,7 +881,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1104,7 +1104,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1115,7 +1115,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1153,7 +1153,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1259,7 +1259,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1441,7 +1441,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -1548,9 +1548,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" +checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" dependencies = [ "bytemuck_derive", ] @@ -1563,7 +1563,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1781,7 +1781,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2225,7 +2225,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2249,7 +2249,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2260,7 +2260,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2382,7 +2382,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2393,7 +2393,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2414,7 +2414,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "unicode-xid", ] @@ -2528,7 +2528,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2678,7 +2678,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2689,7 +2689,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2746,7 +2746,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3307,7 +3307,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3486,9 +3486,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = 
"ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", @@ -3751,9 +3751,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", @@ -3833,7 +3833,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3983,7 +3983,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4044,13 +4044,13 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] @@ -4172,7 +4172,7 @@ dependencies = [ "pretty_assertions", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4273,9 +4273,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" [[package]] name = "jni" @@ -4420,7 +4420,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4580,9 +4580,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.162" +version = "0.2.164" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" +checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" [[package]] name = "libloading" @@ -4602,9 +4602,9 @@ checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libp2p-identity" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" +checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" dependencies = [ "asn1_der", "bs58", @@ -4838,7 +4838,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4965,9 +4965,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" dependencies = [ "cfg-if", "downcast", @@ -4979,14 +4979,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5234,7 +5234,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5287,9 +5287,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"72da577a88d35b893fae6467112651f26ef023434c196b2a0b3dc75bc853e0e4" +checksum = "fce158d886815d419222daa67fcdf949a34f7950653a4498ebeb4963331f70ed" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5305,9 +5305,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818180672dd14ca6642fb57942e1cbd602669f42b6e0222b7ea9bbcae065d67e" +checksum = "2734e9a65efb90fe4520303f984c124766b7d2f2e5dd51cbe54d6269c85a3c91" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5320,9 +5320,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f82e805bad171ceae2af45efaecf8d0b50622cff3473e3c998ff1dd340de35" +checksum = "87e4aef8ed017004a176ab1de49df419f59c0fb4a6ce3b693a10fe099fe1afe7" dependencies = [ "alloy-consensus", "alloy-network", @@ -5335,9 +5335,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1803a1ac96203b8f713b1fa9b7509c46c645ca7bc22b582761a7495e999d4301" +checksum = "6c68a3e2770890da3ad2fd20d7fe0c8e15672707577b4168a60e388c8eceaca0" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5358,9 +5358,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a838c125256e02e2f9da88c51e263b02a06cda7e60382fe2551a3385b516f5bb" +checksum = "060ebeaea8c772e396215f69bb86d231ec8b7f36aca0dd6ce367ceaa9a8c33e6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5377,9 +5377,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c227fcc7d81d4023363ba12406e57ebcc1c7cbb1075c38ea471ae32138d4706d" +checksum = "864dbd5511ef4ef00b6c2c980739259b25b24048007b7751ca0069b30b1e3fee" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5471,9 +5471,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" dependencies = [ "arbitrary", "arrayvec", @@ -5482,19 +5482,20 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", + "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] @@ -5609,7 +5610,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5638,7 +5639,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5810,7 +5811,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5861,14 +5862,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "307e3004becf10f5a6e0d59d20f3cd28231b0e0827a96cd3e0ce6d14bc1e4bb3" dependencies = [ "unicode-ident", ] @@ -5959,7 +5960,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -6316,7 +6317,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", "tokio-rustls", "tokio-util", @@ -6771,7 +6772,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -8389,6 +8390,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", + "arbitrary", "bytes", "derive_more 1.0.0", "op-alloy-consensus", @@ -9683,7 +9685,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.87", + "syn 2.0.89", "unicode-ident", ] @@ -9765,9 +9767,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.40" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -9778,9 +9780,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.16" +version = "0.23.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" dependencies = [ "log", "once_cell", @@ -9923,9 +9925,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = 
"1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -10037,9 +10039,9 @@ dependencies = [ [[package]] name = "semver-parser" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" dependencies = [ "pest", ] @@ -10073,14 +10075,14 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "indexmap 2.6.0", "itoa", @@ -10108,7 +10110,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10159,7 +10161,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10182,7 +10184,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10465,7 +10467,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10523,9 +10525,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.87" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ 
-10541,7 +10543,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10552,9 +10554,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] @@ -10567,7 +10569,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10644,7 +10646,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10692,7 +10694,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10703,7 +10705,7 @@ checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10880,7 +10882,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11008,9 +11010,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", "base64 0.22.1", @@ -11081,7 +11083,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] 
[[package]] @@ -11330,9 +11332,9 @@ checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-normalization" @@ -11479,7 +11481,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11550,7 +11552,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-shared", ] @@ -11584,7 +11586,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11610,9 +11612,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb4f099acbc1043cc752b91615b24b02d7f6fcd975bd781fed9f50b3c3e15bf7" +checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" dependencies = [ "futures", "js-sys", @@ -11750,7 +11752,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11761,7 +11763,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11772,7 +11774,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11783,7 +11785,7 @@ checksum = 
"053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -12058,7 +12060,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -12080,7 +12082,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -12100,7 +12102,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -12121,7 +12123,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -12143,7 +12145,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 77166763100..45f4492e82b 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -48,7 +48,8 @@ optimism = [ ] dev = [ - "reth-optimism-cli/dev" + "reth-optimism-cli/dev", + "reth-optimism-primitives/arbitrary", ] min-error-logs = ["tracing/release_max_level_error"] diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 98496bb2653..807f224ca4b 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -48,7 +48,7 @@ reth-primitives = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec.workspace = true alloy-genesis.workspace = true alloy-consensus.workspace = true -reth-optimism-primitives.workspace = true +reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } [features] default 
= ["std"] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 18ceee8ef8b..fbc055a82e9 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -119,6 +119,7 @@ test-utils = [ "reth-trie-db/test-utils", "revm/test-utils", "reth-optimism-node/test-utils", + "reth-optimism-primitives/arbitrary", ] reth-codec = [ "reth-primitives/reth-codec", diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index fc368807736..33f936b2fd1 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -34,9 +34,13 @@ serde = { workspace = true, optional = true } # misc derive_more.workspace = true +# test-utils +arbitrary = { workspace = true, features = ["derive"], optional = true } + [dev-dependencies] reth-codecs = { workspace = true, features = ["test-utils"] } rstest.workspace = true +arbitrary.workspace = true [features] default = ["std", "reth-codec"] @@ -65,3 +69,13 @@ serde = [ "reth-codecs/serde", "op-alloy-consensus/serde", ] +arbitrary = [ + "dep:arbitrary", + "reth-primitives-traits/arbitrary", + "reth-primitives/arbitrary", + "reth-codecs?/arbitrary", + "op-alloy-consensus/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", +] \ No newline at end of file diff --git a/crates/optimism/primitives/src/tx_type.rs b/crates/optimism/primitives/src/tx_type.rs index c6e7fcc0a80..9976221b424 100644 --- a/crates/optimism/primitives/src/tx_type.rs +++ b/crates/optimism/primitives/src/tx_type.rs @@ -2,10 +2,11 @@ //! `OpTxType` implements `reth_primitives_traits::TxType`. //! This type is required because a `Compact` impl is needed on the deposit tx type. 
+use core::fmt::Debug; + use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable, Error}; use bytes::BufMut; -use core::fmt::Debug; use derive_more::{ derive::{From, Into}, Display, @@ -13,8 +14,10 @@ use derive_more::{ use op_alloy_consensus::OpTxType as AlloyOpTxType; use reth_primitives_traits::{InMemorySize, TxType}; -/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement [`TxType`] trait. +/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement +/// [`TxType`] trait. #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[into(u8)] pub struct OpTxType(AlloyOpTxType); diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index ff41536ba3f..fd7f7f1c631 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -4,7 +4,7 @@ use alloc::fmt; use alloy_consensus::Transaction; -use crate::{FullSignedTx, InMemorySize, MaybeSerde}; +use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullBlockBody: BlockBody {} @@ -26,6 +26,7 @@ pub trait BlockBody: + alloy_rlp::Decodable + InMemorySize + MaybeSerde + + MaybeArbitrary { /// Ordered list of signed transactions as committed in block. 
type Transaction: Transaction; diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 695e63ed10e..26806808532 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -4,7 +4,7 @@ use core::fmt; use alloy_primitives::Sealable; -use crate::{InMemorySize, MaybeCompact, MaybeSerde}; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by block header to support full node /// operations. @@ -28,6 +28,7 @@ pub trait BlockHeader: + Sealable + InMemorySize + MaybeSerde + + MaybeArbitrary { } @@ -46,5 +47,6 @@ impl BlockHeader for T where + Sealable + InMemorySize + MaybeSerde + + MaybeArbitrary { } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 3f4fbd343ee..c0f5a1ffc63 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -5,7 +5,9 @@ pub mod header; use alloc::fmt; -use crate::{BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeSerde}; +use crate::{ + BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, MaybeSerde, +}; /// Helper trait that unifies all behaviour required by block to support full node operations. pub trait FullBlock: @@ -26,7 +28,17 @@ impl FullBlock for T where // senders #[auto_impl::auto_impl(&, Arc)] pub trait Block: - Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + InMemorySize + MaybeSerde + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + InMemorySize + + MaybeSerde + + MaybeArbitrary { /// Header part of the block. 
type Header: BlockHeader + 'static; diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index f0a6869ed1e..08add0ac3c1 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -159,9 +159,12 @@ impl From> for Sealed { } #[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { +impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader +where + H: for<'b> arbitrary::Arbitrary<'b> + Sealable, +{ fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let header = Header::arbitrary(u)?; + let header = H::arbitrary(u)?; Ok(Self::seal(header)) } diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 4370d2ac00f..e2af40c447e 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,12 +1,12 @@ //! Receipt abstraction +use alloc::vec::Vec; use core::fmt; -use alloc::vec::Vec; use alloy_consensus::TxReceipt; use alloy_primitives::B256; -use crate::{InMemorySize, MaybeCompact, MaybeSerde}; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by receipt to support full node operations. pub trait FullReceipt: Receipt + MaybeCompact {} @@ -27,6 +27,7 @@ pub trait Receipt: + alloy_rlp::Decodable + MaybeSerde + InMemorySize + + MaybeArbitrary { /// Returns transaction type. 
fn tx_type(&self) -> u8; diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs index 931fcb773bf..d2caebe4c9f 100644 --- a/crates/primitives-traits/src/transaction/tx_type.rs +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -4,7 +4,7 @@ use core::fmt; use alloy_primitives::{U64, U8}; -use crate::{InMemorySize, MaybeCompact}; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact}; /// Helper trait that unifies all behaviour required by transaction type ID to support full node /// operations. @@ -33,6 +33,7 @@ pub trait TxType: + alloy_rlp::Encodable + alloy_rlp::Decodable + InMemorySize + + MaybeArbitrary { /// Returns `true` if this is a legacy transaction. fn is_legacy(&self) -> bool; diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 974de01e004..86f2f3a51b9 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -131,4 +131,5 @@ test-utils = [ "revm/test-utils", "reth-prune-types/test-utils", "reth-stages-types/test-utils", + "reth-optimism-primitives?/arbitrary", ] From a552b1ffc9a6a40a84d3846c789ef1b46c8ab547 Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Sun, 24 Nov 2024 15:57:34 +0700 Subject: [PATCH 665/970] chore: make `EngineSyncEvent` generic over data primitives (#12827) --- crates/consensus/beacon/src/engine/sync.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index b6e75f802e3..b140846981e 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -10,7 +10,7 @@ use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, EthBlockClient, }; -use reth_primitives::SealedBlock; +use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlock}; use 
reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; @@ -361,9 +361,9 @@ impl Ord for OrderedSealedBlock { /// The event type emitted by the [`EngineSyncController`]. #[derive(Debug)] -pub(crate) enum EngineSyncEvent { +pub(crate) enum EngineSyncEvent { /// A full block has been downloaded from the network. - FetchedFullBlock(SealedBlock), + FetchedFullBlock(SealedBlock), /// Pipeline started syncing /// /// This is none if the pipeline is triggered without a specific target. From 21bc75df39ee823b3ae9f59623a55d6d74dcd99f Mon Sep 17 00:00:00 2001 From: tedison <76473430+edisontim@users.noreply.github.com> Date: Sun, 24 Nov 2024 04:08:36 -0500 Subject: [PATCH 666/970] feat: start implementing OpTransaction (#12529) Co-authored-by: Emilia Hane --- crates/optimism/primitives/Cargo.toml | 8 +- crates/optimism/primitives/src/lib.rs | 4 +- .../primitives/src/transaction/mod.rs | 173 ++++++++++++++++++ .../src/{ => transaction}/tx_type.rs | 0 crates/primitives/src/transaction/mod.rs | 6 + crates/primitives/src/transaction/tx_type.rs | 20 +- 6 files changed, 204 insertions(+), 7 deletions(-) create mode 100644 crates/optimism/primitives/src/transaction/mod.rs rename crates/optimism/primitives/src/{ => transaction}/tx_type.rs (100%) diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 33f936b2fd1..e7200c40ed8 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-node-types.workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true -reth-codecs = { workspace = true, optional = true } +reth-codecs = { workspace = true, optional = true, features = ["optimism"] } # ethereum alloy-primitives.workspace = true @@ -34,7 +34,7 @@ serde = { workspace = true, optional = true } # misc derive_more.workspace = true -# 
test-utils +# test arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] @@ -66,7 +66,7 @@ serde = [ "alloy-consensus/serde", "alloy-eips/serde", "bytes/serde", - "reth-codecs/serde", + "reth-codecs?/serde", "op-alloy-consensus/serde", ] arbitrary = [ @@ -78,4 +78,4 @@ arbitrary = [ "alloy-consensus/arbitrary", "alloy-eips/arbitrary", "alloy-primitives/arbitrary", -] \ No newline at end of file +] diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 334440ea106..0f4608a8ebe 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -9,9 +9,9 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod bedrock; -pub mod tx_type; +pub mod transaction; -pub use tx_type::OpTxType; +pub use transaction::{tx_type::OpTxType, OpTransaction}; use alloy_consensus::Header; use reth_node_types::NodePrimitives; diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs new file mode 100644 index 00000000000..070b3d984e0 --- /dev/null +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -0,0 +1,173 @@ +//! Wrapper of [`OpTypedTransaction`], that implements reth database encoding [`Compact`]. 
+ +pub mod tx_type; + +use alloy_primitives::{bytes, Bytes, TxKind, Uint, B256}; + +use alloy_consensus::{constants::EIP7702_TX_TYPE_ID, TxLegacy}; +use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; +use derive_more::{Deref, From}; +use op_alloy_consensus::{OpTypedTransaction, DEPOSIT_TX_TYPE_ID}; +use reth_codecs::Compact; +use reth_primitives::transaction::{ + COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, + COMPACT_IDENTIFIER_LEGACY, +}; +use reth_primitives_traits::InMemorySize; + +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Deref, Hash, From)] +/// Optimistic transaction. +pub struct OpTransaction(OpTypedTransaction); + +impl Default for OpTransaction { + fn default() -> Self { + Self(OpTypedTransaction::Legacy(TxLegacy::default())) + } +} + +impl Compact for OpTransaction { + fn to_compact(&self, out: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + match &self.0 { + OpTypedTransaction::Legacy(tx) => tx.to_compact(out), + OpTypedTransaction::Eip2930(tx) => tx.to_compact(out), + OpTypedTransaction::Eip1559(tx) => tx.to_compact(out), + OpTypedTransaction::Eip7702(tx) => tx.to_compact(out), + OpTypedTransaction::Deposit(tx) => tx.to_compact(out), + } + } + + fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + + match identifier { + COMPACT_IDENTIFIER_LEGACY => { + let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Legacy(tx)), buf) + } + COMPACT_IDENTIFIER_EIP2930 => { + let (tx, buf) = + alloy_consensus::transaction::TxEip2930::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Eip2930(tx)), buf) + } + COMPACT_IDENTIFIER_EIP1559 => { + let (tx, buf) = + alloy_consensus::transaction::TxEip1559::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Eip1559(tx)), buf) + } 
+ COMPACT_EXTENDED_IDENTIFIER_FLAG => { + // An identifier of 3 indicates that the transaction type did not fit into + // the backwards compatible 2 bit identifier, their transaction types are + // larger than 2 bits (eg. 4844 and Deposit Transactions). In this case, + // we need to read the concrete transaction type from the buffer by + // reading the full 8 bits (single byte) and match on this transaction type. + let identifier = buf.get_u8(); + match identifier { + EIP7702_TX_TYPE_ID => { + let (tx, buf) = + alloy_consensus::transaction::TxEip7702::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Eip7702(tx)), buf) + } + DEPOSIT_TX_TYPE_ID => { + let (tx, buf) = op_alloy_consensus::TxDeposit::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Deposit(tx)), buf) + } + _ => unreachable!( + "Junk data in database: unknown Transaction variant: {identifier}" + ), + } + } + _ => unreachable!("Junk data in database: unknown Transaction variant: {identifier}"), + } + } +} + +impl alloy_consensus::Transaction for OpTransaction { + fn chain_id(&self) -> Option { + self.0.chain_id() + } + + fn nonce(&self) -> u64 { + self.0.nonce() + } + + fn gas_limit(&self) -> u64 { + self.0.gas_limit() + } + + fn gas_price(&self) -> Option { + self.0.gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.0.max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.0.max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.0.max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.0.priority_fee_or_price() + } + + fn kind(&self) -> TxKind { + self.0.kind() + } + + fn value(&self) -> Uint<256, 4> { + self.0.value() + } + + fn input(&self) -> &Bytes { + self.0.input() + } + + fn ty(&self) -> u8 { + self.0.ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.0.access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.0.blob_versioned_hashes() + } + + fn 
authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.0.authorization_list() + } + + fn is_dynamic_fee(&self) -> bool { + self.0.is_dynamic_fee() + } + + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.0.effective_gas_price(base_fee) + } + + fn effective_tip_per_gas(&self, base_fee: u64) -> Option { + self.0.effective_tip_per_gas(base_fee) + } +} + +impl InMemorySize for OpTransaction { + fn size(&self) -> usize { + match &self.0 { + OpTypedTransaction::Legacy(tx) => tx.size(), + OpTypedTransaction::Eip2930(tx) => tx.size(), + OpTypedTransaction::Eip1559(tx) => tx.size(), + OpTypedTransaction::Eip7702(tx) => tx.size(), + OpTypedTransaction::Deposit(tx) => tx.size(), + } + } +} diff --git a/crates/optimism/primitives/src/tx_type.rs b/crates/optimism/primitives/src/transaction/tx_type.rs similarity index 100% rename from crates/optimism/primitives/src/tx_type.rs rename to crates/optimism/primitives/src/transaction/tx_type.rs diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index ca03bfe4f7c..b8a3f4a719b 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -56,6 +56,12 @@ pub mod signature; pub(crate) mod util; +#[cfg(any(test, feature = "reth-codec"))] +pub use tx_type::{ + COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, + COMPACT_IDENTIFIER_LEGACY, +}; + /// Expected number of transactions where we can expect a speed-up by recovering the senders in /// parallel. 
pub static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 0e344374d20..784a976ab79 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -8,6 +8,24 @@ use derive_more::Display; use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; +/// Identifier parameter for legacy transaction +#[cfg(any(test, feature = "reth-codec"))] +pub const COMPACT_IDENTIFIER_LEGACY: usize = 0; + +/// Identifier parameter for EIP-2930 transaction +#[cfg(any(test, feature = "reth-codec"))] +pub const COMPACT_IDENTIFIER_EIP2930: usize = 1; + +/// Identifier parameter for EIP-1559 transaction +#[cfg(any(test, feature = "reth-codec"))] +pub const COMPACT_IDENTIFIER_EIP1559: usize = 2; + +/// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier +/// parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type is +/// read from the buffer as a single byte. 
+#[cfg(any(test, feature = "reth-codec"))] +pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; + /// Transaction Type /// /// Currently being used as 2-bit type when encoding it to `reth_codecs::Compact` on @@ -256,7 +274,7 @@ impl Decodable for TxType { mod tests { use super::*; use alloy_primitives::hex; - use reth_codecs::{txtype::*, Compact}; + use reth_codecs::Compact; use reth_primitives_traits::TxType as _; use rstest::rstest; From e020eb71bd501e512e49d7d5d8dcf091ec0c42ec Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sun, 24 Nov 2024 11:28:31 +0100 Subject: [PATCH 667/970] chore(payload): fix withdrawals field pre-shanghai in Ethereum payload (#12828) --- crates/ethereum/payload/src/lib.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 24312fecbf4..b2f78da6de9 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -440,14 +440,14 @@ where requests_hash, }; + let withdrawals = chain_spec + .is_shanghai_active_at_timestamp(attributes.timestamp) + .then(|| attributes.withdrawals.clone()); + // seal the block let block = Block { header, - body: BlockBody { - transactions: executed_txs, - ommers: vec![], - withdrawals: Some(attributes.withdrawals.clone()), - }, + body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals }, }; let sealed_block = Arc::new(block.seal_slow()); From 8958e9f4df42cdf2a1ca443efeb168db7ead1dea Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sun, 24 Nov 2024 11:50:02 +0100 Subject: [PATCH 668/970] chore(ci): run hive 2x/day (#12829) --- .github/workflows/hive.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 6c50923d3e6..b8d3f378fca 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -5,8 +5,8 @@ name: hive on: workflow_dispatch: schedule: - # every day - 
- cron: "0 0 * * *" + # run every 12 hours + - cron: "0 */12 * * *" env: CARGO_TERM_COLOR: always From 736edf70ad5227141cf43c0320208c86b608bd6b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 24 Nov 2024 12:03:56 +0100 Subject: [PATCH 669/970] fix: check withdrawals in op builder (#12832) --- crates/optimism/payload/src/builder.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index ec766876836..132c2649272 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -3,13 +3,13 @@ use std::{fmt::Display, sync::Arc}; use alloy_consensus::{Header, Transaction, EMPTY_OMMER_ROOT_HASH}; -use alloy_eips::merge::BEACON_NONCE; +use alloy_eips::{eip4895::Withdrawals, merge::BEACON_NONCE}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; -use reth_chainspec::ChainSpecProvider; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes}; use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; @@ -416,7 +416,7 @@ where body: BlockBody { transactions: info.executed_transactions, ommers: vec![], - withdrawals: Some(ctx.attributes().payload_attributes.withdrawals.clone()), + withdrawals: ctx.withdrawals().cloned(), }, }; @@ -560,6 +560,13 @@ impl OpPayloadBuilderCtx { &self.config.attributes } + /// Returns the withdrawals if shanghai is active. + pub fn withdrawals(&self) -> Option<&Withdrawals> { + self.chain_spec + .is_shanghai_active_at_timestamp(self.attributes().timestamp()) + .then(|| &self.attributes().payload_attributes.withdrawals) + } + /// Returns the block gas limit to target. 
pub fn block_gas_limit(&self) -> u64 { self.attributes() From c8e5b233ef242c4c5eaebe6dc5ae66b98184d8a8 Mon Sep 17 00:00:00 2001 From: morito Date: Sun, 24 Nov 2024 20:08:30 +0900 Subject: [PATCH 670/970] Use `adjust_index_for_rlp` from alloy (#12815) --- crates/trie/common/src/root.rs | 11 ----------- crates/trie/trie/benches/trie_root.rs | 3 ++- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/crates/trie/common/src/root.rs b/crates/trie/common/src/root.rs index dbcbf4200d7..982dec98837 100644 --- a/crates/trie/common/src/root.rs +++ b/crates/trie/common/src/root.rs @@ -7,17 +7,6 @@ use alloy_trie::HashBuilder; use itertools::Itertools; use nybbles::Nibbles; -/// Adjust the index of an item for rlp encoding. -pub const fn adjust_index_for_rlp(i: usize, len: usize) -> usize { - if i > 0x7f { - i - } else if i == 0x7f || i + 1 == len { - 0 - } else { - i + 1 - } -} - /// Hashes and sorts account keys, then proceeds to calculating the root hash of the state /// represented as MPT. /// See [`state_root_unsorted`] for more info. 
diff --git a/crates/trie/trie/benches/trie_root.rs b/crates/trie/trie/benches/trie_root.rs index ad169936463..893e6e9e999 100644 --- a/crates/trie/trie/benches/trie_root.rs +++ b/crates/trie/trie/benches/trie_root.rs @@ -44,7 +44,8 @@ criterion_main!(benches); mod implementations { use super::*; use alloy_rlp::Encodable; - use reth_trie_common::{root::adjust_index_for_rlp, HashBuilder, Nibbles}; + use alloy_trie::root::adjust_index_for_rlp; + use reth_trie_common::{HashBuilder, Nibbles}; pub fn trie_hash_ordered_trie_root(receipts: &[ReceiptWithBloom]) -> B256 { triehash::ordered_trie_root::(receipts.iter().map(|receipt| { From 53839a952d528e116359716f3991823dbcc30f3c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 24 Nov 2024 11:33:42 +0000 Subject: [PATCH 671/970] chore(deps): weekly `cargo update` (#12813) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> Co-authored-by: Matthias Seitz --- Cargo.lock | 102 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 62 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ae0574414e..6463fd11a01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1057,9 +1057,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857" +checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" dependencies = [ "brotli", "flate2", @@ -1164,9 +1164,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backon" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4fa97bb310c33c811334143cf64c5bb2b7b3c06e453db6b095d7061eff8f113" +checksum = 
"ba5289ec98f68f28dd809fd601059e6aa908bb8f6108620930828283d4ee23d7" dependencies = [ "fastrand 2.2.0", "tokio", @@ -1915,9 +1915,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" +checksum = "487981fa1af147182687064d0a2c336586d337a606595ced9ffb0c685c250c73" dependencies = [ "cfg-if", "cpufeatures", @@ -1972,6 +1972,16 @@ dependencies = [ "libc", ] +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -1998,9 +2008,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -3782,7 +3792,7 @@ dependencies = [ "hyper-util", "log", "rustls", - "rustls-native-certs 0.8.0", + "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", "tokio-rustls", @@ -4186,9 +4196,9 @@ dependencies = [ [[package]] name = "interprocess" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f4e4a06d42fab3e85ab1b419ad32b09eab58b901d40c57935ff92db3287a13" +checksum = "894148491d817cb36b6f778017b8ac46b17408d522dd90f539d677ea938362eb" dependencies = [ "doctest-file", "futures-core", @@ -4713,9 +4723,9 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" -version = "0.7.3" +version = "0.7.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "lock_api" @@ -5727,9 +5737,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "powerfmt" @@ -5867,9 +5877,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307e3004becf10f5a6e0d59d20f3cd28231b0e0827a96cd3e0ce6d14bc1e4bb3" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -6311,7 +6321,7 @@ dependencies = [ "pin-project-lite", "quinn", "rustls", - "rustls-native-certs 0.8.0", + "rustls-native-certs 0.8.1", "rustls-pemfile", "rustls-pki-types", "serde", @@ -9780,9 +9790,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.17" +version = "0.23.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" +checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" dependencies = [ "log", "once_cell", @@ -9803,20 +9813,19 @@ dependencies = [ "rustls-pemfile", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 2.11.1", ] [[package]] name = "rustls-native-certs" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" 
dependencies = [ "openssl-probe", - "rustls-pemfile", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.0.1", ] [[package]] @@ -9843,7 +9852,7 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afbb878bdfdf63a336a5e63561b1835e7a8c91524f51621db870169eac84b490" dependencies = [ - "core-foundation", + "core-foundation 0.9.4", "core-foundation-sys", "jni", "log", @@ -9852,7 +9861,7 @@ dependencies = [ "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework", + "security-framework 2.11.1", "security-framework-sys", "webpki-roots", "winapi", @@ -10002,13 +10011,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", - "core-foundation", + "core-foundation 0.9.4", "core-foundation-sys", "libc", "num-bigint", "security-framework-sys", ] +[[package]] +name = "security-framework" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" +dependencies = [ + "bitflags 2.6.0", + "core-foundation 0.10.0", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + [[package]] name = "security-framework-sys" version = "2.12.1" @@ -11404,9 +11426,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna 1.0.3", @@ -11646,9 +11668,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.6" +version = "0.26.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" dependencies = [ "rustls-pki-types", ] @@ -12042,9 +12064,9 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" dependencies = [ "serde", "stable_deref_trait", @@ -12054,9 +12076,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", @@ -12087,18 +12109,18 @@ dependencies = [ [[package]] name = "zerofrom" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", From 02824da4fcd0794d45442adbdbf6b953be04d4f3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 24 Nov 2024 12:47:37 +0100 Subject: [PATCH 672/970] chore: remove default bound for txs (#12834) --- 
crates/primitives-traits/src/node.rs | 11 +---------- crates/primitives-traits/src/transaction/mod.rs | 2 -- crates/primitives-traits/src/transaction/signed.rs | 1 - crates/primitives/src/transaction/pooled.rs | 8 +------- 4 files changed, 2 insertions(+), 20 deletions(-) diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 19f6bd8456a..904ed7d12f1 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -42,16 +42,7 @@ pub trait NodePrimitives: + MaybeSerde + 'static; /// Signed version of the transaction type. - type SignedTx: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + MaybeSerde - + 'static; + type SignedTx: Send + Sync + Unpin + Clone + fmt::Debug + PartialEq + Eq + MaybeSerde + 'static; /// Transaction envelope type ID. type TxType: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static; /// A receipt. diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index f176382146b..b67e51024bf 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -18,7 +18,6 @@ pub trait Transaction: + Sync + Unpin + Clone - + Default + fmt::Debug + Eq + PartialEq @@ -35,7 +34,6 @@ impl Transaction for T where + Sync + Unpin + Clone - + Default + fmt::Debug + Eq + PartialEq diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 64acbd3415c..ae9a8f0d2ac 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -18,7 +18,6 @@ pub trait SignedTransaction: + Sync + Unpin + Clone - + Default + fmt::Debug + PartialEq + Eq diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index cecc995ddba..2bd344ea2a1 100644 --- 
a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -11,7 +11,7 @@ use crate::{ use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - SignableTransaction, Signed, TxEip4844WithSidecar, + Signed, TxEip4844WithSidecar, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Result, Encodable2718}, @@ -226,12 +226,6 @@ impl PooledTransactionsElement { } } -impl Default for PooledTransactionsElement { - fn default() -> Self { - Self::Legacy(TxLegacy::default().into_signed(Signature::test_signature())) - } -} - impl Hash for PooledTransactionsElement { fn hash(&self, state: &mut H) { self.trie_hash().hash(state); From dcaa06a01aca67b56b0bc2ef16bd3ef26834dfc5 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 25 Nov 2024 14:50:10 +0400 Subject: [PATCH 673/970] feat: make more block types generic (#12812) --- bin/reth-bench/src/bench/new_payload_fcu.rs | 6 +- bin/reth-bench/src/bench/new_payload_only.rs | 6 +- .../src/commands/debug_cmd/build_block.rs | 8 +- .../commands/debug_cmd/in_memory_merkle.rs | 1 + bin/reth/src/commands/debug_cmd/merkle.rs | 3 +- crates/blockchain-tree/src/blockchain_tree.rs | 6 +- crates/blockchain-tree/src/externals.rs | 30 +- crates/chain-state/src/test_utils.rs | 4 +- crates/cli/commands/src/common.rs | 14 +- .../commands/src/init_state/without_evm.rs | 5 +- crates/consensus/beacon/src/engine/mod.rs | 1 + crates/consensus/common/src/validation.rs | 4 +- crates/e2e-test-utils/src/lib.rs | 7 +- crates/e2e-test-utils/src/node.rs | 2 +- crates/engine/local/src/miner.rs | 13 +- crates/engine/tree/src/persistence.rs | 8 +- crates/engine/tree/src/tree/mod.rs | 1 + crates/engine/util/src/reorg.rs | 2 +- crates/ethereum/evm/src/execute.rs | 4 +- crates/ethereum/node/tests/e2e/dev.rs | 2 +- crates/ethereum/payload/src/lib.rs | 2 +- crates/evm/execution-types/src/chain.rs | 6 +- crates/exex/exex/src/backfill/job.rs | 2 +- 
crates/exex/exex/src/backfill/test_utils.rs | 18 +- crates/exex/exex/src/manager.rs | 2 +- crates/exex/exex/src/notifications.rs | 4 +- crates/exex/exex/src/wal/mod.rs | 10 +- crates/exex/test-utils/src/lib.rs | 4 +- crates/node/builder/src/builder/mod.rs | 6 +- crates/node/builder/src/launch/engine.rs | 8 +- crates/node/builder/src/launch/mod.rs | 9 +- crates/node/builder/src/setup.rs | 6 +- crates/node/types/src/lib.rs | 3 + crates/optimism/payload/src/builder.rs | 4 +- crates/payload/builder/src/lib.rs | 2 +- crates/payload/builder/src/test_utils.rs | 2 +- crates/payload/validator/src/lib.rs | 2 +- crates/primitives-traits/src/block/body.rs | 6 +- crates/primitives-traits/src/block/mod.rs | 12 +- crates/primitives/src/block.rs | 312 +++++++++--------- crates/primitives/src/lib.rs | 3 + crates/primitives/src/proofs.rs | 8 +- crates/primitives/src/traits.rs | 123 +++++++ crates/primitives/src/traits/mod.rs | 9 - crates/primitives/src/transaction/mod.rs | 21 ++ crates/rpc/rpc-builder/tests/it/auth.rs | 2 +- .../rpc-eth-api/src/helpers/pending_block.rs | 4 +- .../rpc-eth-api/src/helpers/transaction.rs | 2 +- crates/rpc/rpc-types-compat/src/block.rs | 2 +- .../rpc-types-compat/src/engine/payload.rs | 3 +- crates/rpc/rpc/src/debug.rs | 2 +- crates/stages/stages/src/stages/bodies.rs | 4 +- .../static-file/src/segments/transactions.rs | 2 +- .../src/providers/blockchain_provider.rs | 4 +- .../src/providers/database/provider.rs | 20 +- crates/storage/provider/src/providers/mod.rs | 2 + crates/storage/provider/src/traits/block.rs | 9 +- crates/storage/provider/src/writer/mod.rs | 4 +- .../src/mined_sidecar.rs | 2 + examples/custom-dev-node/src/main.rs | 2 +- .../custom-payload-builder/src/generator.rs | 2 +- examples/db-access/src/main.rs | 2 +- 62 files changed, 490 insertions(+), 289 deletions(-) create mode 100644 crates/primitives/src/traits.rs delete mode 100644 crates/primitives/src/traits/mod.rs diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs 
b/bin/reth-bench/src/bench/new_payload_fcu.rs index dd2f863e2c9..9e573a8957e 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -18,7 +18,7 @@ use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -75,11 +75,11 @@ impl Command { while let Some((block, head, safe, finalized)) = receiver.recv().await { // just put gas used here - let gas_used = block.header.gas_used; + let gas_used = block.gas_used; let block_number = block.header.number; let versioned_hashes: Vec = - block.blob_versioned_hashes().into_iter().copied().collect(); + block.body.blob_versioned_hashes().into_iter().copied().collect(); let parent_beacon_block_root = block.parent_beacon_block_root; let payload = block_to_payload(block); diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 68b2f76527d..0611faabf10 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -16,7 +16,7 @@ use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -60,10 +60,10 @@ impl Command { while let Some(block) = receiver.recv().await { // just put gas used here - let gas_used = block.header.gas_used; + let gas_used = block.gas_used; let versioned_hashes: Vec = - block.blob_versioned_hashes().into_iter().copied().collect(); + block.body.blob_versioned_hashes().into_iter().copied().collect(); let parent_beacon_block_root = block.parent_beacon_block_root; let payload = 
block_to_payload(block); diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index aa89b4112c3..5fc78e884e9 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -22,11 +22,11 @@ use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; -use reth_node_api::{EngineApiMessageVersion, PayloadBuilderAttributes}; +use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use reth_primitives::{ - BlobTransaction, PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, SealedHeader, - Transaction, TransactionSigned, + BlobTransaction, BlockExt, PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, + SealedHeader, Transaction, TransactionSigned, }; use reth_provider::{ providers::{BlockchainProvider, ProviderNodeTypes}, @@ -259,7 +259,7 @@ impl> Command { let senders = block.senders().expect("sender recovery failed"); let block_with_senders = - SealedBlockWithSenders::new(block.clone(), senders).unwrap(); + SealedBlockWithSenders::>::new(block.clone(), senders).unwrap(); let db = StateProviderDatabase::new(blockchain_db.latest()?); let executor = diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index ce5f318632e..9f82ef0574b 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -20,6 +20,7 @@ use reth_execution_types::ExecutionOutcome; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_node_ethereum::EthExecutorProvider; +use reth_primitives::BlockExt; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, 
HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index bb8a6a2c4a1..fe9b76d8c7d 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -17,6 +17,7 @@ use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; +use reth_node_api::BlockTy; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, @@ -144,7 +145,7 @@ impl> Command { for block in blocks.into_iter().rev() { let block_number = block.number; let sealed_block = block - .try_seal_with_senders() + .try_seal_with_senders::>() .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; trace!(target: "reth::cli", block_number, "Executing block"); diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index d2ff0f5c844..bbf1cb09961 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1589,7 +1589,9 @@ mod tests { body: Vec, num_of_signer_txs: u64| -> SealedBlockWithSenders { - let transactions_root = calculate_transaction_root(&body); + let signed_body = + body.clone().into_iter().map(|tx| tx.into_signed()).collect::>(); + let transactions_root = calculate_transaction_root(&signed_body); let receipts = body .iter() .enumerate() @@ -1635,7 +1637,7 @@ mod tests { SealedBlock { header: SealedHeader::seal(header), body: BlockBody { - transactions: body.clone().into_iter().map(|tx| tx.into_signed()).collect(), + transactions: signed_body, ommers: Vec::new(), withdrawals: Some(Withdrawals::default()), }, diff --git a/crates/blockchain-tree/src/externals.rs 
b/crates/blockchain-tree/src/externals.rs index 2b9dae9a3df..f61de4c4336 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -5,24 +5,40 @@ use reth_consensus::Consensus; use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_node_types::{FullNodePrimitives, NodeTypesWithDB}; -use reth_primitives::{BlockBody, StaticFileSegment}; +use reth_primitives::StaticFileSegment; use reth_provider::{ - providers::ProviderNodeTypes, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, - StaticFileProviderFactory, StatsReader, + providers::{NodeTypesForProvider, ProviderNodeTypes}, + ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, + StatsReader, }; use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; /// A helper trait with requirements for [`ProviderNodeTypes`] to be used within [`TreeExternals`]. -pub trait TreeNodeTypes: - ProviderNodeTypes> +pub trait NodeTypesForTree: + NodeTypesForProvider< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + >, +> { } -impl TreeNodeTypes for T where - T: ProviderNodeTypes> + +impl NodeTypesForTree for T where + T: NodeTypesForProvider< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + >, + > { } +/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within [`TreeExternals`]. +pub trait TreeNodeTypes: ProviderNodeTypes + NodeTypesForTree {} +impl TreeNodeTypes for T where T: ProviderNodeTypes + NodeTypesForTree {} + /// A container for external components. 
/// /// This is a simple container for external components used throughout the blockchain tree diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index af0c363fe48..866a6d74a0b 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -139,7 +139,9 @@ impl TestBlockBuilder { gas_limit: self.chain_spec.max_gas_limit, mix_hash: B256::random(), base_fee_per_gas: Some(INITIAL_BASE_FEE), - transactions_root: calculate_transaction_root(&transactions), + transactions_root: calculate_transaction_root( + &transactions.clone().into_iter().map(|tx| tx.into_signed()).collect::>(), + ), receipts_root: calculate_receipt_root(&receipts), beneficiary: Address::random(), state_root: state_root_unhashed(HashMap::from([( diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 251e01a105a..4a42d9f29f7 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -197,11 +197,21 @@ impl AccessRights { /// [`NodeTypes`](reth_node_builder::NodeTypes) in CLI. 
pub trait CliNodeTypes: NodeTypesWithEngine - + NodeTypesForProvider> + + NodeTypesForProvider< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + >, + > { } impl CliNodeTypes for N where N: NodeTypesWithEngine - + NodeTypesForProvider> + + NodeTypesForProvider< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + >, + > { } diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index e3594a59363..22236d14c76 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -33,7 +33,7 @@ pub fn setup_without_evm( where Provider: StaticFileProviderFactory + StageCheckpointWriter - + BlockWriter, + + BlockWriter>, { info!(target: "reth::cli", "Setting up dummy EVM chain before importing state."); @@ -64,7 +64,8 @@ fn append_first_block( total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: BlockWriter + StaticFileProviderFactory, + Provider: BlockWriter> + + StaticFileProviderFactory, { provider_rw.insert_block( SealedBlockWithSenders::new(SealedBlock::new(header.clone(), Default::default()), vec![]) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 0fedbdd452d..3d0f65423e4 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1992,6 +1992,7 @@ mod tests { use assert_matches::assert_matches; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_node_types::FullNodePrimitives; + use reth_primitives::BlockExt; use reth_provider::{BlockWriter, ProviderFactory, StorageLocation}; use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 
6042f16bf50..1709e3a14f4 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -4,7 +4,9 @@ use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header}; use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{BlockBody, EthereumHardfork, GotExpected, SealedBlock, SealedHeader}; +use reth_primitives::{ + BlockBody, BlockBodyTxExt, EthereumHardfork, GotExpected, SealedBlock, SealedHeader, +}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 73a7e39f1a4..b9279b16a7f 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use node::NodeTestContext; use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, + blockchain_tree::externals::NodeTypesForTree, builder::{FullNodePrimitives, NodeBuilder, NodeConfig, NodeHandle}, network::PeersHandleProvider, rpc::server_types::RpcModuleSelection, @@ -53,13 +54,12 @@ pub async fn setup( attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + Node> + NodeTypesForProvider + NodeTypesWithEngine, + N: Default + Node> + NodeTypesForTree + NodeTypesWithEngine, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, >, N::AddOns: RethRpcAddOns>, - N::Primitives: FullNodePrimitives, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -134,7 +134,8 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, - N::Primitives: FullNodePrimitives, + N::Primitives: 
+ FullNodePrimitives, { let tasks = TaskManager::current(); let exec = tasks.executor(); diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index c3dff527eb2..09c54a867ce 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -216,7 +216,7 @@ where // get head block from notifications stream and verify the tx has been pushed to the // pool is actually present in the canonical block let head = self.engine_api.canonical_stream.next().await.unwrap(); - let tx = head.tip().transactions().next(); + let tx = head.tip().transactions().first(); assert_eq!(tx.unwrap().hash().as_slice(), tip_tx_hash.as_slice()); loop { diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 3a0f5a2f192..a5c7cf4d4c6 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -210,12 +210,13 @@ where let block = payload.block(); - let cancun_fields = - self.provider.chain_spec().is_cancun_active_at_timestamp(block.timestamp).then(|| { - CancunPayloadFields { - parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), - versioned_hashes: block.blob_versioned_hashes().into_iter().copied().collect(), - } + let cancun_fields = self + .provider + .chain_spec() + .is_cancun_active_at_timestamp(block.timestamp) + .then(|| CancunPayloadFields { + parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), + versioned_hashes: block.body.blob_versioned_hashes().into_iter().copied().collect(), }); let (tx, rx) = oneshot::channel(); diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 86d18ceb48c..e56ed30c58b 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -21,11 +21,15 @@ use tracing::{debug, error}; /// A helper trait with requirements for [`ProviderNodeTypes`] to be used within /// [`PersistenceService`]. 
pub trait PersistenceNodeTypes: - ProviderNodeTypes> + ProviderNodeTypes< + Primitives: FullNodePrimitives, +> { } impl PersistenceNodeTypes for T where - T: ProviderNodeTypes> + T: ProviderNodeTypes< + Primitives: FullNodePrimitives, + > { } /// Writes parts of reth's in memory tree state to the database and static files. diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 39843377684..66eff882c92 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2609,6 +2609,7 @@ mod tests { use reth_engine_primitives::ForkchoiceStatus; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::test_utils::MockExecutorProvider; + use reth_primitives::BlockExt; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::{block_to_payload_v1, payload::block_to_payload_v3}; use reth_trie::updates::TrieUpdates; diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index fd80fa9e165..1e2451f24c3 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -18,7 +18,7 @@ use reth_evm::{ ConfigureEvm, }; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{proofs, Block, BlockBody, Receipt, Receipts}; +use reth_primitives::{proofs, Block, BlockBody, BlockExt, Receipt, Receipts}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index e339268a99a..f04ff46d9e5 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -319,7 +319,9 @@ mod tests { BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, }; use reth_execution_types::BlockExecutionOutput; - use reth_primitives::{public_key_to_address, Account, Block, BlockBody, Transaction}; + use reth_primitives::{ + 
public_key_to_address, Account, Block, BlockBody, BlockExt, Transaction, + }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index b6d0ffcfaaa..bdf444c8109 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -63,7 +63,7 @@ where let head = notifications.next().await.unwrap(); - let tx = head.tip().transactions().next().unwrap(); + let tx = &head.tip().transactions()[0]; assert_eq!(tx.hash(), hash); println!("mined transaction: {hash}"); } diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index b2f78da6de9..49065ec0d8a 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -27,7 +27,7 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ proofs::{self}, - Block, BlockBody, EthereumHardforks, Receipt, + Block, BlockBody, BlockExt, EthereumHardforks, Receipt, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 200a37423cf..6aed8422bc3 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -236,7 +236,7 @@ impl Chain { self.blocks().iter().zip(self.execution_outcome.receipts().iter()) { let mut tx_receipts = Vec::with_capacity(receipts.len()); - for (tx, receipt) in block.body.transactions().zip(receipts.iter()) { + for (tx, receipt) in block.body.transactions.iter().zip(receipts.iter()) { tx_receipts.push(( tx.hash(), receipt.as_ref().expect("receipts have not been pruned").clone(), @@ -417,7 +417,7 @@ impl ChainBlocks<'_> { /// Returns an iterator over all transactions in the chain. 
#[inline] pub fn transactions(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.body.transactions()) + self.blocks.values().flat_map(|block| block.body.transactions.iter()) } /// Returns an iterator over all transactions and their senders. @@ -441,7 +441,7 @@ impl ChainBlocks<'_> { /// Returns an iterator over all transaction hashes in the block #[inline] pub fn transaction_hashes(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.transactions().map(|tx| tx.hash())) + self.blocks.values().flat_map(|block| block.transactions().iter().map(|tx| tx.hash())) } } diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 77a7b50477b..08d91e39197 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -8,7 +8,7 @@ use alloy_primitives::BlockNumber; use reth_evm::execute::{ BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_primitives::{Block, BlockWithSenders, Receipt}; +use reth_primitives::{Block, BlockExt, BlockWithSenders, Receipt}; use reth_primitives_traits::format_gas_throughput; use reth_provider::{ BlockReader, Chain, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant, diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 169d2d758de..861d42f042b 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -11,7 +11,7 @@ use reth_evm::execute::{ use reth_evm_ethereum::execute::EthExecutorProvider; use reth_node_api::FullNodePrimitives; use reth_primitives::{ - Block, BlockBody, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction, + Block, BlockBody, BlockExt, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction, }; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, @@ -58,7 +58,12 
@@ pub(crate) fn execute_block_and_commit_to_database( block: &BlockWithSenders, ) -> eyre::Result> where - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + >, + >, { let provider = provider_factory.provider()?; @@ -162,7 +167,12 @@ pub(crate) fn blocks_and_execution_outputs( key_pair: Keypair, ) -> eyre::Result)>> where - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + >, + >, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; @@ -184,7 +194,7 @@ pub(crate) fn blocks_and_execution_outcome( ) -> eyre::Result<(Vec, ExecutionOutcome)> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives, + N::Primitives: FullNodePrimitives, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index e3d3a3c0690..ea5ddf2e8c6 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1235,7 +1235,7 @@ mod tests { genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .unwrap(); let provider_rw = provider_factory.database_provider_rw().unwrap(); provider_rw.insert_block(block.clone(), StorageLocation::Database).unwrap(); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index baf504166d1..6c1e12d8d76 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -400,7 +400,7 @@ mod tests { use futures::StreamExt; use reth_db_common::init::init_genesis; use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_primitives::Block; + use reth_primitives::{Block, BlockExt}; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, 
BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, @@ -567,7 +567,7 @@ mod tests { genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; let node_head = Head { number: node_head_block.number, diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 41a7829a70f..066fbe1b58c 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -268,21 +268,25 @@ mod tests { // Create 4 canonical blocks and one reorged block with number 2 let blocks = random_block_range(&mut rng, 0..=3, BlockRangeParams::default()) .into_iter() - .map(|block| block.seal_with_senders().ok_or_eyre("failed to recover senders")) + .map(|block| { + block + .seal_with_senders::() + .ok_or_eyre("failed to recover senders") + }) .collect::>>()?; let block_1_reorged = random_block( &mut rng, 1, BlockParams { parent: Some(blocks[0].hash()), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; let block_2_reorged = random_block( &mut rng, 2, BlockParams { parent: Some(blocks[1].hash()), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; // Create notifications for the above blocks. 
diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 5b2267505c5..15c770c5e9e 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -45,7 +45,7 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{EthPrimitives, Head, SealedBlockWithSenders}; +use reth_primitives::{BlockExt, EthPrimitives, Head, SealedBlockWithSenders}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, BlockReader, EthStorage, ProviderFactory, @@ -306,7 +306,7 @@ pub async fn test_exex_context_with_chain_spec( .block_by_hash(genesis_hash)? .ok_or_else(|| eyre::eyre!("genesis block not found"))? .seal_slow() - .seal_with_senders() + .seal_with_senders::() .ok_or_else(|| eyre::eyre!("failed to recover senders"))?; let head = Head { diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 65ae704fe83..7cbad445da1 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -10,6 +10,7 @@ use crate::{ DefaultNodeLauncher, LaunchNode, Node, NodeHandle, }; use futures::Future; +use reth_blockchain_tree::externals::NodeTypesForTree; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli_util::get_secret_key; use reth_db_api::{ @@ -358,7 +359,7 @@ where >, > where - N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, N::AddOns: RethRpcAddOns< NodeAdapter< RethFullAdapter, @@ -553,10 +554,9 @@ where impl WithLaunchContext, CB, AO>> where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypesWithEngine + NodeTypesForProvider, + T: NodeTypesWithEngine + NodeTypesForTree, CB: NodeComponentsBuilder>, AO: RethRpcAddOns, CB::Components>>, - T::Primitives: FullNodePrimitives, { /// Launches the node with the [`DefaultNodeLauncher`] 
that sets up engine API consensus and rpc pub async fn launch( diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index ef1edc899eb..6afcace5b15 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -12,6 +12,7 @@ use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, + persistence::PersistenceNodeTypes, tree::TreeConfig, }; use reth_engine_util::EngineMessageStreamExt; @@ -19,8 +20,8 @@ use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ - BuiltPayload, FullNodePrimitives, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, - PayloadBuilder, PayloadTypes, + BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadBuilder, + PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -70,14 +71,13 @@ impl EngineNodeLauncher { impl LaunchNode> for EngineNodeLauncher where - Types: ProviderNodeTypes + NodeTypesWithEngine, + Types: ProviderNodeTypes + NodeTypesWithEngine + PersistenceNodeTypes, T: FullNodeTypes>, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, - Types::Primitives: FullNodePrimitives, { type Node = NodeHandle, AO>; diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index a1819948ee4..4335073b404 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -16,13 +16,15 @@ use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, BeaconConsensusEngine, }; -use reth_blockchain_tree::{noop::NoopBlockchainTree, BlockchainTreeConfig}; +use 
reth_blockchain_tree::{ + externals::TreeNodeTypes, noop::NoopBlockchainTree, BlockchainTreeConfig, +}; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::BlockDownloaderProvider; -use reth_node_api::{AddOnsContext, FullNodePrimitives, FullNodeTypes, NodeTypesWithEngine}; +use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithEngine}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, @@ -98,11 +100,10 @@ impl DefaultNodeLauncher { impl LaunchNode> for DefaultNodeLauncher where - Types: ProviderNodeTypes + NodeTypesWithEngine, + Types: ProviderNodeTypes + NodeTypesWithEngine + TreeNodeTypes, T: FullNodeTypes, Types = Types>, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, - Types::Primitives: FullNodePrimitives, { type Node = NodeHandle, AO>; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 3258ba8fe54..9b453234019 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -14,7 +14,7 @@ use reth_exex::ExExManagerHandle; use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, EthBlockClient, }; -use reth_node_api::{FullNodePrimitives, NodePrimitives}; +use reth_node_api::{BodyTy, FullNodePrimitives}; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -87,9 +87,7 @@ pub fn build_pipeline( where N: ProviderNodeTypes, H: HeaderDownloader
+ 'static, - B: BodyDownloader< - Body = <::Block as reth_node_api::Block>::Body, - > + 'static, + B: BodyDownloader> + 'static, Executor: BlockExecutorProvider, N::Primitives: FullNodePrimitives, { diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index a23b9bfe414..40d0defe24e 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -233,6 +233,9 @@ where type Engine = E; } +/// Helper adapter type for accessing [`NodePrimitives::Block`] on [`NodeTypes`]. +pub type BlockTy = <::Primitives as NodePrimitives>::Block; + /// Helper adapter type for accessing [`NodePrimitives::BlockHeader`] on [`NodeTypes`]. pub type HeaderTy = <::Primitives as NodePrimitives>::BlockHeader; diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 132c2649272..fbf99c78d9e 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -18,7 +18,9 @@ use reth_optimism_forks::OpHardforks; use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_payload_util::PayloadTransactions; -use reth_primitives::{proofs, Block, BlockBody, Receipt, SealedHeader, TransactionSigned, TxType}; +use reth_primitives::{ + proofs, Block, BlockBody, BlockExt, Receipt, SealedHeader, TransactionSigned, TxType, +}; use reth_provider::{ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 0887a5ca74a..b6191ea7fd1 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -31,7 +31,7 @@ //! use alloy_consensus::Header; //! use alloy_primitives::U256; //! 
use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, PayloadKind}; -//! use reth_primitives::Block; +//! use reth_primitives::{Block, BlockExt}; //! //! /// The generator type that creates new jobs that builds empty blocks. //! pub struct EmptyBlockPayloadJobGenerator; diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 5025a12ed71..4690ca14f0d 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -9,7 +9,7 @@ use alloy_primitives::U256; use reth_chain_state::{CanonStateNotification, ExecutedBlock}; use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::{PayloadKind, PayloadTypes}; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use std::{ future::Future, pin::Pin, diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index e74b5f48d40..0a872a68ddf 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -12,7 +12,7 @@ use alloy_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadSidecar, MaybeCancunPayloadFields, PayloadError, }; use reth_chainspec::EthereumHardforks; -use reth_primitives::SealedBlock; +use reth_primitives::{BlockExt, SealedBlock}; use reth_rpc_types_compat::engine::payload::try_into_block; use std::sync::Arc; diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index fd7f7f1c631..b01ef96d6f5 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,6 +1,6 @@ //! Block body abstraction. -use alloc::fmt; +use alloc::{fmt, vec::Vec}; use alloy_consensus::Transaction; @@ -12,7 +12,6 @@ pub trait FullBlockBody: BlockBody {} impl FullBlockBody for T where T: BlockBody {} /// Abstraction for block's body. 
-#[auto_impl::auto_impl(&, Arc)] pub trait BlockBody: Send + Sync @@ -33,4 +32,7 @@ pub trait BlockBody: /// Returns reference to transactions in block. fn transactions(&self) -> &[Self::Transaction]; + + /// Consume the block body and return a [`Vec`] of transactions. + fn into_transactions(self) -> Vec; } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index c0f5a1ffc63..10521075095 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -6,7 +6,8 @@ pub mod header; use alloc::fmt; use crate::{ - BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, MaybeSerde, + BlockBody, BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, + MaybeSerde, }; /// Helper trait that unifies all behaviour required by block to support full node operations. @@ -26,7 +27,6 @@ impl FullBlock for T where // todo: make sealable super-trait, depends on // todo: make with senders extension trait, so block can be impl by block type already containing // senders -#[auto_impl::auto_impl(&, Arc)] pub trait Block: Send + Sync @@ -44,11 +44,17 @@ pub trait Block: type Header: BlockHeader + 'static; /// The block's body contains the transactions in the block. - type Body: Send + Sync + Unpin + 'static; + type Body: BlockBody + Send + Sync + Unpin + 'static; + + /// Create new block instance. + fn new(header: Self::Header, body: Self::Body) -> Self; /// Returns reference to block header. fn header(&self) -> &Self::Header; /// Returns reference to block body. fn body(&self) -> &Self::Body; + + /// Splits the block into its header and body. 
+ fn split(self) -> (Self::Header, Self::Body); } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index a93b1cf538a..b381b7dd807 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,13 +1,16 @@ -use crate::{GotExpected, SealedHeader, TransactionSigned, TransactionSignedEcRecovered}; +use crate::{ + traits::BlockExt, transaction::SignedTransactionIntoRecoveredExt, BlockBodyTxExt, GotExpected, + SealedHeader, TransactionSigned, TransactionSignedEcRecovered, +}; use alloc::vec::Vec; use alloy_consensus::Header; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; -use alloy_primitives::{Address, Bytes, Sealable, B256}; +use alloy_primitives::{Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; -use reth_primitives_traits::InMemorySize; +use reth_primitives_traits::{BlockBody as _, InMemorySize, SignedTransaction}; use serde::{Deserialize, Serialize}; /// Ethereum full block. @@ -23,73 +26,14 @@ pub struct Block { pub body: BlockBody, } -impl Block { - /// Calculate the header hash and seal the block so that it can't be changed. - pub fn seal_slow(self) -> SealedBlock { - SealedBlock { header: SealedHeader::seal(self.header), body: self.body } - } - - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - pub fn seal(self, hash: B256) -> SealedBlock { - SealedBlock { header: SealedHeader::new(self.header, hash), body: self.body } - } - - /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. - pub fn senders(&self) -> Option> { - self.body.recover_signers() - } - - /// Transform into a [`BlockWithSenders`]. 
- /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - pub fn with_senders_unchecked(self, senders: Vec
) -> BlockWithSenders { - self.try_with_senders_unchecked(senders).expect("stored block is valid") - } - - /// Transform into a [`BlockWithSenders`] using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also [`TransactionSigned::recover_signer_unchecked`] - /// - /// Returns an error if a signature is invalid. - #[track_caller] - pub fn try_with_senders_unchecked( - self, - senders: Vec
, - ) -> Result { - let senders = if self.body.transactions.len() == senders.len() { - senders - } else { - let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; - senders - }; - - Ok(BlockWithSenders::new_unchecked(self, senders)) - } - - /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - pub fn with_recovered_senders(self) -> Option { - let senders = self.senders()?; - Some(BlockWithSenders::new_unchecked(self, senders)) - } -} - impl reth_primitives_traits::Block for Block { type Header = Header; type Body = BlockBody; + fn new(header: Self::Header, body: Self::Body) -> Self { + Self { header, body } + } + fn header(&self) -> &Self::Header { &self.header } @@ -97,6 +41,10 @@ impl reth_primitives_traits::Block for Block { fn body(&self) -> &Self::Body { &self.body } + + fn split(self) -> (Self::Header, Self::Body) { + (self.header, self.body) + } } impl InMemorySize for Block { @@ -204,44 +152,44 @@ impl<'a> arbitrary::Arbitrary<'a> for Block { /// Sealed block with senders recovered from transactions. #[derive(Debug, Clone, PartialEq, Eq, Default, Deref, DerefMut)] -pub struct BlockWithSenders { +pub struct BlockWithSenders { /// Block #[deref] #[deref_mut] - pub block: Block, + pub block: B, /// List of senders that match the transactions in the block pub senders: Vec
, } -impl BlockWithSenders { +impl BlockWithSenders { /// New block with senders - pub const fn new_unchecked(block: Block, senders: Vec
) -> Self { + pub const fn new_unchecked(block: B, senders: Vec
) -> Self { Self { block, senders } } /// New block with senders. Return none if len of tx and senders does not match - pub fn new(block: Block, senders: Vec
) -> Option { - (block.body.transactions.len() == senders.len()).then_some(Self { block, senders }) + pub fn new(block: B, senders: Vec
) -> Option { + (block.body().transactions().len() == senders.len()).then_some(Self { block, senders }) } /// Seal the block with a known hash. /// /// WARNING: This method does not perform validation whether the hash is correct. #[inline] - pub fn seal(self, hash: B256) -> SealedBlockWithSenders { + pub fn seal(self, hash: B256) -> SealedBlockWithSenders { let Self { block, senders } = self; - SealedBlockWithSenders { block: block.seal(hash), senders } + SealedBlockWithSenders:: { block: block.seal(hash), senders } } /// Calculate the header hash and seal the block with senders so that it can't be changed. #[inline] - pub fn seal_slow(self) -> SealedBlockWithSenders { + pub fn seal_slow(self) -> SealedBlockWithSenders { SealedBlockWithSenders { block: self.block.seal_slow(), senders: self.senders } } /// Split Structure to its components #[inline] - pub fn into_components(self) -> (Block, Vec
) { + pub fn into_components(self) -> (B, Vec
) { (self.block, self.senders) } @@ -249,18 +197,27 @@ impl BlockWithSenders { #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { - self.senders.iter().zip(self.block.body.transactions()) + ) -> impl Iterator::Transaction)> + + '_ { + self.senders.iter().zip(self.block.body().transactions()) } /// Returns an iterator over all transactions in the chain. #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator { + ) -> impl Iterator< + Item = TransactionSignedEcRecovered< + ::Transaction, + >, + > + where + ::Transaction: SignedTransaction, + { self.block - .body - .transactions + .split() + .1 + .into_transactions() .into_iter() .zip(self.senders) .map(|(tx, sender)| tx.with_signer(sender)) @@ -268,8 +225,10 @@ impl BlockWithSenders { /// Consumes the block and returns the transactions of the block. #[inline] - pub fn into_transactions(self) -> Vec { - self.block.body.transactions + pub fn into_transactions( + self, + ) -> Vec<::Transaction> { + self.block.split().1.into_transactions() } } @@ -308,10 +267,9 @@ impl SealedBlock { } impl SealedBlock { - /// Splits the sealed block into underlying components - #[inline] - pub fn split(self) -> (SealedHeader, Vec, Vec
) { - (self.header, self.body.transactions, self.body.ommers) + /// Unseal the block + pub fn unseal(self) -> Block { + Block { header: self.header.unseal(), body: self.body } } /// Returns an iterator over all blob transactions of the block @@ -320,6 +278,23 @@ impl SealedBlock { self.body.blob_transactions_iter() } + /// Calculates the total gas used by blob transactions in the sealed block. + pub fn blob_gas_used(&self) -> u64 { + self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() + } + + /// Returns whether or not the block contains any blob transactions. + #[inline] + pub fn has_blob_transactions(&self) -> bool { + self.body.has_blob_transactions() + } + + /// Returns whether or not the block contains any eip-7702 transactions. + #[inline] + pub fn has_eip7702_transactions(&self) -> bool { + self.body.has_eip7702_transactions() + } + /// Returns only the blob transactions, if any, from the block body. #[inline] pub fn blob_transactions(&self) -> Vec<&TransactionSigned> { @@ -333,25 +308,42 @@ impl SealedBlock { .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) .flatten() } +} - /// Returns all blob versioned hashes from the block body. +impl SealedBlock +where + H: reth_primitives_traits::BlockHeader, + B: reth_primitives_traits::BlockBody, +{ + /// Splits the sealed block into underlying components #[inline] - pub fn blob_versioned_hashes(&self) -> Vec<&B256> { - self.blob_versioned_hashes_iter().collect() + pub fn split(self) -> (SealedHeader, B) { + (self.header, self.body) } /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. - pub fn senders(&self) -> Option> { + pub fn senders(&self) -> Option> + where + B::Transaction: SignedTransaction, + { self.body.recover_signers() } /// Seal sealed block with recovered transaction senders. 
- pub fn seal_with_senders(self) -> Option { + pub fn seal_with_senders(self) -> Option> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { self.try_seal_with_senders().ok() } /// Seal sealed block with recovered transaction senders. - pub fn try_seal_with_senders(self) -> Result { + pub fn try_seal_with_senders(self) -> Result, Self> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { match self.senders() { Some(senders) => Ok(SealedBlockWithSenders { block: self, senders }), None => Err(self), @@ -365,7 +357,11 @@ impl SealedBlock { /// If the number of senders does not match the number of transactions in the block /// and the signer recovery for one of the transactions fails. #[track_caller] - pub fn with_senders_unchecked(self, senders: Vec
) -> SealedBlockWithSenders { + pub fn with_senders_unchecked(self, senders: Vec
) -> SealedBlockWithSenders + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { self.try_with_senders_unchecked(senders).expect("stored block is valid") } @@ -377,11 +373,15 @@ impl SealedBlock { /// /// Returns an error if a signature is invalid. #[track_caller] - pub fn try_with_senders_unchecked( + pub fn try_with_senders_unchecked( self, senders: Vec
, - ) -> Result { - let senders = if self.body.transactions.len() == senders.len() { + ) -> Result, Self> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { + let senders = if self.body.transactions().len() == senders.len() { senders } else { let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; @@ -391,28 +391,6 @@ impl SealedBlock { Ok(SealedBlockWithSenders { block: self, senders }) } - /// Unseal the block - pub fn unseal(self) -> Block { - Block { header: self.header.unseal(), body: self.body } - } - - /// Calculates the total gas used by blob transactions in the sealed block. - pub fn blob_gas_used(&self) -> u64 { - self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() - } - - /// Returns whether or not the block contains any blob transactions. - #[inline] - pub fn has_blob_transactions(&self) -> bool { - self.body.has_blob_transactions() - } - - /// Returns whether or not the block contains any eip-7702 transactions. - #[inline] - pub fn has_eip7702_transactions(&self) -> bool { - self.body.has_eip7702_transactions() - } - /// Ensures that the transaction root in the block header is valid. /// /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure @@ -425,13 +403,16 @@ impl SealedBlock { /// /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected` /// error containing the calculated and expected roots. - pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> { + pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> + where + B::Transaction: Encodable2718, + { let calculated_root = self.body.calculate_tx_root(); - if self.header.transactions_root != calculated_root { + if self.header.transactions_root() != calculated_root { return Err(GotExpected { got: calculated_root, - expected: self.header.transactions_root, + expected: self.header.transactions_root(), }) } @@ -440,8 +421,11 @@ impl SealedBlock { /// Returns a vector of transactions RLP encoded with /// [`alloy_eips::eip2718::Encodable2718::encoded_2718`]. 
- pub fn raw_transactions(&self) -> Vec { - self.body.transactions().map(|tx| tx.encoded_2718().into()).collect() + pub fn raw_transactions(&self) -> Vec + where + B::Transaction: Encodable2718, + { + self.body.transactions().iter().map(|tx| tx.encoded_2718().into()).collect() } } @@ -477,6 +461,10 @@ where type Header = H; type Body = B; + fn new(header: Self::Header, body: Self::Body) -> Self { + Self { header: SealedHeader::seal(header), body } + } + fn header(&self) -> &Self::Header { self.header.header() } @@ -484,6 +472,10 @@ where fn body(&self) -> &Self::Body { &self.body } + + fn split(self) -> (Self::Header, Self::Body) { + (self.header.unseal(), self.body) + } } #[cfg(any(test, feature = "arbitrary"))] @@ -499,45 +491,48 @@ where /// Sealed block with senders recovered from transactions. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] -pub struct SealedBlockWithSenders { +pub struct SealedBlockWithSenders { /// Sealed block #[deref] #[deref_mut] - pub block: SealedBlock, + #[serde(bound = "SealedBlock: Serialize + serde::de::DeserializeOwned")] + pub block: SealedBlock, /// List of senders that match transactions from block. pub senders: Vec
, } -impl Default for SealedBlockWithSenders { +impl Default for SealedBlockWithSenders { fn default() -> Self { Self { block: SealedBlock::default(), senders: Default::default() } } } -impl SealedBlockWithSenders { +impl SealedBlockWithSenders { /// New sealed block with sender. Return none if len of tx and senders does not match - pub fn new(block: SealedBlock, senders: Vec
) -> Option { + pub fn new(block: SealedBlock, senders: Vec
) -> Option { (block.body.transactions().len() == senders.len()).then_some(Self { block, senders }) } } -impl SealedBlockWithSenders { +impl SealedBlockWithSenders { /// Split Structure to its components #[inline] - pub fn into_components(self) -> (SealedBlock, Vec
) { + pub fn into_components(self) -> (SealedBlock, Vec
) { (self.block, self.senders) } /// Returns the unsealed [`BlockWithSenders`] #[inline] - pub fn unseal(self) -> BlockWithSenders { - let Self { block, senders } = self; - BlockWithSenders::new_unchecked(block.unseal(), senders) + pub fn unseal(self) -> BlockWithSenders { + let (block, senders) = self.into_components(); + let (header, body) = block.split(); + let header = header.unseal(); + BlockWithSenders::new_unchecked(B::new(header, body), senders) } /// Returns an iterator over all transactions in the block. #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { + pub fn transactions(&self) -> &[::Transaction] { self.block.body.transactions() } @@ -545,24 +540,34 @@ impl SealedBlockWithSenders { #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction)> + + '_ { self.senders.iter().zip(self.block.body.transactions()) } /// Consumes the block and returns the transactions of the block. #[inline] - pub fn into_transactions(self) -> Vec { - self.block.body.transactions + pub fn into_transactions( + self, + ) -> Vec<::Transaction> { + self.block.body.into_transactions() } /// Returns an iterator over all transactions in the chain. #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator { + ) -> impl Iterator< + Item = TransactionSignedEcRecovered< + ::Transaction, + >, + > + where + ::Transaction: SignedTransaction, + { self.block .body - .transactions + .into_transactions() .into_iter() .zip(self.senders) .map(|(tx, sender)| tx.with_signer(sender)) @@ -608,11 +613,6 @@ impl BlockBody { Block { header, body: self } } - /// Calculate the transaction root for the block body. - pub fn calculate_tx_root(&self) -> B256 { - crate::proofs::calculate_transaction_root(&self.transactions) - } - /// Calculate the ommers root for the block body. 
pub fn calculate_ommers_root(&self) -> B256 { crate::proofs::calculate_ommers_root(&self.ommers) @@ -624,20 +624,6 @@ impl BlockBody { self.withdrawals.as_ref().map(|w| crate::proofs::calculate_withdrawals_root(w)) } - /// Recover signer addresses for all transactions in the block body. - pub fn recover_signers(&self) -> Option> { - TransactionSigned::recover_signers(&self.transactions, self.transactions.len()) - } - - /// Recover signer addresses for all transactions in the block body _without ensuring that the - /// signature has a low `s` value_. - /// - /// Returns `None`, if some transaction's signature is invalid, see also - /// [`TransactionSigned::recover_signer_unchecked`]. - pub fn recover_signers_unchecked(&self) -> Option> { - TransactionSigned::recover_signers_unchecked(&self.transactions, self.transactions.len()) - } - /// Returns whether or not the block body contains any blob transactions. #[inline] pub fn has_blob_transactions(&self) -> bool { @@ -703,6 +689,10 @@ impl reth_primitives_traits::BlockBody for BlockBody { fn transactions(&self) -> &[Self::Transaction] { &self.transactions } + + fn into_transactions(self) -> Vec { + self.transactions + } } impl From for BlockBody { @@ -1168,9 +1158,9 @@ mod tests { Some(BlockWithSenders { block: block.clone(), senders: vec![sender] }) ); let sealed = block.seal_slow(); - assert_eq!(SealedBlockWithSenders::new(sealed.clone(), vec![]), None); + assert_eq!(SealedBlockWithSenders::::new(sealed.clone(), vec![]), None); assert_eq!( - SealedBlockWithSenders::new(sealed.clone(), vec![sender]), + SealedBlockWithSenders::::new(sealed.clone(), vec![sender]), Some(SealedBlockWithSenders { block: sealed, senders: vec![sender] }) ); } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 203880209a2..049d4f202a6 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -21,6 +21,9 @@ extern crate alloc; +mod traits; +pub use traits::*; + #[cfg(feature = "alloy-compat")] 
mod alloy_compat; mod block; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 1712112281f..81c26d7180e 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,7 +1,7 @@ //! Helper function for calculating Merkle proofs and hashes. -use crate::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned}; -use alloc::vec::Vec; +use crate::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; +use alloc::{borrow::Borrow, vec::Vec}; use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawal}; use alloy_primitives::{keccak256, B256}; @@ -12,9 +12,9 @@ use alloy_trie::root::{ordered_trie_root, ordered_trie_root_with_encoder}; /// `(rlp(index), encoded(tx))` pairs. pub fn calculate_transaction_root(transactions: &[T]) -> B256 where - T: AsRef, + T: Encodable2718, { - ordered_trie_root_with_encoder(transactions, |tx: &T, buf| tx.as_ref().encode_2718(buf)) + ordered_trie_root_with_encoder(transactions, |tx, buf| tx.borrow().encode_2718(buf)) } /// Calculates the root hash of the withdrawals. diff --git a/crates/primitives/src/traits.rs b/crates/primitives/src/traits.rs new file mode 100644 index 00000000000..73eabd8ec98 --- /dev/null +++ b/crates/primitives/src/traits.rs @@ -0,0 +1,123 @@ +use crate::{ + transaction::{recover_signers, recover_signers_unchecked}, + BlockWithSenders, SealedBlock, +}; +use alloc::vec::Vec; +use alloy_eips::eip2718::Encodable2718; +use reth_primitives_traits::{Block, BlockBody, SealedHeader, SignedTransaction}; +use revm_primitives::{Address, B256}; + +/// Extension trait for [`reth_primitives_traits::Block`] implementations +/// allowing for conversions into common block parts containers such as [`SealedBlock`], +/// [`BlockWithSenders`], etc. +pub trait BlockExt: Block { + /// Calculate the header hash and seal the block so that it can't be changed. 
+ fn seal_slow(self) -> SealedBlock { + let (header, body) = self.split(); + SealedBlock { header: SealedHeader::seal(header), body } + } + + /// Seal the block with a known hash. + /// + /// WARNING: This method does not perform validation whether the hash is correct. + fn seal(self, hash: B256) -> SealedBlock { + let (header, body) = self.split(); + SealedBlock { header: SealedHeader::new(header, hash), body } + } + + /// Expensive operation that recovers transaction signer. + fn senders(&self) -> Option> + where + ::Transaction: SignedTransaction, + { + self.body().recover_signers() + } + + /// Transform into a [`BlockWithSenders`]. + /// + /// # Panics + /// + /// If the number of senders does not match the number of transactions in the block + /// and the signer recovery for one of the transactions fails. + /// + /// Note: this is expected to be called with blocks read from disk. + #[track_caller] + fn with_senders_unchecked(self, senders: Vec
) -> BlockWithSenders + where + ::Transaction: SignedTransaction, + { + self.try_with_senders_unchecked(senders).expect("stored block is valid") + } + + /// Transform into a [`BlockWithSenders`] using the given senders. + /// + /// If the number of senders does not match the number of transactions in the block, this falls + /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. + /// See also [`recover_signers_unchecked`] + /// + /// Returns an error if a signature is invalid. + #[track_caller] + fn try_with_senders_unchecked( + self, + senders: Vec
, + ) -> Result, Self> + where + ::Transaction: SignedTransaction, + { + let senders = if self.body().transactions().len() == senders.len() { + senders + } else { + let Some(senders) = self.body().recover_signers_unchecked() else { return Err(self) }; + senders + }; + + Ok(BlockWithSenders::new_unchecked(self, senders)) + } + + /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained + /// transactions. + /// + /// Returns `None` if a transaction is invalid. + fn with_recovered_senders(self) -> Option> + where + ::Transaction: SignedTransaction, + { + let senders = self.senders()?; + Some(BlockWithSenders::new_unchecked(self, senders)) + } +} + +impl BlockExt for T {} + +/// Extension trait for [`BlockBody`] adding helper methods operating with transactions. +pub trait BlockBodyTxExt: BlockBody { + /// Calculate the transaction root for the block body. + fn calculate_tx_root(&self) -> B256 + where + Self::Transaction: Encodable2718, + { + crate::proofs::calculate_transaction_root(self.transactions()) + } + + /// Recover signer addresses for all transactions in the block body. + fn recover_signers(&self) -> Option> + where + Self::Transaction: SignedTransaction, + { + recover_signers(self.transactions(), self.transactions().len()) + } + + /// Recover signer addresses for all transactions in the block body _without ensuring that the + /// signature has a low `s` value_. + /// + /// Returns `None`, if some transaction's signature is invalid, see also + /// [`recover_signers_unchecked`]. + fn recover_signers_unchecked(&self) -> Option> + where + Self::Transaction: SignedTransaction, + { + recover_signers_unchecked(self.transactions(), self.transactions().len()) + } +} + +impl BlockBodyTxExt for T {} diff --git a/crates/primitives/src/traits/mod.rs b/crates/primitives/src/traits/mod.rs deleted file mode 100644 index 49fb73ea555..00000000000 --- a/crates/primitives/src/traits/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! 
Abstractions of primitive data types - -pub mod block; -pub mod transaction; - -pub use block::{body::BlockBody, Block}; -pub use transaction::signed::SignedTransaction; - -pub use alloy_consensus::BlockHeader; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index b8a3f4a719b..f61415ec2ec 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1712,6 +1712,11 @@ pub trait SignedTransactionIntoRecoveredExt: SignedTransaction { let signer = self.recover_signer_unchecked()?; Some(TransactionSignedEcRecovered::from_signed_transaction(self, signer)) } + + /// Returns the [`TransactionSignedEcRecovered`] transaction with the given sender. + fn with_signer(self, signer: Address) -> TransactionSignedEcRecovered { + TransactionSignedEcRecovered::from_signed_transaction(self, signer) + } } impl SignedTransactionIntoRecoveredExt for T where T: SignedTransaction {} @@ -1936,6 +1941,22 @@ where } } +/// Recovers a list of signers from a transaction list iterator _without ensuring that the +/// signature has a low `s` value_. +/// +/// Returns `None`, if some transaction's signature is invalid. 
+pub fn recover_signers_unchecked<'a, I, T>(txes: I, num_txes: usize) -> Option> +where + T: SignedTransaction, + I: IntoParallelIterator + IntoIterator + Send, +{ + if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { + txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect() + } else { + txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() + } +} + #[cfg(test)] mod tests { use crate::{ diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 71e8bf39f9e..390ea7d6ba4 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -5,7 +5,7 @@ use alloy_primitives::U64; use alloy_rpc_types_engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_api::clients::EngineApiClient; use reth_rpc_layer::JwtSecret; use reth_rpc_types_compat::engine::payload::{ diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 548f9101023..0f20cb4204e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -17,8 +17,8 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - proofs::calculate_transaction_root, Block, BlockBody, Receipt, SealedBlockWithSenders, - SealedHeader, TransactionSignedEcRecovered, + proofs::calculate_transaction_root, Block, BlockBody, BlockExt, Receipt, + SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 2223ecdc9f7..d898c0fe832 100644 --- 
a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -315,7 +315,7 @@ pub trait EthTransactions: LoadTransaction { { async move { if let Some(block) = self.block_with_senders(block_id).await? { - if let Some(tx) = block.transactions().nth(index) { + if let Some(tx) = block.transactions().get(index) { return Ok(Some(tx.encoded_2718().into())) } } diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index f2b1d93be83..564f5a939fc 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -43,7 +43,7 @@ pub fn from_block_with_tx_hashes( block_hash: Option, ) -> Block { let block_hash = block_hash.unwrap_or_else(|| block.header.hash_slow()); - let transactions = block.body.transactions().map(|tx| tx.hash()).collect(); + let transactions = block.body.transactions.iter().map(|tx| tx.hash()).collect(); from_block_with_transactions( block.length(), diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 7f260a7693c..46bc9502c57 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -15,7 +15,7 @@ use alloy_rpc_types_engine::{ }; use reth_primitives::{ proofs::{self}, - Block, BlockBody, SealedBlock, TransactionSigned, + Block, BlockBody, BlockExt, SealedBlock, TransactionSigned, }; /// Converts [`ExecutionPayloadV1`] to [`Block`] @@ -363,6 +363,7 @@ mod tests { CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, }; + use reth_primitives::BlockExt; #[test] fn roundtrip_payload_to_block() { diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index dd6bf9bbc24..5425de402f8 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -18,7 +18,7 @@ use reth_evm::{ execute::{BlockExecutorProvider, 
Executor}, ConfigureEvmEnv, }; -use reth_primitives::{Block, SealedBlockWithSenders}; +use reth_primitives::{Block, BlockExt, SealedBlockWithSenders}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, TransactionVariant, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index b90729c7131..c2de9292402 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -5,7 +5,7 @@ use std::{ use futures_util::TryStreamExt; use reth_codecs::Compact; -use reth_primitives_traits::BlockBody; +use reth_primitives_traits::{Block, BlockBody}; use tracing::*; use alloy_primitives::TxNumber; @@ -151,7 +151,7 @@ where + StaticFileProviderFactory + StatsReader + BlockReader - + BlockWriter, + + BlockWriter>, D: BodyDownloader>, { /// Return the id of the stage diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index 168ae94817b..5b686cfe109 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -44,7 +44,7 @@ where .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; let mut transactions_cursor = provider.tx_ref().cursor_read::::Primitives as NodePrimitives>::SignedTx, + ::SignedTx, >>()?; let transactions_walker = transactions_cursor.walk_range(block_body_indices.tx_num_range())?; diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 967ac785b47..6801abee40d 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -793,7 +793,9 @@ mod tests { use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_errors::ProviderError; use reth_execution_types::{Chain, 
ExecutionOutcome}; - use reth_primitives::{Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash}; + use reth_primitives::{ + BlockExt, Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, + }; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, ReceiptProvider, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index bf976203726..d45b4e53124 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -46,9 +46,9 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; -use reth_node_types::{NodeTypes, TxTy}; +use reth_node_types::{BlockTy, NodeTypes, TxTy}; use reth_primitives::{ - Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, NodePrimitives, Receipt, + Account, Block, BlockBody, BlockExt, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; @@ -380,7 +380,7 @@ impl DatabasePr /// Inserts an historical block. 
**Used for setting up test environments** pub fn insert_historical_block( &self, - block: SealedBlockWithSenders::Body>, + block: SealedBlockWithSenders<::Block>, ) -> ProviderResult { let ttd = if block.number == 0 { block.difficulty @@ -2751,7 +2751,7 @@ impl BlockExecu impl BlockWriter for DatabaseProvider { - type Body = <::Block as reth_primitives_traits::Block>::Body; + type Block = BlockTy; /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) @@ -2775,7 +2775,7 @@ impl BlockWrite /// [`TransactionHashNumbers`](tables::TransactionHashNumbers). fn insert_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, write_transactions_to: StorageLocation, ) -> ProviderResult { let block_number = block.number; @@ -2849,7 +2849,7 @@ impl BlockWrite fn append_block_bodies( &self, - bodies: Vec<(BlockNumber, Option)>, + bodies: Vec<(BlockNumber, Option<::Body>)>, write_transactions_to: StorageLocation, ) -> ProviderResult<()> { let Some(from_block) = bodies.first().map(|(block, _)| *block) else { return Ok(()) }; @@ -2868,11 +2868,7 @@ impl BlockWrite // Initialize cursor if we will be writing transactions to database let mut tx_cursor = write_transactions_to .database() - .then(|| { - self.tx.cursor_write::::Transaction, - >>() - }) + .then(|| self.tx.cursor_write::>>()) .transpose()?; // Get id for the next tx_num of zero if there are no transactions. 
@@ -3017,7 +3013,7 @@ impl BlockWrite /// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually fn append_blocks_with_state( &self, - blocks: Vec>, + blocks: Vec>, execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 68d1a168f15..497f817b88a 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -78,6 +78,7 @@ where Storage: ChainStorage, Primitives: FullNodePrimitives< SignedTx: Value + From + Into, + BlockHeader = alloy_consensus::Header, >, >, { @@ -89,6 +90,7 @@ impl NodeTypesForProvider for T where Storage: ChainStorage, Primitives: FullNodePrimitives< SignedTx: Value + From + Into, + BlockHeader = alloy_consensus::Header, >, > { diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index c2ce477051d..aec54362656 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,4 +1,3 @@ -use alloy_consensus::Header; use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; @@ -62,7 +61,7 @@ pub trait StateReader: Send + Sync { #[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockWriter: Send + Sync { /// The body this writer can write. - type Body: Send + Sync; + type Block: reth_primitives_traits::Block; /// Insert full block and make it canonical. Parent tx num and transition id is taken from /// parent block in database. @@ -71,7 +70,7 @@ pub trait BlockWriter: Send + Sync { /// transition in the block. 
fn insert_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, write_transactions_to: StorageLocation, ) -> ProviderResult; @@ -82,7 +81,7 @@ pub trait BlockWriter: Send + Sync { /// Bodies are passed as [`Option`]s, if body is `None` the corresponding block is empty. fn append_block_bodies( &self, - bodies: Vec<(BlockNumber, Option)>, + bodies: Vec<(BlockNumber, Option<::Body>)>, write_transactions_to: StorageLocation, ) -> ProviderResult<()>; @@ -118,7 +117,7 @@ pub trait BlockWriter: Send + Sync { /// Returns `Ok(())` on success, or an error if any operation fails. fn append_blocks_with_state( &self, - blocks: Vec>, + blocks: Vec>, execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 30c5f0d5291..e91ea3bea3e 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -15,7 +15,7 @@ use reth_db::{ }; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment}; +use reth_primitives::{SealedBlock, StaticFileSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, @@ -148,7 +148,7 @@ impl UnifiedStorageWriter<'_, (), ()> { impl UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider> where ProviderDB: DBProvider - + BlockWriter + + BlockWriter + TransactionsProviderExt + StateChangeWriter + TrieWriter diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index d2077edafff..5ab85119184 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -98,6 +98,7 @@ where fn 
process_block(&mut self, block: &SealedBlockWithSenders) { let txs: Vec<_> = block .transactions() + .iter() .filter(|tx| tx.is_eip4844()) .map(|tx| (tx.clone(), tx.blob_versioned_hashes().unwrap().len())) .collect(); @@ -191,6 +192,7 @@ where for (_, block) in old.blocks().iter() { let txs: Vec = block .transactions() + .iter() .filter(|tx: &&reth::primitives::TransactionSigned| { tx.is_eip4844() }) diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs index 7fa44418c52..42bb83782aa 100644 --- a/examples/custom-dev-node/src/main.rs +++ b/examples/custom-dev-node/src/main.rs @@ -50,7 +50,7 @@ async fn main() -> eyre::Result<()> { let head = notifications.next().await.unwrap(); - let tx = head.tip().transactions().next().unwrap(); + let tx = &head.tip().transactions()[0]; assert_eq!(tx.hash(), hash); println!("mined transaction: {hash}"); Ok(()) diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index 2e264d017a3..14001d147f2 100644 --- a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -9,7 +9,7 @@ use reth::{ use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig}; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator}; -use reth_primitives::SealedHeader; +use reth_primitives::{BlockExt, SealedHeader}; use std::sync::Arc; /// The generator type that creates new jobs that builds empty blocks. 
diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 179d1216053..e2955d01768 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -4,7 +4,7 @@ use reth_chainspec::ChainSpecBuilder; use reth_db::{open_db_read_only, DatabaseEnv}; use reth_node_ethereum::EthereumNode; use reth_node_types::NodeTypesWithDBAdapter; -use reth_primitives::{SealedHeader, TransactionSigned}; +use reth_primitives::{BlockExt, SealedHeader, TransactionSigned}; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, From caac226c73ad119bbbf68632337587de17dafb57 Mon Sep 17 00:00:00 2001 From: Jun Song <87601811+syjn99@users.noreply.github.com> Date: Mon, 25 Nov 2024 21:54:45 +0900 Subject: [PATCH 674/970] chore: make CanonicalInMemoryState generic over sealed header and block (#12835) --- Cargo.lock | 1 + crates/chain-state/Cargo.toml | 18 ++--- crates/chain-state/src/chain_info.rs | 70 +++++++++++--------- crates/chain-state/src/in_memory.rs | 51 ++++++++------ crates/primitives-traits/src/lib.rs | 2 +- crates/primitives-traits/src/node.rs | 8 ++- crates/storage/provider/src/providers/mod.rs | 2 +- 7 files changed, 92 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6463fd11a01..9641d8e26ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6614,6 +6614,7 @@ dependencies = [ "reth-execution-types", "reth-metrics", "reth-primitives", + "reth-primitives-traits", "reth-storage-api", "reth-testing-utils", "reth-trie", diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index ff62b76e5df..2f4bed00f95 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -18,6 +18,7 @@ reth-errors.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true 
reth-storage-api.workspace = true reth-trie.workspace = true @@ -56,12 +57,13 @@ revm.workspace = true [features] test-utils = [ - "alloy-signer", - "alloy-signer-local", - "rand", - "revm", - "reth-chainspec/test-utils", - "reth-primitives/test-utils", - "reth-trie/test-utils", - "revm?/test-utils", + "alloy-signer", + "alloy-signer-local", + "rand", + "revm", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-trie/test-utils", + "revm?/test-utils", + "reth-primitives-traits/test-utils" ] diff --git a/crates/chain-state/src/chain_info.rs b/crates/chain-state/src/chain_info.rs index 3c75544ac46..1b8575005c4 100644 --- a/crates/chain-state/src/chain_info.rs +++ b/crates/chain-state/src/chain_info.rs @@ -1,8 +1,9 @@ +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use alloy_primitives::BlockNumber; use parking_lot::RwLock; use reth_chainspec::ChainInfo; -use reth_primitives::SealedHeader; +use reth_primitives::{NodePrimitives, SealedHeader}; use std::{ sync::{ atomic::{AtomicU64, Ordering}, @@ -14,17 +15,21 @@ use tokio::sync::watch; /// Tracks the chain info: canonical head, safe block, finalized block. #[derive(Debug, Clone)] -pub struct ChainInfoTracker { - inner: Arc, +pub struct ChainInfoTracker { + inner: Arc>, } -impl ChainInfoTracker { +impl ChainInfoTracker +where + N: NodePrimitives, + N::BlockHeader: BlockHeader, +{ /// Create a new chain info container for the given canonical head and finalized header if it /// exists. 
pub fn new( - head: SealedHeader, - finalized: Option, - safe: Option, + head: SealedHeader, + finalized: Option>, + safe: Option>, ) -> Self { let (finalized_block, _) = watch::channel(finalized); let (safe_block, _) = watch::channel(safe); @@ -33,7 +38,7 @@ impl ChainInfoTracker { inner: Arc::new(ChainInfoInner { last_forkchoice_update: RwLock::new(None), last_transition_configuration_exchange: RwLock::new(None), - canonical_head_number: AtomicU64::new(head.number), + canonical_head_number: AtomicU64::new(head.number()), canonical_head: RwLock::new(head), safe_block, finalized_block, @@ -44,7 +49,7 @@ impl ChainInfoTracker { /// Returns the [`ChainInfo`] for the canonical head. pub fn chain_info(&self) -> ChainInfo { let inner = self.inner.canonical_head.read(); - ChainInfo { best_hash: inner.hash(), best_number: inner.number } + ChainInfo { best_hash: inner.hash(), best_number: inner.number() } } /// Update the timestamp when we received a forkchoice update. @@ -68,17 +73,17 @@ impl ChainInfoTracker { } /// Returns the canonical head of the chain. - pub fn get_canonical_head(&self) -> SealedHeader { + pub fn get_canonical_head(&self) -> SealedHeader { self.inner.canonical_head.read().clone() } /// Returns the safe header of the chain. - pub fn get_safe_header(&self) -> Option { + pub fn get_safe_header(&self) -> Option> { self.inner.safe_block.borrow().clone() } /// Returns the finalized header of the chain. - pub fn get_finalized_header(&self) -> Option { + pub fn get_finalized_header(&self) -> Option> { self.inner.finalized_block.borrow().clone() } @@ -104,8 +109,8 @@ impl ChainInfoTracker { } /// Sets the canonical head of the chain. - pub fn set_canonical_head(&self, header: SealedHeader) { - let number = header.number; + pub fn set_canonical_head(&self, header: SealedHeader) { + let number = header.number(); *self.inner.canonical_head.write() = header; // also update the atomic number. 
@@ -113,7 +118,7 @@ impl ChainInfoTracker { } /// Sets the safe header of the chain. - pub fn set_safe(&self, header: SealedHeader) { + pub fn set_safe(&self, header: SealedHeader) { self.inner.safe_block.send_if_modified(|current_header| { if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) { let _ = current_header.replace(header); @@ -125,7 +130,7 @@ impl ChainInfoTracker { } /// Sets the finalized header of the chain. - pub fn set_finalized(&self, header: SealedHeader) { + pub fn set_finalized(&self, header: SealedHeader) { self.inner.finalized_block.send_if_modified(|current_header| { if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) { let _ = current_header.replace(header); @@ -137,19 +142,21 @@ impl ChainInfoTracker { } /// Subscribe to the finalized block. - pub fn subscribe_finalized_block(&self) -> watch::Receiver> { + pub fn subscribe_finalized_block( + &self, + ) -> watch::Receiver>> { self.inner.finalized_block.subscribe() } /// Subscribe to the safe block. - pub fn subscribe_safe_block(&self) -> watch::Receiver> { + pub fn subscribe_safe_block(&self) -> watch::Receiver>> { self.inner.safe_block.subscribe() } } /// Container type for all chain info fields #[derive(Debug)] -struct ChainInfoInner { +struct ChainInfoInner { /// Timestamp when we received the last fork choice update. /// /// This is mainly used to track if we're connected to a beacon node. @@ -161,16 +168,17 @@ struct ChainInfoInner { /// Tracks the number of the `canonical_head`. canonical_head_number: AtomicU64, /// The canonical head of the chain. - canonical_head: RwLock, + canonical_head: RwLock>, /// The block that the beacon node considers safe. - safe_block: watch::Sender>, + safe_block: watch::Sender>>, /// The block that the beacon node considers finalized. 
- finalized_block: watch::Sender>, + finalized_block: watch::Sender>>, } #[cfg(test)] mod tests { use super::*; + use reth_primitives::EthPrimitives; use reth_testing_utils::{generators, generators::random_header}; #[test] @@ -180,7 +188,8 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header.clone(), None, None); + let tracker: ChainInfoTracker = + ChainInfoTracker::new(header.clone(), None, None); // Fetch the chain information from the tracker let chain_info = tracker.chain_info(); @@ -197,7 +206,7 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header, None, None); // Assert that there has been no forkchoice update yet (the timestamp is None) assert!(tracker.last_forkchoice_update_received_at().is_none()); @@ -216,7 +225,7 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header, None, None); // Assert that there has been no transition configuration exchange yet (the timestamp is // None) @@ -239,7 +248,7 @@ mod tests { let header2 = random_header(&mut rng, 20, None); // Create a new chain info tracker with the first header - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header1, None, None); // Set the second header as the canonical head of the tracker tracker.set_canonical_head(header2.clone()); @@ -260,7 +269,7 @@ mod tests { let header2 = random_header(&mut rng, 20, None); // Create a new chain info tracker with the first header (header1) - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = 
ChainInfoTracker::new(header1, None, None); // Call the set_safe method with the second header (header2) tracker.set_safe(header2.clone()); @@ -306,7 +315,7 @@ mod tests { let header3 = random_header(&mut rng, 30, None); // Create a new chain info tracker with the first header - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header1, None, None); // Initial state: finalize header should be None assert!(tracker.get_finalized_header().is_none()); @@ -343,7 +352,7 @@ mod tests { let finalized_header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the finalized header - let tracker = + let tracker: ChainInfoTracker = ChainInfoTracker::new(finalized_header.clone(), Some(finalized_header.clone()), None); // Assert that the BlockNumHash returned matches the finalized header @@ -357,7 +366,8 @@ mod tests { let safe_header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the safe header - let tracker = ChainInfoTracker::new(safe_header.clone(), None, None); + let tracker: ChainInfoTracker = + ChainInfoTracker::new(safe_header.clone(), None, None); tracker.set_safe(safe_header.clone()); // Assert that the BlockNumHash returned matches the safe header diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 24f394a761f..f9a10fc5543 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -134,7 +134,7 @@ impl InMemoryState { pub(crate) struct CanonicalInMemoryStateInner { /// Tracks certain chain information, such as the canonical head, safe head, and finalized /// head. - pub(crate) chain_info_tracker: ChainInfoTracker, + pub(crate) chain_info_tracker: ChainInfoTracker, /// Tracks blocks at the tip of the chain that have not been persisted to disk yet. pub(crate) in_memory_state: InMemoryState, /// A broadcast stream that emits events when the canonical chain is updated. 
@@ -158,6 +158,11 @@ impl CanonicalInMemoryStateInner { } } +type PendingBlockAndReceipts = ( + SealedBlock, reth_primitives_traits::BodyTy>, + Vec>, +); + /// This type is responsible for providing the blocks, receipts, and state for /// all canonical blocks not on disk yet and keeps track of the block range that /// is in memory. @@ -166,15 +171,21 @@ pub struct CanonicalInMemoryState>, } -impl CanonicalInMemoryState { +impl CanonicalInMemoryState +where + N: NodePrimitives< + BlockHeader = alloy_consensus::Header, + BlockBody = reth_primitives::BlockBody, + >, +{ /// Create a new in-memory state with the given blocks, numbers, pending state, and optional /// finalized header. pub fn new( blocks: HashMap>>, numbers: BTreeMap, pending: Option>, - finalized: Option, - safe: Option, + finalized: Option>, + safe: Option>, ) -> Self { let in_memory_state = InMemoryState::new(blocks, numbers, pending); let header = in_memory_state @@ -201,9 +212,9 @@ impl CanonicalInMemoryState { /// Create a new in memory state with the given local head and finalized header /// if it exists. pub fn with_head( - head: SealedHeader, - finalized: Option, - safe: Option, + head: SealedHeader, + finalized: Option>, + safe: Option>, ) -> Self { let chain_info_tracker = ChainInfoTracker::new(head, finalized, safe); let in_memory_state = InMemoryState::default(); @@ -224,7 +235,7 @@ impl CanonicalInMemoryState { } /// Returns the header corresponding to the given hash. - pub fn header_by_hash(&self, hash: B256) -> Option { + pub fn header_by_hash(&self, hash: B256) -> Option> { self.state_by_hash(hash).map(|block| block.block_ref().block.header.clone()) } @@ -427,37 +438,37 @@ impl CanonicalInMemoryState { } /// Canonical head setter. - pub fn set_canonical_head(&self, header: SealedHeader) { + pub fn set_canonical_head(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_canonical_head(header); } /// Safe head setter. 
- pub fn set_safe(&self, header: SealedHeader) { + pub fn set_safe(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_safe(header); } /// Finalized head setter. - pub fn set_finalized(&self, header: SealedHeader) { + pub fn set_finalized(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_finalized(header); } /// Canonical head getter. - pub fn get_canonical_head(&self) -> SealedHeader { + pub fn get_canonical_head(&self) -> SealedHeader { self.inner.chain_info_tracker.get_canonical_head() } /// Finalized header getter. - pub fn get_finalized_header(&self) -> Option { + pub fn get_finalized_header(&self) -> Option> { self.inner.chain_info_tracker.get_finalized_header() } /// Safe header getter. - pub fn get_safe_header(&self) -> Option { + pub fn get_safe_header(&self) -> Option> { self.inner.chain_info_tracker.get_safe_header() } /// Returns the `SealedHeader` corresponding to the pending state. - pub fn pending_sealed_header(&self) -> Option { + pub fn pending_sealed_header(&self) -> Option> { self.pending_state().map(|h| h.block_ref().block().header.clone()) } @@ -467,7 +478,7 @@ impl CanonicalInMemoryState { } /// Returns the `SealedBlock` corresponding to the pending state. - pub fn pending_block(&self) -> Option { + pub fn pending_block(&self) -> Option> { self.pending_state().map(|block_state| block_state.block_ref().block().clone()) } @@ -479,7 +490,7 @@ impl CanonicalInMemoryState { /// Returns a tuple with the `SealedBlock` corresponding to the pending /// state and a vector of its `Receipt`s. - pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { + pub fn pending_block_and_receipts(&self) -> Option> { self.pending_state().map(|block_state| { (block_state.block_ref().block().clone(), block_state.executed_block_receipts()) }) @@ -491,12 +502,14 @@ impl CanonicalInMemoryState { } /// Subscribe to new safe block events. 
- pub fn subscribe_safe_block(&self) -> watch::Receiver> { + pub fn subscribe_safe_block(&self) -> watch::Receiver>> { self.inner.chain_info_tracker.subscribe_safe_block() } /// Subscribe to new finalized block events. - pub fn subscribe_finalized_block(&self) -> watch::Receiver> { + pub fn subscribe_finalized_block( + &self, + ) -> watch::Receiver>> { self.inner.chain_info_tracker.subscribe_finalized_block() } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 4d068b2ff4d..338f8f621e1 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -78,7 +78,7 @@ pub use size::InMemorySize; /// Node traits pub mod node; -pub use node::{FullNodePrimitives, NodePrimitives, ReceiptTy}; +pub use node::{BodyTy, FullNodePrimitives, HeaderTy, NodePrimitives, ReceiptTy}; /// Helper trait that requires arbitrary implementation if the feature is enabled. #[cfg(any(feature = "test-utils", feature = "arbitrary"))] diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 904ed7d12f1..fa1236b6dd3 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -109,5 +109,11 @@ impl FullNodePrimitives for T where { } -/// Helper adapter type for accessing [`NodePrimitives`] receipt type. +/// Helper adapter type for accessing [`NodePrimitives`] block header types. +pub type HeaderTy = ::BlockHeader; + +/// Helper adapter type for accessing [`NodePrimitives`] block body types. +pub type BodyTy = ::BlockBody; + +/// Helper adapter type for accessing [`NodePrimitives`] receipt types. pub type ReceiptTy = ::Receipt; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 497f817b88a..6d8e3ed5e17 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -117,7 +117,7 @@ pub struct BlockchainProvider { /// The blockchain tree instance. 
tree: Arc, /// Tracks the chain info wrt forkchoice updates - chain_info: ChainInfoTracker, + chain_info: ChainInfoTracker, } impl Clone for BlockchainProvider { From 04dd005af9a8f820ac2e008ed0b4162c4b5b77eb Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 25 Nov 2024 13:13:01 +0000 Subject: [PATCH 675/970] feat(trie): sparse trie methods for trie task integration (#12720) --- Cargo.lock | 1 + crates/trie/sparse/Cargo.toml | 1 + crates/trie/sparse/src/state.rs | 263 ++++++++++++++++++++++++++++++-- crates/trie/sparse/src/trie.rs | 92 +++++++++-- crates/trie/trie/src/updates.rs | 15 +- 5 files changed, 344 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9641d8e26ae..a3ccf46b34e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9454,6 +9454,7 @@ version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", + "arbitrary", "assert_matches", "criterion", "itertools 0.13.0", diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 3301975961e..4c64bf716de 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -32,6 +32,7 @@ reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +arbitrary.workspace = true assert_matches.workspace = true criterion.workspace = true itertools.workspace = true diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 0b0db140115..b76da793710 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -6,17 +6,23 @@ use alloy_primitives::{ Bytes, B256, }; use alloy_rlp::Decodable; -use reth_trie::{Nibbles, TrieNode}; +use reth_trie::{ + updates::{StorageTrieUpdates, TrieUpdates}, + Nibbles, TrieNode, +}; /// Sparse state trie representing lazy-loaded Ethereum state trie. #[derive(Default, Debug)] pub struct SparseStateTrie { + retain_updates: bool, /// Sparse account trie. 
- pub(crate) state: SparseTrie, + state: SparseTrie, /// Sparse storage tries. - pub(crate) storages: HashMap, + storages: HashMap, /// Collection of revealed account and storage keys. - pub(crate) revealed: HashMap>, + revealed: HashMap>, + /// Collection of addresses that had their storage tries wiped. + wiped_storages: HashSet, } impl SparseStateTrie { @@ -25,6 +31,12 @@ impl SparseStateTrie { Self { state, ..Default::default() } } + /// Set the retention of branch node updates and deletions. + pub const fn with_updates(mut self, retain_updates: bool) -> Self { + self.retain_updates = retain_updates; + self + } + /// Returns `true` if account was already revealed. pub fn is_account_revealed(&self, account: &B256) -> bool { self.revealed.contains_key(account) @@ -42,7 +54,7 @@ impl SparseStateTrie { account: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { - if self.revealed.contains_key(&account) { + if self.is_account_revealed(&account) { return Ok(()); } @@ -51,7 +63,7 @@ impl SparseStateTrie { let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. - let trie = self.state.reveal_root(root_node)?; + let trie = self.state.reveal_root(root_node, self.retain_updates)?; // Reveal the remaining proof nodes. for (path, bytes) in proof { @@ -73,7 +85,7 @@ impl SparseStateTrie { slot: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { - if self.revealed.get(&account).is_some_and(|v| v.contains(&slot)) { + if self.is_storage_slot_revealed(&account, &slot) { return Ok(()); } @@ -82,7 +94,11 @@ impl SparseStateTrie { let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. - let trie = self.storages.entry(account).or_default().reveal_root(root_node)?; + let trie = self + .storages + .entry(account) + .or_default() + .reveal_root(root_node, self.retain_updates)?; // Reveal the remaining proof nodes. 
for (path, bytes) in proof { @@ -118,30 +134,98 @@ impl SparseStateTrie { Ok(Some(root_node)) } - /// Update the leaf node. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseStateTrieResult<()> { + /// Update the account leaf node. + pub fn update_account_leaf( + &mut self, + path: Nibbles, + value: Vec, + ) -> SparseStateTrieResult<()> { self.state.update_leaf(path, value)?; Ok(()) } + /// Remove the account leaf node. + pub fn remove_account_leaf(&mut self, path: &Nibbles) -> SparseStateTrieResult<()> { + self.state.remove_leaf(path)?; + Ok(()) + } + /// Returns sparse trie root if the trie has been revealed. pub fn root(&mut self) -> Option { self.state.root() } + /// Calculates the hashes of the nodes below the provided level. + pub fn calculate_below_level(&mut self, level: usize) { + self.state.calculate_below_level(level); + } + + /// Update the leaf node of a storage trie at the provided address. + pub fn update_storage_leaf( + &mut self, + address: B256, + slot: Nibbles, + value: Vec, + ) -> SparseStateTrieResult<()> { + self.storages.entry(address).or_default().update_leaf(slot, value)?; + Ok(()) + } + + /// Wipe the storage trie at the provided address. + pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> { + let Some(trie) = self.storages.get_mut(&address) else { return Ok(()) }; + self.wiped_storages.insert(address); + trie.wipe().map_err(Into::into) + } + /// Returns storage sparse trie root if the trie has been revealed. pub fn storage_root(&mut self, account: B256) -> Option { self.storages.get_mut(&account).and_then(|trie| trie.root()) } + + /// Returns [`TrieUpdates`] by taking the updates from the revealed sparse tries. + /// + /// Returns `None` if the accounts trie is not revealed. 
+ pub fn take_trie_updates(&mut self) -> Option { + self.state.as_revealed_mut().map(|state| { + let updates = state.take_updates(); + TrieUpdates { + account_nodes: HashMap::from_iter(updates.updated_nodes), + removed_nodes: HashSet::from_iter(updates.removed_nodes), + storage_tries: self + .storages + .iter_mut() + .map(|(address, trie)| { + let trie = trie.as_revealed_mut().unwrap(); + let updates = trie.take_updates(); + let updates = StorageTrieUpdates { + is_deleted: self.wiped_storages.contains(address), + storage_nodes: HashMap::from_iter(updates.updated_nodes), + removed_nodes: HashSet::from_iter(updates.removed_nodes), + }; + (*address, updates) + }) + .filter(|(_, updates)| !updates.is_empty()) + .collect(), + } + }) + } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::Bytes; + use alloy_primitives::{b256, Bytes, U256}; use alloy_rlp::EMPTY_STRING_CODE; + use arbitrary::Arbitrary; use assert_matches::assert_matches; - use reth_trie::HashBuilder; + use itertools::Itertools; + use rand::{rngs::StdRng, Rng, SeedableRng}; + use reth_primitives_traits::Account; + use reth_trie::{ + updates::StorageTrieUpdates, BranchNodeCompact, HashBuilder, TrieAccount, TrieMask, + EMPTY_ROOT_HASH, + }; use reth_trie_common::proof::ProofRetainer; #[test] @@ -199,4 +283,159 @@ mod tests { HashMap::from_iter([(Default::default(), SparseTrie::revealed_empty())]) ); } + + #[test] + fn take_trie_updates() { + reth_tracing::init_test_tracing(); + + // let mut rng = generators::rng(); + let mut rng = StdRng::seed_from_u64(1); + + let mut bytes = [0u8; 1024]; + rng.fill(bytes.as_mut_slice()); + + let slot_1 = b256!("1000000000000000000000000000000000000000000000000000000000000000"); + let slot_path_1 = Nibbles::unpack(slot_1); + let value_1 = U256::from(rng.gen::()); + let slot_2 = b256!("1100000000000000000000000000000000000000000000000000000000000000"); + let slot_path_2 = Nibbles::unpack(slot_2); + let value_2 = U256::from(rng.gen::()); + let slot_3 = 
b256!("2000000000000000000000000000000000000000000000000000000000000000"); + let slot_path_3 = Nibbles::unpack(slot_3); + let value_3 = U256::from(rng.gen::()); + + let mut storage_hash_builder = + HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter([ + slot_path_1.clone(), + slot_path_2.clone(), + ])); + storage_hash_builder.add_leaf(slot_path_1.clone(), &alloy_rlp::encode_fixed_size(&value_1)); + storage_hash_builder.add_leaf(slot_path_2.clone(), &alloy_rlp::encode_fixed_size(&value_2)); + + let storage_root = storage_hash_builder.root(); + let proof_nodes = storage_hash_builder.take_proof_nodes(); + let storage_proof_1 = proof_nodes + .iter() + .filter(|(path, _)| path.is_empty() || slot_path_1.common_prefix_length(path) > 0) + .map(|(path, proof)| (path.clone(), proof.clone())) + .sorted_by_key(|(path, _)| path.clone()) + .collect::>(); + let storage_proof_2 = proof_nodes + .iter() + .filter(|(path, _)| path.is_empty() || slot_path_2.common_prefix_length(path) > 0) + .map(|(path, proof)| (path.clone(), proof.clone())) + .sorted_by_key(|(path, _)| path.clone()) + .collect::>(); + + let address_1 = b256!("1000000000000000000000000000000000000000000000000000000000000000"); + let address_path_1 = Nibbles::unpack(address_1); + let account_1 = Account::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + let mut trie_account_1 = TrieAccount::from((account_1, storage_root)); + let address_2 = b256!("1100000000000000000000000000000000000000000000000000000000000000"); + let address_path_2 = Nibbles::unpack(address_2); + let account_2 = Account::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + let mut trie_account_2 = TrieAccount::from((account_2, EMPTY_ROOT_HASH)); + + let mut hash_builder = + HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter([ + address_path_1.clone(), + address_path_2.clone(), + ])); + hash_builder.add_leaf(address_path_1.clone(), &alloy_rlp::encode(trie_account_1)); + 
hash_builder.add_leaf(address_path_2.clone(), &alloy_rlp::encode(trie_account_2)); + + let root = hash_builder.root(); + let proof_nodes = hash_builder.take_proof_nodes(); + let proof_1 = proof_nodes + .iter() + .filter(|(path, _)| path.is_empty() || address_path_1.common_prefix_length(path) > 0) + .map(|(path, proof)| (path.clone(), proof.clone())) + .sorted_by_key(|(path, _)| path.clone()) + .collect::>(); + let proof_2 = proof_nodes + .iter() + .filter(|(path, _)| path.is_empty() || address_path_2.common_prefix_length(path) > 0) + .map(|(path, proof)| (path.clone(), proof.clone())) + .sorted_by_key(|(path, _)| path.clone()) + .collect::>(); + + let mut sparse = SparseStateTrie::default().with_updates(true); + sparse.reveal_account(address_1, proof_1).unwrap(); + sparse.reveal_account(address_2, proof_2).unwrap(); + sparse.reveal_storage_slot(address_1, slot_1, storage_proof_1.clone()).unwrap(); + sparse.reveal_storage_slot(address_1, slot_2, storage_proof_2.clone()).unwrap(); + sparse.reveal_storage_slot(address_2, slot_1, storage_proof_1).unwrap(); + sparse.reveal_storage_slot(address_2, slot_2, storage_proof_2).unwrap(); + + assert_eq!(sparse.root(), Some(root)); + + let address_3 = b256!("2000000000000000000000000000000000000000000000000000000000000000"); + let address_path_3 = Nibbles::unpack(address_3); + let account_3 = Account { nonce: account_1.nonce + 1, ..account_1 }; + let trie_account_3 = TrieAccount::from((account_3, EMPTY_ROOT_HASH)); + + sparse.update_account_leaf(address_path_3, alloy_rlp::encode(trie_account_3)).unwrap(); + + sparse.update_storage_leaf(address_1, slot_path_3, alloy_rlp::encode(value_3)).unwrap(); + trie_account_1.storage_root = sparse.storage_root(address_1).unwrap(); + sparse.update_account_leaf(address_path_1, alloy_rlp::encode(trie_account_1)).unwrap(); + + sparse.wipe_storage(address_2).unwrap(); + trie_account_2.storage_root = sparse.storage_root(address_2).unwrap(); + sparse.update_account_leaf(address_path_2, 
alloy_rlp::encode(trie_account_2)).unwrap(); + + sparse.root(); + + let sparse_updates = sparse.take_trie_updates().unwrap(); + // TODO(alexey): assert against real state root calculation updates + pretty_assertions::assert_eq!( + sparse_updates, + TrieUpdates { + account_nodes: HashMap::from_iter([ + ( + Nibbles::default(), + BranchNodeCompact { + state_mask: TrieMask::new(0b110), + tree_mask: TrieMask::new(0b000), + hash_mask: TrieMask::new(0b010), + hashes: vec![b256!( + "4c4ffbda3569fcf2c24ea2000b4cec86ef8b92cbf9ff415db43184c0f75a212e" + )], + root_hash: Some(b256!( + "60944bd29458529c3065d19f63c6e3d5269596fd3b04ca2e7b318912dc89ca4c" + )) + }, + ), + ]), + storage_tries: HashMap::from_iter([ + ( + b256!("1000000000000000000000000000000000000000000000000000000000000000"), + StorageTrieUpdates { + is_deleted: false, + storage_nodes: HashMap::from_iter([( + Nibbles::default(), + BranchNodeCompact { + state_mask: TrieMask::new(0b110), + tree_mask: TrieMask::new(0b000), + hash_mask: TrieMask::new(0b010), + hashes: vec![b256!("5bc8b4fdf51839c1e18b8d6a4bd3e2e52c9f641860f0e4d197b68c2679b0e436")], + root_hash: Some(b256!("c44abf1a9e1a92736ac479b20328e8d7998aa8838b6ef52620324c9ce85e3201")) + } + )]), + removed_nodes: HashSet::default() + } + ), + ( + b256!("1100000000000000000000000000000000000000000000000000000000000000"), + StorageTrieUpdates { + is_deleted: true, + storage_nodes: HashMap::default(), + removed_nodes: HashSet::default() + } + ) + ]), + removed_nodes: HashSet::default() + } + ); + } } diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index dff29027175..bab166d7831 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -53,9 +53,13 @@ impl SparseTrie { /// # Returns /// /// Mutable reference to [`RevealedSparseTrie`]. 
- pub fn reveal_root(&mut self, root: TrieNode) -> SparseTrieResult<&mut RevealedSparseTrie> { + pub fn reveal_root( + &mut self, + root: TrieNode, + retain_updates: bool, + ) -> SparseTrieResult<&mut RevealedSparseTrie> { if self.is_blind() { - *self = Self::Revealed(Box::new(RevealedSparseTrie::from_root(root)?)) + *self = Self::Revealed(Box::new(RevealedSparseTrie::from_root(root, retain_updates)?)) } Ok(self.as_revealed_mut().unwrap()) } @@ -67,10 +71,29 @@ impl SparseTrie { Ok(()) } + /// Remove the leaf node. + pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.remove_leaf(path)?; + Ok(()) + } + + /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. + pub fn wipe(&mut self) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.wipe(); + Ok(()) + } + /// Calculates and returns the trie root if the trie has been revealed. pub fn root(&mut self) -> Option { Some(self.as_revealed_mut()?.root()) } + + /// Calculates the hashes of the nodes below the provided level. + pub fn calculate_below_level(&mut self, level: usize) { + self.as_revealed_mut().unwrap().update_rlp_node_level(level); + } } /// The representation of revealed sparse trie. @@ -120,19 +143,20 @@ impl Default for RevealedSparseTrie { impl RevealedSparseTrie { /// Create new revealed sparse trie from the given root node. - pub fn from_root(node: TrieNode) -> SparseTrieResult { + pub fn from_root(node: TrieNode, retain_updates: bool) -> SparseTrieResult { let mut this = Self { nodes: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), updates: None, - }; + } + .with_updates(retain_updates); this.reveal_node(Nibbles::default(), node)?; Ok(this) } - /// Makes the sparse trie to store updated branch nodes. 
+ /// Set the retention of branch node updates and deletions. pub fn with_updates(mut self, retain_updates: bool) -> Self { if retain_updates { self.updates = Some(SparseTrieUpdates::default()); @@ -580,6 +604,12 @@ impl RevealedSparseTrie { Ok(nodes) } + /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. + pub fn wipe(&mut self) { + *self = Self::default(); + self.prefix_set = PrefixSetMut::all(); + } + /// Return the root of the sparse trie. /// Updates all remaining dirty nodes before calculating the root. pub fn root(&mut self) -> B256 { @@ -773,8 +803,7 @@ impl RevealedSparseTrie { } // Set the hash mask. If a child node has a hash value AND is a - // branch node, set the hash mask - // and save the hash. + // branch node, set the hash mask and save the hash. let hash = child.as_hash().filter(|_| node_type.is_branch()); hash_mask_values.push(hash.is_some()); if let Some(hash) = hash { @@ -998,8 +1027,8 @@ impl RlpNodeBuffers { /// The aggregation of sparse trie updates. 
#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct SparseTrieUpdates { - updated_nodes: HashMap, - removed_nodes: HashSet, + pub(crate) updated_nodes: HashMap, + pub(crate) removed_nodes: HashSet, } #[cfg(test)] @@ -1560,7 +1589,7 @@ mod tests { TrieMask::new(0b11), )); - let mut sparse = RevealedSparseTrie::from_root(branch.clone()).unwrap(); + let mut sparse = RevealedSparseTrie::from_root(branch.clone(), false).unwrap(); // Reveal a branch node and one of its children // @@ -1722,6 +1751,7 @@ mod tests { .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + false, ) .unwrap(); @@ -1796,6 +1826,7 @@ mod tests { .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + false, ) .unwrap(); @@ -1866,6 +1897,7 @@ mod tests { .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + false, ) .unwrap(); @@ -1993,4 +2025,44 @@ mod tests { assert_eq!(sparse_root, hash_builder.root()); assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); } + + #[test] + fn sparse_trie_wipe() { + let mut sparse = RevealedSparseTrie::default().with_updates(true); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + // Extension (Key = 5) – Level 0 + // └── Branch (Mask = 1011) – Level 1 + // ├── 0 -> Extension (Key = 23) – Level 2 + // │ └── Branch (Mask = 0101) – Level 3 + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) – Level 4 + // │ └── 3 -> Leaf (Key = 3, Path = 50233) – Level 4 + // ├── 2 -> Leaf (Key = 013, Path = 52013) – Level 2 + // └── 3 -> Branch (Mask = 0101) – Level 2 + // ├── 1 -> Leaf (Key = 3102, Path = 53102) – Level 3 + // └── 3 -> Branch (Mask = 1010) – Level 3 + // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 + // └── 2 -> Leaf (Key = 3320, Path = 53320) 
– Level 4 + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); + + sparse.wipe(); + + assert_eq!(sparse.root(), EMPTY_ROOT_HASH); + } } diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/trie/src/updates.rs index 6d1bcab63d8..e7bc490647c 100644 --- a/crates/trie/trie/src/updates.rs +++ b/crates/trie/trie/src/updates.rs @@ -6,11 +6,14 @@ use std::collections::{HashMap, HashSet}; #[derive(PartialEq, Eq, Clone, Default, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct TrieUpdates { + /// Collection of updated intermediate account nodes indexed by full path. #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))] - pub(crate) account_nodes: HashMap, + pub account_nodes: HashMap, + /// Collection of removed intermediate account nodes indexed by full path. #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))] - pub(crate) removed_nodes: HashSet, - pub(crate) storage_tries: HashMap, + pub removed_nodes: HashSet, + /// Collection of updated storage tries indexed by the hashed address. + pub storage_tries: HashMap, } impl TrieUpdates { @@ -113,13 +116,13 @@ impl TrieUpdates { #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct StorageTrieUpdates { /// Flag indicating whether the trie was deleted. - pub(crate) is_deleted: bool, + pub is_deleted: bool, /// Collection of updated storage trie nodes. 
#[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))] - pub(crate) storage_nodes: HashMap, + pub storage_nodes: HashMap, /// Collection of removed storage trie nodes. #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))] - pub(crate) removed_nodes: HashSet, + pub removed_nodes: HashSet, } #[cfg(feature = "test-utils")] From 863c5233fcff3fc5335640d8ef86cf85bef9a2a4 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 25 Nov 2024 17:29:25 +0400 Subject: [PATCH 676/970] feat: `ChainStorageReader` (#12836) --- crates/chain-state/Cargo.toml | 18 +- crates/chain-state/src/in_memory.rs | 13 +- crates/optimism/node/src/node.rs | 31 +++- crates/primitives/src/block.rs | 6 - crates/prune/prune/src/segments/mod.rs | 7 +- crates/storage/errors/src/provider.rs | 2 + .../src/providers/blockchain_provider.rs | 2 + .../provider/src/providers/consistent.rs | 30 +--- .../provider/src/providers/database/chain.rs | 24 ++- .../src/providers/database/provider.rs | 161 +++++++----------- crates/storage/provider/src/providers/mod.rs | 7 +- crates/storage/storage-api/src/chain.rs | 81 ++++++++- .../transaction-pool/src/blobstore/tracker.rs | 2 + 13 files changed, 227 insertions(+), 157 deletions(-) diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 2f4bed00f95..54f7ac43de1 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -57,13 +57,13 @@ revm.workspace = true [features] test-utils = [ - "alloy-signer", - "alloy-signer-local", - "rand", - "revm", - "reth-chainspec/test-utils", - "reth-primitives/test-utils", - "reth-trie/test-utils", - "revm?/test-utils", - "reth-primitives-traits/test-utils" + "alloy-signer", + "alloy-signer-local", + "rand", + "revm", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-trie/test-utils", + "revm?/test-utils", ] diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 
f9a10fc5543..933439a7c13 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -15,6 +15,7 @@ use reth_primitives::{ BlockWithSenders, NodePrimitives, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, }; +use reth_primitives_traits::BlockBody as _; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; @@ -547,8 +548,13 @@ where /// Returns a `TransactionSigned` for the given `TxHash` if found. pub fn transaction_by_hash(&self, hash: TxHash) -> Option { for block_state in self.canonical_chain() { - if let Some(tx) = - block_state.block_ref().block().body.transactions().find(|tx| tx.hash() == hash) + if let Some(tx) = block_state + .block_ref() + .block() + .body + .transactions() + .iter() + .find(|tx| tx.hash() == hash) { return Some(tx.clone()) } @@ -568,6 +574,7 @@ where .block() .body .transactions() + .iter() .enumerate() .find(|(_, tx)| tx.hash() == tx_hash) { @@ -748,6 +755,7 @@ impl BlockState { .block() .body .transactions() + .iter() .find(|tx| tx.hash() == hash) .cloned() }) @@ -764,6 +772,7 @@ impl BlockState { .block() .body .transactions() + .iter() .enumerate() .find(|(_, tx)| tx.hash() == tx_hash) .map(|(index, tx)| { diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 82b2ce2ebc2..cc0a61833e4 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; -use reth_chainspec::{EthChainSpec, Hardforks}; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_db::transaction::{DbTx, DbTxMut}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; @@ 
-32,8 +32,8 @@ use reth_optimism_rpc::{ use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::BlockBody; use reth_provider::{ - providers::ChainStorage, BlockBodyWriter, CanonStateSubscriptions, DBProvider, EthStorage, - ProviderResult, + providers::ChainStorage, BlockBodyReader, BlockBodyWriter, CanonStateSubscriptions, + ChainSpecProvider, DBProvider, EthStorage, ProviderResult, ReadBodyInput, }; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -72,7 +72,31 @@ impl> BlockBodyWriter for } } +impl> + BlockBodyReader for OpStorage +{ + type Block = reth_primitives::Block; + + fn read_block_bodies( + &self, + provider: &Provider, + inputs: Vec>, + ) -> ProviderResult> { + self.0.read_block_bodies(provider, inputs) + } +} + impl ChainStorage for OpStorage { + fn reader( + &self, + ) -> impl reth_provider::ChainStorageReader, OpPrimitives> + where + TX: DbTx + 'static, + Types: reth_provider::providers::NodeTypesForProvider, + { + self + } + fn writer( &self, ) -> impl reth_provider::ChainStorageWriter, OpPrimitives> @@ -83,6 +107,7 @@ impl ChainStorage for OpStorage { self } } + /// Type configuration for a regular Optimism node. #[derive(Debug, Default, Clone)] #[non_exhaustive] diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index b381b7dd807..d7babfc6289 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -661,12 +661,6 @@ impl BlockBody { pub fn blob_versioned_hashes(&self) -> Vec<&B256> { self.blob_versioned_hashes_iter().collect() } - - /// Returns an iterator over all transactions. 
- #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { - self.transactions.iter() - } } impl InMemorySize for BlockBody { diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index b3b40aab5b3..e828512fa82 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -148,6 +148,7 @@ impl PruneInput { mod tests { use super::*; use alloy_primitives::B256; + use reth_primitives_traits::BlockBody; use reth_provider::{ providers::BlockchainProvider2, test_utils::{create_test_provider_factory, MockEthProvider}, @@ -245,7 +246,7 @@ mod tests { // Calculate the total number of transactions let num_txs = - blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::(); assert_eq!(range, 0..=num_txs - 1); } @@ -292,7 +293,7 @@ mod tests { // Calculate the total number of transactions let num_txs = - blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::(); assert_eq!(range, 0..=num_txs - 1,); } @@ -327,7 +328,7 @@ mod tests { // Get the last tx number // Calculate the total number of transactions let num_txs = - blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::(); let max_range = num_txs - 1; // Create a prune input with a previous checkpoint that is the last tx number diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 9e6720b8440..e69c0343f56 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -133,6 +133,8 @@ pub enum ProviderError { StorageLockError(StorageLockError), /// Storage writer error. 
UnifiedStorageWriterError(UnifiedStorageWriterError), + /// Received invalid output from configured storage implementation. + InvalidStorageOutput, } impl From for ProviderError { diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 6801abee40d..d90b227c112 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -30,6 +30,7 @@ use reth_primitives::{ Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; +use reth_primitives_traits::BlockBody as _; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{DBProvider, StorageChangeSetReader}; @@ -796,6 +797,7 @@ mod tests { use reth_primitives::{ BlockExt, Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, }; + use reth_primitives_traits::BlockBody as _; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, ReceiptProvider, diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index fc9d739b0fe..740392ad993 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -977,7 +977,7 @@ impl TransactionsProvider for ConsistentProvider { fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(tx) = self.head_block.as_ref().and_then(|b| b.transaction_on_chain(hash)) { - return Ok(Some(tx.into())) + return Ok(Some(tx)) } self.storage_provider.transaction_by_hash(hash) @@ -990,7 +990,7 @@ impl TransactionsProvider for ConsistentProvider { if let Some((tx, meta)) = 
self.head_block.as_ref().and_then(|b| b.transaction_meta_on_chain(tx_hash)) { - return Ok(Some((tx.into(), meta))) + return Ok(Some((tx, meta))) } self.storage_provider.transaction_by_hash_with_meta(tx_hash) @@ -1011,18 +1011,7 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |provider| provider.transactions_by_block(id), - |block_state| { - Ok(Some( - block_state - .block_ref() - .block() - .body - .transactions - .iter() - .map(|tx| tx.clone().into()) - .collect(), - )) - }, + |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), ) } @@ -1033,18 +1022,7 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| { - Some( - block_state - .block_ref() - .block() - .body - .transactions - .iter() - .map(|tx| tx.clone().into()) - .collect(), - ) - }, + |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), |_| true, ) } diff --git a/crates/storage/provider/src/providers/database/chain.rs b/crates/storage/provider/src/providers/database/chain.rs index 8f9a6395a9d..57bc2e0b5ce 100644 --- a/crates/storage/provider/src/providers/database/chain.rs +++ b/crates/storage/provider/src/providers/database/chain.rs @@ -1,25 +1,41 @@ -use crate::{providers::NodeTypes, DatabaseProvider}; +use crate::{providers::NodeTypesForProvider, DatabaseProvider}; use reth_db::transaction::{DbTx, DbTxMut}; use reth_node_types::FullNodePrimitives; use reth_primitives::EthPrimitives; -use reth_storage_api::{ChainStorageWriter, EthStorage}; +use reth_storage_api::{ChainStorageReader, ChainStorageWriter, EthStorage}; /// Trait that provides access to implementations of [`ChainStorage`] pub trait ChainStorage: Send + Sync { + /// Provides access to the chain reader. 
+ fn reader(&self) -> impl ChainStorageReader, Primitives> + where + TX: DbTx + 'static, + Types: NodeTypesForProvider; + /// Provides access to the chain writer. fn writer(&self) -> impl ChainStorageWriter, Primitives> where TX: DbTxMut + DbTx + 'static, - Types: NodeTypes; + Types: NodeTypesForProvider; } impl ChainStorage for EthStorage { + fn reader( + &self, + ) -> impl ChainStorageReader, EthPrimitives> + where + TX: DbTx + 'static, + Types: NodeTypesForProvider, + { + self + } + fn writer( &self, ) -> impl ChainStorageWriter, EthPrimitives> where TX: DbTxMut + DbTx + 'static, - Types: NodeTypes, + Types: NodeTypesForProvider, { self } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index d45b4e53124..47d9308283f 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -50,12 +50,14 @@ use reth_node_types::{BlockTy, NodeTypes, TxTy}; use reth_primitives::{ Account, Block, BlockBody, BlockExt, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, - TransactionMeta, TransactionSigned, TransactionSignedNoHash, + TransactionMeta, TransactionSignedNoHash, }; use reth_primitives_traits::{BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider}; +use reth_storage_api::{ + BlockBodyReader, StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider, +}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, @@ -517,21 +519,11 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
, HF: FnOnce(BlockNumber) -> ProviderResult>, - BF: FnOnce( - H, - Vec, - Vec
, - Vec
, - Option, - ) -> ProviderResult>, + BF: FnOnce(H, BlockBody, Vec
) -> ProviderResult>, { let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; let Some(header) = header_by_number(block_number)? else { return Ok(None) }; - let ommers = self.ommers(block_number.into())?.unwrap_or_default(); - let withdrawals = - self.withdrawals_by_block(block_number.into(), header.as_ref().timestamp)?; - // Get the block body // // If the body indices are not found, this means that the transactions either do not exist @@ -548,9 +540,14 @@ impl DatabaseProvider { (self.transactions_by_tx_range(tx_range.clone())?, self.senders_by_tx_range(tx_range)?) }; - let body = transactions.into_iter().map(Into::into).collect(); + let body = self + .storage + .reader() + .read_block_bodies(self, vec![(header.as_ref(), transactions)])? + .pop() + .ok_or(ProviderError::InvalidStorageOutput)?; - construct_block(header, body, senders, ommers, withdrawals) + construct_block(header, body, senders) } /// Returns a range of blocks from the database. @@ -572,7 +569,7 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
, HF: FnOnce(RangeInclusive) -> ProviderResult>, - F: FnMut(H, Range, Vec
, Option) -> ProviderResult, + F: FnMut(H, BlockBody, Range) -> ProviderResult, { if range.is_empty() { return Ok(Vec::new()) @@ -582,50 +579,41 @@ impl DatabaseProvider { let mut blocks = Vec::with_capacity(len); let headers = headers_range(range)?; - let mut ommers_cursor = self.tx.cursor_read::()?; - let mut withdrawals_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; let mut block_body_cursor = self.tx.cursor_read::()?; + let mut present_headers = Vec::new(); for header in headers { - let header_ref = header.as_ref(); // If the body indices are not found, this means that the transactions either do // not exist in the database yet, or they do exit but are // not indexed. If they exist but are not indexed, we don't // have enough information to return the block anyways, so // we skip the block. if let Some((_, block_body_indices)) = - block_body_cursor.seek_exact(header_ref.number)? + block_body_cursor.seek_exact(header.as_ref().number)? { let tx_range = block_body_indices.tx_num_range(); - - // If we are past shanghai, then all blocks should have a withdrawal list, - // even if empty - let withdrawals = - if self.chain_spec.is_shanghai_active_at_timestamp(header_ref.timestamp) { - withdrawals_cursor - .seek_exact(header_ref.number)? - .map(|(_, w)| w.withdrawals) - .unwrap_or_default() - .into() - } else { - None - }; - let ommers = - if self.chain_spec.final_paris_total_difficulty(header_ref.number).is_some() { - Vec::new() - } else { - ommers_cursor - .seek_exact(header_ref.number)? - .map(|(_, o)| o.ommers) - .unwrap_or_default() - }; - - if let Ok(b) = assemble_block(header, tx_range, ommers, withdrawals) { - blocks.push(b); - } + present_headers.push((header, tx_range)); } } + let mut inputs = Vec::new(); + for (header, tx_range) in &present_headers { + let transactions = if tx_range.is_empty() { + Vec::new() + } else { + self.transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? 
+ }; + + inputs.push((header.as_ref(), transactions)); + } + + let bodies = self.storage.reader().read_block_bodies(self, inputs)?; + + for ((header, tx_range), body) in present_headers.into_iter().zip(bodies) { + blocks.push(assemble_block(header, body, tx_range)?); + } + Ok(blocks) } @@ -649,34 +637,22 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
, HF: Fn(RangeInclusive) -> ProviderResult>, - BF: Fn( - H, - Vec, - Vec
, - Option, - Vec
, - ) -> ProviderResult, + BF: Fn(H, BlockBody, Vec
) -> ProviderResult, { - let mut tx_cursor = self.tx.cursor_read::>>()?; let mut senders_cursor = self.tx.cursor_read::()?; - self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals| { - let (body, senders) = if tx_range.is_empty() { - (Vec::new(), Vec::new()) + self.block_range(range, headers_range, |header, body, tx_range| { + let senders = if tx_range.is_empty() { + Vec::new() } else { - let body = self - .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect::>(); // fetch senders from the senders table let known_senders = senders_cursor .walk_range(tx_range.clone())? .collect::, _>>()?; - let mut senders = Vec::with_capacity(body.len()); - for (tx_num, tx) in tx_range.zip(body.iter()) { + let mut senders = Vec::with_capacity(body.transactions.len()); + for (tx_num, tx) in tx_range.zip(body.transactions()) { match known_senders.get(&tx_num) { None => { // recover the sender from the transaction if not found @@ -689,10 +665,10 @@ impl DatabaseProvider { } } - (body, senders) + senders }; - assemble_block(header, body, ommers, withdrawals, senders) + assemble_block(header, body, senders) }) } @@ -1230,21 +1206,22 @@ impl BlockReader for DatabaseProvid fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { if let Some(number) = self.convert_hash_or_number(id)? { if let Some(header) = self.header_by_number(number)? { - let withdrawals = self.withdrawals_by_block(number.into(), header.timestamp)?; - let ommers = self.ommers(number.into())?.unwrap_or_default(); // If the body indices are not found, this means that the transactions either do not // exist in the database yet, or they do exit but are not indexed. // If they exist but are not indexed, we don't have enough // information to return the block anyways, so we return `None`. - let transactions = match self.transactions_by_block(number.into())? 
{ - Some(transactions) => transactions.into_iter().map(Into::into).collect(), - None => return Ok(None), + let Some(transactions) = self.transactions_by_block(number.into())? else { + return Ok(None) }; - return Ok(Some(Block { - header, - body: BlockBody { transactions, ommers, withdrawals }, - })) + let body = self + .storage + .reader() + .read_block_bodies(self, vec![(&header, transactions)])? + .pop() + .ok_or(ProviderError::InvalidStorageOutput)?; + + return Ok(Some(Block { header, body })) } } @@ -1303,8 +1280,8 @@ impl BlockReader for DatabaseProvid id, transaction_kind, |block_number| self.header_by_number(block_number), - |header, transactions, senders, ommers, withdrawals| { - Block { header, body: BlockBody { transactions, ommers, withdrawals } } + |header, body, senders| { + Block { header, body } // Note: we're using unchecked here because we know the block contains valid txs // wrt to its height and can ignore the s value check so pre // EIP-2 txs are allowed @@ -1324,8 +1301,8 @@ impl BlockReader for DatabaseProvid id, transaction_kind, |block_number| self.sealed_header(block_number), - |header, transactions, senders, ommers, withdrawals| { - SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } } + |header, body, senders| { + SealedBlock { header, body } // Note: we're using unchecked here because we know the block contains valid txs // wrt to its height and can ignore the s value check so pre // EIP-2 txs are allowed @@ -1337,21 +1314,10 @@ impl BlockReader for DatabaseProvid } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - let mut tx_cursor = self.tx.cursor_read::>>()?; self.block_range( range, |range| self.headers_range(range), - |header, tx_range, ommers, withdrawals| { - let transactions = if tx_range.is_empty() { - Vec::new() - } else { - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? 
- .into_iter() - .map(Into::into) - .collect() - }; - Ok(Block { header, body: BlockBody { transactions, ommers, withdrawals } }) - }, + |header, body, _| Ok(Block { header, body }), ) } @@ -1362,8 +1328,8 @@ impl BlockReader for DatabaseProvid self.block_with_senders_range( range, |range| self.headers_range(range), - |header, transactions, ommers, withdrawals, senders| { - Block { header, body: BlockBody { transactions, ommers, withdrawals } } + |header, body, senders| { + Block { header, body } .try_with_senders_unchecked(senders) .map_err(|_| ProviderError::SenderRecoveryError) }, @@ -1377,12 +1343,9 @@ impl BlockReader for DatabaseProvid self.block_with_senders_range( range, |range| self.sealed_headers_range(range), - |header, transactions, ommers, withdrawals, senders| { - SealedBlockWithSenders::new( - SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } }, - senders, - ) - .ok_or(ProviderError::SenderRecoveryError) + |header, body, senders| { + SealedBlockWithSenders::new(SealedBlock { header, body }, senders) + .ok_or(ProviderError::SenderRecoveryError) }, ) } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 6d8e3ed5e17..30cac220d8b 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -20,7 +20,6 @@ use reth_blockchain_tree_api::{ }; use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubscriptions}; use reth_chainspec::{ChainInfo, EthereumHardforks}; -use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB, TxTy}; @@ -77,8 +76,9 @@ where ChainSpec: EthereumHardforks, Storage: ChainStorage, Primitives: FullNodePrimitives< - SignedTx: Value + From + Into, + SignedTx = TransactionSigned, BlockHeader = alloy_consensus::Header, + BlockBody = 
reth_primitives::BlockBody, >, >, { @@ -89,8 +89,9 @@ impl NodeTypesForProvider for T where ChainSpec: EthereumHardforks, Storage: ChainStorage, Primitives: FullNodePrimitives< - SignedTx: Value + From + Into, + SignedTx = TransactionSigned, BlockHeader = alloy_consensus::Header, + BlockBody = reth_primitives::BlockBody, >, > { diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index d5228bdddf7..baee2f870a9 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -1,10 +1,11 @@ use crate::DBProvider; use alloy_primitives::BlockNumber; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_db::{ - cursor::DbCursorRW, + cursor::{DbCursorRO, DbCursorRW}, models::{StoredBlockOmmers, StoredBlockWithdrawals}, tables, - transaction::DbTxMut, + transaction::{DbTx, DbTxMut}, DbTxUnwindExt, }; use reth_primitives_traits::{Block, BlockBody, FullNodePrimitives}; @@ -41,6 +42,38 @@ impl ChainStorageWriter = + (&'a ::Header, Vec<<::Body as BlockBody>::Transaction>); + +/// Trait that implements how block bodies are read from the storage. +/// +/// Note: Within the current abstraction, transactions persistence is handled separately, thus this +/// trait is provided with transactions read beforehand and is expected to construct the block body +/// from those transactions and additional data read from elsewhere. +#[auto_impl::auto_impl(&, Arc)] +pub trait BlockBodyReader { + /// The block type. + type Block: Block; + + /// Receives a list of block headers along with block transactions and returns the block bodies. + fn read_block_bodies( + &self, + provider: &Provider, + inputs: Vec>, + ) -> ProviderResult::Body>>; +} + +/// Trait that implements how chain-specific types are read from storage. +pub trait ChainStorageReader: + BlockBodyReader +{ +} +impl ChainStorageReader for T where + T: BlockBodyReader +{ +} /// Ethereum storage implementation. 
#[derive(Debug, Default, Clone, Copy)] pub struct EthStorage; @@ -89,3 +122,47 @@ where Ok(()) } } + +impl BlockBodyReader for EthStorage +where + Provider: DBProvider + ChainSpecProvider, +{ + type Block = reth_primitives::Block; + + fn read_block_bodies( + &self, + provider: &Provider, + inputs: Vec>, + ) -> ProviderResult::Body>> { + // TODO: Ideally storage should hold its own copy of chain spec + let chain_spec = provider.chain_spec(); + + let mut ommers_cursor = provider.tx_ref().cursor_read::()?; + let mut withdrawals_cursor = provider.tx_ref().cursor_read::()?; + + let mut bodies = Vec::with_capacity(inputs.len()); + + for (header, transactions) in inputs { + // If we are past shanghai, then all blocks should have a withdrawal list, + // even if empty + let withdrawals = if chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { + withdrawals_cursor + .seek_exact(header.number)? + .map(|(_, w)| w.withdrawals) + .unwrap_or_default() + .into() + } else { + None + }; + let ommers = if chain_spec.final_paris_total_difficulty(header.number).is_some() { + Vec::new() + } else { + ommers_cursor.seek_exact(header.number)?.map(|(_, o)| o.ommers).unwrap_or_default() + }; + + bodies.push(reth_primitives::BlockBody { transactions, ommers, withdrawals }); + } + + Ok(bodies) + } +} diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index 0f48c89a499..b3670496b5a 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -2,6 +2,7 @@ use alloy_primitives::{BlockNumber, B256}; use reth_execution_types::ChainBlocks; +use reth_primitives_traits::BlockBody as _; use std::collections::BTreeMap; /// The type that is used to track canonical blob transactions. 
@@ -42,6 +43,7 @@ impl BlobStoreCanonTracker { let iter = block .body .transactions() + .iter() .filter(|tx| tx.transaction.is_eip4844()) .map(|tx| tx.hash()); (*num, iter) From 9f37d40b78863f02c78c4e8c99002eb01425cf2c Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 25 Nov 2024 17:44:00 +0400 Subject: [PATCH 677/970] feat: enforce relations between block parts on NodePrimitives (#12846) --- crates/primitives-traits/src/node.rs | 46 ++----------------- .../provider/src/providers/static_file/mod.rs | 6 +-- .../storage/provider/src/test_utils/noop.rs | 6 +-- 3 files changed, 11 insertions(+), 47 deletions(-) diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index fa1236b6dd3..f109fdc9b24 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,7 +1,8 @@ use core::fmt; use crate::{ - FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx, FullTxType, MaybeSerde, + Block, BlockBody, BlockHeader, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, + FullSignedTx, FullTxType, MaybeSerde, }; /// Configures all the primitive types of the node. @@ -9,38 +10,11 @@ pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static { /// Block primitive. - type Block: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + MaybeSerde - + 'static; + type Block: Block
; /// Block header primitive. - type BlockHeader: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + MaybeSerde - + 'static; + type BlockHeader: BlockHeader; /// Block body primitive. - type BlockBody: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + MaybeSerde - + 'static; + type BlockBody: BlockBody; /// Signed version of the transaction type. type SignedTx: Send + Sync + Unpin + Clone + fmt::Debug + PartialEq + Eq + MaybeSerde + 'static; /// Transaction envelope type ID. @@ -57,16 +31,6 @@ pub trait NodePrimitives: + MaybeSerde + 'static; } - -impl NodePrimitives for () { - type Block = (); - type BlockHeader = (); - type BlockBody = (); - type SignedTx = (); - type TxType = (); - type Receipt = (); -} - /// Helper trait that sets trait bounds on [`NodePrimitives`]. pub trait FullNodePrimitives where diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 673451de65f..71c6bf755e2 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -172,7 +172,7 @@ mod tests { // [ Headers Creation and Commit ] { - let sf_rw = StaticFileProvider::<()>::read_write(&static_dir) + let sf_rw = StaticFileProvider::::read_write(&static_dir) .expect("Failed to create static file provider") .with_custom_blocks_per_file(blocks_per_file); @@ -191,8 +191,8 @@ mod tests { // Helper function to prune headers and validate truncation results fn prune_and_validate( - writer: &mut StaticFileProviderRWRefMut<'_, ()>, - sf_rw: &StaticFileProvider<()>, + writer: &mut StaticFileProviderRWRefMut<'_, EthPrimitives>, + sf_rw: &StaticFileProvider, static_dir: impl AsRef, prune_count: u64, expected_tip: Option, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 9a88c8c9ab7..fa1c6bad74f 100644 --- 
a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -22,8 +22,8 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, + Account, Block, BlockWithSenders, Bytecode, EthPrimitives, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -558,7 +558,7 @@ impl PruneCheckpointReader for NoopProvider { } impl StaticFileProviderFactory for NoopProvider { - type Primitives = (); + type Primitives = EthPrimitives; fn static_file_provider(&self) -> StaticFileProvider { StaticFileProvider::read_only(PathBuf::default(), false).unwrap() From 6b088bd8819c9e1d66efc0a050c8c98e37ffd457 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 25 Nov 2024 14:15:12 +0000 Subject: [PATCH 678/970] perf(engine): sparse trie calculation for state root task (#12843) --- Cargo.lock | 1 + Cargo.toml | 1 + crates/engine/tree/Cargo.toml | 45 ++++++++------- crates/engine/tree/src/tree/root.rs | 90 +++++++++++++++++++++++++++-- crates/trie/common/src/proofs.rs | 32 ++++++++-- 5 files changed, 139 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3ccf46b34e..c8d4d2b5152 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7281,6 +7281,7 @@ dependencies = [ "reth-tracing", "reth-trie", "reth-trie-parallel", + "reth-trie-sparse", "revm-primitives", "thiserror 1.0.69", "tokio", diff --git a/Cargo.toml b/Cargo.toml index ad17ea4ad0c..113d0661f3f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -419,6 +419,7 @@ reth-trie = { path = "crates/trie/trie" } reth-trie-common = { path = "crates/trie/common" } reth-trie-db = { path = 
"crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } +reth-trie-sparse = { path = "crates/trie/sparse" } # revm revm = { version = "18.0.0", features = ["std"], default-features = false } diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 70be84a9f79..5242268b175 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -13,41 +13,43 @@ workspace = true [dependencies] # reth reth-beacon-consensus.workspace = true -reth-blockchain-tree.workspace = true reth-blockchain-tree-api.workspace = true +reth-blockchain-tree.workspace = true reth-chain-state.workspace = true -reth-consensus.workspace = true reth-chainspec.workspace = true +reth-consensus.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true -reth-payload-builder.workspace = true reth-payload-builder-primitives.workspace = true +reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true -reth-primitives.workspace = true reth-primitives-traits.workspace = true +reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-revm.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true -reth-trie.workspace = true reth-trie-parallel.workspace = true +reth-trie-sparse.workspace = true +reth-trie.workspace = true # alloy -alloy-primitives.workspace = true +alloy-consensus.workspace = true alloy-eips.workspace = true +alloy-primitives.workspace = true +alloy-rlp.workspace = true alloy-rpc-types-engine.workspace = true -alloy-consensus.workspace = true revm-primitives.workspace = true # common futures.workspace = true -tokio = { workspace = true, features = ["macros", "sync"] } thiserror.workspace = true +tokio = { workspace = true, features = ["macros", "sync"] } # metrics metrics.workspace = true @@ -64,20 +66,21 @@ reth-tracing = { 
workspace = true, optional = true } [dev-dependencies] # reth -reth-db = { workspace = true, features = ["test-utils"] } reth-chain-state = { workspace = true, features = ["test-utils"] } +reth-chainspec.workspace = true +reth-db = { workspace = true, features = ["test-utils"] } reth-ethereum-engine-primitives.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-exex-types.workspace = true reth-network-p2p = { workspace = true, features = ["test-utils"] } -reth-prune.workspace = true reth-prune-types.workspace = true +reth-prune.workspace = true reth-rpc-types-compat.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } reth-static-file.workspace = true reth-tracing.workspace = true -reth-chainspec.workspace = true +# alloy alloy-rlp.workspace = true assert_matches.workspace = true @@ -90,23 +93,23 @@ harness = false [features] test-utils = [ - "reth-db/test-utils", - "reth-chain-state/test-utils", - "reth-network-p2p/test-utils", - "reth-prune-types", - "reth-stages/test-utils", - "reth-static-file", - "reth-tracing", "reth-blockchain-tree/test-utils", + "reth-chain-state/test-utils", "reth-chainspec/test-utils", "reth-consensus/test-utils", + "reth-db/test-utils", "reth-evm/test-utils", + "reth-network-p2p/test-utils", "reth-payload-builder/test-utils", + "reth-primitives-traits/test-utils", "reth-primitives/test-utils", + "reth-provider/test-utils", + "reth-prune-types", + "reth-prune-types?/test-utils", "reth-revm/test-utils", "reth-stages-api/test-utils", - "reth-provider/test-utils", + "reth-stages/test-utils", + "reth-static-file", + "reth-tracing", "reth-trie/test-utils", - "reth-prune-types?/test-utils", - "reth-primitives-traits/test-utils", ] diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 45cf5a78031..27f835ec754 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,15 +1,27 @@ //! 
State root task related functionality. +use alloy_primitives::map::FbHashMap; +use alloy_rlp::{BufMut, Encodable}; use reth_provider::providers::ConsistentDbView; -use reth_trie::{updates::TrieUpdates, TrieInput}; +use reth_trie::{ + updates::TrieUpdates, HashedPostState, MultiProof, Nibbles, TrieAccount, TrieInput, + EMPTY_ROOT_HASH, +}; use reth_trie_parallel::root::ParallelStateRootError; -use revm_primitives::{EvmState, B256}; -use std::sync::{ - mpsc::{self, Receiver, RecvError}, - Arc, +use reth_trie_sparse::{SparseStateTrie, SparseStateTrieResult}; +use revm_primitives::{map::FbHashSet, EvmState, B256}; +use std::{ + sync::{ + mpsc::{self, Receiver, RecvError}, + Arc, + }, + time::{Duration, Instant}, }; use tracing::debug; +/// The level below which the sparse trie hashes are calculated in [`update_sparse_trie`]. +const SPARSE_TRIE_INCREMENTAL_LEVEL: usize = 2; + /// Result of the state root calculation pub(crate) type StateRootResult = Result<(B256, TrieUpdates), ParallelStateRootError>; @@ -133,6 +145,74 @@ where } } +/// Updates the sparse trie with the given proofs and state, and returns the updated trie and the +/// time it took. +#[allow(dead_code)] +fn update_sparse_trie( + mut trie: Box, + multiproof: MultiProof, + targets: FbHashMap<32, FbHashSet<32>>, + state: HashedPostState, +) -> SparseStateTrieResult<(Box, Duration)> { + let started_at = Instant::now(); + + // Reveal new accounts and storage slots. + for (address, slots) in targets { + let path = Nibbles::unpack(address); + trie.reveal_account(address, multiproof.account_proof_nodes(&path))?; + + let storage_proofs = multiproof.storage_proof_nodes(address, slots); + + for (slot, proof) in storage_proofs { + trie.reveal_storage_slot(address, slot, proof)?; + } + } + + // Update storage slots with new values and calculate storage roots. 
+ let mut storage_roots = FbHashMap::default(); + for (address, storage) in state.storages { + if storage.wiped { + trie.wipe_storage(address)?; + storage_roots.insert(address, EMPTY_ROOT_HASH); + } + + for (slot, value) in storage.storage { + let slot_path = Nibbles::unpack(slot); + trie.update_storage_leaf( + address, + slot_path, + alloy_rlp::encode_fixed_size(&value).to_vec(), + )?; + } + + storage_roots.insert(address, trie.storage_root(address).unwrap()); + } + + // Update accounts with new values and include updated storage roots + for (address, account) in state.accounts { + let path = Nibbles::unpack(address); + + if let Some(account) = account { + let storage_root = storage_roots + .remove(&address) + .map(Some) + .unwrap_or_else(|| trie.storage_root(address)) + .unwrap_or(EMPTY_ROOT_HASH); + + let mut encoded = Vec::with_capacity(128); + TrieAccount::from((account, storage_root)).encode(&mut encoded as &mut dyn BufMut); + trie.update_account_leaf(path, encoded)?; + } else { + trie.remove_account_leaf(&path)?; + } + } + + trie.calculate_below_level(SPARSE_TRIE_INCREMENTAL_LEVEL); + let elapsed = started_at.elapsed(); + + Ok((trie, elapsed)) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index d0a5cd22042..8e014f6d8c6 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -26,6 +26,31 @@ pub struct MultiProof { } impl MultiProof { + /// Return the account proof nodes for the given account path. + pub fn account_proof_nodes(&self, path: &Nibbles) -> Vec<(Nibbles, Bytes)> { + self.account_subtree.matching_nodes_sorted(path) + } + + /// Return the storage proof nodes for the given storage slots of the account path. 
+ pub fn storage_proof_nodes( + &self, + hashed_address: B256, + slots: impl IntoIterator, + ) -> Vec<(B256, Vec<(Nibbles, Bytes)>)> { + self.storages + .get(&hashed_address) + .map(|storage_mp| { + slots + .into_iter() + .map(|slot| { + let nibbles = Nibbles::unpack(slot); + (slot, storage_mp.subtree.matching_nodes_sorted(&nibbles)) + }) + .collect() + }) + .unwrap_or_default() + } + /// Construct the account proof from the multiproof. pub fn account_proof( &self, @@ -37,10 +62,9 @@ impl MultiProof { // Retrieve the account proof. let proof = self - .account_subtree - .matching_nodes_iter(&nibbles) - .sorted_by(|a, b| a.0.cmp(b.0)) - .map(|(_, node)| node.clone()) + .account_proof_nodes(&nibbles) + .into_iter() + .map(|(_, node)| node) .collect::>(); // Inspect the last node in the proof. If it's a leaf node with matching suffix, From e2c42ae2426a44e6fd7d3fb71eacdc642b5902ad Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 25 Nov 2024 14:20:08 +0000 Subject: [PATCH 679/970] test(trie): use proof nodes helper (#12848) --- crates/trie/sparse/src/state.rs | 29 ++++------------------------- 1 file changed, 4 insertions(+), 25 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index b76da793710..d7e2f27b974 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -219,7 +219,6 @@ mod tests { use alloy_rlp::EMPTY_STRING_CODE; use arbitrary::Arbitrary; use assert_matches::assert_matches; - use itertools::Itertools; use rand::{rngs::StdRng, Rng, SeedableRng}; use reth_primitives_traits::Account; use reth_trie::{ @@ -314,18 +313,8 @@ mod tests { let storage_root = storage_hash_builder.root(); let proof_nodes = storage_hash_builder.take_proof_nodes(); - let storage_proof_1 = proof_nodes - .iter() - .filter(|(path, _)| path.is_empty() || slot_path_1.common_prefix_length(path) > 0) - .map(|(path, proof)| (path.clone(), proof.clone())) - .sorted_by_key(|(path, _)| path.clone()) - .collect::>(); - 
let storage_proof_2 = proof_nodes - .iter() - .filter(|(path, _)| path.is_empty() || slot_path_2.common_prefix_length(path) > 0) - .map(|(path, proof)| (path.clone(), proof.clone())) - .sorted_by_key(|(path, _)| path.clone()) - .collect::>(); + let storage_proof_1 = proof_nodes.matching_nodes_sorted(&slot_path_1); + let storage_proof_2 = proof_nodes.matching_nodes_sorted(&slot_path_2); let address_1 = b256!("1000000000000000000000000000000000000000000000000000000000000000"); let address_path_1 = Nibbles::unpack(address_1); @@ -346,18 +335,8 @@ mod tests { let root = hash_builder.root(); let proof_nodes = hash_builder.take_proof_nodes(); - let proof_1 = proof_nodes - .iter() - .filter(|(path, _)| path.is_empty() || address_path_1.common_prefix_length(path) > 0) - .map(|(path, proof)| (path.clone(), proof.clone())) - .sorted_by_key(|(path, _)| path.clone()) - .collect::>(); - let proof_2 = proof_nodes - .iter() - .filter(|(path, _)| path.is_empty() || address_path_2.common_prefix_length(path) > 0) - .map(|(path, proof)| (path.clone(), proof.clone())) - .sorted_by_key(|(path, _)| path.clone()) - .collect::>(); + let proof_1 = proof_nodes.matching_nodes_sorted(&address_path_1); + let proof_2 = proof_nodes.matching_nodes_sorted(&address_path_2); let mut sparse = SparseStateTrie::default().with_updates(true); sparse.reveal_account(address_1, proof_1).unwrap(); From c44e11b8adad05a3bcba0c2adad05c549ce36c1d Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 25 Nov 2024 18:28:56 +0400 Subject: [PATCH 680/970] feat: add `Block` AT to `BlockReader` (#12837) --- Cargo.lock | 1 + book/sources/exex/hello-world/src/bin/3.rs | 6 +- book/sources/exex/remote/src/exex.rs | 3 +- book/sources/exex/remote/src/exex_4.rs | 3 +- book/sources/exex/tracking-state/src/bin/1.rs | 4 +- book/sources/exex/tracking-state/src/bin/2.rs | 4 +- crates/consensus/beacon/src/engine/mod.rs | 14 +- crates/e2e-test-utils/Cargo.toml | 1 + crates/e2e-test-utils/src/node.rs | 14 +- 
crates/e2e-test-utils/src/rpc.rs | 9 +- crates/engine/tree/src/tree/mod.rs | 13 +- crates/engine/util/src/reorg.rs | 4 +- crates/ethereum/node/src/node.rs | 4 +- crates/exex/exex/Cargo.toml | 1 + crates/exex/exex/src/backfill/job.rs | 31 +-- crates/exex/exex/src/backfill/stream.rs | 16 +- crates/exex/exex/src/context.rs | 19 +- crates/exex/exex/src/dyn_context.rs | 3 +- crates/exex/exex/src/notifications.rs | 28 ++- crates/exex/test-utils/src/lib.rs | 4 +- crates/net/network/src/config.rs | 2 +- crates/net/network/src/eth_requests.rs | 11 +- crates/net/network/src/test_utils/testnet.rs | 6 +- crates/node/builder/src/builder/mod.rs | 4 +- crates/optimism/node/src/node.rs | 10 +- crates/optimism/node/src/txpool.rs | 4 +- crates/optimism/node/tests/it/priority.rs | 10 +- crates/optimism/rpc/src/eth/mod.rs | 6 +- crates/optimism/rpc/src/eth/pending_block.rs | 2 +- crates/primitives/src/block.rs | 5 + crates/primitives/src/lib.rs | 4 +- crates/prune/prune/src/builder.rs | 12 +- crates/prune/prune/src/segments/set.rs | 6 +- .../src/segments/user/transaction_lookup.rs | 4 +- crates/rpc/rpc-builder/Cargo.toml | 1 + crates/rpc/rpc-builder/src/eth.rs | 7 +- crates/rpc/rpc-builder/src/lib.rs | 28 ++- crates/rpc/rpc-engine-api/src/engine_api.rs | 14 +- crates/rpc/rpc-eth-api/src/helpers/block.rs | 31 ++- crates/rpc/rpc-eth-api/src/helpers/call.rs | 8 +- crates/rpc/rpc-eth-api/src/helpers/fee.rs | 5 +- .../rpc-eth-api/src/helpers/pending_block.rs | 10 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 5 +- .../rpc-eth-api/src/helpers/transaction.rs | 10 +- crates/rpc/rpc-eth-types/Cargo.toml | 2 +- crates/rpc/rpc-eth-types/src/cache/mod.rs | 21 +- crates/rpc/rpc-eth-types/src/logs_utils.rs | 3 +- crates/rpc/rpc-eth-types/src/pending_block.rs | 16 +- crates/rpc/rpc/src/debug.rs | 2 +- crates/rpc/rpc/src/eth/core.rs | 2 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 2 +- crates/rpc/rpc/src/eth/helpers/trace.rs | 3 +- crates/rpc/rpc/src/trace.rs | 4 +- 
crates/stages/stages/src/stages/execution.rs | 16 +- .../src/providers/blockchain_provider.rs | 41 ++-- .../provider/src/providers/consistent.rs | 41 ++-- .../provider/src/providers/database/mod.rs | 34 ++-- .../src/providers/database/provider.rs | 56 ++--- crates/storage/provider/src/providers/mod.rs | 40 ++-- .../src/providers/static_file/manager.rs | 36 ++-- .../storage/provider/src/test_utils/mock.rs | 2 + .../storage/provider/src/test_utils/noop.rs | 2 + crates/storage/provider/src/traits/full.rs | 6 +- crates/storage/storage-api/src/block.rs | 192 ++++++++++++++++-- crates/storage/storage-api/src/chain.rs | 1 + .../custom-payload-builder/src/generator.rs | 6 +- examples/custom-payload-builder/src/main.rs | 9 +- examples/db-access/src/main.rs | 5 +- examples/rpc-db/src/myrpc_ext.rs | 2 +- 69 files changed, 664 insertions(+), 267 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c8d4d2b5152..de9f923b41f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7095,6 +7095,7 @@ dependencies = [ "alloy-eips", "alloy-network", "alloy-primitives", + "alloy-rlp", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-signer", diff --git a/book/sources/exex/hello-world/src/bin/3.rs b/book/sources/exex/hello-world/src/bin/3.rs index 21bd25a56db..ebeaf6c84f1 100644 --- a/book/sources/exex/hello-world/src/bin/3.rs +++ b/book/sources/exex/hello-world/src/bin/3.rs @@ -1,10 +1,12 @@ use futures_util::TryStreamExt; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; -async fn my_exex(mut ctx: ExExContext) -> eyre::Result<()> { +async fn my_exex>>( + mut ctx: ExExContext, +) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.try_next().await? 
{ match ¬ification { ExExNotification::ChainCommitted { new } => { diff --git a/book/sources/exex/remote/src/exex.rs b/book/sources/exex/remote/src/exex.rs index 1ae4785db8b..00392b4dad1 100644 --- a/book/sources/exex/remote/src/exex.rs +++ b/book/sources/exex/remote/src/exex.rs @@ -3,6 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; +use reth::{primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -44,7 +45,7 @@ impl RemoteExEx for ExExService { } } -async fn remote_exex( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/remote/src/exex_4.rs b/book/sources/exex/remote/src/exex_4.rs index 24c7bf2c2f1..c37f26d739d 100644 --- a/book/sources/exex/remote/src/exex_4.rs +++ b/book/sources/exex/remote/src/exex_4.rs @@ -3,6 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; +use reth::{primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -46,7 +47,7 @@ impl RemoteExEx for ExExService { // ANCHOR: snippet #[allow(dead_code)] -async fn remote_exex( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/tracking-state/src/bin/1.rs b/book/sources/exex/tracking-state/src/bin/1.rs index 0d42e0791a1..2cf43bec3a1 100644 --- a/book/sources/exex/tracking-state/src/bin/1.rs +++ b/book/sources/exex/tracking-state/src/bin/1.rs @@ -5,7 +5,7 @@ use std::{ }; use futures_util::{FutureExt, TryStreamExt}; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use 
reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -14,7 +14,7 @@ struct MyExEx { ctx: ExExContext, } -impl Future for MyExEx { +impl>> Future for MyExEx { type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/book/sources/exex/tracking-state/src/bin/2.rs b/book/sources/exex/tracking-state/src/bin/2.rs index 9416810668f..b58d2a39c85 100644 --- a/book/sources/exex/tracking-state/src/bin/2.rs +++ b/book/sources/exex/tracking-state/src/bin/2.rs @@ -6,7 +6,7 @@ use std::{ use alloy_primitives::BlockNumber; use futures_util::{FutureExt, TryStreamExt}; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -25,7 +25,7 @@ impl MyExEx { } } -impl Future for MyExEx { +impl>> Future for MyExEx { type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 3d0f65423e4..13195a5885d 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,4 +1,4 @@ -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ @@ -21,7 +21,7 @@ use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, EthBlockClient, }; -use reth_node_types::NodeTypesWithEngine; +use reth_node_types::{Block, BlockTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; @@ -228,7 +228,7 @@ impl BeaconConsensusEngine where N: EngineNodeTypes, BT: BlockchainTreeEngine - + BlockReader + + BlockReader> 
+ BlockIdReader + CanonChainTracker + StageCheckpointReader @@ -946,7 +946,7 @@ where .blockchain .find_block_by_hash(safe_block_hash, BlockSource::Any)? .ok_or(ProviderError::UnknownBlockHash(safe_block_hash))?; - self.blockchain.set_safe(SealedHeader::new(safe.header, safe_block_hash)); + self.blockchain.set_safe(SealedHeader::new(safe.split().0, safe_block_hash)); } Ok(()) } @@ -966,9 +966,9 @@ where .blockchain .find_block_by_hash(finalized_block_hash, BlockSource::Any)? .ok_or(ProviderError::UnknownBlockHash(finalized_block_hash))?; - self.blockchain.finalize_block(finalized.number)?; + self.blockchain.finalize_block(finalized.header().number())?; self.blockchain - .set_finalized(SealedHeader::new(finalized.header, finalized_block_hash)); + .set_finalized(SealedHeader::new(finalized.split().0, finalized_block_hash)); } Ok(()) } @@ -1798,7 +1798,7 @@ where N: EngineNodeTypes, Client: EthBlockClient + 'static, BT: BlockchainTreeEngine - + BlockReader + + BlockReader> + BlockIdReader + CanonChainTracker + StageCheckpointReader diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 9c40e2ba99d..4619c357335 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -35,6 +35,7 @@ url.workspace = true # ethereum alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true futures-util.workspace = true diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 09c54a867ce..cbe69558e65 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,5 +1,6 @@ use std::{marker::PhantomData, pin::Pin}; +use alloy_consensus::{BlockHeader, Sealable}; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use alloy_rpc_types_eth::BlockNumberOrTag; use eyre::Ok; @@ -15,6 +16,7 @@ use reth::{ }, }; use reth_chainspec::EthereumHardforks; +use reth_node_api::{Block, FullBlock, 
NodePrimitives}; use reth_node_builder::{rpc::RethRpcAddOns, NodeTypes, NodeTypesWithEngine}; use reth_stages_types::StageId; use tokio_stream::StreamExt; @@ -51,7 +53,11 @@ impl NodeTestContext where Engine: EngineTypes, Node: FullNodeComponents, - Node::Types: NodeTypesWithEngine, + Node::Types: NodeTypesWithEngine< + ChainSpec: EthereumHardforks, + Engine = Engine, + Primitives: NodePrimitives, + >, Node::Network: PeersHandleProvider, AddOns: RethRpcAddOns, { @@ -178,7 +184,7 @@ where if check { if let Some(latest_block) = self.inner.provider.block_by_number(number)? { - assert_eq!(latest_block.hash_slow(), expected_block_hash); + assert_eq!(latest_block.header().hash_slow(), expected_block_hash); break } assert!( @@ -225,10 +231,10 @@ where if let Some(latest_block) = self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)? { - if latest_block.number == block_number { + if latest_block.header().number() == block_number { // make sure the block hash we submitted via FCU engine api is the new latest // block using an RPC call - assert_eq!(latest_block.hash_slow(), block_hash); + assert_eq!(latest_block.header().hash_slow(), block_hash); break } } diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 7b7dabdf240..a57861d2b14 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,6 +1,7 @@ use alloy_consensus::TxEnvelope; use alloy_network::eip2718::Decodable2718; use alloy_primitives::{Bytes, B256}; +use alloy_rlp::Encodable; use reth::{ builder::{rpc::RpcRegistry, FullNodeComponents}, rpc::api::{ @@ -12,6 +13,7 @@ use reth::{ }, }; use reth_chainspec::EthereumHardforks; +use reth_node_api::NodePrimitives; use reth_node_builder::NodeTypes; #[allow(missing_debug_implementations)] @@ -21,7 +23,12 @@ pub struct RpcTestContext { impl RpcTestContext where - Node: FullNodeComponents>, + Node: FullNodeComponents< + Types: NodeTypes< + ChainSpec: EthereumHardforks, + Primitives: 
NodePrimitives, + >, + >, EthApi: EthApiSpec + EthTransactions + TraceExt, { /// Injects a raw transaction into the node tx pool via RPC server diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 66eff882c92..f36ab3ea853 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -4,7 +4,7 @@ use crate::{ engine::{DownloadRequest, EngineApiEvent, FromEngine}, persistence::PersistenceHandle, }; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -536,7 +536,12 @@ impl std::fmt::Debug impl EngineApiTreeHandler where - P: DatabaseProviderFactory + BlockReader + StateProviderFactory + StateReader + Clone + 'static, + P: DatabaseProviderFactory + + BlockReader + + StateProviderFactory + + StateReader + + Clone + + 'static,

::Provider: BlockReader, E: BlockExecutorProvider, T: EngineTypes, @@ -1539,8 +1544,8 @@ where .ok_or_else(|| ProviderError::HeaderNotFound(hash.into()))?; let execution_output = self .provider - .get_state(block.number)? - .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number))?; + .get_state(block.number())? + .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number()))?; let hashed_state = execution_output.hash_state_slow(); Ok(Some(ExecutedBlock { diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 1e2451f24c3..46a5e08a738 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -109,7 +109,7 @@ impl Stream for EngineReorg>, Engine: EngineTypes, - Provider: BlockReader + StateProviderFactory, + Provider: BlockReader + StateProviderFactory, Evm: ConfigureEvm

, Spec: EthereumHardforks, { @@ -256,7 +256,7 @@ fn create_reorg_head( next_sidecar: ExecutionPayloadSidecar, ) -> RethResult<(ExecutionPayload, ExecutionPayloadSidecar)> where - Provider: BlockReader + StateProviderFactory, + Provider: BlockReader + StateProviderFactory, Evm: ConfigureEvm
, Spec: EthereumHardforks, { diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index a2ae2374b96..c4e1de2760f 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -53,7 +53,7 @@ impl EthereumNode { EthereumConsensusBuilder, > where - Node: FullNodeTypes>, + Node: FullNodeTypes>, ::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -304,7 +304,7 @@ pub struct EthereumNetworkBuilder { impl NetworkBuilder for EthereumNetworkBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, { async fn build_network( diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 3cbeb115b06..b70fb921599 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -35,6 +35,7 @@ reth-tasks.workspace = true reth-tracing.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 08d91e39197..7e670620472 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -4,12 +4,14 @@ use std::{ time::{Duration, Instant}, }; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use reth_evm::execute::{ BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_primitives::{Block, BlockExt, BlockWithSenders, Receipt}; -use reth_primitives_traits::format_gas_throughput; +use reth_node_api::{Block as _, BlockBody as _}; +use reth_primitives::{BlockExt, BlockWithSenders, Receipt}; +use reth_primitives_traits::{format_gas_throughput, SignedTransaction}; use reth_provider::{ BlockReader, Chain, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant, }; @@ -37,7 +39,9 @@ pub struct BackfillJob { impl Iterator for BackfillJob where E: 
BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + P: HeaderProvider + + BlockReader + + StateProviderFactory, { type Item = BackfillJobResult; @@ -53,7 +57,9 @@ where impl BackfillJob where E: BlockExecutorProvider, - P: BlockReader + HeaderProvider + StateProviderFactory, + P: BlockReader + + HeaderProvider + + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. pub fn into_single_blocks(self) -> SingleBlockBackfillJob { @@ -100,10 +106,10 @@ where fetch_block_duration += fetch_block_start.elapsed(); - cumulative_gas += block.gas_used; + cumulative_gas += block.gas_used(); // Configure the executor to use the current state. - trace!(target: "exex::backfill", number = block_number, txs = block.body.transactions.len(), "Executing block"); + trace!(target: "exex::backfill", number = block_number, txs = block.body.transactions().len(), "Executing block"); // Execute the block let execute_start = Instant::now(); @@ -111,8 +117,7 @@ where // Unseal the block for execution let (block, senders) = block.into_components(); let (unsealed_header, hash) = block.header.split(); - let block = - Block { header: unsealed_header, body: block.body }.with_senders_unchecked(senders); + let block = P::Block::new(unsealed_header, block.body).with_senders_unchecked(senders); executor.execute_and_verify_one((&block, td).into())?; execution_duration += execute_start.elapsed(); @@ -134,7 +139,7 @@ where } } - let last_block_number = blocks.last().expect("blocks should not be empty").number; + let last_block_number = blocks.last().expect("blocks should not be empty").number(); debug!( target: "exex::backfill", range = ?*self.range.start()..=last_block_number, @@ -165,7 +170,7 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = 
BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)>; @@ -177,7 +182,7 @@ where impl SingleBlockBackfillJob where E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. pub fn into_stream( @@ -189,7 +194,7 @@ where pub(crate) fn execute_block( &self, block_number: u64, - ) -> BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)> { + ) -> BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)> { let td = self .provider .header_td_by_number(block_number)? @@ -206,7 +211,7 @@ where self.provider.history_by_block_number(block_number.saturating_sub(1))?, )); - trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body.transactions.len(), "Executing block"); + trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body().transactions().len(), "Executing block"); let block_execution_output = executor.execute((&block_with_senders, td).into())?; diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index c55b8651daf..46177ceda12 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -103,7 +103,13 @@ where impl Stream for StreamBackfillJob where E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, + P: HeaderProvider + + BlockReader + + StateProviderFactory + + Clone + + Send + + Unpin + + 'static, { type Item = BackfillJobResult; @@ -136,7 +142,13 @@ where impl Stream for StreamBackfillJob where E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, + P: HeaderProvider + + BlockReader + + StateProviderFactory + + Clone + + Send + + Unpin + + 'static, { type Item = BackfillJobResult; diff 
--git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 4e0d9f5956c..3d303c9bbac 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -3,6 +3,7 @@ use reth_exex_types::ExExHead; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_node_core::node_config::NodeConfig; use reth_primitives::Head; +use reth_provider::BlockReader; use reth_tasks::TaskExecutor; use std::fmt::Debug; use tokio::sync::mpsc::UnboundedSender; @@ -56,7 +57,7 @@ where impl ExExContext where Node: FullNodeComponents, - Node::Provider: Debug, + Node::Provider: Debug + BlockReader, Node::Executor: Debug, { /// Returns dynamic version of the context @@ -106,13 +107,19 @@ where /// Sets notifications stream to [`crate::ExExNotificationsWithoutHead`], a stream of /// notifications without a head. - pub fn set_notifications_without_head(&mut self) { + pub fn set_notifications_without_head(&mut self) + where + Node::Provider: BlockReader, + { self.notifications.set_without_head(); } /// Sets notifications stream to [`crate::ExExNotificationsWithHead`], a stream of notifications /// with the provided head. 
- pub fn set_notifications_with_head(&mut self, head: ExExHead) { + pub fn set_notifications_with_head(&mut self, head: ExExHead) + where + Node::Provider: BlockReader, + { self.notifications.set_with_head(head); } } @@ -121,6 +128,7 @@ where mod tests { use reth_exex_types::ExExHead; use reth_node_api::FullNodeComponents; + use reth_provider::BlockReader; use crate::ExExContext; @@ -132,7 +140,10 @@ mod tests { ctx: ExExContext, } - impl ExEx { + impl ExEx + where + Node::Provider: BlockReader, + { async fn _test_bounds(mut self) -> eyre::Result<()> { self.ctx.pool(); self.ctx.block_executor(); diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs index b48a6ebc951..3ce0f488f40 100644 --- a/crates/exex/exex/src/dyn_context.rs +++ b/crates/exex/exex/src/dyn_context.rs @@ -6,6 +6,7 @@ use std::fmt::Debug; use reth_chainspec::{EthChainSpec, Head}; use reth_node_api::FullNodeComponents; use reth_node_core::node_config::NodeConfig; +use reth_provider::BlockReader; use tokio::sync::mpsc; use crate::{ExExContext, ExExEvent, ExExNotificationsStream}; @@ -51,7 +52,7 @@ impl Debug for ExExContextDyn { impl From> for ExExContextDyn where Node: FullNodeComponents, - Node::Provider: Debug, + Node::Provider: Debug + BlockReader, Node::Executor: Debug, { fn from(ctx: ExExContext) -> Self { diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 6c1e12d8d76..954a057fc09 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -90,7 +90,12 @@ impl ExExNotifications { impl ExExNotificationsStream for ExExNotifications where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { fn set_without_head(&mut self) { @@ -139,7 +144,12 @@ where impl Stream for ExExNotifications where - P: BlockReader + 
HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { type Item = eyre::Result; @@ -262,7 +272,12 @@ impl ExExNotificationsWithHead { impl ExExNotificationsWithHead where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { /// Checks if the ExEx head is on the canonical chain. @@ -339,7 +354,12 @@ where impl Stream for ExExNotificationsWithHead where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { type Item = eyre::Result; diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 15c770c5e9e..ca0ea46551c 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -265,7 +265,7 @@ pub async fn test_exex_context_with_chain_spec( let (static_dir, _) = create_test_static_files_dir(); let db = create_test_rw_db(); - let provider_factory = ProviderFactory::new( + let provider_factory = ProviderFactory::>::new( db, chain_spec.clone(), StaticFileProvider::read_write(static_dir.into_path()).expect("static file provider"), @@ -289,7 +289,7 @@ pub async fn test_exex_context_with_chain_spec( let (_, payload_builder) = NoopPayloadBuilderService::::new(); - let components = NodeAdapter::, _>, _> { + let components = NodeAdapter::, _> { components: Components { transaction_pool, evm_config, diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index e54000895a7..2e8f9f4cc7a 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -147,7 +147,7 @@ 
where impl NetworkConfig where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, + C: BlockReader + HeaderProvider + Clone + Unpin + 'static, { /// Starts the networking stack given a [`NetworkConfig`] and returns a handle to the network. pub async fn start_network(self) -> Result { diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 0f9348a42ce..c0d22f97da8 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -15,7 +15,7 @@ use reth_eth_wire::{ use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::error::RequestResult; use reth_network_peers::PeerId; -use reth_primitives::BlockBody; +use reth_primitives_traits::Block; use reth_storage_api::{BlockReader, HeaderProvider, ReceiptProvider}; use std::{ future::Future, @@ -157,7 +157,9 @@ where &self, _peer_id: PeerId, request: GetBlockBodies, - response: oneshot::Sender>>, + response: oneshot::Sender< + RequestResult::Body>>, + >, ) { self.metrics.eth_bodies_requests_received_total.increment(1); let mut bodies = Vec::new(); @@ -166,8 +168,7 @@ where for hash in request.0 { if let Some(block) = self.client.block_by_hash(hash).unwrap_or_default() { - let body: BlockBody = block.into(); - + let (_, body) = block.split(); total_bytes += body.length(); bodies.push(body); @@ -223,7 +224,7 @@ where /// This should be spawned or used as part of `tokio::select!`. 
impl Future for EthRequestHandler where - C: BlockReader + HeaderProvider + Unpin, + C: BlockReader + HeaderProvider + Unpin, { type Output = (); diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index a64084f2cf9..34c08f637ba 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -194,7 +194,7 @@ where impl Testnet where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, + C: BlockReader + HeaderProvider + Clone + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, { /// Spawns the testnet to a separate task @@ -253,7 +253,7 @@ impl fmt::Debug for Testnet { impl Future for Testnet where - C: BlockReader + HeaderProvider + Unpin + 'static, + C: BlockReader + HeaderProvider + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, { type Output = (); @@ -448,7 +448,7 @@ where impl Future for Peer where - C: BlockReader + HeaderProvider + Unpin + 'static, + C: BlockReader + HeaderProvider + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, { type Output = (); diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 7cbad445da1..56ee7d9d640 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -34,7 +34,7 @@ use reth_node_core::{ }; use reth_provider::{ providers::{BlockchainProvider, NodeTypesForProvider}, - ChainSpecProvider, FullProvider, + BlockReader, ChainSpecProvider, FullProvider, }; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; @@ -651,6 +651,7 @@ impl BuilderContext { pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle where Pool: TransactionPool + Unpin + 'static, + Node::Provider: BlockReader, { self.start_network_with(builder, pool, Default::default()) } @@ -669,6 +670,7 @@ impl BuilderContext { ) -> NetworkHandle where Pool: TransactionPool + Unpin + 
'static, + Node::Provider: BlockReader, { let (handle, network, txpool, eth) = builder .transactions(pool, tx_config) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index cc0a61833e4..2150b6bafd4 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -135,7 +135,11 @@ impl OpNode { > where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } = args; @@ -293,7 +297,7 @@ pub struct OpPoolBuilder { impl PoolBuilder for OpPoolBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type Pool = OpTransactionPool; @@ -522,7 +526,7 @@ impl OpNetworkBuilder { impl NetworkBuilder for OpNetworkBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, { async fn build_network( diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index a5616569c86..6db5d69568b 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -69,7 +69,7 @@ impl OpTransactionValidator { impl OpTransactionValidator where - Client: StateProviderFactory + BlockReaderIdExt, + Client: StateProviderFactory + BlockReaderIdExt, Tx: EthPoolTransaction, { /// Create a new [`OpTransactionValidator`]. 
@@ -195,7 +195,7 @@ where impl TransactionValidator for OpTransactionValidator where - Client: StateProviderFactory + BlockReaderIdExt, + Client: StateProviderFactory + BlockReaderIdExt, Tx: EthPoolTransaction, { type Transaction = Tx; diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index c1df9180ce3..66aeaa295cb 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -25,6 +25,7 @@ use reth_optimism_node::{ OpEngineTypes, OpNode, }; use reth_optimism_payload_builder::builder::OpPayloadTransactions; +use reth_optimism_primitives::OpPrimitives; use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; use reth_primitives::{SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered}; use reth_provider::providers::BlockchainProvider2; @@ -90,8 +91,13 @@ fn build_components( OpConsensusBuilder, > where - Node: - FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, + >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. 
} = RollupArgs::default(); diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 60af6542e28..8690d1a262a 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -20,8 +20,8 @@ use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; use reth_node_builder::EthApiBuilderCtx; use reth_provider::{ - BlockNumReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, - StageCheckpointReader, StateProviderFactory, + BlockNumReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, + EvmEnvProvider, StageCheckpointReader, StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -249,7 +249,7 @@ where impl Trace for OpEthApi where - Self: LoadState>, + Self: RpcNodeCore + LoadState>, N: RpcNodeCore, { } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 782f78dd4aa..0319e15c81e 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -24,7 +24,7 @@ impl LoadPendingBlock for OpEthApi where Self: SpawnBlocking, N: RpcNodeCore< - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index d7babfc6289..ac71c791c33 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -489,6 +489,11 @@ where } } +/// A helepr trait to construct [`SealedBlock`] from a [`reth_primitives_traits::Block`]. +pub type SealedBlockFor = SealedBlock< + ::Header, + ::Body, +>; /// Sealed block with senders recovered from transactions. 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] pub struct SealedBlockWithSenders { diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 049d4f202a6..52d573f2b3b 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -35,7 +35,9 @@ pub use reth_static_file_types as static_file; pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; -pub use block::{Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockWithSenders}; +pub use block::{ + Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockFor, SealedBlockWithSenders, +}; #[cfg(feature = "reth-codec")] pub use compression::*; pub use receipt::{ diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index 85697160115..d987ed1edb6 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -1,11 +1,12 @@ use crate::{segments::SegmentSet, Pruner}; +use alloy_eips::eip2718::Encodable2718; use reth_chainspec::MAINNET; use reth_config::PruneConfig; use reth_db::transaction::DbTxMut; use reth_exex_types::FinishedExExHeight; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory, - PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, + PruneCheckpointWriter, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use std::time::Duration; @@ -77,7 +78,9 @@ impl PrunerBuilder { pub fn build_with_provider_factory(self, provider_factory: PF) -> Pruner where PF: DatabaseProviderFactory< - ProviderRW: PruneCheckpointWriter + BlockReader + StaticFileProviderFactory, + ProviderRW: PruneCheckpointWriter + + BlockReader + + StaticFileProviderFactory, > + StaticFileProviderFactory< Primitives = ::Primitives, >, @@ -103,9 +106,8 @@ impl PrunerBuilder { where Provider: StaticFileProviderFactory + DBProvider - + BlockReader - + 
PruneCheckpointWriter - + TransactionsProvider, + + BlockReader + + PruneCheckpointWriter, { let segments = SegmentSet::::from_components(static_file_provider, self.segments); diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 62c252fc54b..198d01ce44d 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -2,10 +2,11 @@ use crate::segments::{ AccountHistory, ReceiptsByLogs, Segment, SenderRecovery, StorageHistory, TransactionLookup, UserReceipts, }; +use alloy_eips::eip2718::Encodable2718; use reth_db::transaction::DbTxMut; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, - StaticFileProviderFactory, TransactionsProvider, + StaticFileProviderFactory, }; use reth_prune_types::PruneModes; @@ -47,9 +48,8 @@ impl SegmentSet where Provider: StaticFileProviderFactory + DBProvider - + TransactionsProvider + PruneCheckpointWriter - + BlockReader, + + BlockReader, { /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`]. 
diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index ce9d90c291b..27f4f5085d2 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -6,7 +6,7 @@ use crate::{ use alloy_eips::eip2718::Encodable2718; use rayon::prelude::*; use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, TransactionsProvider}; +use reth_provider::{BlockReader, DBProvider}; use reth_prune_types::{ PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutputCheckpoint, }; @@ -25,7 +25,7 @@ impl TransactionLookup { impl Segment for TransactionLookup where - Provider: DBProvider + TransactionsProvider + BlockReader, + Provider: DBProvider + BlockReader, { fn segment(&self) -> PruneSegment { PruneSegment::TransactionLookup diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 04e97f99e34..a0712d617b6 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -19,6 +19,7 @@ reth-consensus.workspace = true reth-network-api.workspace = true reth-node-core.workspace = true reth-provider.workspace = true +reth-primitives.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index e88f6aa86bb..5326a10e463 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -27,7 +27,12 @@ pub struct EthHandlers { impl EthHandlers where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, diff --git 
a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ccf19ed1a0b..0f850f0457a 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -37,8 +37,9 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: -//! FullRpcProvider + AccountReader + ChangeSetReader, +//! Provider: FullRpcProvider +//! + AccountReader +//! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, @@ -112,8 +113,9 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: -//! FullRpcProvider + AccountReader + ChangeSetReader, +//! Provider: FullRpcProvider +//! + AccountReader +//! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, @@ -258,7 +260,7 @@ pub async fn launch, ) -> Result where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, @@ -641,6 +643,7 @@ where EngineT: EngineTypes, EngineApi: EngineApiServer, EthApi: FullEthApiServer, + Provider: BlockReader::Block>, { let Self { provider, @@ -716,6 +719,7 @@ where ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, + Provider: BlockReader, { let Self { provider, @@ -750,6 +754,7 @@ where ) -> TransportRpcModules<()> where EthApi: FullEthApiServer, + Provider: BlockReader::Block>, { let mut modules = TransportRpcModules::default(); @@ -907,7 +912,12 @@ pub struct RpcRegistryInner< impl RpcRegistryInner where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 
'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, @@ -1112,6 +1122,7 @@ where pub fn register_debug(&mut self) -> &mut Self where EthApi: EthApiSpec + EthTransactions + TraceExt, + Provider: BlockReader::Block>, { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); @@ -1126,6 +1137,7 @@ where pub fn register_trace(&mut self) -> &mut Self where EthApi: TraceExt, + Provider: BlockReader::Block>, { let trace_api = self.trace_api(); self.modules.insert(RethRpcModule::Trace, trace_api.into_rpc().into()); @@ -1264,7 +1276,9 @@ where impl RpcRegistryInner where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider::Block> + + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 7773b5084c9..1062363eafb 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -75,7 +75,11 @@ struct EngineApiInner EngineApi where - Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, + Provider: HeaderProvider + + BlockReader + + StateProviderFactory + + EvmEnvProvider + + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, @@ -487,7 +491,7 @@ where f: F, ) -> EngineApiResult>> where - F: Fn(Block) -> R + Send + 'static, + F: Fn(Provider::Block) -> R + Send + 'static, R: Send + 'static, { let (tx, rx) = oneshot::channel(); @@ -735,7 +739,11 @@ where impl EngineApiServer for EngineApi where - Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, + Provider: HeaderProvider + + BlockReader + + StateProviderFactory + + EvmEnvProvider + + 'static, EngineT: 
EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 251ca225eb1..c78c7c59876 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -2,21 +2,32 @@ use std::sync::Arc; +use alloy_consensus::BlockHeader; use alloy_eips::BlockId; use alloy_rpc_types_eth::{Block, Header, Index}; use futures::Future; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders}; +use reth_node_api::BlockBody; +use reth_primitives::{Receipt, SealedBlockFor, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; use reth_rpc_types_compat::block::from_block; -use crate::{node::RpcNodeCoreExt, FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; +use crate::{ + node::RpcNodeCoreExt, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, + RpcReceipt, +}; use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; /// Result type of the fetched block receipts. pub type BlockReceiptsResult = Result>>, E>; /// Result type of the fetched block and its receipts. -pub type BlockAndReceiptsResult = Result>)>, E>; +pub type BlockAndReceiptsResult = Result< + Option<( + SealedBlockFor<<::Provider as BlockReader>::Block>, + Arc>, + )>, + ::Error, +>; /// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the /// `eth_` namespace. @@ -49,7 +60,7 @@ pub trait EthBlocks: LoadBlock { let block_hash = block.hash(); let mut total_difficulty = self .provider() - .header_td_by_number(block.number) + .header_td_by_number(block.number()) .map_err(Self::Error::from_eth_err)?; if total_difficulty.is_none() { // if we failed to find td after we successfully loaded the block, try again using @@ -83,7 +94,7 @@ pub trait EthBlocks: LoadBlock { .provider() .pending_block() .map_err(Self::Error::from_eth_err)? 
- .map(|block| block.body.transactions.len())) + .map(|block| block.body.transactions().len())) } let block_hash = match self @@ -120,7 +131,7 @@ pub trait EthBlocks: LoadBlock { fn load_block_and_receipts( &self, block_id: BlockId, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadReceipt, { @@ -198,10 +209,16 @@ pub trait EthBlocks: LoadBlock { /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt { /// Returns the block object for the given block id. + #[expect(clippy::type_complexity)] fn block_with_senders( &self, block_id: BlockId, - ) -> impl Future>, Self::Error>> + Send { + ) -> impl Future< + Output = Result< + Option::Block>>>, + Self::Error, + >, + > + Send { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index d7e74c37b56..c7f346e951e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -18,6 +18,7 @@ use alloy_rpc_types_eth::{ use futures::Future; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_node_api::BlockBody; use reth_primitives::TransactionSigned; use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_revm::{ @@ -278,14 +279,15 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { // we're essentially replaying the transactions in the block here, hence we need the // state that points to the beginning of the block, which is the state at // the parent block - let mut at = block.parent_hash; + let mut at = block.parent_hash(); let mut replay_block_txs = true; - let num_txs = transaction_index.index().unwrap_or(block.body.transactions.len()); + let num_txs = + transaction_index.index().unwrap_or_else(|| 
block.body.transactions().len()); // but if all transactions are to be replayed, we can use the state at the block itself, // however only if we're not targeting the pending block, because for pending we can't // rely on the block's state being available - if !is_block_target_pending && num_txs == block.body.transactions.len() { + if !is_block_target_pending && num_txs == block.body.transactions().len() { at = block.hash(); replay_block_txs = false; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 8ed45d2ac08..0099e0f6b16 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -1,5 +1,6 @@ //! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods. +use alloy_consensus::BlockHeader; use alloy_primitives::U256; use alloy_rpc_types_eth::{BlockNumberOrTag, FeeHistory}; use futures::Future; @@ -287,7 +288,7 @@ pub trait LoadFee: LoadBlock { .block_with_senders(BlockNumberOrTag::Pending.into()) .await? .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Pending.into()))? 
- .base_fee_per_gas + .base_fee_per_gas() .ok_or(EthApiError::InvalidTransaction( RpcInvalidTransactionError::TxTypeNotSupported, ))?; @@ -324,7 +325,7 @@ pub trait LoadFee: LoadBlock { let suggested_tip = self.suggested_priority_fee(); async move { let (header, suggested_tip) = futures::try_join!(header, suggested_tip)?; - let base_fee = header.and_then(|h| h.base_fee_per_gas).unwrap_or_default(); + let base_fee = header.and_then(|h| h.base_fee_per_gas()).unwrap_or_default(); Ok(suggested_tip + U256::from(base_fee)) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 0f20cb4204e..c166c31d755 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -45,7 +45,7 @@ use tracing::debug; pub trait LoadPendingBlock: EthApiTypes + RpcNodeCore< - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, @@ -114,9 +114,15 @@ pub trait LoadPendingBlock: } /// Returns the locally built pending block + #[expect(clippy::type_complexity)] fn local_pending_block( &self, - ) -> impl Future)>, Self::Error>> + Send + ) -> impl Future< + Output = Result< + Option<(SealedBlockWithSenders<::Block>, Vec)>, + Self::Error, + >, + > + Send where Self: SpawnBlocking, { diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index a1e6084da55..114b4c41d90 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -10,6 +10,7 @@ use futures::Future; use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::SealedBlockWithSenders; +use reth_provider::BlockReader; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ 
-24,7 +25,7 @@ use revm_primitives::{ use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; /// Executes CPU heavy tasks. -pub trait Trace: LoadState> { +pub trait Trace: LoadState> { /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state /// changes. fn inspect( @@ -230,7 +231,7 @@ pub trait Trace: LoadState> { fn trace_block_until( &self, block_id: BlockId, - block: Option>, + block: Option::Block>>>, highest_index: Option, config: TracingInspectorConfig, f: F, diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index d898c0fe832..3b6fc837c40 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -1,7 +1,7 @@ //! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t. //! network. -use alloy_consensus::Transaction; +use alloy_consensus::{BlockHeader, Transaction}; use alloy_dyn_abi::TypedData; use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_network::TransactionBuilder; @@ -199,8 +199,8 @@ pub trait EthTransactions: LoadTransaction { async move { if let Some(block) = self.block_with_senders(block_id).await? { let block_hash = block.hash(); - let block_number = block.number; - let base_fee_per_gas = block.base_fee_per_gas; + let block_number = block.number(); + let base_fee_per_gas = block.base_fee_per_gas(); if let Some((signer, tx)) = block.transactions_with_sender().nth(index) { let tx_info = TransactionInfo { hash: Some(tx.hash()), @@ -275,8 +275,8 @@ pub trait EthTransactions: LoadTransaction { .await? 
.and_then(|block| { let block_hash = block.hash(); - let block_number = block.number; - let base_fee_per_gas = block.base_fee_per_gas; + let block_number = block.number(); + let base_fee_per_gas = block.base_fee_per_gas(); block .transactions_with_sender() diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 98b9530d63c..11bf6c6231d 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -29,6 +29,7 @@ reth-transaction-pool.workspace = true reth-trie.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-sol-types.workspace = true @@ -36,7 +37,6 @@ alloy-rpc-types-eth.workspace = true revm.workspace = true revm-inspectors.workspace = true revm-primitives = { workspace = true, features = ["dev"] } -alloy-eips.workspace = true # rpc jsonrpsee-core.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index b4a110e96af..9e83e323c1a 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -105,7 +105,12 @@ impl EthStateCache { evm_config: EvmConfig, ) -> Self where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, EvmConfig: ConfigureEvm
, { Self::spawn_with(provider, config, TokioTaskExecutor::default(), evm_config) @@ -122,7 +127,12 @@ impl EthStateCache { evm_config: EvmConfig, ) -> Self where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, Tasks: TaskSpawner + Clone + 'static, EvmConfig: ConfigureEvm
, { @@ -337,7 +347,12 @@ where impl Future for EthStateCacheService where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, Tasks: TaskSpawner + Clone + 'static, EvmConfig: ConfigureEvm
, { diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 5ead11b7115..2e41c7a1183 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -8,6 +8,7 @@ use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; use reth_primitives::{Receipt, SealedBlockWithSenders}; +use reth_primitives_traits::SignedTransaction; use reth_storage_api::BlockReader; use std::sync::Arc; @@ -58,7 +59,7 @@ pub enum ProviderOrBlock<'a, P: BlockReader> { /// Appends all matching logs of a block's receipts. /// If the log matches, look up the corresponding transaction hash. -pub fn append_matching_block_logs( +pub fn append_matching_block_logs>( all_logs: &mut Vec, provider_or_block: ProviderOrBlock<'_, P>, filter: &FilteredParams, diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index d8f413650a3..116026c2ddd 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,10 +4,12 @@ use std::time::Instant; +use alloy_consensus::BlockHeader; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives_traits::Block; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. @@ -23,26 +25,26 @@ pub struct PendingBlockEnv { /// The origin for a configured [`PendingBlockEnv`] #[derive(Clone, Debug)] -pub enum PendingBlockEnvOrigin { +pub enum PendingBlockEnvOrigin { /// The pending block as received from the CL. - ActualPending(SealedBlockWithSenders), + ActualPending(SealedBlockWithSenders), /// The _modified_ header of the latest block. 
/// /// This derives the pending state based on the latest header by modifying: /// - the timestamp /// - the block number /// - fees - DerivedFromLatest(SealedHeader), + DerivedFromLatest(SealedHeader), } -impl PendingBlockEnvOrigin { +impl PendingBlockEnvOrigin { /// Returns true if the origin is the actual pending block as received from the CL. pub const fn is_actual_pending(&self) -> bool { matches!(self, Self::ActualPending(_)) } /// Consumes the type and returns the actual pending block. - pub fn into_actual_pending(self) -> Option { + pub fn into_actual_pending(self) -> Option> { match self { Self::ActualPending(block) => Some(block), _ => None, @@ -67,13 +69,13 @@ impl PendingBlockEnvOrigin { /// header. pub fn build_target_hash(&self) -> B256 { match self { - Self::ActualPending(block) => block.parent_hash, + Self::ActualPending(block) => block.header().parent_hash(), Self::DerivedFromLatest(header) => header.hash(), } } /// Returns the header this pending block is based on. - pub fn header(&self) -> &SealedHeader { + pub fn header(&self) -> &SealedHeader { match self { Self::ActualPending(block) => &block.header, Self::DerivedFromLatest(header) => header, diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 5425de402f8..ad3294d503c 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -793,7 +793,7 @@ where #[async_trait] impl DebugApiServer for DebugApi where - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider + StateProviderFactory diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index dac37152942..3087af52d69 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -432,7 +432,7 @@ mod tests { use crate::EthApi; fn build_test_eth_api< - P: BlockReaderIdExt + P: BlockReaderIdExt + BlockReader + ChainSpecProvider + EvmEnvProvider diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs 
b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 8540a4684bf..23e5f671dbe 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -18,7 +18,7 @@ impl LoadPendingBlock where Self: SpawnBlocking + RpcNodeCore< - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index d9fe5e18a05..9c60a4c105f 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -2,11 +2,12 @@ use alloy_consensus::Header; use reth_evm::ConfigureEvm; +use reth_provider::BlockReader; use reth_rpc_eth_api::helpers::{LoadState, Trace}; use crate::EthApi; impl Trace for EthApi where - Self: LoadState> + Self: LoadState> { } diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 81dc8ff8b8a..f81eefdc5ff 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -74,7 +74,7 @@ impl TraceApi { impl TraceApi where - Provider: BlockReader + Provider: BlockReader::Block> + StateProviderFactory + EvmEnvProvider + ChainSpecProvider @@ -565,7 +565,7 @@ where #[async_trait] impl TraceApiServer for TraceApi where - Provider: BlockReader + Provider: BlockReader::Block> + StateProviderFactory + EvmEnvProvider + ChainSpecProvider diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 16234ad483f..c76c2c732a2 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,5 +1,5 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; use alloy_primitives::BlockNumber; use num_traits::Zero; use reth_config::config::ExecutionConfig; @@ -12,7 +12,7 @@ use reth_evm::{ use reth_execution_types::Chain; use 
reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; use reth_primitives::{SealedHeader, StaticFileSegment}; -use reth_primitives_traits::{format_gas_throughput, NodePrimitives}; +use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimitives}; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, writer::UnifiedStorageWriter, @@ -176,7 +176,7 @@ impl Stage for ExecutionStage where E: BlockExecutorProvider, Provider: DBProvider - + BlockReader + + BlockReader + StaticFileProviderFactory + StatsReader + StateChangeWriter @@ -270,17 +270,17 @@ where fetch_block_duration += fetch_block_start.elapsed(); - cumulative_gas += block.gas_used; + cumulative_gas += block.header().gas_used(); // Configure the executor to use the current state. - trace!(target: "sync::stages::execution", number = block_number, txs = block.body.transactions.len(), "Executing block"); + trace!(target: "sync::stages::execution", number = block_number, txs = block.body().transactions().len(), "Executing block"); // Execute the block let execute_start = Instant::now(); self.metrics.metered_one((&block, td).into(), |input| { executor.execute_and_verify_one(input).map_err(|error| StageError::Block { - block: Box::new(SealedHeader::seal(block.header.clone())), + block: Box::new(SealedHeader::seal(block.header().clone())), error: BlockErrorKind::Execution(error), }) })?; @@ -304,7 +304,7 @@ where } stage_progress = block_number; - stage_checkpoint.progress.processed += block.gas_used; + stage_checkpoint.progress.processed += block.gas_used(); // If we have ExExes we need to save the block in memory for later if self.exex_manager_handle.has_exexs() { @@ -343,7 +343,7 @@ where // the `has_exexs` check here as well if !blocks.is_empty() { let blocks = blocks.into_iter().map(|block| { - let hash = block.header.hash_slow(); + let hash = block.header().hash_slow(); block.seal(hash) }); diff --git 
a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index d90b227c112..385ae67d68f 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -25,10 +25,11 @@ use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; -use reth_node_types::{NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, NodeTypesWithDB, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Account, Block, BlockWithSenders, NodePrimitives, Receipt, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, + TransactionSignedNoHash, }; use reth_primitives_traits::BlockBody as _; use reth_prune_types::{PruneCheckpoint, PruneSegment}; @@ -259,23 +260,33 @@ impl BlockIdReader for BlockchainProvider2 { } impl BlockReader for BlockchainProvider2 { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { self.consistent_provider()?.find_block_by_hash(hash, source) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.consistent_provider()?.block(id) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { 
Ok(self.canonical_in_memory_state.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -300,7 +311,7 @@ impl BlockReader for BlockchainProvider2 { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.block_with_senders(id, transaction_kind) } @@ -308,25 +319,25 @@ impl BlockReader for BlockchainProvider2 { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.consistent_provider()?.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_block_with_senders_range(range) } } @@ -670,9 +681,9 @@ where impl BlockReaderIdExt for BlockchainProvider2 where - Self: BlockReader + ReceiptProviderIdExt, + Self: ReceiptProviderIdExt, { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { self.consistent_provider()?.block_by_id(id) } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 740392ad993..0abd23749c2 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -18,11 +18,12 @@ use reth_db::models::BlockNumberAddress; use 
reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; -use reth_node_types::TxTy; +use reth_node_types::{BlockTy, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + Account, BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, }; +use reth_primitives_traits::{Block, BlockBody}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{DatabaseProviderFactory, StateProvider, StorageChangeSetReader}; @@ -778,7 +779,13 @@ impl BlockIdReader for ConsistentProvider { } impl BlockReader for ConsistentProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { match source { BlockSource::Any | BlockSource::Canonical => { // Note: it's fine to return the unsealed block because the caller already has @@ -795,7 +802,7 @@ impl BlockReader for ConsistentProvider { } } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block(id), @@ -803,15 +810,19 @@ impl BlockReader for ConsistentProvider { ) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { 
Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -873,7 +884,7 @@ impl BlockReader for ConsistentProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block_with_senders(id, transaction_kind), @@ -885,7 +896,7 @@ impl BlockReader for ConsistentProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), @@ -893,7 +904,7 @@ impl BlockReader for ConsistentProvider { ) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_range(range), @@ -905,7 +916,7 @@ impl BlockReader for ConsistentProvider { fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_with_senders_range(range), @@ -917,7 +928,7 @@ impl BlockReader for ConsistentProvider { fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), @@ -1011,7 +1022,7 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), + |block_state| Ok(Some(block_state.block_ref().block().body().transactions().to_vec())), ) } @@ -1022,7 +1033,7 @@ impl TransactionsProvider for 
ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), + |block_state, _| Some(block_state.block_ref().block().body().transactions().to_vec()), |_| true, ) } @@ -1289,7 +1300,7 @@ impl ChainSpecProvider for ConsistentProvider { } impl BlockReaderIdExt for ConsistentProvider { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), BlockId::Hash(hash) => { diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 57f09e72306..b2ea8519163 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -19,9 +19,9 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::{NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, NodeTypesWithDB, TxTy}; use reth_primitives::{ - Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; @@ -353,23 +353,33 @@ impl BlockNumReader for ProviderFactory { } impl BlockReader for ProviderFactory { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { self.provider()?.find_block_by_hash(hash, source) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> 
{ + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.provider()?.block(id) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { self.provider()?.pending_block() } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { self.provider()?.pending_block_with_senders() } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { self.provider()?.pending_block_and_receipts() } @@ -388,7 +398,7 @@ impl BlockReader for ProviderFactory { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.block_with_senders(id, transaction_kind) } @@ -396,25 +406,25 @@ impl BlockReader for ProviderFactory { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.provider()?.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.sealed_block_with_senders_range(range) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 47d9308283f..3723e2606dc 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -46,13 +46,13 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, 
ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; -use reth_node_types::{BlockTy, NodeTypes, TxTy}; +use reth_node_types::{BlockTy, BodyTy, NodeTypes, TxTy}; use reth_primitives::{ - Account, Block, BlockBody, BlockExt, BlockWithSenders, Bytecode, GotExpected, Receipt, - SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, + Account, Block, BlockExt, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, + SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSignedNoHash, }; -use reth_primitives_traits::{BlockBody as _, SignedTransaction}; +use reth_primitives_traits::{Block as _, BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -457,7 +457,7 @@ where Ok(Vec::new()) } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. pub const fn new( tx: TX, @@ -519,7 +519,7 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
, HF: FnOnce(BlockNumber) -> ProviderResult>, - BF: FnOnce(H, BlockBody, Vec
) -> ProviderResult>, + BF: FnOnce(H, BodyTy, Vec
) -> ProviderResult>, { let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; let Some(header) = header_by_number(block_number)? else { return Ok(None) }; @@ -569,7 +569,7 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
, HF: FnOnce(RangeInclusive) -> ProviderResult>, - F: FnMut(H, BlockBody, Range) -> ProviderResult, + F: FnMut(H, BodyTy, Range) -> ProviderResult, { if range.is_empty() { return Ok(Vec::new()) @@ -637,7 +637,7 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
, HF: Fn(RangeInclusive) -> ProviderResult>, - BF: Fn(H, BlockBody, Vec
) -> ProviderResult, + BF: Fn(H, BodyTy, Vec
) -> ProviderResult, { let mut senders_cursor = self.tx.cursor_read::()?; @@ -651,7 +651,7 @@ impl DatabaseProvider { .walk_range(tx_range.clone())? .collect::, _>>()?; - let mut senders = Vec::with_capacity(body.transactions.len()); + let mut senders = Vec::with_capacity(body.transactions().len()); for (tx_num, tx) in tx_range.zip(body.transactions()) { match known_senders.get(&tx_num) { None => { @@ -1190,7 +1190,13 @@ impl BlockNumReader for DatabaseProvider BlockReader for DatabaseProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) } else { @@ -1203,7 +1209,7 @@ impl BlockReader for DatabaseProvid /// If the header for this block is not found, this returns `None`. /// If the header is found, but the transactions either do not exist, or are not indexed, this /// will return None. - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { if let Some(number) = self.convert_hash_or_number(id)? { if let Some(header) = self.header_by_number(number)? 
{ // If the body indices are not found, this means that the transactions either do not @@ -1228,15 +1234,19 @@ impl BlockReader for DatabaseProvid Ok(None) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(None) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(None) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(None) } @@ -1275,13 +1285,13 @@ impl BlockReader for DatabaseProvid &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders( id, transaction_kind, |block_number| self.header_by_number(block_number), |header, body, senders| { - Block { header, body } + Self::Block::new(header, body) // Note: we're using unchecked here because we know the block contains valid txs // wrt to its height and can ignore the s value check so pre // EIP-2 txs are allowed @@ -1296,7 +1306,7 @@ impl BlockReader for DatabaseProvid &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders( id, transaction_kind, @@ -1313,23 +1323,23 @@ impl BlockReader for DatabaseProvid ) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.block_range( range, |range| self.headers_range(range), - |header, body, _| Ok(Block { header, body }), + |header, body, _| Ok(Self::Block::new(header, body)), ) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders_range( range, |range| self.headers_range(range), |header, body, senders| { - Block { header, body } + Self::Block::new(header, body) .try_with_senders_unchecked(senders) .map_err(|_| 
ProviderError::SenderRecoveryError) }, @@ -1339,7 +1349,7 @@ impl BlockReader for DatabaseProvid fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders_range( range, |range| self.sealed_headers_range(range), @@ -2812,7 +2822,7 @@ impl BlockWrite fn append_block_bodies( &self, - bodies: Vec<(BlockNumber, Option<::Body>)>, + bodies: Vec<(BlockNumber, Option>)>, write_transactions_to: StorageLocation, ) -> ProviderResult<()> { let Some(from_block) = bodies.first().map(|(block, _)| *block) else { return Ok(()) }; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 30cac220d8b..abd92312715 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -22,10 +22,10 @@ use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubs use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, FullNodePrimitives, NodeTypes, NodeTypesWithDB, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - TransactionMeta, TransactionSigned, + Account, BlockWithSenders, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, + SealedHeader, TransactionMeta, TransactionSigned, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -79,6 +79,7 @@ where SignedTx = TransactionSigned, BlockHeader = alloy_consensus::Header, BlockBody = reth_primitives::BlockBody, + Block = reth_primitives::Block, >, >, { @@ -92,6 +93,7 @@ impl NodeTypesForProvider for T where SignedTx = TransactionSigned, BlockHeader = alloy_consensus::Header, BlockBody = 
reth_primitives::BlockBody, + Block = reth_primitives::Block, >, > { @@ -333,7 +335,13 @@ impl BlockIdReader for BlockchainProvider { } impl BlockReader for BlockchainProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { let block = match source { BlockSource::Any => { // check database first @@ -352,22 +360,26 @@ impl BlockReader for BlockchainProvider { Ok(block) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { match id { BlockHashOrNumber::Hash(hash) => self.find_block_by_hash(hash, BlockSource::Any), BlockHashOrNumber::Number(num) => self.database.block_by_number(num), } } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.tree.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(self.tree.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(self.tree.pending_block_and_receipts()) } @@ -392,7 +404,7 @@ impl BlockReader for BlockchainProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.block_with_senders(id, transaction_kind) } @@ -400,25 +412,25 @@ impl BlockReader for BlockchainProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.database.block_range(range) } fn block_with_senders_range( &self, 
range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.sealed_block_with_senders_range(range) } } @@ -847,7 +859,7 @@ impl BlockReaderIdExt for BlockchainProvider where Self: BlockReader + ReceiptProviderIdExt, { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), BlockId::Hash(hash) => { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 14821fde547..34f3b91f627 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -31,14 +31,14 @@ use reth_db_api::{ cursor::DbCursorRO, models::StoredBlockBodyIndices, table::Table, transaction::DbTx, }; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; -use reth_node_types::NodePrimitives; +use reth_node_types::{FullNodePrimitives, NodePrimitives}; use reth_primitives::{ static_file::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, transaction::recover_signers, - Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, TransactionSignedNoHash, }; use reth_primitives_traits::SignedTransaction; @@ -1380,9 +1380,7 @@ impl> ReceiptProvider } } -impl> TransactionsProviderExt - for StaticFileProvider -{ +impl> TransactionsProviderExt for StaticFileProvider { fn transaction_hashes_by_range( &self, tx_range: Range, @@ -1577,32 +1575,38 @@ impl BlockNumReader for StaticFileProvider { } } -impl> BlockReader 
for StaticFileProvider { +impl> BlockReader for StaticFileProvider { + type Block = N::Block; + fn find_block_by_hash( &self, _hash: B256, _source: BlockSource, - ) -> ProviderResult> { + ) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1621,7 +1625,7 @@ impl> BlockReader for Sta &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1630,12 +1634,12 @@ impl> BlockReader for Sta &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1643,14 +1647,14 @@ impl> BlockReader for Sta fn block_with_senders_range( &self, _range: RangeInclusive, - ) -> 
ProviderResult> { + ) -> ProviderResult>> { Err(ProviderError::UnsupportedProvider) } fn sealed_block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { Err(ProviderError::UnsupportedProvider) } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index a0ecb7256cb..a99b85af904 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -470,6 +470,8 @@ impl BlockIdReader for MockEthProvider { } impl BlockReader for MockEthProvider { + type Block = Block; + fn find_block_by_hash( &self, hash: B256, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index fa1c6bad74f..892965fbff2 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -92,6 +92,8 @@ impl BlockNumReader for NoopProvider { } impl BlockReader for NoopProvider { + type Block = Block; + fn find_block_by_hash( &self, hash: B256, diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 9bb357e33a3..2735859e3a8 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -7,13 +7,13 @@ use crate::{ }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_chainspec::EthereumHardforks; -use reth_node_types::{NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, NodeTypesWithDB, TxTy}; /// Helper trait to unify all provider traits for simplicity. 
pub trait FullProvider: DatabaseProviderFactory + StaticFileProviderFactory - + BlockReaderIdExt> + + BlockReaderIdExt, Block = BlockTy> + AccountReader + StateProviderFactory + EvmEnvProvider @@ -31,7 +31,7 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory + StaticFileProviderFactory - + BlockReaderIdExt> + + BlockReaderIdExt, Block = BlockTy> + AccountReader + StateProviderFactory + EvmEnvProvider diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 37c7857f1c2..494a7e5aa41 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -7,7 +7,7 @@ use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; use reth_primitives::{ - Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, }; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; @@ -47,7 +47,6 @@ impl BlockSource { /// /// If not requested otherwise, implementers of this trait should prioritize fetching blocks from /// the database. -#[auto_impl::auto_impl(&, Arc)] pub trait BlockReader: BlockNumReader + HeaderProvider @@ -57,32 +56,46 @@ pub trait BlockReader: + Send + Sync { + /// The block type this provider reads. + type Block: reth_primitives_traits::Block< + Body: reth_primitives_traits::BlockBody, + >; + /// Tries to find in the given block source. /// /// Note: this only operates on the hash because the number might be ambiguous. /// /// Returns `None` if block is not found. - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult>; + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult>; /// Returns the block with given id from the database. 
/// /// Returns `None` if block is not found. - fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; + fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; /// Returns the pending block if available /// - /// Note: This returns a [SealedBlock] because it's expected that this is sealed by the provider - /// and the caller does not know the hash. - fn pending_block(&self) -> ProviderResult>; + /// Note: This returns a [`SealedBlockFor`] because it's expected that this is sealed by the + /// provider and the caller does not know the hash. + fn pending_block(&self) -> ProviderResult>>; /// Returns the pending block if available /// - /// Note: This returns a [SealedBlockWithSenders] because it's expected that this is sealed by + /// Note: This returns a [`SealedBlockWithSenders`] because it's expected that this is sealed by /// the provider and the caller does not know the hash. - fn pending_block_with_senders(&self) -> ProviderResult>; + fn pending_block_with_senders( + &self, + ) -> ProviderResult>>; /// Returns the pending block and receipts if available. - fn pending_block_and_receipts(&self) -> ProviderResult)>>; + #[allow(clippy::type_complexity)] + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>>; /// Returns the ommers/uncle headers of the given block from the database. /// @@ -92,14 +105,14 @@ pub trait BlockReader: /// Returns the block with matching hash from the database. /// /// Returns `None` if block is not found. - fn block_by_hash(&self, hash: B256) -> ProviderResult> { + fn block_by_hash(&self, hash: B256) -> ProviderResult> { self.block(hash.into()) } /// Returns the block with matching number from database. /// /// Returns `None` if block is not found. 
- fn block_by_number(&self, num: u64) -> ProviderResult> { + fn block_by_number(&self, num: u64) -> ProviderResult> { self.block(num.into()) } @@ -117,7 +130,7 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns the sealed block with senders with matching number or hash from database. /// @@ -128,26 +141,164 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns all blocks in the given inclusive range. /// /// Note: returns only available blocks - fn block_range(&self, range: RangeInclusive) -> ProviderResult>; + fn block_range(&self, range: RangeInclusive) -> ProviderResult>; /// Returns a range of blocks from the database, along with the senders of each /// transaction in the blocks. fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns a range of sealed blocks from the database, along with the senders of each /// transaction in the blocks. 
fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>; + ) -> ProviderResult>>; +} + +impl BlockReader for std::sync::Arc { + type Block = T::Block; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { + T::find_block_by_hash(self, hash, source) + } + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + T::block(self, id) + } + fn pending_block(&self) -> ProviderResult>> { + T::pending_block(self) + } + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + T::pending_block_with_senders(self) + } + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + T::pending_block_and_receipts(self) + } + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + T::ommers(self, id) + } + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + T::block_by_hash(self, hash) + } + fn block_by_number(&self, num: u64) -> ProviderResult> { + T::block_by_number(self, num) + } + fn block_body_indices(&self, num: u64) -> ProviderResult> { + T::block_body_indices(self, num) + } + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::block_with_senders(self, id, transaction_kind) + } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::sealed_block_with_senders(self, id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + T::block_range(self, range) + } + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::block_with_senders_range(self, range) + } + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::sealed_block_with_senders_range(self, range) + } +} + +impl BlockReader for &T { + type Block = T::Block; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + 
) -> ProviderResult> { + T::find_block_by_hash(self, hash, source) + } + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + T::block(self, id) + } + fn pending_block(&self) -> ProviderResult>> { + T::pending_block(self) + } + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + T::pending_block_with_senders(self) + } + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + T::pending_block_and_receipts(self) + } + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + T::ommers(self, id) + } + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + T::block_by_hash(self, hash) + } + fn block_by_number(&self, num: u64) -> ProviderResult> { + T::block_by_number(self, num) + } + fn block_body_indices(&self, num: u64) -> ProviderResult> { + T::block_body_indices(self, num) + } + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::block_with_senders(self, id, transaction_kind) + } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::sealed_block_with_senders(self, id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + T::block_range(self, range) + } + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::block_with_senders_range(self, range) + } + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::sealed_block_with_senders_range(self, range) + } } /// Trait extension for `BlockReader`, for types that implement `BlockId` conversion. @@ -160,12 +311,11 @@ pub trait BlockReader: /// so this trait can only be implemented for types that implement `BlockIdReader`. 
The /// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and /// retrieving the block should be done using the type's `BlockReader` methods. -#[auto_impl::auto_impl(&, Arc)] pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the block with matching tag from the database /// /// Returns `None` if block is not found. - fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.block(num.into())) } @@ -204,7 +354,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the block with the matching [`BlockId`] from the database. /// /// Returns `None` if block is not found. - fn block_by_id(&self, id: BlockId) -> ProviderResult>; + fn block_by_id(&self, id: BlockId) -> ProviderResult>; /// Returns the block with senders with matching [`BlockId`]. /// @@ -215,7 +365,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { &self, id: BlockId, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { match id { BlockId::Hash(hash) => { self.block_with_senders(hash.block_hash.into(), transaction_kind) diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index baee2f870a9..9b9c24c6863 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -74,6 +74,7 @@ impl ChainStorageReader { } + /// Ethereum storage implementation. 
#[derive(Debug, Default, Clone, Copy)] pub struct EthStorage; diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index 14001d147f2..da48a0754f9 100644 --- a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -48,7 +48,11 @@ impl EmptyBlockPayloadJobGenerator PayloadJobGenerator for EmptyBlockPayloadJobGenerator where - Client: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, + Client: StateProviderFactory + + BlockReaderIdExt + + Clone + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + Unpin + 'static, Builder: PayloadBuilder + Unpin + 'static, diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index e46b969adaa..6047da0dd1b 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -24,6 +24,7 @@ use reth_chainspec::ChainSpec; use reth_node_api::NodeTypesWithEngine; use reth_node_ethereum::{node::EthereumAddOns, EthEngineTypes, EthEvmConfig, EthereumNode}; use reth_payload_builder::PayloadBuilderService; +use reth_primitives::EthPrimitives; pub mod generator; pub mod job; @@ -34,7 +35,13 @@ pub struct CustomPayloadBuilder; impl PayloadServiceBuilder for CustomPayloadBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, + >, Pool: TransactionPool + Unpin + 'static, { async fn spawn_payload_service( diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index e2955d01768..1fbf833293d 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -119,7 +119,10 @@ fn txs_provider_example } /// The `BlockReader` allows querying the headers-related tables. 
-fn block_provider_example(provider: T, number: u64) -> eyre::Result<()> { +fn block_provider_example>( + provider: T, + number: u64, +) -> eyre::Result<()> { // Can query a block by number let block = provider.block(number.into())?.ok_or(eyre::eyre!("block num not found"))?; assert_eq!(block.number, number); diff --git a/examples/rpc-db/src/myrpc_ext.rs b/examples/rpc-db/src/myrpc_ext.rs index e38b6fc24d3..6cc7a4142f5 100644 --- a/examples/rpc-db/src/myrpc_ext.rs +++ b/examples/rpc-db/src/myrpc_ext.rs @@ -22,7 +22,7 @@ pub struct MyRpcExt { impl MyRpcExtApiServer for MyRpcExt where - Provider: BlockReaderIdExt + 'static, + Provider: BlockReaderIdExt + 'static, { /// Showcasing how to implement a custom rpc method /// using the provider. From 8e4a917ec1aa70b3779083454ff2d5ecf6b44168 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 25 Nov 2024 21:10:49 +0400 Subject: [PATCH 681/970] refactor: isolate `BlockchainTree` setup in `DefaultEngineLauncher` (#12852) --- crates/node/builder/src/launch/common.rs | 99 ++++-------------------- crates/node/builder/src/launch/engine.rs | 12 +-- crates/node/builder/src/launch/mod.rs | 27 ++++++- 3 files changed, 41 insertions(+), 97 deletions(-) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 47ec68ff0d7..a1a2223a470 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -11,10 +11,6 @@ use alloy_primitives::{BlockNumber, B256}; use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; use reth_beacon_consensus::EthBeaconConsensus; -use reth_blockchain_tree::{ - externals::TreeNodeTypes, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, - TreeExternals, -}; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_consensus::Consensus; @@ -46,10 +42,9 @@ use reth_node_metrics::{ }; use reth_primitives::Head; use reth_provider::{ - 
providers::{BlockchainProvider, BlockchainProvider2, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, BlockNumReader, CanonStateNotificationSender, ChainSpecProvider, - ProviderError, ProviderFactory, ProviderResult, StageCheckpointReader, StateProviderFactory, - StaticFileProviderFactory, TreeViewer, + providers::{ProviderNodeTypes, StaticFileProvider}, + BlockHashReader, BlockNumReader, ChainSpecProvider, ProviderError, ProviderFactory, + ProviderResult, StageCheckpointReader, StateProviderFactory, StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_api::clients::EthApiClient; @@ -65,27 +60,6 @@ use tokio::sync::{ oneshot, watch, }; -/// Allows to set a tree viewer for a configured blockchain provider. -// TODO: remove this helper trait once the engine revamp is done, the new -// blockchain provider won't require a TreeViewer. -// https://github.com/paradigmxyz/reth/issues/8742 -pub trait WithTree { - /// Setter for tree viewer. - fn set_tree(self, tree: Arc) -> Self; -} - -impl WithTree for BlockchainProvider { - fn set_tree(self, tree: Arc) -> Self { - self.with_tree(tree) - } -} - -impl WithTree for BlockchainProvider2 { - fn set_tree(self, _tree: Arc) -> Self { - self - } -} - /// Reusable setup for launching a node. /// /// This provides commonly used boilerplate for launching a node. @@ -610,8 +584,6 @@ where pub fn with_blockchain_db( self, create_blockchain_provider: F, - tree_config: BlockchainTreeConfig, - canon_state_notification_sender: CanonStateNotificationSender, ) -> eyre::Result, WithMeteredProviders>>> where T: FullNodeTypes, @@ -625,8 +597,6 @@ where metrics_sender: self.sync_metrics_tx(), }, blockchain_db, - tree_config, - canon_state_notification_sender, }; let ctx = LaunchContextWith { @@ -643,7 +613,7 @@ impl Attached::ChainSpec>, WithMeteredProviders>, > where - T: FullNodeTypes, + T: FullNodeTypes, { /// Returns access to the underlying database. 
pub const fn database(&self) -> &::DB { @@ -674,16 +644,6 @@ where &self.right().blockchain_db } - /// Returns a reference to the `BlockchainTreeConfig`. - pub const fn tree_config(&self) -> &BlockchainTreeConfig { - &self.right().tree_config - } - - /// Returns the `CanonStateNotificationSender`. - pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender { - self.right().canon_state_notification_sender.clone() - } - /// Creates a `NodeAdapter` and attaches it to the launch context. pub async fn with_components( self, @@ -712,31 +672,13 @@ where debug!(target: "reth::cli", "creating components"); let components = components_builder.build_components(&builder_ctx).await?; - let consensus: Arc = Arc::new(components.consensus().clone()); - - let tree_externals = TreeExternals::new( - self.provider_factory().clone().with_prune_modes(self.prune_modes()), - consensus.clone(), - components.block_executor().clone(), - ); - let tree = BlockchainTree::new(tree_externals, *self.tree_config())? - .with_sync_metrics_tx(self.sync_metrics_tx()) - // Note: This is required because we need to ensure that both the components and the - // tree are using the same channel for canon state notifications. 
This will be removed - // once the Blockchain provider no longer depends on an instance of the tree - .with_canon_state_notification_sender(self.canon_state_notification_sender()); - - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - - // Replace the tree component with the actual tree - let blockchain_db = self.blockchain_db().clone().set_tree(blockchain_tree); - - debug!(target: "reth::cli", "configured blockchain tree"); + let blockchain_db = self.blockchain_db().clone(); + let consensus = Arc::new(components.consensus().clone()); let node_adapter = NodeAdapter { components, task_executor: self.task_executor().clone(), - provider: blockchain_db.clone(), + provider: blockchain_db, }; debug!(target: "reth::cli", "calling on_component_initialized hook"); @@ -747,8 +689,6 @@ where provider_factory: self.provider_factory().clone(), metrics_sender: self.sync_metrics_tx(), }, - blockchain_db, - tree_config: self.right().tree_config, node_adapter, head, consensus, @@ -768,7 +708,7 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes, + T: FullNodeTypes, CB: NodeComponentsBuilder, { /// Returns the configured `ProviderFactory`. @@ -805,9 +745,14 @@ where &self.right().node_adapter } + /// Returns mutable reference to the configured `NodeAdapter`. + pub fn node_adapter_mut(&mut self) -> &mut NodeAdapter { + &mut self.right_mut().node_adapter + } + /// Returns a reference to the blockchain provider. pub const fn blockchain_db(&self) -> &T::Provider { - &self.right().blockchain_db + &self.node_adapter().provider } /// Returns the initial backfill to sync to at launch. @@ -912,11 +857,6 @@ where self.right().db_provider_container.metrics_sender.clone() } - /// Returns a reference to the `BlockchainTreeConfig`. - pub const fn tree_config(&self) -> &BlockchainTreeConfig { - &self.right().tree_config - } - /// Returns the node adapter components. 
pub const fn components(&self) -> &CB::Components { &self.node_adapter().components @@ -928,10 +868,7 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes< - Provider: WithTree + StateProviderFactory + ChainSpecProvider, - Types: ProviderNodeTypes, - >, + T: FullNodeTypes, CB: NodeComponentsBuilder, { /// Returns the [`InvalidBlockHook`] to use for the node. @@ -1063,7 +1000,7 @@ pub struct WithMeteredProvider { metrics_sender: UnboundedSender, } -/// Helper container to bundle the [`ProviderFactory`], [`BlockchainProvider`] +/// Helper container to bundle the [`ProviderFactory`], [`FullNodeTypes::Provider`] /// and a metrics sender. #[allow(missing_debug_implementations)] pub struct WithMeteredProviders @@ -1072,8 +1009,6 @@ where { db_provider_container: WithMeteredProvider, blockchain_db: T::Provider, - canon_state_notification_sender: CanonStateNotificationSender, - tree_config: BlockchainTreeConfig, } /// Helper container to bundle the metered providers container and [`NodeAdapter`]. @@ -1084,8 +1019,6 @@ where CB: NodeComponentsBuilder, { db_provider_container: WithMeteredProvider, - tree_config: BlockchainTreeConfig, - blockchain_db: T::Provider, node_adapter: NodeAdapter, head: Head, consensus: Arc, diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 6afcace5b15..6a87ff3ce65 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -5,7 +5,6 @@ use reth_beacon_consensus::{ hooks::{EngineHooks, StaticFileHook}, BeaconConsensusEngineHandle, }; -use reth_blockchain_tree::BlockchainTreeConfig; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider}; use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder}; @@ -94,15 +93,6 @@ where } = target; let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; - // TODO: move tree_config and canon_state_notification_sender - // initialization to with_blockchain_db once the engine revamp is done - // https://github.com/paradigmxyz/reth/issues/8742 - let tree_config = BlockchainTreeConfig::default(); - - // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - // setup the launch context let ctx = ctx .with_configured_globals() @@ -132,7 +122,7 @@ where // later the components. .with_blockchain_db::(move |provider_factory| { Ok(BlockchainProvider2::new(provider_factory)?) - }, tree_config, canon_state_notification_sender)? + })? .with_components(components_builder, on_component_initialized).await?; // spawn exexs diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 4335073b404..627145d2df7 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -17,7 +17,8 @@ use reth_beacon_consensus::{ BeaconConsensusEngine, }; use reth_blockchain_tree::{ - externals::TreeNodeTypes, noop::NoopBlockchainTree, BlockchainTreeConfig, + externals::TreeNodeTypes, noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, + ShareableBlockchainTree, TreeExternals, }; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; @@ -134,7 +135,7 @@ where )); // setup the launch context - let ctx = ctx + let mut ctx = ctx .with_configured_globals() // load the toml config .with_loaded_toml_config(config)? @@ -162,9 +163,29 @@ where // later the components. 
.with_blockchain_db::(move |provider_factory| { Ok(BlockchainProvider::new(provider_factory, tree)?) - }, tree_config, canon_state_notification_sender)? + })? .with_components(components_builder, on_component_initialized).await?; + let consensus = Arc::new(ctx.components().consensus().clone()); + + let tree_externals = TreeExternals::new( + ctx.provider_factory().clone(), + consensus.clone(), + ctx.components().block_executor().clone(), + ); + let tree = BlockchainTree::new(tree_externals, tree_config)? + .with_sync_metrics_tx(ctx.sync_metrics_tx()) + // Note: This is required because we need to ensure that both the components and the + // tree are using the same channel for canon state notifications. This will be removed + // once the Blockchain provider no longer depends on an instance of the tree + .with_canon_state_notification_sender(canon_state_notification_sender); + + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); + + ctx.node_adapter_mut().provider = ctx.blockchain_db().clone().with_tree(blockchain_tree); + + debug!(target: "reth::cli", "configured blockchain tree"); + // spawn exexs let exex_manager_handle = ExExLauncher::new( ctx.head(), From b96c0d98974c7cc55e5731d87c98adbfbd1e096c Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Mon, 25 Nov 2024 20:51:37 +0100 Subject: [PATCH 682/970] docs: clarify installation guide NVMe term (#12853) --- book/installation/installation.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/book/installation/installation.md b/book/installation/installation.md index ebf6c8ef3f9..1df122d4d44 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -44,13 +44,13 @@ As of April 2024 at block number 19.6M: * Archive Node: At least 2.14TB is required * Full Node: At least 1.13TB is required -NVMe drives are recommended for the best performance, with SSDs being a cheaper alternative. 
HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. +NVMe based SSD drives are recommended for the best performance, with SATA SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. As of February 2024, syncing an Ethereum mainnet node to block 19.3M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days. > **Note** > -> It is highly recommended to choose a TLC drive when using NVMe, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). +> It is highly recommended to choose a TLC drive when using an NVMe drive, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). ### CPU From 404f8f877842efa887a1b39e78de2eb6ec97289d Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 26 Nov 2024 05:38:46 +0400 Subject: [PATCH 683/970] refactor: unify logic for writing headers (#12858) --- .../src/providers/database/provider.rs | 39 +++++----- crates/storage/provider/src/traits/block.rs | 5 +- crates/storage/provider/src/writer/mod.rs | 72 +------------------ 3 files changed, 29 insertions(+), 87 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 3723e2606dc..5110b067f4a 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -2749,22 +2749,12 @@ impl BlockWrite fn insert_block( &self, block: SealedBlockWithSenders, - write_transactions_to: StorageLocation, + write_to: StorageLocation, ) -> ProviderResult { let block_number = block.number; let mut durations_recorder = 
metrics::DurationsRecorder::default(); - self.tx.put::(block_number, block.hash())?; - durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); - - // Put header with canonical hashes. - self.tx.put::(block_number, block.header.as_ref().clone())?; - durations_recorder.record_relative(metrics::Action::InsertHeaders); - - self.tx.put::(block.hash(), block_number)?; - durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); - // total difficulty let ttd = if block_number == 0 { block.difficulty @@ -2775,8 +2765,26 @@ impl BlockWrite parent_ttd + block.difficulty }; - self.tx.put::(block_number, ttd.into())?; - durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties); + if write_to.database() { + self.tx.put::(block_number, block.hash())?; + durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); + + // Put header with canonical hashes. + self.tx.put::(block_number, block.header.as_ref().clone())?; + durations_recorder.record_relative(metrics::Action::InsertHeaders); + + self.tx.put::(block_number, ttd.into())?; + durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties); + } + + if write_to.static_files() { + let mut writer = + self.static_file_provider.get_writer(block_number, StaticFileSegment::Headers)?; + writer.append_header(&block.header, ttd, &block.hash())?; + } + + self.tx.put::(block.hash(), block_number)?; + durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); let mut next_tx_num = self .tx @@ -2805,10 +2813,7 @@ impl BlockWrite next_tx_num += 1; } - self.append_block_bodies( - vec![(block_number, Some(block.block.body))], - write_transactions_to, - )?; + self.append_block_bodies(vec![(block_number, Some(block.block.body))], write_to)?; debug!( target: "providers::db", diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index aec54362656..59a5f9b3f61 100644 --- 
a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -68,10 +68,13 @@ pub trait BlockWriter: Send + Sync { /// /// Return [StoredBlockBodyIndices] that contains indices of the first and last transactions and /// transition in the block. + /// + /// Accepts [`StorageLocation`] value which specifies where transactions and headers should be + /// written. fn insert_block( &self, block: SealedBlockWithSenders, - write_transactions_to: StorageLocation, + write_to: StorageLocation, ) -> ProviderResult; /// Appends a batch of block bodies extending the canonical chain. This is invoked during diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index e91ea3bea3e..28552514362 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -4,26 +4,22 @@ use crate::{ BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, StaticFileProviderFactory, StorageLocation, TrieWriter, }; -use alloy_consensus::Header; -use alloy_primitives::{BlockNumber, B256, U256}; +use alloy_primitives::BlockNumber; use reth_chain_state::ExecutedBlock; use reth_db::{ cursor::DbCursorRO, - models::CompactU256, tables, transaction::{DbTx, DbTxMut}, }; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{SealedBlock, StaticFileSegment}; -use reth_stages_types::{StageCheckpoint, StageId}; +use reth_primitives::StaticFileSegment; use reth_storage_api::{ DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, }; use reth_storage_errors::writer::UnifiedStorageWriterError; use revm::db::OriginalValuesKnown; -use std::{borrow::Borrow, sync::Arc}; -use tracing::{debug, instrument}; +use tracing::debug; mod database; mod static_file; @@ -196,7 +192,6 @@ where let sealed_block = 
block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); self.database().insert_block(sealed_block, StorageLocation::Both)?; - self.save_header_and_transactions(block.block.clone())?; // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. @@ -223,35 +218,6 @@ where Ok(()) } - /// Writes the header & transactions to static files, and updates their respective checkpoints - /// on database. - #[instrument(level = "trace", skip_all, fields(block = ?block.num_hash()) target = "storage")] - fn save_header_and_transactions(&self, block: Arc) -> ProviderResult<()> { - debug!(target: "provider::storage_writer", "Writing headers and transactions."); - - { - let header_writer = - self.static_file().get_writer(block.number, StaticFileSegment::Headers)?; - let mut storage_writer = UnifiedStorageWriter::from(self.database(), header_writer); - let td = storage_writer.append_headers_from_blocks( - block.header().number, - std::iter::once(&(block.header(), block.hash())), - )?; - - debug!(target: "provider::storage_writer", block_num=block.number, "Updating transaction metadata after writing"); - self.database() - .tx_ref() - .put::(block.number, CompactU256(td))?; - self.database() - .save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(block.number))?; - } - - self.database() - .save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block.number))?; - - Ok(()) - } - /// Removes all block, transaction and receipt data above the given block number from the /// database and static files. This is exclusive, i.e., it only removes blocks above /// `block_number`, and does not remove `block_number`. 
@@ -323,38 +289,6 @@ where None => Err(UnifiedStorageWriterError::MissingStaticFileWriter), } } - - /// Appends headers to static files, using the - /// [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) table to determine the - /// total difficulty of the parent block during header insertion. - /// - /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a - /// writer for the Headers segment. - pub fn append_headers_from_blocks( - &mut self, - initial_block_number: BlockNumber, - headers: impl Iterator, - ) -> ProviderResult - where - I: Borrow<(H, B256)>, - H: Borrow
, - { - self.ensure_static_file_segment(StaticFileSegment::Headers)?; - - let mut td = self - .database() - .header_td_by_number(initial_block_number)? - .ok_or(ProviderError::TotalDifficultyNotFound(initial_block_number))?; - - for pair in headers { - let (header, hash) = pair.borrow(); - let header = header.borrow(); - td += header.difficulty; - self.static_file_mut().append_header(header, td, hash)?; - } - - Ok(td) - } } impl From 82eb38312c04bbc08332500fe0b8e0477140f648 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 26 Nov 2024 04:07:43 +0100 Subject: [PATCH 684/970] trie: add unit tests for nibbles (#12758) --- Cargo.lock | 1 + crates/trie/common/Cargo.toml | 27 ++++----- crates/trie/common/src/nibbles.rs | 94 +++++++++++++++++++++++++++++++ 3 files changed, 109 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index de9f923b41f..18ff0e179a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9392,6 +9392,7 @@ dependencies = [ "reth-primitives-traits", "revm-primitives", "serde", + "serde_json", ] [[package]] diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 0161fc7ff3d..5b691d6e203 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -40,22 +40,23 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true hash-db = "=0.15.2" plain_hasher = "0.2" +serde_json.workspace = true [features] test-utils = [ - "dep:plain_hasher", - "dep:hash-db", - "arbitrary", - "reth-primitives-traits/test-utils", - "reth-codecs/test-utils" + "dep:plain_hasher", + "dep:hash-db", + "arbitrary", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", ] arbitrary = [ - "alloy-trie/arbitrary", - "dep:arbitrary", - "reth-primitives-traits/arbitrary", - "alloy-consensus/arbitrary", - "alloy-primitives/arbitrary", - "nybbles/arbitrary", - "revm-primitives/arbitrary", - "reth-codecs/arbitrary" + "alloy-trie/arbitrary", + "dep:arbitrary", + 
"reth-primitives-traits/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "nybbles/arbitrary", + "revm-primitives/arbitrary", + "reth-codecs/arbitrary", ] diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index cf94f135f54..797f194c130 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -120,3 +120,97 @@ impl Compact for StoredNibblesSubKey { (Self(Nibbles::from_nibbles_unchecked(&buf[..len])), &buf[65..]) } } + +#[cfg(test)] +mod tests { + use super::*; + use bytes::BytesMut; + + #[test] + fn test_stored_nibbles_from_nibbles() { + let nibbles = Nibbles::from_nibbles_unchecked(vec![0x12, 0x34, 0x56]); + let stored = StoredNibbles::from(nibbles.clone()); + assert_eq!(stored.0, nibbles); + } + + #[test] + fn test_stored_nibbles_from_vec() { + let bytes = vec![0x12, 0x34, 0x56]; + let stored = StoredNibbles::from(bytes.clone()); + assert_eq!(stored.0.as_slice(), bytes.as_slice()); + } + + #[test] + fn test_stored_nibbles_equality() { + let bytes = vec![0x12, 0x34]; + let stored = StoredNibbles::from(bytes.clone()); + assert_eq!(stored, *bytes.as_slice()); + } + + #[test] + fn test_stored_nibbles_partial_cmp() { + let stored = StoredNibbles::from(vec![0x12, 0x34]); + let other = vec![0x12, 0x35]; + assert!(stored < *other.as_slice()); + } + + #[test] + fn test_stored_nibbles_to_compact() { + let stored = StoredNibbles::from(vec![0x12, 0x34]); + let mut buf = BytesMut::with_capacity(10); + let len = stored.to_compact(&mut buf); + assert_eq!(len, 2); + assert_eq!(buf, &vec![0x12, 0x34][..]); + } + + #[test] + fn test_stored_nibbles_from_compact() { + let buf = vec![0x12, 0x34, 0x56]; + let (stored, remaining) = StoredNibbles::from_compact(&buf, 2); + assert_eq!(stored.0.as_slice(), &[0x12, 0x34]); + assert_eq!(remaining, &[0x56]); + } + + #[test] + fn test_stored_nibbles_subkey_from_nibbles() { + let nibbles = Nibbles::from_nibbles_unchecked(vec![0x12, 0x34]); + let subkey = 
StoredNibblesSubKey::from(nibbles.clone()); + assert_eq!(subkey.0, nibbles); + } + + #[test] + fn test_stored_nibbles_subkey_to_compact() { + let subkey = StoredNibblesSubKey::from(vec![0x12, 0x34]); + let mut buf = BytesMut::with_capacity(65); + let len = subkey.to_compact(&mut buf); + assert_eq!(len, 65); + assert_eq!(buf[..2], [0x12, 0x34]); + assert_eq!(buf[64], 2); // Length byte + } + + #[test] + fn test_stored_nibbles_subkey_from_compact() { + let mut buf = vec![0x12, 0x34]; + buf.resize(65, 0); + buf[64] = 2; + let (subkey, remaining) = StoredNibblesSubKey::from_compact(&buf, 65); + assert_eq!(subkey.0.as_slice(), &[0x12, 0x34]); + assert_eq!(remaining, &[] as &[u8]); + } + + #[test] + fn test_serialization_stored_nibbles() { + let stored = StoredNibbles::from(vec![0x12, 0x34]); + let serialized = serde_json::to_string(&stored).unwrap(); + let deserialized: StoredNibbles = serde_json::from_str(&serialized).unwrap(); + assert_eq!(stored, deserialized); + } + + #[test] + fn test_serialization_stored_nibbles_subkey() { + let subkey = StoredNibblesSubKey::from(vec![0x12, 0x34]); + let serialized = serde_json::to_string(&subkey).unwrap(); + let deserialized: StoredNibblesSubKey = serde_json::from_str(&serialized).unwrap(); + assert_eq!(subkey, deserialized); + } +} From 96d61dd44b6259315a443f4f74e093a5f1873939 Mon Sep 17 00:00:00 2001 From: kien-rise <157339831+kien-rise@users.noreply.github.com> Date: Tue, 26 Nov 2024 16:21:42 +0700 Subject: [PATCH 685/970] feat: add pending_transactions_max in trait TransactionPool (#12856) --- crates/transaction-pool/src/lib.rs | 7 +++++++ crates/transaction-pool/src/noop.rs | 7 +++++++ crates/transaction-pool/src/pool/mod.rs | 8 ++++++++ crates/transaction-pool/src/traits.rs | 9 +++++++++ 4 files changed, 31 insertions(+) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 3194ebba6f8..2953d397285 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs 
@@ -441,6 +441,13 @@ where self.pool.pending_transactions() } + fn pending_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.pool.pending_transactions_max(max) + } + fn queued_transactions(&self) -> Vec>> { self.pool.queued_transactions() } diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index cf2270978ab..68e0f98ef8a 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -163,6 +163,13 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn pending_transactions_max( + &self, + _max: usize, + ) -> Vec>> { + vec![] + } + fn queued_transactions(&self) -> Vec>> { vec![] } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 1a23bf3e07c..69dea90a293 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -678,6 +678,14 @@ where self.get_pool_data().best_transactions_with_attributes(best_transactions_attributes) } + /// Returns only the first `max` transactions in the pending pool. + pub(crate) fn pending_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.get_pool_data().pending_transactions_iter().take(max).collect() + } + /// Returns all transactions from the pending sub-pool pub(crate) fn pending_transactions(&self) -> Vec>> { self.get_pool_data().pending_transactions() diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 9d19105b5da..27272799cdb 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -270,6 +270,15 @@ pub trait TransactionPool: Send + Sync + Clone { /// Consumer: RPC fn pending_transactions(&self) -> Vec>>; + /// Returns first `max` transactions that can be included in the next block. 
+ /// See + /// + /// Consumer: Block production + fn pending_transactions_max( + &self, + max: usize, + ) -> Vec>>; + /// Returns all transactions that can be included in _future_ blocks. /// /// This and [Self::pending_transactions] are mutually exclusive. From fae44bf74a4c63818cbcfcc6d00e34359fd74ab3 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 26 Nov 2024 10:29:28 +0100 Subject: [PATCH 686/970] chore(trie): move prefix sets to `reth_trie_common` (#12860) --- Cargo.lock | 1 + crates/trie/common/Cargo.toml | 12 +++++++++--- crates/trie/{trie => common}/benches/prefix_set.rs | 2 +- crates/trie/common/src/lib.rs | 5 +++++ crates/trie/{trie => common}/src/prefix_set.rs | 6 +++--- crates/trie/trie/Cargo.toml | 4 ---- crates/trie/trie/src/lib.rs | 4 ---- 7 files changed, 19 insertions(+), 15 deletions(-) rename crates/trie/{trie => common}/benches/prefix_set.rs (99%) rename crates/trie/{trie => common}/src/prefix_set.rs (98%) diff --git a/Cargo.lock b/Cargo.lock index 18ff0e179a0..8208e2d1e79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9381,6 +9381,7 @@ dependencies = [ "alloy-trie", "arbitrary", "bytes", + "criterion", "derive_more 1.0.0", "hash-db", "itertools 0.13.0", diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 5b691d6e203..fb8ca1642c5 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -35,12 +35,14 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] alloy-primitives = { workspace = true, features = ["getrandom"] } -arbitrary = { workspace = true, features = ["derive"] } -proptest.workspace = true -proptest-arbitrary-interop.workspace = true +alloy-trie = { workspace = true, features = ["arbitrary"] } hash-db = "=0.15.2" plain_hasher = "0.2" serde_json.workspace = true +arbitrary = { workspace = true, features = ["derive"] } +proptest.workspace = true +proptest-arbitrary-interop.workspace = true +criterion.workspace = true [features] test-utils 
= [ @@ -60,3 +62,7 @@ arbitrary = [ "revm-primitives/arbitrary", "reth-codecs/arbitrary", ] + +[[bench]] +name = "prefix_set" +harness = false diff --git a/crates/trie/trie/benches/prefix_set.rs b/crates/trie/common/benches/prefix_set.rs similarity index 99% rename from crates/trie/trie/benches/prefix_set.rs rename to crates/trie/common/benches/prefix_set.rs index cae08d129f6..b61d58e0272 100644 --- a/crates/trie/trie/benches/prefix_set.rs +++ b/crates/trie/common/benches/prefix_set.rs @@ -7,7 +7,7 @@ use proptest::{ strategy::ValueTree, test_runner::{basic_result_cache, TestRunner}, }; -use reth_trie::{ +use reth_trie_common::{ prefix_set::{PrefixSet, PrefixSetMut}, Nibbles, }; diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index 7645ebd3a1c..6f3cbf3eeae 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -26,6 +26,10 @@ pub use storage::StorageTrieEntry; mod subnode; pub use subnode::StoredSubNode; +/// The implementation of a container for storing intermediate changes to a trie. +/// The container indicates when the trie has been modified. 
+pub mod prefix_set; + mod proofs; #[cfg(any(test, feature = "test-utils"))] pub use proofs::triehash; @@ -33,4 +37,5 @@ pub use proofs::*; pub mod root; +/// Re-export pub use alloy_trie::{nodes::*, proof, BranchNodeCompact, HashBuilder, TrieMask, EMPTY_ROOT_HASH}; diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs similarity index 98% rename from crates/trie/trie/src/prefix_set.rs rename to crates/trie/common/src/prefix_set.rs index d904ef38fdd..1e3567f57d0 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -73,7 +73,7 @@ pub struct TriePrefixSets { /// # Examples /// /// ``` -/// use reth_trie::{prefix_set::PrefixSetMut, Nibbles}; +/// use reth_trie_common::{prefix_set::PrefixSetMut, Nibbles}; /// /// let mut prefix_set_mut = PrefixSetMut::default(); /// prefix_set_mut.insert(Nibbles::from_nibbles_unchecked(&[0xa, 0xb])); @@ -211,8 +211,8 @@ impl PrefixSet { } impl<'a> IntoIterator for &'a PrefixSet { - type Item = &'a reth_trie_common::Nibbles; - type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; + type Item = &'a Nibbles; + type IntoIter = std::slice::Iter<'a, Nibbles>; fn into_iter(self) -> Self::IntoIter { self.iter() } diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 6136fa8e56b..48f961c3071 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -85,10 +85,6 @@ test-utils = [ "reth-stages-types/test-utils" ] -[[bench]] -name = "prefix_set" -harness = false - [[bench]] name = "hash_post_state" harness = false diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index 26bdc751124..73a08d3fc44 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -17,10 +17,6 @@ mod constants; pub use constants::*; -/// The implementation of a container for storing intermediate changes to a trie. -/// The container indicates when the trie has been modified. 
-pub mod prefix_set; - /// The implementation of forward-only in-memory cursor. pub mod forward_cursor; From 13cd77c935735527e4aeff26cb191a13ef69d244 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 10:57:07 +0100 Subject: [PATCH 687/970] test: enable op prim arbitrary in e2e test-utils (#12862) --- Cargo.lock | 1 + crates/e2e-test-utils/Cargo.toml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 8208e2d1e79..c942516948d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7112,6 +7112,7 @@ dependencies = [ "reth-network-peers", "reth-node-api", "reth-node-builder", + "reth-optimism-primitives", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 4619c357335..77b19085d40 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -28,6 +28,9 @@ reth-stages-types.workspace = true reth-network-peers.workspace = true reth-engine-local.workspace = true +# currently need to enable this for workspace level +reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } + # rpc jsonrpsee.workspace = true url.workspace = true From 9d2e04c77efbf92292fde1652e2564153bfc7708 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 11:12:36 +0100 Subject: [PATCH 688/970] chore: relax mempool impl (#12865) --- crates/net/eth-wire-types/src/broadcast.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 7d74085d355..25ce7f3b350 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -1,15 +1,14 @@ //! Types for broadcasting new data. 
use crate::{EthMessage, EthVersion, NetworkPrimitives}; +use alloy_primitives::{Bytes, TxHash, B256, U128}; use alloy_rlp::{ Decodable, Encodable, RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper, }; - -use alloy_primitives::{Bytes, TxHash, B256, U128}; use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; -use reth_primitives::{PooledTransactionsElement, TransactionSigned}; - +use reth_primitives::TransactionSigned; +use reth_primitives_traits::SignedTransaction; use std::{ collections::{HashMap, HashSet}, mem, @@ -555,7 +554,7 @@ pub trait HandleVersionedMempoolData { fn msg_version(&self) -> EthVersion; } -impl HandleMempoolData for Vec { +impl HandleMempoolData for Vec { fn is_empty(&self) -> bool { self.is_empty() } @@ -565,7 +564,7 @@ impl HandleMempoolData for Vec { } fn retain_by_hash(&mut self, mut f: impl FnMut(&TxHash) -> bool) { - self.retain(|tx| f(tx.hash())) + self.retain(|tx| f(tx.tx_hash())) } } From aa0a114b0df0e12e5ed8c3ba40aafa23845f659e Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 26 Nov 2024 11:34:23 +0100 Subject: [PATCH 689/970] chore(trie): introduce `serde` feature on `reth-trie-common` (#12864) --- crates/storage/db/Cargo.toml | 2 +- crates/trie/common/Cargo.toml | 26 +++++++++++++++----- crates/trie/common/src/hash_builder/state.rs | 4 +-- crates/trie/common/src/nibbles.rs | 19 +++----------- crates/trie/common/src/proofs.rs | 9 ++++--- crates/trie/common/src/storage.rs | 4 +-- crates/trie/db/Cargo.toml | 2 +- crates/trie/db/src/prefix_set.rs | 6 +++-- crates/trie/db/src/proof.rs | 7 +++--- crates/trie/db/src/trie_cursor.rs | 3 +-- crates/trie/db/tests/proof.rs | 3 +-- crates/trie/db/tests/trie.rs | 2 +- crates/trie/db/tests/walker.rs | 4 +-- crates/trie/trie/Cargo.toml | 3 ++- 14 files changed, 49 insertions(+), 45 deletions(-) diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 5ff9fb43a3d..62a7cc91068 
100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -21,8 +21,8 @@ reth-storage-errors.workspace = true reth-nippy-jar.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true +reth-trie-common = { workspace = true, features = ["serde"] } reth-tracing.workspace = true -reth-trie-common.workspace = true # ethereum alloy-primitives.workspace = true diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index fb8ca1642c5..47c09d2a7c0 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -12,21 +12,24 @@ description = "Commonly used types for trie usage in reth." workspace = true [dependencies] -reth-primitives-traits = { workspace = true, features = ["serde"] } -reth-codecs.workspace = true - +# alloy alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["arrayvec"] } -alloy-trie = { workspace = true, features = ["serde"] } +alloy-trie.workspace = true alloy-consensus.workspace = true alloy-genesis.workspace = true + +reth-primitives-traits.workspace = true +reth-codecs.workspace = true revm-primitives.workspace = true bytes.workspace = true derive_more.workspace = true -serde.workspace = true itertools.workspace = true -nybbles = { workspace = true, features = ["serde", "rlp"] } +nybbles = { workspace = true, features = ["rlp"] } + +# `serde` feature +serde = { workspace = true, optional = true } # `test-utils` feature hash-db = { version = "=0.15.2", optional = true } @@ -45,6 +48,17 @@ proptest-arbitrary-interop.workspace = true criterion.workspace = true [features] +serde = [ + "dep:serde", + "bytes/serde", + "nybbles/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-trie/serde", + "revm-primitives/serde", + "reth-primitives-traits/serde", + "reth-codecs/serde" +] test-utils = [ "dep:plain_hasher", "dep:hash-db", diff --git a/crates/trie/common/src/hash_builder/state.rs b/crates/trie/common/src/hash_builder/state.rs 
index c5cae21a1a3..ee2d1d00c01 100644 --- a/crates/trie/common/src/hash_builder/state.rs +++ b/crates/trie/common/src/hash_builder/state.rs @@ -3,11 +3,11 @@ use alloy_trie::{hash_builder::HashBuilderValue, nodes::RlpNode, HashBuilder}; use bytes::Buf; use nybbles::Nibbles; use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; /// The hash builder state for storing in the database. /// Check the `reth-trie` crate for more info on hash builder. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( feature = "arbitrary", derive(arbitrary::Arbitrary), diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index 797f194c130..402ba811069 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -1,24 +1,12 @@ use bytes::Buf; use derive_more::Deref; use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; pub use nybbles::Nibbles; /// The representation of nibbles of the merkle trie stored in the database. -#[derive( - Clone, - Debug, - Default, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - derive_more::Index, -)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, derive_more::Index)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibbles(pub Nibbles); @@ -74,7 +62,8 @@ impl Compact for StoredNibbles { } /// The representation of nibbles of the merkle trie stored in the database. 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord, Hash, Deref)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Deref)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibblesSubKey(pub Nibbles); diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 8e014f6d8c6..eabc3a165c7 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -11,7 +11,6 @@ use alloy_trie::{ }; use itertools::Itertools; use reth_primitives_traits::Account; -use serde::{Deserialize, Serialize}; use std::collections::{hash_map, HashMap}; /// The state multiproof of target accounts and multiproofs of their storage tries. @@ -171,8 +170,9 @@ impl StorageMultiProof { } /// The merkle proof with the relevant account info. -#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] +#[derive(Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] pub struct AccountProof { /// The address associated with the account. pub address: Address, @@ -227,7 +227,8 @@ impl AccountProof { } /// The merkle proof of the storage entry. -#[derive(Clone, PartialEq, Eq, Default, Debug, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Default, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct StorageProof { /// The raw storage key. 
pub key: B256, diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs index b61abb11688..07cfde916b4 100644 --- a/crates/trie/common/src/storage.rs +++ b/crates/trie/common/src/storage.rs @@ -1,9 +1,9 @@ use super::{BranchNodeCompact, StoredNibblesSubKey}; use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; /// Account storage trie node. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct StorageTrieEntry { /// The nibbles of the intermediate node pub nibbles: StoredNibblesSubKey, diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index 55fa9a851b1..74f9f98f1bf 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -18,7 +18,6 @@ reth-execution-errors.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-storage-errors.workspace = true -reth-trie-common.workspace = true reth-trie.workspace = true revm.workspace = true @@ -70,6 +69,7 @@ serde = [ "dep:serde", "reth-provider/serde", "reth-trie/serde", + "reth-trie-common/serde", "alloy-consensus/serde", "alloy-primitives/serde", "revm/serde", diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index cd50503bc70..ac8c3b05304 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -8,8 +8,10 @@ use reth_db_api::{ DatabaseError, }; use reth_primitives::StorageEntry; -use reth_trie::prefix_set::{PrefixSetMut, TriePrefixSets}; -use reth_trie_common::Nibbles; +use reth_trie::{ + prefix_set::{PrefixSetMut, TriePrefixSets}, + Nibbles, +}; use std::{ collections::{HashMap, HashSet}, ops::RangeInclusive, diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 9bf08fe136f..4f7c5e1c021 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -10,9 +10,8 @@ use 
reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, proof::{Proof, StorageProof}, trie_cursor::InMemoryTrieCursorFactory, - HashedPostStateSorted, HashedStorage, MultiProof, TrieInput, + AccountProof, HashedPostStateSorted, HashedStorage, MultiProof, TrieInput, }; -use reth_trie_common::AccountProof; /// Extends [`Proof`] with operations specific for working with a database transaction. pub trait DatabaseProof<'a, TX> { @@ -96,7 +95,7 @@ pub trait DatabaseStorageProof<'a, TX> { address: Address, slot: B256, storage: HashedStorage, - ) -> Result; + ) -> Result; } impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> @@ -111,7 +110,7 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> address: Address, slot: B256, storage: HashedStorage, - ) -> Result { + ) -> Result { let hashed_address = keccak256(address); let prefix_set = storage.construct_prefix_set(); let state_sorted = HashedPostStateSorted::new( diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index bfded342ba0..b364e9a86f1 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -11,9 +11,8 @@ use reth_storage_errors::db::DatabaseError; use reth_trie::{ trie_cursor::{TrieCursor, TrieCursorFactory}, updates::StorageTrieUpdates, - BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey, + BranchNodeCompact, Nibbles, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, }; -use reth_trie_common::StorageTrieEntry; /// Wrapper struct for database transaction implementing trie cursor factory trait. 
#[derive(Debug)] diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs index 79a2ce96fce..eedeee276db 100644 --- a/crates/trie/db/tests/proof.rs +++ b/crates/trie/db/tests/proof.rs @@ -6,8 +6,7 @@ use alloy_rlp::EMPTY_STRING_CODE; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET}; use reth_primitives::Account; use reth_provider::test_utils::{create_test_provider_factory, insert_genesis}; -use reth_trie::{proof::Proof, Nibbles}; -use reth_trie_common::{AccountProof, StorageProof}; +use reth_trie::{proof::Proof, AccountProof, Nibbles, StorageProof}; use reth_trie_db::DatabaseProof; use std::{ str::FromStr, diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index aee26436479..1e5d1a9f26b 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -17,9 +17,9 @@ use reth_provider::{ use reth_trie::{ prefix_set::PrefixSetMut, test_utils::{state_root, state_root_prehashed, storage_root, storage_root_prehashed}, + triehash::KeccakHasher, BranchNodeCompact, StateRoot, StorageRoot, TrieMask, }; -use reth_trie_common::triehash::KeccakHasher; use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot}; use std::{ collections::{BTreeMap, HashMap}, diff --git a/crates/trie/db/tests/walker.rs b/crates/trie/db/tests/walker.rs index dd4bcd6da8f..06355ff6d48 100644 --- a/crates/trie/db/tests/walker.rs +++ b/crates/trie/db/tests/walker.rs @@ -5,9 +5,9 @@ use reth_db::tables; use reth_db_api::{cursor::DbCursorRW, transaction::DbTxMut}; use reth_provider::test_utils::create_test_provider_factory; use reth_trie::{ - prefix_set::PrefixSetMut, trie_cursor::TrieCursor, walker::TrieWalker, StorageTrieEntry, + prefix_set::PrefixSetMut, trie_cursor::TrieCursor, walker::TrieWalker, BranchNodeCompact, + Nibbles, StorageTrieEntry, }; -use reth_trie_common::{BranchNodeCompact, Nibbles}; use reth_trie_db::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}; #[test] diff --git a/crates/trie/trie/Cargo.toml 
b/crates/trie/trie/Cargo.toml index 48f961c3071..fd4d80ce7e3 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -70,7 +70,8 @@ serde = [ "alloy-consensus/serde", "alloy-primitives/serde", "revm/serde", - "alloy-trie/serde" + "alloy-trie/serde", + "reth-trie-common/serde" ] serde-bincode-compat = [ "serde_with", From 1b4048e47d42c0053ea9ba216281ceb513d70bc4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 11:44:57 +0100 Subject: [PATCH 690/970] chore: add encodable to pooled recovered (#12866) --- crates/primitives/src/transaction/pooled.rs | 18 ++++++++++++++++++ crates/transaction-pool/src/traits.rs | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 2bd344ea2a1..6f2a7270086 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -739,6 +739,24 @@ impl TryFrom for PooledTransactionsElementEcRecove } } +impl Encodable2718 for PooledTransactionsElementEcRecovered { + fn type_flag(&self) -> Option { + self.transaction.type_flag() + } + + fn encode_2718_len(&self) -> usize { + self.transaction.encode_2718_len() + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + self.transaction.encode_2718(out) + } + + fn trie_hash(&self) -> B256 { + self.transaction.trie_hash() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 27272799cdb..102e137591e 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -947,7 +947,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { type Consensus: From + TryInto; /// Associated type representing the recovered pooled variant of the transaction. 
- type Pooled: Into; + type Pooled: Encodable2718 + Into; /// Define a method to convert from the `Consensus` type to `Self` fn try_from_consensus(tx: Self::Consensus) -> Result { From a28fa243c0f2d053b543219e6b073435809e615f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 12:44:44 +0100 Subject: [PATCH 691/970] feat: use Pooled type for get_pooled_transaction_element in pool trait (#12867) --- crates/transaction-pool/src/lib.rs | 5 ++++- crates/transaction-pool/src/noop.rs | 2 +- crates/transaction-pool/src/pool/mod.rs | 4 ++-- crates/transaction-pool/src/traits.rs | 19 +++++++++++++++++-- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 2953d397285..83e3b78c6b8 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -420,7 +420,10 @@ where self.pool.get_pooled_transaction_elements(tx_hashes, limit) } - fn get_pooled_transaction_element(&self, tx_hash: TxHash) -> Option { + fn get_pooled_transaction_element( + &self, + tx_hash: TxHash, + ) -> Option<<::Transaction as PoolTransaction>::Pooled> { self.pool.get_pooled_transaction_element(tx_hash) } diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 68e0f98ef8a..79901fe18d0 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -142,7 +142,7 @@ impl TransactionPool for NoopTransactionPool { fn get_pooled_transaction_element( &self, _tx_hash: TxHash, - ) -> Option { + ) -> Option<::Pooled> { None } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 69dea90a293..4a035f8e42a 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -373,11 +373,11 @@ where pub(crate) fn get_pooled_transaction_element( &self, tx_hash: TxHash, - ) -> Option + ) -> Option<<::Transaction as PoolTransaction>::Pooled> where 
::Transaction: EthPoolTransaction, { - self.get(&tx_hash).and_then(|tx| self.to_pooled_transaction(tx).map(Into::into)) + self.get(&tx_hash).and_then(|tx| self.to_pooled_transaction(tx)) } /// Updates the entire pool after a new block was executed. diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 102e137591e..742b95cff38 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -233,15 +233,30 @@ pub trait TransactionPool: Send + Sync + Clone { limit: GetPooledTransactionLimit, ) -> Vec; - /// Returns converted [PooledTransactionsElement] for the given transaction hash. + /// Returns the pooled transaction variant for the given transaction hash. /// /// This adheres to the expected behavior of /// [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): /// /// If the transaction is a blob transaction, the sidecar will be included. /// + /// It is expected that this variant represents the valid p2p format for full transactions. + /// E.g. for EIP-4844 transactions this is the consensus transaction format with the blob + /// sidecar. + /// /// Consumer: P2P - fn get_pooled_transaction_element(&self, tx_hash: TxHash) -> Option; + fn get_pooled_transaction_element( + &self, + tx_hash: TxHash, + ) -> Option<::Pooled>; + + /// Returns the pooled transaction variant for the given transaction hash as the requested type. + fn get_pooled_transaction_as(&self, tx_hash: TxHash) -> Option + where + ::Pooled: Into, + { + self.get_pooled_transaction_element(tx_hash).map(Into::into) + } /// Returns an iterator that yields transactions that are ready for block production. 
/// From 334ccd0a53f82922404647c3d92ab0f19e52dd9f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 13:00:07 +0100 Subject: [PATCH 692/970] chore: add helper for encoded (#12868) --- crates/primitives-traits/src/encoded.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/primitives-traits/src/encoded.rs b/crates/primitives-traits/src/encoded.rs index b162fc93343..885031af1b6 100644 --- a/crates/primitives-traits/src/encoded.rs +++ b/crates/primitives-traits/src/encoded.rs @@ -1,3 +1,4 @@ +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::Bytes; /// Generic wrapper with encoded Bytes, such as transaction data. @@ -17,8 +18,8 @@ impl WithEncoded { } /// Get the encoded bytes - pub fn encoded_bytes(&self) -> Bytes { - self.0.clone() + pub const fn encoded_bytes(&self) -> &Bytes { + &self.0 } /// Get the underlying value @@ -47,6 +48,13 @@ impl WithEncoded { } } +impl WithEncoded { + /// Wraps the value with the [`Encodable2718::encoded_2718`] bytes. + pub fn from_2718_encodable(value: T) -> Self { + Self(value.encoded_2718().into(), value) + } +} + impl WithEncoded> { /// returns `None` if the inner value is `None`, otherwise returns `Some(WithEncoded)`. 
pub fn transpose(self) -> Option> { From dab63e649f12f6bcd5285d9c2e0fb83c0a1c5f23 Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Tue, 26 Nov 2024 06:00:41 -0600 Subject: [PATCH 693/970] feat: emit node event when inserted executed block (#12726) Co-authored-by: Matthias Seitz --- crates/engine/tree/src/tree/mod.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index f36ab3ea853..9b689d18cfe 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1212,8 +1212,17 @@ where match request { EngineApiRequest::InsertExecutedBlock(block) => { debug!(target: "engine::tree", block=?block.block().num_hash(), "inserting already executed block"); + let now = Instant::now(); + let sealed_block = block.block.clone(); self.state.tree_state.insert_executed(block); self.metrics.engine.inserted_already_executed_blocks.increment(1); + + self.emit_event(EngineApiEvent::BeaconConsensus( + BeaconConsensusEngineEvent::CanonicalBlockAdded( + sealed_block, + now.elapsed(), + ), + )); } EngineApiRequest::Beacon(request) => { match request { @@ -2939,7 +2948,7 @@ mod tests { EngineApiEvent::BeaconConsensus( BeaconConsensusEngineEvent::CanonicalBlockAdded(block, _), ) => { - assert!(block.hash() == expected_hash); + assert_eq!(block.hash(), expected_hash); } _ => panic!("Unexpected event: {:#?}", event), } From b34fb7883aca90e3bb23228a4ae6d0ff575feff9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 13:02:23 +0100 Subject: [PATCH 694/970] feat: make PooledTransactionsElementEcRecovered generic over transaction (#12869) --- crates/primitives/src/transaction/pooled.rs | 37 +++++++++------------ 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 6f2a7270086..145660f44c7 100644 --- 
a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -667,48 +667,43 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { /// A signed pooled transaction with recovered signer. #[derive(Debug, Clone, PartialEq, Eq, AsRef, Deref)] -pub struct PooledTransactionsElementEcRecovered { +pub struct PooledTransactionsElementEcRecovered { /// Signer of the transaction signer: Address, /// Signed transaction #[deref] #[as_ref] - transaction: PooledTransactionsElement, + transaction: T, } -// === impl PooledTransactionsElementEcRecovered === +impl PooledTransactionsElementEcRecovered { + /// Create an instance from the given transaction and the [`Address`] of the signer. + pub const fn from_signed_transaction(transaction: T, signer: Address) -> Self { + Self { transaction, signer } + } -impl PooledTransactionsElementEcRecovered { /// Signer of transaction recovered from signature pub const fn signer(&self) -> Address { self.signer } - /// Transform back to [`PooledTransactionsElement`] - pub fn into_transaction(self) -> PooledTransactionsElement { + /// Consume the type and return the transaction + pub fn into_transaction(self) -> T { self.transaction } + /// Dissolve Self to its component + pub fn into_components(self) -> (T, Address) { + (self.transaction, self.signer) + } +} +impl PooledTransactionsElementEcRecovered { /// Transform back to [`TransactionSignedEcRecovered`] pub fn into_ecrecovered_transaction(self) -> TransactionSignedEcRecovered { let (tx, signer) = self.into_components(); tx.into_ecrecovered_transaction(signer) } - /// Dissolve Self to its component - pub fn into_components(self) -> (PooledTransactionsElement, Address) { - (self.transaction, self.signer) - } - - /// Create [`TransactionSignedEcRecovered`] from [`PooledTransactionsElement`] and [`Address`] - /// of the signer. 
- pub const fn from_signed_transaction( - transaction: PooledTransactionsElement, - signer: Address, - ) -> Self { - Self { transaction, signer } - } - /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. /// @@ -739,7 +734,7 @@ impl TryFrom for PooledTransactionsElementEcRecove } } -impl Encodable2718 for PooledTransactionsElementEcRecovered { +impl Encodable2718 for PooledTransactionsElementEcRecovered { fn type_flag(&self) -> Option { self.transaction.type_flag() } From 26fc701814e4c35abe5a5c8dfd285ca66c5d9255 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 26 Nov 2024 16:06:55 +0400 Subject: [PATCH 695/970] feat: `NodePrimitivesProvider` (#12855) --- Cargo.lock | 3 +- crates/blockchain-tree/src/externals.rs | 24 +++----------- crates/blockchain-tree/src/noop.rs | 8 +++-- crates/blockchain-tree/src/shareable.rs | 12 +++++-- crates/chain-state/Cargo.toml | 1 - crates/chain-state/src/notifications.rs | 19 +++++++++--- crates/chain-state/src/test_utils.rs | 10 ++++-- crates/cli/commands/src/common.rs | 18 ++--------- crates/e2e-test-utils/src/lib.rs | 7 ++--- crates/e2e-test-utils/src/node.rs | 7 +++-- crates/ethereum/node/src/node.rs | 6 ++-- crates/node/builder/src/launch/engine.rs | 5 +-- crates/node/builder/src/launch/exex.rs | 6 ++-- crates/node/builder/src/rpc.rs | 9 ++++-- crates/optimism/node/src/node.rs | 19 +++++++++--- crates/optimism/primitives/Cargo.toml | 2 -- crates/optimism/primitives/src/lib.rs | 31 ++++++++++--------- crates/optimism/rpc/Cargo.toml | 1 + crates/optimism/rpc/src/eth/mod.rs | 7 ++++- crates/prune/prune/src/builder.rs | 4 +-- crates/rpc/rpc-builder/src/eth.rs | 3 +- crates/rpc/rpc-builder/src/lib.rs | 15 +++++---- crates/rpc/rpc-eth-types/src/builder/ctx.rs | 4 ++- crates/rpc/rpc-eth-types/src/fee_history.rs | 7 +++-- crates/rpc/rpc/src/eth/core.rs | 4 ++- crates/rpc/rpc/src/eth/pubsub.rs | 12 +++++-- .../src/providers/blockchain_provider.rs 
| 20 +++++++----- .../provider/src/providers/consistent.rs | 8 +++-- .../provider/src/providers/database/mod.rs | 8 +++-- .../src/providers/database/provider.rs | 9 ++++-- crates/storage/provider/src/providers/mod.rs | 28 +++++++++++------ .../storage/provider/src/test_utils/noop.rs | 6 ++-- crates/storage/provider/src/traits/block.rs | 29 ++++++++++++++--- crates/storage/provider/src/traits/full.rs | 4 +-- .../src/traits/static_file_provider.rs | 7 ++--- crates/storage/storage-api/src/lib.rs | 3 ++ crates/storage/storage-api/src/primitives.rs | 8 +++++ examples/custom-engine-types/src/main.rs | 6 +++- examples/custom-evm/src/main.rs | 4 +-- examples/custom-node-components/src/main.rs | 3 +- 40 files changed, 239 insertions(+), 148 deletions(-) create mode 100644 crates/storage/storage-api/src/primitives.rs diff --git a/Cargo.lock b/Cargo.lock index c942516948d..004373c70f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6603,7 +6603,6 @@ dependencies = [ "alloy-primitives", "alloy-signer", "alloy-signer-local", - "auto_impl", "derive_more 1.0.0", "metrics", "parking_lot", @@ -8409,7 +8408,6 @@ dependencies = [ "derive_more 1.0.0", "op-alloy-consensus", "reth-codecs", - "reth-node-types", "reth-primitives", "reth-primitives-traits", "rstest", @@ -8444,6 +8442,7 @@ dependencies = [ "reth-optimism-evm", "reth-optimism-forks", "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-primitives", "reth-provider", "reth-rpc", diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index f61de4c4336..d6dc84eee48 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -4,8 +4,8 @@ use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::Consensus; use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; -use reth_node_types::{FullNodePrimitives, NodeTypesWithDB}; -use reth_primitives::StaticFileSegment; +use 
reth_node_types::NodeTypesWithDB; +use reth_primitives::{EthPrimitives, StaticFileSegment}; use reth_provider::{ providers::{NodeTypesForProvider, ProviderNodeTypes}, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, @@ -15,25 +15,9 @@ use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; /// A helper trait with requirements for [`ProviderNodeTypes`] to be used within [`TreeExternals`]. -pub trait NodeTypesForTree: - NodeTypesForProvider< - Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - >, -> -{ -} +pub trait NodeTypesForTree: NodeTypesForProvider {} -impl NodeTypesForTree for T where - T: NodeTypesForProvider< - Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - >, - > -{ -} +impl NodeTypesForTree for T where T: NodeTypesForProvider {} /// A helper trait with requirements for [`ProviderNodeTypes`] to be used within [`TreeExternals`]. 
pub trait TreeNodeTypes: ProviderNodeTypes + NodeTypesForTree {} diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 862b02e7607..f5d2ad8c6f7 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -6,10 +6,10 @@ use reth_blockchain_tree_api::{ BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{EthPrimitives, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, - CanonStateSubscriptions, FullExecutionDataProvider, + CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, }; use reth_storage_errors::provider::ProviderResult; use std::collections::BTreeMap; @@ -126,6 +126,10 @@ impl BlockchainTreePendingStateProvider for NoopBlockchainTree { } } +impl NodePrimitivesProvider for NoopBlockchainTree { + type Primitives = EthPrimitives; +} + impl CanonStateSubscriptions for NoopBlockchainTree { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canon_state_notification_sender diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index f997e0a062d..484b4b51869 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -16,7 +16,7 @@ use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications, - CanonStateSubscriptions, FullExecutionDataProvider, ProviderError, + CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, ProviderError, }; use reth_storage_errors::provider::ProviderResult; use 
std::{collections::BTreeMap, sync::Arc}; @@ -185,10 +185,18 @@ where } } -impl CanonStateSubscriptions for ShareableBlockchainTree +impl NodePrimitivesProvider for ShareableBlockchainTree where N: ProviderNodeTypes, E: Send + Sync, +{ + type Primitives = N::Primitives; +} + +impl CanonStateSubscriptions for ShareableBlockchainTree +where + N: TreeNodeTypes, + E: Send + Sync, { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 54f7ac43de1..d2ef5870947 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -35,7 +35,6 @@ tokio-stream = { workspace = true, features = ["sync"] } tracing.workspace = true # misc -auto_impl.workspace = true derive_more.workspace = true metrics.workspace = true parking_lot.workspace = true diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 03d740d3d13..a87d972907e 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -1,9 +1,9 @@ //! Canonical chain state notification trait and types. -use auto_impl::auto_impl; use derive_more::{Deref, DerefMut}; use reth_execution_types::{BlockReceipts, Chain}; use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_storage_api::NodePrimitivesProvider; use std::{ pin::Pin, sync::Arc, @@ -25,21 +25,30 @@ pub type CanonStateNotificationSender = broadcast::Sender>; /// A type that allows to register chain related event subscriptions. -#[auto_impl(&, Arc)] -pub trait CanonStateSubscriptions: Send + Sync { +pub trait CanonStateSubscriptions: NodePrimitivesProvider + Send + Sync { /// Get notified when a new canonical chain was imported. /// /// A canonical chain be one or more blocks, a reorg or a revert. 
- fn subscribe_to_canonical_state(&self) -> CanonStateNotifications; + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications; /// Convenience method to get a stream of [`CanonStateNotification`]. - fn canonical_state_stream(&self) -> CanonStateNotificationStream { + fn canonical_state_stream(&self) -> CanonStateNotificationStream { CanonStateNotificationStream { st: BroadcastStream::new(self.subscribe_to_canonical_state()), } } } +impl CanonStateSubscriptions for &T { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + (*self).subscribe_to_canonical_state() + } + + fn canonical_state_stream(&self) -> CanonStateNotificationStream { + (*self).canonical_state_stream() + } +} + /// A Stream of [`CanonStateNotification`]. #[derive(Debug)] #[pin_project::pin_project] diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 866a6d74a0b..f6b0a4f1772 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -14,9 +14,11 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, NodePrimitives, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, - SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, + BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, SealedBlock, + SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, + TransactionSignedEcRecovered, }; +use reth_storage_api::NodePrimitivesProvider; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; use std::{ @@ -314,6 +316,10 @@ impl TestCanonStateSubscriptions { } } +impl NodePrimitivesProvider for TestCanonStateSubscriptions { + type Primitives = EthPrimitives; +} + impl 
CanonStateSubscriptions for TestCanonStateSubscriptions { /// Sets up a broadcast channel with a buffer size of 100. fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 4a42d9f29f7..b2ad1452aa4 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -10,12 +10,12 @@ use reth_db::{init_db, open_db_read_only, DatabaseEnv}; use reth_db_common::init::init_genesis; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; -use reth_node_api::FullNodePrimitives; use reth_node_builder::{NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_core::{ args::{DatabaseArgs, DatadirArgs}, dirs::{ChainPath, DataDirPath}, }; +use reth_primitives::EthPrimitives; use reth_provider::{ providers::{NodeTypesForProvider, StaticFileProvider}, ProviderFactory, StaticFileProviderFactory, @@ -196,22 +196,10 @@ impl AccessRights { /// Helper trait with a common set of requirements for the /// [`NodeTypes`](reth_node_builder::NodeTypes) in CLI. 
pub trait CliNodeTypes: - NodeTypesWithEngine - + NodeTypesForProvider< - Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - >, - > + NodeTypesWithEngine + NodeTypesForProvider { } impl CliNodeTypes for N where - N: NodeTypesWithEngine - + NodeTypesForProvider< - Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - >, - > + N: NodeTypesWithEngine + NodeTypesForProvider { } diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index b9279b16a7f..51951bd4f52 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -6,8 +6,9 @@ use node::NodeTestContext; use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, blockchain_tree::externals::NodeTypesForTree, - builder::{FullNodePrimitives, NodeBuilder, NodeConfig, NodeHandle}, + builder::{NodeBuilder, NodeConfig, NodeHandle}, network::PeersHandleProvider, + primitives::EthPrimitives, rpc::server_types::RpcModuleSelection, tasks::TaskManager, }; @@ -121,7 +122,7 @@ pub async fn setup_engine( where N: Default + Node>>> - + NodeTypesWithEngine + + NodeTypesWithEngine + NodeTypesForProvider, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, @@ -134,8 +135,6 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, - N::Primitives: - FullNodePrimitives, { let tasks = TaskManager::current(); let exec = tasks.executor(); diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index cbe69558e65..ea344c83823 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,6 +1,6 @@ use std::{marker::PhantomData, pin::Pin}; -use alloy_consensus::{BlockHeader, Sealable}; +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use alloy_rpc_types_eth::BlockNumberOrTag; use eyre::Ok; @@ -16,8 
+16,9 @@ use reth::{ }, }; use reth_chainspec::EthereumHardforks; -use reth_node_api::{Block, FullBlock, NodePrimitives}; +use reth_node_api::Block; use reth_node_builder::{rpc::RethRpcAddOns, NodeTypes, NodeTypesWithEngine}; +use reth_primitives::EthPrimitives; use reth_stages_types::StageId; use tokio_stream::StreamExt; use url::Url; @@ -56,7 +57,7 @@ where Node::Types: NodeTypesWithEngine< ChainSpec: EthereumHardforks, Engine = Engine, - Primitives: NodePrimitives, + Primitives = EthPrimitives, >, Node::Network: PeersHandleProvider, AddOns: RethRpcAddOns, diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index c4e1de2760f..a536b9dff90 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -164,7 +164,7 @@ pub struct EthereumPoolBuilder { impl PoolBuilder for EthereumPoolBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, { type Pool = EthTransactionPool; @@ -240,7 +240,7 @@ impl EthereumPayloadBuilder { pool: Pool, ) -> eyre::Result> where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, Evm: ConfigureEvm
, Pool: TransactionPool + Unpin + 'static, @@ -278,7 +278,7 @@ impl EthereumPayloadBuilder { impl PayloadServiceBuilder for EthereumPayloadBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, Types::Engine: PayloadTypes< diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 6a87ff3ce65..842eae43581 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -28,7 +28,7 @@ use reth_node_core::{ primitives::Head, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_primitives::EthereumHardforks; +use reth_primitives::{EthPrimitives, EthereumHardforks}; use reth_provider::providers::{BlockchainProvider2, ProviderNodeTypes}; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; @@ -70,7 +70,8 @@ impl EngineNodeLauncher { impl LaunchNode> for EngineNodeLauncher where - Types: ProviderNodeTypes + NodeTypesWithEngine + PersistenceNodeTypes, + Types: + ProviderNodeTypes + NodeTypesWithEngine + PersistenceNodeTypes, T: FullNodeTypes>, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, diff --git a/crates/node/builder/src/launch/exex.rs b/crates/node/builder/src/launch/exex.rs index a3640690c1d..0eef0d00576 100644 --- a/crates/node/builder/src/launch/exex.rs +++ b/crates/node/builder/src/launch/exex.rs @@ -10,7 +10,7 @@ use reth_exex::{ DEFAULT_EXEX_MANAGER_CAPACITY, }; use reth_node_api::{FullNodeComponents, NodeTypes}; -use reth_primitives::Head; +use reth_primitives::{EthPrimitives, Head}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use tracing::Instrument; @@ -25,7 +25,9 @@ pub struct ExExLauncher { config_container: WithConfigs<::ChainSpec>, } -impl ExExLauncher { +impl> + Clone> + ExExLauncher +{ /// Create a new `ExExLauncher` with the given extensions. 
pub const fn new( head: Head, diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index fda8b66f8d7..6e0be36c20e 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -18,6 +18,7 @@ use reth_node_core::{ version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::PayloadStore; +use reth_primitives::EthPrimitives; use reth_provider::providers::ProviderNodeTypes; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, @@ -402,7 +403,7 @@ where impl RpcAddOns where N: FullNodeComponents< - Types: ProviderNodeTypes, + Types: ProviderNodeTypes, PayloadBuilder: PayloadBuilder::Engine>, >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, @@ -524,7 +525,7 @@ where impl NodeAddOns for RpcAddOns where N: FullNodeComponents< - Types: ProviderNodeTypes, + Types: ProviderNodeTypes, PayloadBuilder: PayloadBuilder::Engine>, >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, @@ -566,7 +567,9 @@ pub trait EthApiBuilder: 'static { fn build(ctx: &EthApiBuilderCtx) -> Self; } -impl EthApiBuilder for EthApi { +impl>> EthApiBuilder + for EthApi +{ fn build(ctx: &EthApiBuilderCtx) -> Self { Self::with_spawner(ctx) } diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 2150b6bafd4..b3036722d79 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -204,13 +204,13 @@ impl NodeTypesWithEngine for OpNode { #[derive(Debug)] pub struct OpAddOns(pub RpcAddOns, OpEngineValidatorBuilder>); -impl Default for OpAddOns { +impl>> Default for OpAddOns { fn default() -> Self { Self::new(None) } } -impl OpAddOns { +impl>> OpAddOns { /// Create a new instance with the given `sequencer_http` URL. 
pub fn new(sequencer_http: Option) -> Self { Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http), Default::default())) @@ -418,7 +418,11 @@ where ) -> eyre::Result> where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, >, Pool: TransactionPool + Unpin + 'static, Evm: ConfigureEvm
, @@ -453,8 +457,13 @@ where impl PayloadServiceBuilder for OpPayloadBuilder where - Node: - FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, + >, Pool: TransactionPool + Unpin + 'static, Txs: OpPayloadTransactions, { diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index e7200c40ed8..abd27300fa5 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-node-types.workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-codecs = { workspace = true, optional = true, features = ["optimism"] } @@ -47,7 +46,6 @@ default = ["std", "reth-codec"] std = [ "reth-primitives-traits/std", "reth-primitives/std", - "reth-node-types/std", "reth-codecs/std", "alloy-consensus/std", "alloy-eips/std", diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 0f4608a8ebe..796f5cb0613 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -6,26 +6,29 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(not(feature = "std"), no_std)] pub mod bedrock; pub mod transaction; +use reth_primitives::EthPrimitives; pub use transaction::{tx_type::OpTxType, OpTransaction}; -use alloy_consensus::Header; -use reth_node_types::NodePrimitives; -use reth_primitives::{Block, BlockBody, Receipt, TransactionSigned}; - /// Optimism primitive types. 
-#[derive(Debug, Default, Clone, PartialEq, Eq)] -pub struct OpPrimitives; +pub type OpPrimitives = EthPrimitives; -impl NodePrimitives for OpPrimitives { - type Block = Block; - type BlockHeader = Header; - type BlockBody = BlockBody; - type SignedTx = TransactionSigned; - type TxType = OpTxType; - type Receipt = Receipt; -} +// TODO: once we are ready for separating primitive types, introduce a separate `NodePrimitives` +// implementation used exclusively by legacy engine. +// +// #[derive(Debug, Default, Clone, PartialEq, Eq)] +// pub struct OpPrimitives; +// +// impl NodePrimitives for OpPrimitives { +// type Block = Block; +// type BlockHeader = Header; +// type BlockBody = BlockBody; +// type SignedTx = TransactionSigned; +// type TxType = OpTxType; +// type Receipt = Receipt; +// } diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 17fafef7096..50194f39aa3 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -33,6 +33,7 @@ reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-evm.workspace = true reth-optimism-payload-builder.workspace = true +reth-optimism-primitives.workspace = true reth-optimism-forks.workspace = true # ethereum diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 8690d1a262a..6b909f012c5 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -8,6 +8,7 @@ mod call; mod pending_block; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; +use reth_optimism_primitives::OpPrimitives; use std::{fmt, sync::Arc}; @@ -71,7 +72,11 @@ pub struct OpEthApi { impl OpEthApi where N: RpcNodeCore< - Provider: BlockReaderIdExt + ChainSpecProvider + CanonStateSubscriptions + Clone + 'static, + Provider: BlockReaderIdExt + + ChainSpecProvider + + CanonStateSubscriptions + + Clone + + 'static, >, { /// Creates a new instance for given context. 
diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index d987ed1edb6..8088bd7e12b 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -6,7 +6,7 @@ use reth_db::transaction::DbTxMut; use reth_exex_types::FinishedExExHeight; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory, - PruneCheckpointWriter, StaticFileProviderFactory, + NodePrimitivesProvider, PruneCheckpointWriter, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use std::time::Duration; @@ -82,7 +82,7 @@ impl PrunerBuilder { + BlockReader + StaticFileProviderFactory, > + StaticFileProviderFactory< - Primitives = ::Primitives, + Primitives = ::Primitives, >, { let segments = diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 5326a10e463..be25236ff81 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,5 +1,6 @@ use alloy_consensus::Header; use reth_evm::ConfigureEvm; +use reth_primitives::EthPrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; use reth_rpc::{EthFilter, EthPubSub}; use reth_rpc_eth_api::EthApiTypes; @@ -35,7 +36,7 @@ where + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EthApi: EthApiTypes + 'static, { /// Returns a new instance with handlers for `eth` namespace. diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 0f850f0457a..ddfe173ee1c 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -42,7 +42,8 @@ //! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, -//! Events: CanonStateSubscriptions + Clone + 'static, +//! Events: +//! 
CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::Consensus + Clone + 'static, @@ -118,7 +119,8 @@ //! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, -//! Events: CanonStateSubscriptions + Clone + 'static, +//! Events: +//! CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, //! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm
, @@ -190,6 +192,7 @@ use reth_consensus::Consensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; +use reth_primitives::EthPrimitives; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullRpcProvider, StateProviderFactory, @@ -264,7 +267,7 @@ where Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, @@ -617,7 +620,7 @@ where Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::Consensus + Clone + 'static, @@ -920,7 +923,7 @@ where + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, EthApi: EthApiTypes + 'static, BlockExecutor: BlockExecutorProvider, @@ -1282,7 +1285,7 @@ where Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::Consensus + Clone + 'static, diff --git a/crates/rpc/rpc-eth-types/src/builder/ctx.rs b/crates/rpc/rpc-eth-types/src/builder/ctx.rs index 2132dd0e22c..66f85f87bf6 100644 --- a/crates/rpc/rpc-eth-types/src/builder/ctx.rs +++ b/crates/rpc/rpc-eth-types/src/builder/ctx.rs @@ -2,6 +2,7 @@ use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; +use reth_primitives::NodePrimitives; use reth_storage_api::BlockReaderIdExt; use reth_tasks::TaskSpawner; @@ -41,7 +42,8 @@ where where Provider: ChainSpecProvider + 'static, Tasks: TaskSpawner, - Events: CanonStateSubscriptions, + Events: + CanonStateSubscriptions>, { let fee_history_cache = FeeHistoryCache::new(self.cache.clone(), self.config.fee_history_cache); diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 6c8b66246f3..e01578661f3 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -16,7 +16,7 @@ use futures::{ use metrics::atomics::AtomicU64; use reth_chain_state::CanonStateNotification; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_primitives::{Receipt, SealedBlock, TransactionSigned}; +use 
reth_primitives::{NodePrimitives, Receipt, SealedBlock, TransactionSigned}; use reth_storage_api::BlockReaderIdExt; use revm_primitives::{calc_blob_gasprice, calc_excess_blob_gas}; use serde::{Deserialize, Serialize}; @@ -205,13 +205,14 @@ struct FeeHistoryCacheInner { /// Awaits for new chain events and directly inserts them into the cache so they're available /// immediately before they need to be fetched from disk. -pub async fn fee_history_cache_new_blocks_task( +pub async fn fee_history_cache_new_blocks_task( fee_history_cache: FeeHistoryCache, mut events: St, provider: Provider, ) where - St: Stream + Unpin + 'static, + St: Stream> + Unpin + 'static, Provider: BlockReaderIdExt + ChainSpecProvider + 'static, + N: NodePrimitives, { // We're listening for new blocks emitted when the node is in live sync. // If the node transitions to stage sync, we need to fetch the missing blocks diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 3087af52d69..e4efd64f2dd 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -7,6 +7,7 @@ use alloy_eips::BlockNumberOrTag; use alloy_network::Ethereum; use alloy_primitives::U256; use derive_more::Deref; +use reth_primitives::NodePrimitives; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, SpawnBlocking}, @@ -102,7 +103,8 @@ where ) -> Self where Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions, + Events: + CanonStateSubscriptions>, { let blocking_task_pool = BlockingTaskPool::build().expect("failed to build blocking task pool"); diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 8ea6d1f87c8..2ff627f737e 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -15,6 +15,7 @@ use jsonrpsee::{ server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink, }; use 
reth_network_api::NetworkInfo; +use reth_primitives::NodePrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, TransactionCompat}; use reth_rpc_eth_types::logs_utils; @@ -84,7 +85,9 @@ impl EthPubSubApiServer where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions> + + Clone + + 'static, Network: NetworkInfo + Clone + 'static, Eth: TransactionCompat + 'static, { @@ -117,7 +120,9 @@ async fn handle_accepted( where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions> + + Clone + + 'static, Network: NetworkInfo + Clone + 'static, Eth: TransactionCompat, { @@ -333,7 +338,8 @@ where impl EthPubSubInner where Provider: BlockReader + EvmEnvProvider + 'static, - Events: CanonStateSubscriptions + 'static, + Events: CanonStateSubscriptions> + + 'static, Network: NetworkInfo + 'static, Pool: 'static, { diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 385ae67d68f..73554062b25 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -27,14 +27,14 @@ use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; use reth_node_types::{BlockTy, NodeTypesWithDB, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, NodePrimitives, Receipt, SealedBlock, SealedBlockFor, - SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, + Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, + SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, 
TransactionMeta, + TransactionSigned, TransactionSignedNoHash, }; use reth_primitives_traits::BlockBody as _; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{DBProvider, StorageChangeSetReader}; +use reth_storage_api::{DBProvider, NodePrimitivesProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -150,6 +150,10 @@ impl BlockchainProvider2 { } } +impl NodePrimitivesProvider for BlockchainProvider2 { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for BlockchainProvider2 { type DB = N::DB; type Provider = as DatabaseProviderFactory>::Provider; @@ -165,8 +169,6 @@ impl DatabaseProviderFactory for BlockchainProvider2 { } impl StaticFileProviderFactory for BlockchainProvider2 { - type Primitives = N::Primitives; - fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } @@ -711,8 +713,10 @@ where } } -impl CanonStateSubscriptions for BlockchainProvider2 { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { +impl> CanonStateSubscriptions + for BlockchainProvider2 +{ + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canonical_in_memory_state.subscribe_canon_state() } } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 0abd23749c2..b788a954134 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -26,7 +26,9 @@ use reth_primitives::{ use reth_primitives_traits::{Block, BlockBody}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{DatabaseProviderFactory, StateProvider, StorageChangeSetReader}; +use reth_storage_api::{ + DatabaseProviderFactory, NodePrimitivesProvider, 
StateProvider, StorageChangeSetReader, +}; use reth_storage_errors::provider::ProviderResult; use revm::{ db::states::PlainStorageRevert, @@ -613,9 +615,11 @@ impl ConsistentProvider { } } -impl StaticFileProviderFactory for ConsistentProvider { +impl NodePrimitivesProvider for ConsistentProvider { type Primitives = N::Primitives; +} +impl StaticFileProviderFactory for ConsistentProvider { fn static_file_provider(&self) -> StaticFileProvider { self.storage_provider.static_file_provider() } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index b2ea8519163..90645de9b5a 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -26,7 +26,7 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::TryIntoHistoricalStateProvider; +use reth_storage_api::{NodePrimitivesProvider, TryIntoHistoricalStateProvider}; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -202,6 +202,10 @@ impl ProviderFactory { } } +impl NodePrimitivesProvider for ProviderFactory { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for ProviderFactory { type DB = N::DB; type Provider = DatabaseProvider<::TX, N>; @@ -217,8 +221,6 @@ impl DatabaseProviderFactory for ProviderFactory { } impl StaticFileProviderFactory for ProviderFactory { - type Primitives = N::Primitives; - /// Returns static file provider fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 5110b067f4a..f04cb96691c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ 
b/crates/storage/provider/src/providers/database/provider.rs @@ -56,7 +56,8 @@ use reth_primitives_traits::{Block as _, BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ - BlockBodyReader, StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider, + BlockBodyReader, NodePrimitivesProvider, StateProvider, StorageChangeSetReader, + TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ @@ -207,9 +208,11 @@ impl DatabaseProvider { } } -impl StaticFileProviderFactory for DatabaseProvider { +impl NodePrimitivesProvider for DatabaseProvider { type Primitives = N::Primitives; +} +impl StaticFileProviderFactory for DatabaseProvider { /// Returns a static file provider fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() @@ -2678,7 +2681,7 @@ impl BlockExecu &self, block: BlockNumber, remove_transactions_from: StorageLocation, - ) -> ProviderResult { + ) -> ProviderResult> { let range = block + 1..=self.last_block_number()?; self.unwind_trie_state_range(range.clone())?; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index abd92312715..2b8a9d4ec4d 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -3,9 +3,10 @@ use crate::{ BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, - ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, 
+ NodePrimitivesProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, + ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, + WithdrawalsProvider, }; use alloy_consensus::Header; use alloy_eips::{ @@ -80,6 +81,7 @@ where BlockHeader = alloy_consensus::Header, BlockBody = reth_primitives::BlockBody, Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, >, >, { @@ -94,6 +96,7 @@ impl NodeTypesForProvider for T where BlockHeader = alloy_consensus::Header, BlockBody = reth_primitives::BlockBody, Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, >, > { @@ -118,7 +121,7 @@ pub struct BlockchainProvider { /// Provider type used to access the database. database: ProviderFactory, /// The blockchain tree instance. - tree: Arc, + tree: Arc>, /// Tracks the chain info wrt forkchoice updates chain_info: ChainInfoTracker, } @@ -136,7 +139,7 @@ impl Clone for BlockchainProvider { impl BlockchainProvider { /// Sets the treeviewer for the provider. #[doc(hidden)] - pub fn with_tree(mut self, tree: Arc) -> Self { + pub fn with_tree(mut self, tree: Arc>) -> Self { self.tree = tree; self } @@ -148,7 +151,7 @@ impl BlockchainProvider { /// if it exists. pub fn with_blocks( database: ProviderFactory, - tree: Arc, + tree: Arc>, latest: SealedHeader, finalized: Option, safe: Option, @@ -158,7 +161,10 @@ impl BlockchainProvider { /// Create a new provider using only the database and the tree, fetching the latest header from /// the database to initialize the provider. 
- pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { + pub fn new( + database: ProviderFactory, + tree: Arc>, + ) -> ProviderResult { let provider = database.provider()?; let best = provider.chain_info()?; let latest_header = provider @@ -225,6 +231,10 @@ where } } +impl NodePrimitivesProvider for BlockchainProvider { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for BlockchainProvider { type DB = N::DB; type Provider = as DatabaseProviderFactory>::Provider; @@ -240,8 +250,6 @@ impl DatabaseProviderFactory for BlockchainProvider { } impl StaticFileProviderFactory for BlockchainProvider { - type Primitives = N::Primitives; - fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } @@ -944,7 +952,7 @@ impl BlockchainTreePendingStateProvider for BlockchainProv } impl CanonStateSubscriptions for BlockchainProvider { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.tree.subscribe_to_canonical_state() } } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 892965fbff2..b1ec85dd689 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -27,7 +27,7 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{NodePrimitivesProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, @@ -559,9 +559,11 @@ impl PruneCheckpointReader for NoopProvider { } } -impl StaticFileProviderFactory for NoopProvider { +impl NodePrimitivesProvider for NoopProvider { type Primitives = 
EthPrimitives; +} +impl StaticFileProviderFactory for NoopProvider { fn static_file_provider(&self) -> StaticFileProvider { StaticFileProvider::read_only(PathBuf::default(), false).unwrap() } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 59a5f9b3f61..be4042fe28f 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,7 +1,9 @@ use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_node_types::NodePrimitives; use reth_primitives::SealedBlockWithSenders; +use reth_storage_api::NodePrimitivesProvider; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; @@ -28,9 +30,10 @@ impl StorageLocation { } } -/// BlockExecution Writer -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait BlockExecutionWriter: BlockWriter + Send + Sync { +/// `BlockExecution` Writer +pub trait BlockExecutionWriter: + NodePrimitivesProvider> + BlockWriter + Send + Sync +{ /// Take all of the blocks above the provided number and their execution result /// /// The passed block number will stay in the database. 
@@ -38,7 +41,7 @@ pub trait BlockExecutionWriter: BlockWriter + Send + Sync { &self, block: BlockNumber, remove_transactions_from: StorageLocation, - ) -> ProviderResult; + ) -> ProviderResult>; /// Remove all of the blocks above the provided number and their execution result /// @@ -50,6 +53,24 @@ pub trait BlockExecutionWriter: BlockWriter + Send + Sync { ) -> ProviderResult<()>; } +impl BlockExecutionWriter for &T { + fn take_block_and_execution_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult> { + (*self).take_block_and_execution_above(block, remove_transactions_from) + } + + fn remove_block_and_execution_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()> { + (*self).remove_block_and_execution_above(block, remove_transactions_from) + } +} + /// This just receives state, or [`ExecutionOutcome`], from the provider #[auto_impl::auto_impl(&, Arc, Box)] pub trait StateReader: Send + Sync { diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 2735859e3a8..cb2cbe3438f 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -19,7 +19,7 @@ pub trait FullProvider: + EvmEnvProvider + ChainSpecProvider + ChangeSetReader - + CanonStateSubscriptions + + CanonStateSubscriptions + ForkChoiceSubscriptions + StageCheckpointReader + Clone @@ -37,7 +37,7 @@ impl FullProvider for T where + EvmEnvProvider + ChainSpecProvider + ChangeSetReader - + CanonStateSubscriptions + + CanonStateSubscriptions + ForkChoiceSubscriptions + StageCheckpointReader + Clone diff --git a/crates/storage/provider/src/traits/static_file_provider.rs b/crates/storage/provider/src/traits/static_file_provider.rs index d465121fb46..9daab7e5a8f 100644 --- a/crates/storage/provider/src/traits/static_file_provider.rs +++ b/crates/storage/provider/src/traits/static_file_provider.rs @@ -1,12 +1,9 @@ 
-use reth_node_types::NodePrimitives; +use reth_storage_api::NodePrimitivesProvider; use crate::providers::StaticFileProvider; /// Static file provider factory. -pub trait StaticFileProviderFactory { - /// The network primitives type [`StaticFileProvider`] is using. - type Primitives: NodePrimitives; - +pub trait StaticFileProviderFactory: NodePrimitivesProvider { /// Create new instance of static file provider. fn static_file_provider(&self) -> StaticFileProvider; } diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index de09e66f128..4c5d2ab02e7 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -70,3 +70,6 @@ pub use stats::*; mod legacy; pub use legacy::*; + +mod primitives; +pub use primitives::*; diff --git a/crates/storage/storage-api/src/primitives.rs b/crates/storage/storage-api/src/primitives.rs new file mode 100644 index 00000000000..ae2a72e6e53 --- /dev/null +++ b/crates/storage/storage-api/src/primitives.rs @@ -0,0 +1,8 @@ +use reth_primitives::NodePrimitives; + +/// Provider implementation that knows configured [`NodePrimitives`]. +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait NodePrimitivesProvider { + /// The node primitive types. 
+ type Primitives: NodePrimitives; +} diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index c21e893e05a..cde68ca6d8e 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -300,7 +300,11 @@ pub struct CustomPayloadServiceBuilder; impl PayloadServiceBuilder for CustomPayloadServiceBuilder where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, >, Pool: TransactionPool + Unpin + 'static, { diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 7a5278061f2..b9a4fc26a95 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -33,7 +33,7 @@ use reth_node_ethereum::{ node::{EthereumAddOns, EthereumPayloadBuilder}, BasicBlockExecutorProvider, EthExecutionStrategyFactory, EthereumNode, }; -use reth_primitives::TransactionSigned; +use reth_primitives::{EthPrimitives, TransactionSigned}; use reth_tracing::{RethTracer, Tracer}; use std::{convert::Infallible, sync::Arc}; @@ -181,7 +181,7 @@ pub struct MyPayloadBuilder { impl PayloadServiceBuilder for MyPayloadBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, Types::Engine: PayloadTypes< diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index d00b8a70224..7924aabd869 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -7,6 +7,7 @@ use reth::{ builder::{components::PoolBuilder, BuilderContext, FullNodeTypes}, chainspec::ChainSpec, cli::Cli, + primitives::EthPrimitives, providers::CanonStateSubscriptions, transaction_pool::{ blobstore::InMemoryBlobStore, EthTransactionPool, TransactionValidationTaskExecutor, @@ -47,7 +48,7 @@ pub struct CustomPoolBuilder { /// 
This will be used to build the transaction pool and its maintenance tasks during launch. impl PoolBuilder for CustomPoolBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type Pool = EthTransactionPool; From 2dc92880ba220b0e8b28e88edb7c6aa3429f9f6b Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 26 Nov 2024 13:09:12 +0100 Subject: [PATCH 696/970] chore(trie): move trie updates to `reth-trie-common` (#12863) --- Cargo.lock | 6 +- crates/engine/invalid-block-hooks/Cargo.toml | 2 +- crates/evm/execution-types/Cargo.toml | 10 ++- crates/evm/execution-types/src/chain.rs | 10 +-- crates/primitives/Cargo.toml | 5 +- crates/revm/Cargo.toml | 2 +- crates/storage/provider/Cargo.toml | 14 ++-- crates/trie/common/Cargo.toml | 9 +++ crates/trie/common/src/lib.rs | 14 ++++ crates/trie/{trie => common}/src/updates.rs | 78 ++++++++++---------- crates/trie/db/Cargo.toml | 12 +-- crates/trie/parallel/src/root.rs | 7 +- crates/trie/sparse/benches/root.rs | 2 +- crates/trie/trie/Cargo.toml | 21 +----- crates/trie/trie/src/lib.rs | 14 ---- crates/trie/trie/src/trie.rs | 10 +-- crates/trie/trie/src/walker.rs | 9 ++- 17 files changed, 112 insertions(+), 113 deletions(-) rename crates/trie/{trie => common}/src/updates.rs (93%) diff --git a/Cargo.lock b/Cargo.lock index 004373c70f8..cb675b25f87 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7573,6 +7573,7 @@ dependencies = [ "reth-primitives", "reth-primitives-traits", "reth-trie", + "reth-trie-common", "revm", "serde", "serde_with", @@ -9349,7 +9350,6 @@ dependencies = [ "alloy-rlp", "alloy-trie", "auto_impl", - "bincode", "criterion", "itertools 0.13.0", "metrics", @@ -9363,9 +9363,7 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm", - "serde", "serde_json", - "serde_with", "tracing", "triehash", ] @@ -9380,6 +9378,7 @@ dependencies = [ "alloy-rlp", "alloy-trie", "arbitrary", + "bincode", "bytes", "criterion", "derive_more 1.0.0", @@ -9394,6 +9393,7 @@ dependencies = [ "revm-primitives", "serde", 
"serde_json", + "serde_with", ] [[package]] diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml index 462f0762a9e..e5eb998dc1f 100644 --- a/crates/engine/invalid-block-hooks/Cargo.toml +++ b/crates/engine/invalid-block-hooks/Cargo.toml @@ -20,7 +20,7 @@ reth-provider.workspace = true reth-revm = { workspace = true, features = ["serde"] } reth-rpc-api = { workspace = true, features = ["client"] } reth-tracing.workspace = true -reth-trie = { workspace = true, features = ["serde"] } +reth-trie.workspace = true # alloy alloy-primitives.workspace = true diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 4d2d8214ff9..5eb3cc38437 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -12,9 +12,10 @@ workspace = true [dependencies] reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-execution-errors.workspace = true +reth-trie-common.workspace = true reth-trie.workspace = true -reth-primitives-traits.workspace = true revm.workspace = true @@ -36,17 +37,18 @@ default = ["std"] optimism = ["reth-primitives/optimism", "revm/optimism"] serde = [ "dep:serde", - "reth-trie/serde", + "rand/serde", "revm/serde", "alloy-eips/serde", "alloy-primitives/serde", - "rand/serde", "reth-primitives-traits/serde", + "reth-trie-common/serde", + "reth-trie/serde", ] serde-bincode-compat = [ "reth-primitives/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", - "reth-trie/serde-bincode-compat", + "reth-trie-common/serde-bincode-compat", "serde_with", "alloy-eips/serde-bincode-compat", ] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 6aed8422bc3..e09b87ed680 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -11,7 +11,7 @@ use reth_primitives::{ TransactionSignedEcRecovered, }; use 
reth_primitives_traits::NodePrimitives; -use reth_trie::updates::TrieUpdates; +use reth_trie_common::updates::TrieUpdates; use revm::db::BundleState; /// A chain of blocks and their final state. @@ -513,16 +513,14 @@ pub enum ChainSplit { /// Bincode-compatible [`Chain`] serde implementation. #[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] pub(super) mod serde_bincode_compat { - use std::collections::BTreeMap; - + use crate::ExecutionOutcome; use alloc::borrow::Cow; use alloy_primitives::BlockNumber; use reth_primitives::serde_bincode_compat::SealedBlockWithSenders; - use reth_trie::serde_bincode_compat::updates::TrieUpdates; + use reth_trie_common::serde_bincode_compat::updates::TrieUpdates; use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; - - use crate::ExecutionOutcome; + use std::collections::BTreeMap; /// Bincode-compatible [`super::Chain`] serde implementation. /// diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index ebfa26aef0a..9787c9f3a6a 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -158,11 +158,12 @@ test-utils = [ "arbitrary", ] serde-bincode-compat = [ + "serde_with", + "alloy-eips/serde-bincode-compat", "alloy-consensus/serde-bincode-compat", "op-alloy-consensus?/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", - "serde_with", - "alloy-eips/serde-bincode-compat", + "reth-trie-common/serde-bincode-compat", ] [[bench]] diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 4bc78b7b056..95def23a443 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -55,9 +55,9 @@ test-utils = [ ] serde = [ "revm/serde", - "reth-trie?/serde", "alloy-eips/serde", "alloy-primitives/serde", "alloy-consensus/serde", "reth-primitives-traits/serde", + "reth-trie?/serde", ] diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 
86f2f3a51b9..2875b91149c 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -98,21 +98,21 @@ optimism = [ "revm/optimism", ] serde = [ - "reth-execution-types/serde", - "reth-trie-db/serde", - "reth-trie/serde", - "alloy-consensus/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "alloy-rpc-types-engine/serde", "dashmap/serde", "notify/serde", "parking_lot/serde", "rand/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-rpc-types-engine/serde", "revm/serde", "reth-codecs/serde", "reth-optimism-primitives?/serde", "reth-primitives-traits/serde", + "reth-execution-types/serde", + "reth-trie-db/serde", + "reth-trie/serde", ] test-utils = [ "reth-db/test-utils", diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 47c09d2a7c0..29993ab13dd 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -31,6 +31,9 @@ nybbles = { workspace = true, features = ["rlp"] } # `serde` feature serde = { workspace = true, optional = true } +# `serde-bincode-compat` feature +serde_with = { workspace = true, optional = true } + # `test-utils` feature hash-db = { version = "=0.15.2", optional = true } plain_hasher = { version = "0.2", optional = true } @@ -46,6 +49,7 @@ arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true criterion.workspace = true +bincode.workspace = true [features] serde = [ @@ -59,6 +63,11 @@ serde = [ "reth-primitives-traits/serde", "reth-codecs/serde" ] +serde-bincode-compat = [ + "serde_with", + "reth-primitives-traits/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat" +] test-utils = [ "dep:plain_hasher", "dep:hash-db", diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index 6f3cbf3eeae..04b817aab8f 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -37,5 +37,19 @@ pub use 
proofs::*; pub mod root; +/// Buffer for trie updates. +pub mod updates; + +/// Bincode-compatible serde implementations for trie types. +/// +/// `bincode` crate allows for more efficient serialization of trie types, because it allows +/// non-string map keys. +/// +/// Read more: +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub mod serde_bincode_compat { + pub use super::updates::serde_bincode_compat as updates; +} + /// Re-export pub use alloy_trie::{nodes::*, proof, BranchNodeCompact, HashBuilder, TrieMask, EMPTY_ROOT_HASH}; diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/common/src/updates.rs similarity index 93% rename from crates/trie/trie/src/updates.rs rename to crates/trie/common/src/updates.rs index e7bc490647c..76aa37a4778 100644 --- a/crates/trie/trie/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -1,4 +1,4 @@ -use crate::{walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles}; +use crate::{BranchNodeCompact, HashBuilder, Nibbles}; use alloy_primitives::B256; use std::collections::{HashMap, HashSet}; @@ -78,20 +78,19 @@ impl TrieUpdates { } /// Finalize state trie updates. - pub fn finalize( + pub fn finalize( &mut self, - walker: TrieWalker, hash_builder: HashBuilder, + removed_keys: HashSet, destroyed_accounts: HashSet, ) { - // Retrieve deleted keys from trie walker. - let (_, removed_node_keys) = walker.split(); - self.removed_nodes.extend(exclude_empty(removed_node_keys)); - // Retrieve updated nodes from hash builder. let (_, updated_nodes) = hash_builder.split(); self.account_nodes.extend(exclude_empty_from_pair(updated_nodes)); + // Add deleted node paths. + self.removed_nodes.extend(exclude_empty(removed_keys)); + // Add deleted storage tries for destroyed accounts. 
for destroyed in destroyed_accounts { self.storage_tries.entry(destroyed).or_default().set_deleted(true); @@ -201,14 +200,13 @@ impl StorageTrieUpdates { } /// Finalize storage trie updates for by taking updates from walker and hash builder. - pub fn finalize(&mut self, walker: TrieWalker, hash_builder: HashBuilder) { - // Retrieve deleted keys from trie walker. - let (_, removed_keys) = walker.split(); - self.removed_nodes.extend(exclude_empty(removed_keys)); - + pub fn finalize(&mut self, hash_builder: HashBuilder, removed_keys: HashSet) { // Retrieve updated nodes from hash builder. let (_, updated_nodes) = hash_builder.split(); self.storage_nodes.extend(exclude_empty_from_pair(updated_nodes)); + + // Add deleted node paths. + self.removed_nodes.extend(exclude_empty(removed_keys)); } /// Convert storage trie updates into [`StorageTrieUpdatesSorted`]. @@ -229,10 +227,9 @@ impl StorageTrieUpdates { /// This also sorts the set before serializing. #[cfg(feature = "serde")] mod serde_nibbles_set { - use std::collections::HashSet; - - use reth_trie_common::Nibbles; + use crate::Nibbles; use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; + use std::collections::HashSet; pub(super) fn serialize(map: &HashSet, serializer: S) -> Result where @@ -266,15 +263,14 @@ mod serde_nibbles_set { /// This also sorts the map's keys before encoding and serializing. #[cfg(feature = "serde")] mod serde_nibbles_map { - use std::{collections::HashMap, marker::PhantomData}; - + use crate::Nibbles; use alloy_primitives::hex; - use reth_trie_common::Nibbles; use serde::{ de::{Error, MapAccess, Visitor}, ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer, }; + use std::{collections::HashMap, marker::PhantomData}; pub(super) fn serialize( map: &HashMap, @@ -340,9 +336,13 @@ mod serde_nibbles_map { /// Sorted trie updates used for lookups and insertions. 
#[derive(PartialEq, Eq, Clone, Default, Debug)] pub struct TrieUpdatesSorted { - pub(crate) account_nodes: Vec<(Nibbles, BranchNodeCompact)>, - pub(crate) removed_nodes: HashSet, - pub(crate) storage_tries: HashMap, + /// Sorted collection of updated state nodes with corresponding paths. + pub account_nodes: Vec<(Nibbles, BranchNodeCompact)>, + /// The set of removed state node keys. + pub removed_nodes: HashSet, + /// Storage tries storage stored by hashed address of the account + /// the trie belongs to. + pub storage_tries: HashMap, } impl TrieUpdatesSorted { @@ -365,9 +365,12 @@ impl TrieUpdatesSorted { /// Sorted trie updates used for lookups and insertions. #[derive(PartialEq, Eq, Clone, Default, Debug)] pub struct StorageTrieUpdatesSorted { - pub(crate) is_deleted: bool, - pub(crate) storage_nodes: Vec<(Nibbles, BranchNodeCompact)>, - pub(crate) removed_nodes: HashSet, + /// Flag indicating whether the trie has been deleted/wiped. + pub is_deleted: bool, + /// Sorted collection of updated storage nodes with corresponding paths. + pub storage_nodes: Vec<(Nibbles, BranchNodeCompact)>, + /// The set of removed storage node keys. + pub removed_nodes: HashSet, } impl StorageTrieUpdatesSorted { @@ -402,21 +405,20 @@ fn exclude_empty_from_pair( /// Bincode-compatible trie updates type serde implementations. #[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] pub mod serde_bincode_compat { + use crate::{BranchNodeCompact, Nibbles}; + use alloy_primitives::B256; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{DeserializeAs, SerializeAs}; use std::{ borrow::Cow, collections::{HashMap, HashSet}, }; - use alloy_primitives::B256; - use reth_trie_common::{BranchNodeCompact, Nibbles}; - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - use serde_with::{DeserializeAs, SerializeAs}; - /// Bincode-compatible [`super::TrieUpdates`] serde implementation. 
/// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: /// ```rust - /// use reth_trie::{serde_bincode_compat, updates::TrieUpdates}; + /// use reth_trie_common::{serde_bincode_compat, updates::TrieUpdates}; /// use serde::{Deserialize, Serialize}; /// use serde_with::serde_as; /// @@ -480,7 +482,7 @@ pub mod serde_bincode_compat { /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: /// ```rust - /// use reth_trie::{serde_bincode_compat, updates::StorageTrieUpdates}; + /// use reth_trie_common::{serde_bincode_compat, updates::StorageTrieUpdates}; /// use serde::{Deserialize, Serialize}; /// use serde_with::serde_as; /// @@ -541,12 +543,12 @@ pub mod serde_bincode_compat { #[cfg(test)] mod tests { - use crate::updates::StorageTrieUpdates; - - use super::super::{serde_bincode_compat, TrieUpdates}; - + use crate::{ + serde_bincode_compat, + updates::{StorageTrieUpdates, TrieUpdates}, + BranchNodeCompact, Nibbles, + }; use alloy_primitives::B256; - use reth_trie_common::{BranchNodeCompact, Nibbles}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -555,7 +557,7 @@ pub mod serde_bincode_compat { #[serde_as] #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] struct Data { - #[serde_as(as = "serde_bincode_compat::TrieUpdates")] + #[serde_as(as = "serde_bincode_compat::updates::TrieUpdates")] trie_updates: TrieUpdates, } @@ -588,7 +590,7 @@ pub mod serde_bincode_compat { #[serde_as] #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] struct Data { - #[serde_as(as = "serde_bincode_compat::StorageTrieUpdates")] + #[serde_as(as = "serde_bincode_compat::updates::StorageTrieUpdates")] trie_updates: StorageTrieUpdates, } diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index 74f9f98f1bf..2fbdf1d5756 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -67,16 +67,17 @@ similar-asserts.workspace = true metrics = ["reth-metrics", "reth-trie/metrics", 
"dep:metrics"] serde = [ "dep:serde", - "reth-provider/serde", - "reth-trie/serde", - "reth-trie-common/serde", + "similar-asserts/serde", + "revm/serde", "alloy-consensus/serde", "alloy-primitives/serde", - "revm/serde", - "similar-asserts/serde" + "reth-trie/serde", + "reth-trie-common/serde", + "reth-provider/serde", ] test-utils = [ "triehash", + "revm/test-utils", "reth-trie-common/test-utils", "reth-chainspec/test-utils", "reth-primitives/test-utils", @@ -84,5 +85,4 @@ test-utils = [ "reth-db-api/test-utils", "reth-provider/test-utils", "reth-trie/test-utils", - "revm/test-utils" ] diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index 7a316d8b15f..b4e300c7290 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -193,11 +193,8 @@ where let root = hash_builder.root(); - trie_updates.finalize( - account_node_iter.walker, - hash_builder, - prefix_sets.destroyed_accounts, - ); + let removed_keys = account_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, prefix_sets.destroyed_accounts); let stats = tracker.finish(); diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index 30ce566fb5f..d8d210c1b19 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -146,7 +146,7 @@ pub fn calculate_root_from_leaves_repeated(c: &mut Criterion) { hb.root(); if storage_updates.peek().is_some() { - trie_updates.finalize(node_iter.walker, hb); + trie_updates.finalize(hb, node_iter.walker.take_removed_keys()); } } }, diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index fd4d80ce7e3..c1c3ae4dd87 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -42,12 +42,6 @@ metrics = { workspace = true, optional = true } # `test-utils` feature triehash = { version = "0.8", optional = true } -# `serde` feature -serde = { workspace = true, optional = true } - -# 
`serde-bincode-compat` feature -serde_with = { workspace = true, optional = true } - [dev-dependencies] # reth reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } @@ -61,28 +55,21 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true serde_json.workspace = true criterion.workspace = true -bincode.workspace = true [features] metrics = ["reth-metrics", "dep:metrics"] serde = [ - "dep:serde", - "alloy-consensus/serde", "alloy-primitives/serde", - "revm/serde", + "alloy-consensus/serde", "alloy-trie/serde", + "revm/serde", "reth-trie-common/serde" ] -serde-bincode-compat = [ - "serde_with", - "reth-primitives/serde-bincode-compat", - "alloy-consensus/serde-bincode-compat" -] test-utils = [ "triehash", - "reth-trie-common/test-utils", - "reth-primitives/test-utils", "revm/test-utils", + "reth-primitives/test-utils", + "reth-trie-common/test-utils", "reth-stages-types/test-utils" ] diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index 73a08d3fc44..335711b8d88 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -50,9 +50,6 @@ pub mod witness; mod trie; pub use trie::{StateRoot, StorageRoot}; -/// Buffer for trie updates. -pub mod updates; - /// Utilities for state root checkpoint progress. mod progress; pub use progress::{IntermediateStateRootState, StateRootProgress}; @@ -63,17 +60,6 @@ pub mod stats; // re-export for convenience pub use reth_trie_common::*; -/// Bincode-compatible serde implementations for trie types. -/// -/// `bincode` crate allows for more efficient serialization of trie types, because it allows -/// non-string map keys. -/// -/// Read more: -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] -pub mod serde_bincode_compat { - pub use super::updates::serde_bincode_compat as updates; -} - /// Trie calculation metrics. 
#[cfg(feature = "metrics")] pub mod metrics; diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index 74faf7bbc60..28517b23e90 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -258,11 +258,8 @@ where let root = hash_builder.root(); - trie_updates.finalize( - account_node_iter.walker, - hash_builder, - self.prefix_sets.destroyed_accounts, - ); + let removed_keys = account_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, self.prefix_sets.destroyed_accounts); let stats = tracker.finish(); @@ -434,7 +431,8 @@ where let root = hash_builder.root(); let mut trie_updates = StorageTrieUpdates::default(); - trie_updates.finalize(storage_node_iter.walker, hash_builder); + let removed_keys = storage_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys); let stats = tracker.finish(); diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index 774fa64a0ef..146dbc213e5 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -58,8 +58,13 @@ impl TrieWalker { /// Split the walker into stack and trie updates. pub fn split(mut self) -> (Vec, HashSet) { - let keys = self.removed_keys.take(); - (self.stack, keys.unwrap_or_default()) + let keys = self.take_removed_keys(); + (self.stack, keys) + } + + /// Take removed keys from the walker. + pub fn take_removed_keys(&mut self) -> HashSet { + self.removed_keys.take().unwrap_or_default() } /// Prints the current stack of trie nodes. 
From 2b4fa3bbf1403505a4a27b818ff4d89471627e28 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 26 Nov 2024 13:53:52 +0100 Subject: [PATCH 697/970] dep(trie): remove `reth-trie` dep from `reth-trie-sparse` (#12872) --- crates/trie/sparse/Cargo.toml | 1 - crates/trie/sparse/src/errors.rs | 2 +- crates/trie/sparse/src/state.rs | 2 +- crates/trie/sparse/src/trie.rs | 9 +++------ 4 files changed, 5 insertions(+), 9 deletions(-) diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 4c64bf716de..dce232fcd57 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -16,7 +16,6 @@ workspace = true # reth reth-tracing.workspace = true reth-trie-common.workspace = true -reth-trie.workspace = true # alloy alloy-primitives.workspace = true diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs index 506b206fdd7..a38a92395d9 100644 --- a/crates/trie/sparse/src/errors.rs +++ b/crates/trie/sparse/src/errors.rs @@ -1,7 +1,7 @@ //! Errors for sparse trie. 
use alloy_primitives::{Bytes, B256}; -use reth_trie::Nibbles; +use reth_trie_common::Nibbles; use thiserror::Error; use crate::SparseNode; diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index d7e2f27b974..ec88abfd19e 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -6,7 +6,7 @@ use alloy_primitives::{ Bytes, B256, }; use alloy_rlp::Decodable; -use reth_trie::{ +use reth_trie_common::{ updates::{StorageTrieUpdates, TrieUpdates}, Nibbles, TrieNode, }; diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index bab166d7831..7797069759e 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -6,13 +6,10 @@ use alloy_primitives::{ }; use alloy_rlp::Decodable; use reth_tracing::tracing::debug; -use reth_trie::{ - prefix_set::{PrefixSet, PrefixSetMut}, - BranchNodeCompact, RlpNode, -}; use reth_trie_common::{ - BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, TrieMask, TrieNode, CHILD_INDEX_RANGE, - EMPTY_ROOT_HASH, + prefix_set::{PrefixSet, PrefixSetMut}, + BranchNodeCompact, BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieMask, + TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; use std::{borrow::Cow, fmt}; From c19b8e1a2bb239f5af7565430dee995cb732708c Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 26 Nov 2024 13:57:58 +0100 Subject: [PATCH 698/970] chore(trie): remove todo comment (#12875) --- crates/trie/sparse/src/trie.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 7797069759e..21f1cf410aa 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -173,7 +173,6 @@ impl RevealedSparseTrie { /// Reveal the trie node only if it was not known already. 
pub fn reveal_node(&mut self, path: Nibbles, node: TrieNode) -> SparseTrieResult<()> { - // TODO: revise all inserts to not overwrite existing entries match node { TrieNode::EmptyRoot => { debug_assert!(path.is_empty()); From 277631092dad755f4ecb3d1628ad66c2829806b4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 14:00:50 +0100 Subject: [PATCH 699/970] feat: use pooled AT for get_pooled_transactions (#12876) --- crates/net/network/src/transactions/mod.rs | 8 ++++++-- crates/transaction-pool/src/lib.rs | 14 ++++++++++++-- crates/transaction-pool/src/noop.rs | 17 ++++++++++++++--- crates/transaction-pool/src/pool/mod.rs | 20 ++++++++++++++++---- crates/transaction-pool/src/traits.rs | 11 ++++++++++- 5 files changed, 58 insertions(+), 12 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 9628dbb4f1b..6e0a3b2f327 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -961,6 +961,8 @@ impl TransactionsManager where Pool: TransactionPool + 'static, <::Transaction as PoolTransaction>::Consensus: Into, + <::Transaction as PoolTransaction>::Pooled: + Into, { /// Request handler for an incoming request for transactions fn on_get_pooled_transactions( @@ -974,14 +976,14 @@ where let _ = response.send(Ok(PooledTransactions::default())); return } - let transactions = self.pool.get_pooled_transaction_elements( + let transactions = self.pool.get_pooled_transactions_as::( request.0, GetPooledTransactionLimit::ResponseSizeSoftLimit( self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, ), ); - trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| *tx.hash()), "Sending requested transactions to peer"); + trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| tx.tx_hash()), "Sending requested transactions to peer"); // we sent a response at which point we assume that the 
peer is aware of the // transactions @@ -1291,6 +1293,8 @@ impl Future for TransactionsManager where Pool: TransactionPool + Unpin + 'static, <::Transaction as PoolTransaction>::Consensus: Into, + <::Transaction as PoolTransaction>::Pooled: + Into, { type Output = (); diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 83e3b78c6b8..1c383e8edf0 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -156,7 +156,6 @@ use alloy_primitives::{Address, TxHash, B256, U256}; use aquamarine as _; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; -use reth_primitives::PooledTransactionsElement; use reth_storage_api::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; @@ -416,10 +415,21 @@ where &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec { + ) -> Vec<<::Transaction as PoolTransaction>::Pooled> { self.pool.get_pooled_transaction_elements(tx_hashes, limit) } + fn get_pooled_transactions_as

( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec

+ where + ::Pooled: Into

, + { + self.pool.get_pooled_transactions_as(tx_hashes, limit) + } + fn get_pooled_transaction_element( &self, tx_hash: TxHash, diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 79901fe18d0..3a068d3a593 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -13,8 +13,8 @@ use crate::{ validate::ValidTransaction, AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPoolTransaction, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, - PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, - TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, + PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, + TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; use alloy_eips::{ eip1559::ETHEREUM_BLOCK_GAS_LIMIT, @@ -135,7 +135,18 @@ impl TransactionPool for NoopTransactionPool { &self, _tx_hashes: Vec, _limit: GetPooledTransactionLimit, - ) -> Vec { + ) -> Vec<::Pooled> { + vec![] + } + + fn get_pooled_transactions_as( + &self, + _tx_hashes: Vec, + _limit: GetPooledTransactionLimit, + ) -> Vec + where + ::Pooled: Into, + { vec![] } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 4a035f8e42a..b5391b6e8d7 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -88,7 +88,6 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use alloy_eips::eip4844::BlobTransactionSidecar; -use reth_primitives::PooledTransactionsElement; use std::{ collections::{HashMap, HashSet}, fmt, @@ -340,14 +339,27 @@ where } } - /// Returns converted [`PooledTransactionsElement`] for the given transaction hashes. + /// Returns pooled transactions for the given transaction hashes. 
pub(crate) fn get_pooled_transaction_elements( &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec + ) -> Vec<<::Transaction as PoolTransaction>::Pooled> where ::Transaction: EthPoolTransaction, + { + self.get_pooled_transactions_as(tx_hashes, limit) + } + + /// Returns pooled transactions for the given transaction hashes as the requested type. + pub(crate) fn get_pooled_transactions_as

( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec

+ where + ::Transaction: EthPoolTransaction, + <::Transaction as PoolTransaction>::Pooled: Into

, { let transactions = self.get_all(tx_hashes); let mut elements = Vec::with_capacity(transactions.len()); @@ -369,7 +381,7 @@ where elements } - /// Returns converted [`PooledTransactionsElement`] for the given transaction hash. + /// Returns converted pooled transaction for the given transaction hash. pub(crate) fn get_pooled_transaction_element( &self, tx_hash: TxHash, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 742b95cff38..2973e36f7a6 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -231,7 +231,16 @@ pub trait TransactionPool: Send + Sync + Clone { &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec; + ) -> Vec<::Pooled>; + + /// Returns the pooled transaction variant for the given transaction hash as the requested type. + fn get_pooled_transactions_as( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec + where + ::Pooled: Into; /// Returns the pooled transaction variant for the given transaction hash. 
/// From 2840b6f677b9cf4602c4e78e349d747d7df5e5d4 Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Tue, 26 Nov 2024 20:44:12 +0700 Subject: [PATCH 700/970] chore: make OrderedSealedBlock generic over header and body types (#12830) --- Cargo.lock | 1 + crates/consensus/beacon/Cargo.toml | 1 + crates/consensus/beacon/src/engine/sync.rs | 19 ++++++++++++++----- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cb675b25f87..276ccdbe1dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6489,6 +6489,7 @@ dependencies = [ "reth-payload-primitives", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-prune-types", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 65994557c06..a7e32684839 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -17,6 +17,7 @@ reth-blockchain-tree-api.workspace = true reth-codecs.workspace = true reth-db-api.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-stages-api.workspace = true reth-errors.workspace = true reth-provider.workspace = true diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index b140846981e..861aeebf1eb 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -4,13 +4,14 @@ use crate::{ engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, EthBeaconConsensus, }; +use alloy_consensus::Header; use alloy_primitives::{BlockNumber, B256}; use futures::FutureExt; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, EthBlockClient, }; -use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlock}; +use reth_primitives::{BlockBody, EthPrimitives, NodePrimitives, SealedBlock}; 
use reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; @@ -345,17 +346,25 @@ where /// A wrapper type around [`SealedBlock`] that implements the [Ord] trait by block number. #[derive(Debug, Clone, PartialEq, Eq)] -struct OrderedSealedBlock(SealedBlock); +struct OrderedSealedBlock(SealedBlock); -impl PartialOrd for OrderedSealedBlock { +impl PartialOrd for OrderedSealedBlock +where + H: reth_primitives_traits::BlockHeader + 'static, + B: reth_primitives_traits::BlockBody + 'static, +{ fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedSealedBlock { +impl Ord for OrderedSealedBlock +where + H: reth_primitives_traits::BlockHeader + 'static, + B: reth_primitives_traits::BlockBody + 'static, +{ fn cmp(&self, other: &Self) -> Ordering { - self.0.number.cmp(&other.0.number) + self.0.number().cmp(&other.0.number()) } } From b6d6bf5eed6c1fc2bb8fdb4362b6e91ae90ad367 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 26 Nov 2024 13:47:06 +0000 Subject: [PATCH 701/970] deps: bump sysinfo (#12874) --- Cargo.lock | 8 ++++---- crates/storage/db/Cargo.toml | 2 +- crates/storage/db/src/lockfile.rs | 1 + 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 276ccdbe1dd..7db867d5545 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4590,9 +4590,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.164" +version = "0.2.165" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" +checksum = "fcb4d3d38eab6c5239a362fa8bae48c03baf980a6e7079f063942d563ef3533e" [[package]] name = "libloading" @@ -10603,9 +10603,9 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.31.4" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"355dbe4f8799b304b05e1b0f05fc59b2a18d36645cf169607da45bde2f69a1be" +checksum = "4c33cd241af0f2e9e3b5c32163b873b29956890b5342e6745b917ce9d490f4af" dependencies = [ "core-foundation-sys", "libc", diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 62a7cc91068..4a4eff47123 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -49,7 +49,7 @@ thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true rustc-hash = { workspace = true, optional = true } -sysinfo = { version = "0.31", default-features = false, features = ["system"] } +sysinfo = { version = "0.32", default-features = false, features = ["system"] } parking_lot = { workspace = true, optional = true } # arbitrary utils diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index a87ab7393f1..b28a83f11ca 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -110,6 +110,7 @@ impl ProcessUID { let pid2 = sysinfo::Pid::from(pid); system.refresh_processes_specifics( sysinfo::ProcessesToUpdate::Some(&[pid2]), + true, ProcessRefreshKind::new(), ); system.process(pid2).map(|process| Self { pid, start_time: process.start_time() }) From a0f99df21bff52194d8e24d889e10e6a1514f05f Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 26 Nov 2024 15:01:31 +0100 Subject: [PATCH 702/970] feat(trie): `StorageRootProvider::storage_multiproof` (#12879) --- crates/chain-state/src/in_memory.rs | 13 +++++++- crates/chain-state/src/memory_overlay.rs | 17 +++++++++- crates/revm/src/test_utils.rs | 13 ++++++-- crates/rpc/rpc-eth-types/src/cache/db.rs | 9 ++++++ .../src/providers/bundle_state_provider.rs | 14 +++++++- .../src/providers/state/historical.rs | 15 ++++++++- .../provider/src/providers/state/latest.rs | 13 +++++++- .../provider/src/providers/state/macros.rs | 1 + .../storage/provider/src/test_utils/mock.rs | 13 ++++++-- .../storage/provider/src/test_utils/noop.rs | 
9 ++++++ crates/storage/storage-api/src/trie.rs | 11 ++++++- crates/trie/db/src/proof.rs | 32 ++++++++++++++++++- 12 files changed, 149 insertions(+), 11 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 933439a7c13..5bb3911f9f3 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -944,7 +944,9 @@ mod tests { AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; - use reth_trie::{AccountProof, HashedStorage, MultiProof, StorageProof, TrieInput}; + use reth_trie::{ + AccountProof, HashedStorage, MultiProof, StorageMultiProof, StorageProof, TrieInput, + }; fn create_mock_state( test_block_builder: &mut TestBlockBuilder, @@ -1054,6 +1056,15 @@ mod tests { ) -> ProviderResult { Ok(StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(StorageMultiProof::empty()) + } } impl StateProofProvider for MockStateProvider { diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 88cd411d38b..8bc4ada9e8d 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -11,7 +11,8 @@ use reth_storage_api::{ StorageRootProvider, }; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, TrieInput, }; use std::sync::OnceLock; @@ -167,6 +168,20 @@ macro_rules! impl_state_provider { hashed_storage.extend(&storage); self.historical.storage_proof(address, slot, hashed_storage) } + + // TODO: Currently this does not reuse available in-memory trie nodes. 
+ fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + storage: HashedStorage, + ) -> ProviderResult { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_multiproof(address, slots, hashed_storage) + } } impl $($tokens)* StateProofProvider for $type { diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 813997c72d1..443d1d5ebcf 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -11,8 +11,8 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, - TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, StorageProof, TrieInput, }; /// Mock state for testing @@ -112,6 +112,15 @@ impl StorageRootProvider for StateProviderTest { ) -> ProviderResult { unimplemented!("proof generation is not supported") } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + unimplemented!("proof generation is not supported") + } } impl StateProofProvider for StateProviderTest { diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 50fd4b04625..1fbe16a2ed9 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -67,6 +67,15 @@ impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'_> ) -> ProviderResult { self.0.storage_proof(address, slot, hashed_storage) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + self.0.storage_multiproof(address, slots, hashed_storage) + } } impl 
reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'_> { diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index be6549033cd..652f6fb33fd 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -9,7 +9,8 @@ use reth_primitives::{Account, Bytecode}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, TrieInput, }; /// A state provider that resolves to data from either a wrapped [`crate::ExecutionOutcome`] @@ -138,6 +139,17 @@ impl StorageRootProvider storage.extend(&hashed_storage); self.state_provider.storage_proof(address, slot, storage) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + let mut storage = self.get_hashed_storage(address); + storage.extend(&hashed_storage); + self.state_provider.storage_multiproof(address, slots, storage) + } } impl StateProofProvider diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 29ba70e2049..ca036844f65 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -21,7 +21,8 @@ use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageMultiProof, + StorageRoot, TrieInput, }; use reth_trie_db::{ 
DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, @@ -341,6 +342,18 @@ impl StorageRootProvider StorageProof::overlay_storage_proof(self.tx(), address, slot, revert_storage) .map_err(Into::::into) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + let mut revert_storage = self.revert_storage(address)?; + revert_storage.extend(&hashed_storage); + StorageProof::overlay_storage_multiproof(self.tx(), address, slots, revert_storage) + .map_err(Into::::into) + } } impl StateProofProvider diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 297217acece..67dd1e74471 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -15,7 +15,8 @@ use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageMultiProof, + StorageRoot, TrieInput, }; use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, @@ -108,6 +109,16 @@ impl StorageRootProvider for LatestStateProviderRef<'_, Pr StorageProof::overlay_storage_proof(self.tx(), address, slot, hashed_storage) .map_err(Into::::into) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + StorageProof::overlay_storage_multiproof(self.tx(), address, slots, hashed_storage) + .map_err(Into::::into) + } } impl StateProofProvider for LatestStateProviderRef<'_, Provider> { diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index b90924354c4..f2648fb15e6 100644 --- 
a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -50,6 +50,7 @@ macro_rules! delegate_provider_impls { StorageRootProvider $(where [$($generics)*])? { fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; fn storage_proof(&self, address: alloy_primitives::Address, slot: alloy_primitives::B256, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; + fn storage_multiproof(&self, address: alloy_primitives::Address, slots: &[alloy_primitives::B256], storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; } StateProofProvider $(where [$($generics)*])? { fn proof(&self, input: reth_trie::TrieInput, address: alloy_primitives::Address, slots: &[alloy_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index a99b85af904..c95ba0ed7cb 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -32,8 +32,8 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, - TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, StorageProof, TrieInput, }; use reth_trie_db::MerklePatriciaTrie; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; @@ -639,6 +639,15 @@ impl StorageRootProvider for MockEthProvider { ) -> ProviderResult { Ok(StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(StorageMultiProof::empty()) + } } impl StateProofProvider for 
MockEthProvider { diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index b1ec85dd689..7f427b9305a 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -372,6 +372,15 @@ impl StorageRootProvider for NoopProvider { ) -> ProviderResult { Ok(reth_trie::StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageMultiProof::empty()) + } } impl StateProofProvider for NoopProvider { diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index c8f12da0716..d63f6037439 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -5,7 +5,8 @@ use alloy_primitives::{ use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::{StorageTrieUpdates, TrieUpdates}, - AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, StorageMultiProof, StorageProof, + TrieInput, }; /// A type that can compute the state root of a given post state. @@ -56,6 +57,14 @@ pub trait StorageRootProvider: Send + Sync { slot: B256, hashed_storage: HashedStorage, ) -> ProviderResult; + + /// Returns the storage multiproof for target slots. + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult; } /// A type that can generate state proof on top of a given post state. 
diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 4f7c5e1c021..dd00f6eda9c 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -10,7 +10,7 @@ use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, proof::{Proof, StorageProof}, trie_cursor::InMemoryTrieCursorFactory, - AccountProof, HashedPostStateSorted, HashedStorage, MultiProof, TrieInput, + AccountProof, HashedPostStateSorted, HashedStorage, MultiProof, StorageMultiProof, TrieInput, }; /// Extends [`Proof`] with operations specific for working with a database transaction. @@ -96,6 +96,14 @@ pub trait DatabaseStorageProof<'a, TX> { slot: B256, storage: HashedStorage, ) -> Result; + + /// Generates the storage multiproof for target slots based on [`TrieInput`]. + fn overlay_storage_multiproof( + tx: &'a TX, + address: Address, + slots: &[B256], + storage: HashedStorage, + ) -> Result; } impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> @@ -125,4 +133,26 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> .with_prefix_set_mut(prefix_set) .storage_proof(slot) } + + fn overlay_storage_multiproof( + tx: &'a TX, + address: Address, + slots: &[B256], + storage: HashedStorage, + ) -> Result { + let hashed_address = keccak256(address); + let targets = slots.iter().map(keccak256).collect(); + let prefix_set = storage.construct_prefix_set(); + let state_sorted = HashedPostStateSorted::new( + Default::default(), + HashMap::from([(hashed_address, storage.into_sorted())]), + ); + Self::from_tx(tx, address) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(tx), + &state_sorted, + )) + .with_prefix_set_mut(prefix_set) + .storage_multiproof(targets) + } } From dee0b8c055db3d1fbbb5cf3a98f2476117c118be Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 15:23:27 +0100 Subject: [PATCH 703/970] feat: relax constraints for on_get_pooled_txs (#12880) --- crates/net/network/src/transactions/mod.rs | 28 
++++++++++++---------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 6e0a3b2f327..227e812fc74 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -681,9 +681,13 @@ where impl TransactionsManager where Pool: TransactionPool, - N: NetworkPrimitives, + N: NetworkPrimitives< + BroadcastedTransaction: SignedTransaction, + PooledTransaction: SignedTransaction, + >, <::Transaction as PoolTransaction>::Consensus: Into, + <::Transaction as PoolTransaction>::Pooled: Into, { /// Invoked when transactions in the local mempool are considered __pending__. /// @@ -955,28 +959,20 @@ where // notify pool so events get fired self.pool.on_propagated(propagated); } -} -impl TransactionsManager -where - Pool: TransactionPool + 'static, - <::Transaction as PoolTransaction>::Consensus: Into, - <::Transaction as PoolTransaction>::Pooled: - Into, -{ /// Request handler for an incoming request for transactions fn on_get_pooled_transactions( &mut self, peer_id: PeerId, request: GetPooledTransactions, - response: oneshot::Sender>, + response: oneshot::Sender>>, ) { if let Some(peer) = self.peers.get_mut(&peer_id) { if self.network.tx_gossip_disabled() { let _ = response.send(Ok(PooledTransactions::default())); return } - let transactions = self.pool.get_pooled_transactions_as::( + let transactions = self.pool.get_pooled_transactions_as::( request.0, GetPooledTransactionLimit::ResponseSizeSoftLimit( self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, @@ -987,13 +983,21 @@ where // we sent a response at which point we assume that the peer is aware of the // transactions - peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.hash())); + peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.tx_hash())); let resp = PooledTransactions(transactions); let _ = response.send(Ok(resp)); } } 
+} +impl TransactionsManager +where + Pool: TransactionPool + 'static, + <::Transaction as PoolTransaction>::Consensus: Into, + <::Transaction as PoolTransaction>::Pooled: + Into, +{ /// Handles dedicated transaction events related to the `eth` protocol. fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { match event { From 2d6b8937c351c173d56929ac45ae9810df33c8f7 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 26 Nov 2024 18:24:40 +0400 Subject: [PATCH 704/970] refactor: unify logic for writing receipts (#12878) --- .../commands/debug_cmd/in_memory_merkle.rs | 15 +- bin/reth/src/commands/debug_cmd/merkle.rs | 13 +- .../cli/src/commands/import_receipts.rs | 12 +- crates/stages/stages/src/stages/execution.rs | 17 +- crates/storage/db-common/src/init.rs | 14 +- .../src/providers/blockchain_provider.rs | 22 +- .../src/providers/database/provider.rs | 82 ++++++- crates/storage/provider/src/traits/state.rs | 5 +- .../storage/provider/src/writer/database.rs | 29 --- crates/storage/provider/src/writer/mod.rs | 225 ++---------------- .../provider/src/writer/static_file.rs | 30 --- crates/storage/storage-api/src/receipts.rs | 19 +- 12 files changed, 151 insertions(+), 332 deletions(-) delete mode 100644 crates/storage/provider/src/writer/database.rs delete mode 100644 crates/storage/provider/src/writer/static_file.rs diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 9f82ef0574b..dec96b50408 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -22,9 +22,9 @@ use reth_network_api::NetworkInfo; use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockExt; use reth_provider::{ - providers::ProviderNodeTypes, writer::UnifiedStorageWriter, AccountExtReader, - ChainSpecProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderFactory, StageCheckpointReader, 
StateWriter, StorageReader, + providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory, + HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, + StageCheckpointReader, StateWriter, StorageLocation, StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; @@ -163,7 +163,7 @@ impl> Command { return Ok(()) } - let provider_rw = provider_factory.provider_rw()?; + let provider_rw = provider_factory.database_provider_rw()?; // Insert block, state and hashes provider_rw.insert_historical_block( @@ -172,8 +172,11 @@ impl> Command { .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError)?, )?; - let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw.0); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + provider_rw.write_to_storage( + execution_outcome, + OriginalValuesKnown::No, + StorageLocation::Database, + )?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; let storages = provider_rw.plain_state_storages(storage_lists)?; provider_rw.insert_storage_for_hashing(storages)?; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index fe9b76d8c7d..7e7579f9928 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -20,9 +20,9 @@ use reth_network_p2p::full_block::FullBlockClient; use reth_node_api::BlockTy; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ - providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, - ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, StorageLocation, + providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider, + DatabaseProviderFactory, HeaderProvider, 
LatestStateProviderRef, OriginalValuesKnown, + ProviderError, ProviderFactory, StateWriter, StorageLocation, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -158,8 +158,11 @@ impl> Command { executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; let execution_outcome = executor.finalize(); - let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider_rw.write_to_storage( + execution_outcome, + OriginalValuesKnown::Yes, + StorageLocation::Database, + )?; let checkpoint = Some(StageCheckpoint::new( block_number diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index 049e160ae23..59d596685de 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -19,7 +19,7 @@ use reth_primitives::Receipts; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, DatabaseProviderFactory, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, - StateWriter, StaticFileProviderFactory, StaticFileWriter, StatsReader, + StateWriter, StaticFileProviderFactory, StatsReader, StorageLocation, }; use reth_stages::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -219,11 +219,11 @@ where ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default()); // finally, write the receipts - let mut storage_writer = UnifiedStorageWriter::from( - &provider, - static_file_provider.latest_writer(StaticFileSegment::Receipts)?, - ); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider.write_to_storage( + execution_outcome, + OriginalValuesKnown::Yes, + StorageLocation::StaticFiles, + )?; } // Only commit if we have imported as many receipts as the number of transactions. 
diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index c76c2c732a2..e1c25c7d5fa 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -15,10 +15,9 @@ use reth_primitives::{SealedHeader, StaticFileSegment}; use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimitives}; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - writer::UnifiedStorageWriter, BlockHashReader, BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateChangeWriter, StateWriter, StaticFileProviderFactory, - StatsReader, TransactionVariant, + StatsReader, StorageLocation, TransactionVariant, }; use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; @@ -180,9 +179,8 @@ where + StaticFileProviderFactory + StatsReader + StateChangeWriter - + BlockHashReader, - for<'a> UnifiedStorageWriter<'a, Provider, StaticFileProviderRWRefMut<'a, Provider::Primitives>>: - StateWriter, + + BlockHashReader + + StateWriter, { /// Return the id of the stage fn id(&self) -> StageId { @@ -211,7 +209,7 @@ where let static_file_provider = provider.static_file_provider(); // We only use static files for Receipts, if there is no receipt pruning of any kind. - let static_file_producer = if self.prune_modes.receipts.is_none() && + let write_receipts_to = if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() { debug!(target: "sync::stages::execution", start = start_block, "Preparing static file producer"); @@ -220,9 +218,9 @@ where // Since there might be a database <-> static file inconsistency (read // `prepare_static_file_producer` for context), we commit the change straight away. 
producer.commit()?; - Some(producer) + StorageLocation::StaticFiles } else { - None + StorageLocation::Database }; let db = StateProviderDatabase(LatestStateProviderRef::new(provider)); @@ -362,8 +360,7 @@ where let time = Instant::now(); // write output - let mut writer = UnifiedStorageWriter::new(provider, static_file_producer); - writer.write_to_storage(state, OriginalValuesKnown::Yes)?; + provider.write_to_storage(state, OriginalValuesKnown::Yes, write_receipts_to)?; let db_write_duration = time.elapsed(); debug!( diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index e14796d2686..367190b587e 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -14,7 +14,7 @@ use reth_provider::{ BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointWriter, StateChangeWriter, - StateWriter, StaticFileProviderFactory, TrieWriter, + StateWriter, StaticFileProviderFactory, StorageLocation, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; @@ -76,6 +76,7 @@ where + HeaderProvider + HashingWriter + StateChangeWriter + + StateWriter + AsRef, { let chain = factory.chain_spec(); @@ -147,6 +148,7 @@ where + DBProvider + StateChangeWriter + HeaderProvider + + StateWriter + AsRef, { insert_state(provider, alloc, 0) @@ -163,6 +165,7 @@ where + DBProvider + StateChangeWriter + HeaderProvider + + StateWriter + AsRef, { let capacity = alloc.size_hint().1.unwrap_or(0); @@ -230,8 +233,11 @@ where Vec::new(), ); - let mut storage_writer = UnifiedStorageWriter::from_database(&provider); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider.write_to_storage( + execution_outcome, + 
OriginalValuesKnown::Yes, + StorageLocation::Database, + )?; trace!(target: "reth::cli", "Inserted state"); @@ -351,6 +357,7 @@ where + HashingWriter + StateChangeWriter + TrieWriter + + StateWriter + AsRef, { let block = provider_rw.last_block_number()?; @@ -470,6 +477,7 @@ where + HeaderProvider + HashingWriter + HistoryWriter + + StateWriter + StateChangeWriter + AsRef, { diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 73554062b25..67f9cfe587d 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -904,13 +904,18 @@ mod tests { .unwrap_or_default(); // Insert blocks into the database - for block in &database_blocks { + for (block, receipts) in database_blocks.iter().zip(&receipts) { // TODO: this should be moved inside `insert_historical_block`: let mut transactions_writer = static_file_provider.latest_writer(StaticFileSegment::Transactions)?; + let mut receipts_writer = + static_file_provider.latest_writer(StaticFileSegment::Receipts)?; transactions_writer.increment_block(block.number)?; - for tx in block.body.transactions() { + receipts_writer.increment_block(block.number)?; + + for (tx, receipt) in block.body.transactions().iter().zip(receipts) { transactions_writer.append_transaction(tx_num, tx)?; + receipts_writer.append_receipt(tx_num, receipt)?; tx_num += 1; } @@ -919,19 +924,6 @@ mod tests { )?; } - // Insert receipts into the static files - UnifiedStorageWriter::new( - &provider_rw, - Some(factory.static_file_provider().latest_writer(StaticFileSegment::Receipts)?), - ) - .append_receipts_from_blocks( - // The initial block number is required - database_blocks.first().map(|b| b.number).unwrap_or_default(), - receipts[..database_blocks.len()] - .iter() - .map(|vec| vec.clone().into_iter().map(Some).collect::>()), - )?; - // Commit to both storages: database and static files 
UnifiedStorageWriter::commit(provider_rw)?; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index f04cb96691c..1e64b4f8491 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -9,7 +9,6 @@ use crate::{ traits::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, - writer::UnifiedStorageWriter, AccountReader, BlockBodyWriter, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, @@ -3017,12 +3016,11 @@ impl BlockWrite durations_recorder.record_relative(metrics::Action::InsertBlock); } - // Write state and changesets to the database. - // Must be written after blocks because of the receipt lookup. - // TODO: should _these_ be moved to storagewriter? 
seems like storagewriter should be - // _above_ db provider - let mut storage_writer = UnifiedStorageWriter::from_database(self); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + self.write_to_storage( + execution_outcome, + OriginalValuesKnown::No, + StorageLocation::Database, + )?; durations_recorder.record_relative(metrics::Action::InsertState); // insert hashes and intermediate merkle nodes @@ -3142,3 +3140,73 @@ impl DBProvider for DatabaseProvider self.prune_modes_ref() } } + +impl StateWriter + for DatabaseProvider +{ + fn write_to_storage( + &self, + execution_outcome: ExecutionOutcome, + is_value_known: OriginalValuesKnown, + write_receipts_to: StorageLocation, + ) -> ProviderResult<()> { + let (plain_state, reverts) = + execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); + + self.write_state_reverts(reverts, execution_outcome.first_block)?; + self.write_state_changes(plain_state)?; + + let mut bodies_cursor = self.tx.cursor_read::()?; + + let has_receipts_pruning = self.prune_modes.has_receipts_pruning() || + execution_outcome.receipts.iter().flatten().any(|receipt| receipt.is_none()); + + // Prepare receipts cursor if we are going to write receipts to the database + // + // We are writing to database if requested or if there's any kind of receipt pruning + // configured + let mut receipts_cursor = (write_receipts_to.database() || has_receipts_pruning) + .then(|| self.tx.cursor_write::()) + .transpose()?; + + // Prepare receipts static writer if we are going to write receipts to static files + // + // We are writing to static files if requested and if there's no receipt pruning configured + let mut receipts_static_writer = (write_receipts_to.static_files() && + !has_receipts_pruning) + .then(|| { + self.static_file_provider + .get_writer(execution_outcome.first_block, StaticFileSegment::Receipts) + }) + .transpose()?; + + for (idx, receipts) in execution_outcome.receipts.into_iter().enumerate() { + let 
block_number = execution_outcome.first_block + idx as u64; + + // Increment block number for receipts static file writer + if let Some(writer) = receipts_static_writer.as_mut() { + writer.increment_block(block_number)?; + } + + let first_tx_index = bodies_cursor + .seek_exact(block_number)? + .map(|(_, indices)| indices.first_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; + + for (idx, receipt) in receipts.into_iter().enumerate() { + let receipt_idx = first_tx_index + idx as u64; + if let Some(receipt) = receipt { + if let Some(writer) = &mut receipts_static_writer { + writer.append_receipt(receipt_idx, &receipt)?; + } + + if let Some(cursor) = &mut receipts_cursor { + cursor.append(receipt_idx, receipt)?; + } + } + } + } + + Ok(()) + } +} diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 3d62b1886e8..ec189a95e3d 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -8,14 +8,17 @@ use revm::db::{ }; use std::ops::RangeInclusive; +use super::StorageLocation; + /// A helper trait for [`ExecutionOutcome`] to write state and receipts to storage. pub trait StateWriter { /// Write the data and receipts to the database or static files if `static_file_producer` is /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. 
fn write_to_storage( - &mut self, + &self, execution_outcome: ExecutionOutcome, is_value_known: OriginalValuesKnown, + write_receipts_to: StorageLocation, ) -> ProviderResult<()>; } diff --git a/crates/storage/provider/src/writer/database.rs b/crates/storage/provider/src/writer/database.rs deleted file mode 100644 index 1436fb8a6ab..00000000000 --- a/crates/storage/provider/src/writer/database.rs +++ /dev/null @@ -1,29 +0,0 @@ -use alloy_primitives::{BlockNumber, TxNumber}; -use reth_db::{ - cursor::{DbCursorRO, DbCursorRW}, - tables, -}; -use reth_errors::ProviderResult; -use reth_primitives::Receipt; -use reth_storage_api::ReceiptWriter; - -pub(crate) struct DatabaseWriter<'a, W>(pub(crate) &'a mut W); - -impl ReceiptWriter for DatabaseWriter<'_, W> -where - W: DbCursorRO + DbCursorRW, -{ - fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - _: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()> { - for (tx_idx, receipt) in receipts.into_iter().enumerate() { - if let Some(receipt) = receipt { - self.0.append(first_tx_index + tx_idx as u64, receipt)?; - } - } - Ok(()) - } -} diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 28552514362..a88234ba305 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -1,35 +1,17 @@ use crate::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter as SfWriter}, - writer::static_file::StaticFileWriter, + providers::{StaticFileProvider, StaticFileWriter as SfWriter}, BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, StaticFileProviderFactory, StorageLocation, TrieWriter, }; -use alloy_primitives::BlockNumber; use reth_chain_state::ExecutedBlock; -use reth_db::{ - cursor::DbCursorRO, - tables, - transaction::{DbTx, DbTxMut}, -}; -use reth_errors::{ProviderError, ProviderResult}; -use reth_execution_types::ExecutionOutcome; +use 
reth_db::transaction::{DbTx, DbTxMut}; +use reth_errors::ProviderResult; use reth_primitives::StaticFileSegment; -use reth_storage_api::{ - DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, -}; +use reth_storage_api::{DBProvider, StageCheckpointWriter, TransactionsProviderExt}; use reth_storage_errors::writer::UnifiedStorageWriterError; use revm::db::OriginalValuesKnown; use tracing::debug; -mod database; -mod static_file; -use database::DatabaseWriter; - -enum StorageType { - Database(C), - StaticFile(S), -} - /// [`UnifiedStorageWriter`] is responsible for managing the writing to storage with both database /// and static file providers. #[derive(Debug)] @@ -81,14 +63,6 @@ impl<'a, ProviderDB, ProviderSF> UnifiedStorageWriter<'a, ProviderDB, ProviderSF self.static_file.as_ref().expect("should exist") } - /// Returns a mutable reference to the static file instance. - /// - /// # Panics - /// If the static file instance is not set. - fn static_file_mut(&mut self) -> &mut ProviderSF { - self.static_file.as_mut().expect("should exist") - } - /// Ensures that the static file instance is set. /// /// # Returns @@ -148,6 +122,7 @@ where + TransactionsProviderExt + StateChangeWriter + TrieWriter + + StateWriter + HistoryWriter + StageCheckpointWriter + BlockExecutionWriter @@ -169,16 +144,6 @@ where debug!(target: "provider::storage_writer", block_count = %blocks.len(), "Writing blocks and execution data to storage"); - // Only write receipts to static files if there is no receipt pruning configured. 
- let mut state_writer = if self.database().prune_modes_ref().has_receipts_pruning() { - UnifiedStorageWriter::from_database(self.database()) - } else { - UnifiedStorageWriter::from( - self.database(), - self.static_file().get_writer(first_block.number, StaticFileSegment::Receipts)?, - ) - }; - // TODO: remove all the clones and do performant / batched writes for each type of object // instead of a loop over all blocks, // meaning: @@ -196,7 +161,11 @@ where // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. let execution_outcome = block.execution_outcome().clone(); - state_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + self.database().write_to_storage( + execution_outcome, + OriginalValuesKnown::No, + StorageLocation::StaticFiles, + )?; // insert hashes and intermediate merkle nodes { @@ -261,149 +230,6 @@ where } } -impl - UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_, ProviderDB::Primitives>> -where - ProviderDB: DBProvider + HeaderProvider + StaticFileProviderFactory, -{ - /// Ensures that the static file writer is set and of the right [`StaticFileSegment`] variant. - /// - /// # Returns - /// - `Ok(())` if the static file writer is set. - /// - `Err(StorageWriterError::MissingStaticFileWriter)` if the static file instance is not set. 
- fn ensure_static_file_segment( - &self, - segment: StaticFileSegment, - ) -> Result<(), UnifiedStorageWriterError> { - match &self.static_file { - Some(writer) => { - if writer.user_header().segment() == segment { - Ok(()) - } else { - Err(UnifiedStorageWriterError::IncorrectStaticFileWriter( - writer.user_header().segment(), - segment, - )) - } - } - None => Err(UnifiedStorageWriterError::MissingStaticFileWriter), - } - } -} - -impl - UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_, ProviderDB::Primitives>> -where - ProviderDB: DBProvider + HeaderProvider + StaticFileProviderFactory, -{ - /// Appends receipts block by block. - /// - /// ATTENTION: If called from [`UnifiedStorageWriter`] without a static file producer, it will - /// always write them to database. Otherwise, it will look into the pruning configuration to - /// decide. - /// - /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a - /// writer for the Receipts segment. - /// - /// # Parameters - /// - `initial_block_number`: The starting block number. - /// - `blocks`: An iterator over blocks, each block having a vector of optional receipts. If - /// `receipt` is `None`, it has been pruned. - pub fn append_receipts_from_blocks( - &mut self, - initial_block_number: BlockNumber, - blocks: impl Iterator>>, - ) -> ProviderResult<()> { - let mut bodies_cursor = - self.database().tx_ref().cursor_read::()?; - - // We write receipts to database in two situations: - // * If we are in live sync. In this case, `UnifiedStorageWriter` is built without a static - // file writer. - // * If there is any kind of receipt pruning - let mut storage_type = if self.static_file.is_none() || - self.database().prune_modes_ref().has_receipts_pruning() - { - StorageType::Database(self.database().tx_ref().cursor_write::()?) 
- } else { - self.ensure_static_file_segment(StaticFileSegment::Receipts)?; - StorageType::StaticFile(self.static_file_mut()) - }; - - let mut last_tx_idx = None; - for (idx, receipts) in blocks.enumerate() { - let block_number = initial_block_number + idx as u64; - - let mut first_tx_index = - bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); - - // If there are no indices, that means there have been no transactions - // - // So instead of returning an error, use zero - if block_number == initial_block_number && first_tx_index.is_none() { - first_tx_index = Some(0); - } - - let first_tx_index = first_tx_index - .or(last_tx_idx) - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - - // update for empty blocks - last_tx_idx = Some(first_tx_index); - - match &mut storage_type { - StorageType::Database(cursor) => { - DatabaseWriter(cursor).append_block_receipts( - first_tx_index, - block_number, - receipts, - )?; - } - StorageType::StaticFile(sf) => { - StaticFileWriter(*sf).append_block_receipts( - first_tx_index, - block_number, - receipts, - )?; - } - }; - } - - Ok(()) - } -} - -impl StateWriter - for UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_, ProviderDB::Primitives>> -where - ProviderDB: DBProvider - + StateChangeWriter - + HeaderProvider - + StaticFileProviderFactory, -{ - /// Write the data and receipts to the database or static files if `static_file_producer` is - /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. 
- fn write_to_storage( - &mut self, - execution_outcome: ExecutionOutcome, - is_value_known: OriginalValuesKnown, - ) -> ProviderResult<()> { - let (plain_state, reverts) = - execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); - - self.database().write_state_reverts(reverts, execution_outcome.first_block)?; - - self.append_receipts_from_blocks( - execution_outcome.first_block, - execution_outcome.receipts.into_iter(), - )?; - - self.database().write_state_changes(plain_state)?; - - Ok(()) - } -} - #[cfg(test)] mod tests { use super::*; @@ -417,6 +243,7 @@ mod tests { models::{AccountBeforeTx, BlockNumberAddress}, transaction::{DbTx, DbTxMut}, }; + use reth_execution_types::ExecutionOutcome; use reth_primitives::{Account, Receipt, Receipts, StorageEntry}; use reth_storage_api::DatabaseProviderFactory; use reth_trie::{ @@ -679,9 +506,8 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); // Check plain storage state @@ -780,9 +606,8 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); assert_eq!( @@ -848,9 +673,8 @@ mod tests { let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + 
provider + .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -997,9 +821,8 @@ mod tests { let outcome: ExecutionOutcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider @@ -1163,9 +986,8 @@ mod tests { init_state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -1211,9 +1033,8 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider diff --git a/crates/storage/provider/src/writer/static_file.rs b/crates/storage/provider/src/writer/static_file.rs deleted file mode 100644 index f7227d21ef3..00000000000 --- a/crates/storage/provider/src/writer/static_file.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::providers::StaticFileProviderRWRefMut; -use 
alloy_primitives::{BlockNumber, TxNumber}; -use reth_errors::ProviderResult; -use reth_node_types::NodePrimitives; -use reth_primitives::Receipt; -use reth_storage_api::ReceiptWriter; - -pub(crate) struct StaticFileWriter<'a, W>(pub(crate) &'a mut W); - -impl ReceiptWriter for StaticFileWriter<'_, StaticFileProviderRWRefMut<'_, N>> { - fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - block_number: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()> { - // Increment block on static file header. - self.0.increment_block(block_number)?; - let receipts = receipts.iter().enumerate().map(|(tx_idx, receipt)| { - Ok(( - first_tx_index + tx_idx as u64, - receipt - .as_ref() - .expect("receipt should not be filtered when saving to static files."), - )) - }); - self.0.append_receipts(receipts)?; - Ok(()) - } -} diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs index 06c6103ee9b..bd6b978e375 100644 --- a/crates/storage/storage-api/src/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -1,6 +1,6 @@ use crate::BlockIdReader; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockNumber, TxHash, TxNumber}; +use alloy_primitives::{TxHash, TxNumber}; use reth_primitives::Receipt; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; @@ -68,20 +68,3 @@ pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { self.receipts_by_block_id(number_or_tag.into()) } } - -/// Writer trait for writing [`Receipt`] data. -pub trait ReceiptWriter { - /// Appends receipts for a block. - /// - /// # Parameters - /// - `first_tx_index`: The transaction number of the first receipt in the block. - /// - `block_number`: The block number to which the receipts belong. - /// - `receipts`: A vector of optional receipts in the block. If `None`, it means they were - /// pruned. 
- fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - block_number: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()>; -} From 4dfaa4638041662f051e397cdf593462ee017ac6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 16:19:58 +0100 Subject: [PATCH 705/970] chore: unify recover_singer (#12881) --- Cargo.lock | 3 ++ crates/engine/invalid-block-hooks/Cargo.toml | 1 + .../engine/invalid-block-hooks/src/witness.rs | 4 +- .../primitives/benches/recover_ecdsa_crit.rs | 1 + crates/primitives/src/transaction/mod.rs | 49 +++++-------------- .../src/segments/user/sender_recovery.rs | 1 + crates/rpc/rpc-eth-types/src/gas_oracle.rs | 14 +++--- crates/rpc/rpc-eth-types/src/receipt.rs | 4 +- crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/debug.rs | 1 + crates/stages/stages/src/stages/prune.rs | 1 + .../stages/src/stages/sender_recovery.rs | 12 ++--- .../src/providers/blockchain_provider.rs | 14 +++--- .../provider/src/providers/database/mod.rs | 1 + .../storage/provider/src/test_utils/mock.rs | 1 + testing/testing-utils/Cargo.toml | 1 + testing/testing-utils/src/generators.rs | 1 + 17 files changed, 50 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7db867d5545..5e1065fd529 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7698,6 +7698,7 @@ dependencies = [ "reth-engine-primitives", "reth-evm", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", @@ -8785,6 +8786,7 @@ dependencies = [ "reth-network-types", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", @@ -9269,6 +9271,7 @@ dependencies = [ "alloy-primitives", "rand 0.8.5", "reth-primitives", + "reth-primitives-traits", "secp256k1", ] diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml index e5eb998dc1f..a7b0153d0d4 100644 --- a/crates/engine/invalid-block-hooks/Cargo.toml +++ 
b/crates/engine/invalid-block-hooks/Cargo.toml @@ -16,6 +16,7 @@ reth-chainspec.workspace = true reth-engine-primitives.workspace = true reth-evm.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-revm = { workspace = true, features = ["serde"] } reth-rpc-api = { workspace = true, features = ["client"] } diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 4e92411ea12..a9cafbdb12e 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,5 +1,3 @@ -use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; - use alloy_consensus::Header; use alloy_primitives::{keccak256, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; @@ -11,6 +9,7 @@ use reth_evm::{ state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, }; use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, @@ -22,6 +21,7 @@ use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; use reth_trie::{updates::TrieUpdates, HashedPostState, HashedStorage}; use serde::Serialize; +use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; /// Generates a witness for the given block and saves it to a file. 
#[derive(Debug)] diff --git a/crates/primitives/benches/recover_ecdsa_crit.rs b/crates/primitives/benches/recover_ecdsa_crit.rs index 8e8e279b2a4..9273d71f6f5 100644 --- a/crates/primitives/benches/recover_ecdsa_crit.rs +++ b/crates/primitives/benches/recover_ecdsa_crit.rs @@ -4,6 +4,7 @@ use alloy_rlp::Decodable; use criterion::{criterion_group, criterion_main, Criterion}; use pprof::criterion::{Output, PProfProfiler}; use reth_primitives::TransactionSigned; +use reth_primitives_traits::SignedTransaction; /// Benchmarks the recovery of the public key from the ECDSA message using criterion. pub fn criterion_benchmark(c: &mut Criterion) { diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f61415ec2ec..bc8bb78fe80 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1074,42 +1074,6 @@ impl TransactionSigned { self.hash.get_or_init(|| self.recalculate_hash()) } - /// Recover signer from signature and hash. - /// - /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also [`recover_signer`]. - /// - /// Note: - /// - /// This can fail for some early ethereum mainnet transactions pre EIP-2, use - /// [`Self::recover_signer_unchecked`] if you want to recover the signer without ensuring that - /// the signature has a low `s` value. - pub fn recover_signer(&self) -> Option

{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - let signature_hash = self.signature_hash(); - recover_signer(&self.signature, signature_hash) - } - - /// Recover signer from signature and hash _without ensuring that the signature has a low `s` - /// value_. - /// - /// Returns `None` if the transaction's signature is invalid, see also - /// [`recover_signer_unchecked`]. - pub fn recover_signer_unchecked(&self) -> Option
{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - let signature_hash = self.signature_hash(); - recover_signer_unchecked(&self.signature, signature_hash) - } - /// Recovers a list of signers from a transaction list iterator. /// /// Returns `None`, if some transaction's signature is invalid, see also @@ -1281,11 +1245,23 @@ impl SignedTransaction for TransactionSigned { } fn recover_signer(&self) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + #[cfg(feature = "optimism")] + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) + } let signature_hash = self.signature_hash(); recover_signer(&self.signature, signature_hash) } fn recover_signer_unchecked(&self) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + #[cfg(feature = "optimism")] + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) + } let signature_hash = self.signature_hash(); recover_signer_unchecked(&self.signature, signature_hash) } @@ -1971,6 +1947,7 @@ mod tests { use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_codecs::Compact; + use reth_primitives_traits::SignedTransaction; use std::str::FromStr; #[test] diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index f189e6c36af..77bb0a5e2d4 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -90,6 +90,7 @@ mod tests { Itertools, }; use reth_db::tables; + use reth_primitives_traits::SignedTransaction; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment}; use reth_stages::test_utils::{StorageKind, TestStageDB}; diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index d73cd72b650..3f8186ae150 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -7,7 +7,14 @@ use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockId; use derive_more::{Deref, DerefMut, From, Into}; use itertools::Itertools; -use reth_rpc_server_types::constants; +use reth_primitives_traits::SignedTransaction; +use reth_rpc_server_types::{ + constants, + constants::gas_oracle::{ + DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, + DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, + }, +}; use reth_storage_api::BlockReaderIdExt; use schnellru::{ByLength, LruMap}; use 
serde::{Deserialize, Serialize}; @@ -15,11 +22,6 @@ use std::fmt::{self, Debug, Formatter}; use tokio::sync::Mutex; use tracing::warn; -use reth_rpc_server_types::constants::gas_oracle::{ - DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, - DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, -}; - use super::{EthApiError, EthResult, EthStateCache, RpcInvalidTransactionError}; /// The default gas limit for `eth_call` and adjacent calls. See diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 247b4449ef5..3136d42e958 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,13 +1,13 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. +use super::{EthApiError, EthResult}; use alloy_consensus::{ReceiptEnvelope, Transaction}; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt}; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; +use reth_primitives_traits::SignedTransaction; use revm_primitives::calc_blob_gasprice; -use super::{EthApiError, EthResult}; - /// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. 
pub fn build_receipt( transaction: &TransactionSigned, diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 804ecd11120..834b1a963bf 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true reth-errors.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index ad3294d503c..f16faddbfff 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -19,6 +19,7 @@ use reth_evm::{ ConfigureEvmEnv, }; use reth_primitives::{Block, BlockExt, SealedBlockWithSenders}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, TransactionVariant, diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 8adf2fcad54..527f5376697 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -171,6 +171,7 @@ mod tests { }; use alloy_primitives::B256; use reth_primitives::SealedBlock; + use reth_primitives_traits::SignedTransaction; use reth_provider::{ providers::StaticFileWriter, TransactionsProvider, TransactionsProviderExt, }; diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index d611062b565..a6c2537c185 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -361,10 +361,16 @@ struct FailedSenderRecoveryError { #[cfg(test)] mod tests { + use super::*; + use crate::test_utils::{ + stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, + TestRunnerError, TestStageDB, UnwindStageTestRunner, + }; use 
alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db_api::cursor::DbCursorRO; use reth_primitives::{SealedBlock, TransactionSigned}; + use reth_primitives_traits::SignedTransaction; use reth_provider::{ providers::StaticFileWriter, DatabaseProviderFactory, PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, @@ -375,12 +381,6 @@ mod tests { self, random_block, random_block_range, BlockParams, BlockRangeParams, }; - use super::*; - use crate::test_utils::{ - stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, - TestRunnerError, TestStageDB, UnwindStageTestRunner, - }; - stage_test_suite_ext!(SenderRecoveryTestRunner, sender_recovery); /// Execute a block range with a single transaction diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 67f9cfe587d..26a652e7e67 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -775,12 +775,6 @@ impl StateReader for BlockchainProvider2 { #[cfg(test)] mod tests { - use std::{ - ops::{Range, RangeBounds}, - sync::Arc, - time::Instant, - }; - use crate::{ providers::BlockchainProvider2, test_utils::{ @@ -812,7 +806,7 @@ mod tests { use reth_primitives::{ BlockExt, Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, }; - use reth_primitives_traits::BlockBody as _; + use reth_primitives_traits::{BlockBody as _, SignedTransaction}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, ReceiptProvider, @@ -824,7 +818,11 @@ mod tests { random_receipt, BlockParams, BlockRangeParams, }; use revm::db::BundleState; - use std::ops::Bound; + use std::{ + ops::{Bound, Range, RangeBounds}, + sync::Arc, + time::Instant, + }; const TEST_BLOCKS_COUNT: usize = 5; 
diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 90645de9b5a..e033803680a 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -670,6 +670,7 @@ mod tests { test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, }; use reth_primitives::StaticFileSegment; + use reth_primitives_traits::SignedTransaction; use reth_prune_types::{PruneMode, PruneModes}; use reth_storage_errors::provider::ProviderError; use reth_testing_utils::generators::{self, random_block, random_header, BlockParams}; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index c95ba0ed7cb..cfee10f9e38 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -26,6 +26,7 @@ use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, EthPrimitives, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, }; +use reth_primitives_traits::SignedTransaction; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ DatabaseProviderFactory, StageCheckpointReader, StateProofProvider, StorageRootProvider, diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index d0de37bf77f..a6197d7e0cf 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -24,3 +24,4 @@ secp256k1 = { workspace = true, features = ["rand"] } [dev-dependencies] alloy-eips.workspace = true +reth-primitives-traits .workspace = true diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index d8f3a29790b..9963b447e96 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -453,6 +453,7 @@ mod tests { use alloy_eips::eip2930::AccessList; use 
alloy_primitives::{hex, PrimitiveSignature as Signature}; use reth_primitives::public_key_to_address; + use reth_primitives_traits::SignedTransaction; use std::str::FromStr; #[test] From 6bba5e6630b199854f2a86d37946793e37b21519 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 26 Nov 2024 16:37:34 +0100 Subject: [PATCH 706/970] chore(ci): debug logs on kurtosis-op client advance check failure (#12882) --- .github/assets/hive/Dockerfile | 3 ++- .github/workflows/kurtosis-op.yml | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/assets/hive/Dockerfile b/.github/assets/hive/Dockerfile index 9f75ba6f1cf..25b71bf2187 100644 --- a/.github/assets/hive/Dockerfile +++ b/.github/assets/hive/Dockerfile @@ -5,4 +5,5 @@ COPY dist/reth /usr/local/bin COPY LICENSE-* ./ EXPOSE 30303 30303/udp 9001 8545 8546 -ENTRYPOINT ["/usr/local/bin/reth"] \ No newline at end of file +ENV RUST_LOG=debug +ENTRYPOINT ["/usr/local/bin/reth"] diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml index 2652992fca9..c7307d10c7b 100644 --- a/.github/workflows/kurtosis-op.yml +++ b/.github/workflows/kurtosis-op.yml @@ -102,6 +102,8 @@ jobs: if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH" done + kurtosis service logs -a op-devnet op-el-2-op-reth-op-node-op-kurtosis + kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis exit 1 From ebf837e6e88ce4e276831c595700a05a3e2646ed Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 17:21:52 +0100 Subject: [PATCH 707/970] chore: unify more tx signed fns (#12883) --- crates/net/network/src/transactions/mod.rs | 2 +- crates/primitives/src/transaction/mod.rs | 16 +++------------- crates/primitives/src/transaction/signature.rs | 1 + crates/transaction-pool/src/maintain.rs | 3 ++- crates/transaction-pool/src/traits.rs | 3 ++- 5 files changed, 9 insertions(+), 16 
deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 227e812fc74..ff76a6d2921 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -2186,7 +2186,7 @@ mod tests { .await; assert!(!pool.is_empty()); - assert!(pool.get(signed_tx.hash_ref()).is_some()); + assert!(pool.get(signed_tx.tx_hash()).is_some()); handle.terminate().await; } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index bc8bb78fe80..db789d1f6de 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1035,7 +1035,7 @@ impl PartialEq for TransactionSigned { fn eq(&self, other: &Self) -> bool { self.signature == other.signature && self.transaction == other.transaction && - self.hash_ref() == other.hash_ref() + self.tx_hash() == other.tx_hash() } } @@ -1054,11 +1054,6 @@ impl TransactionSigned { Self { hash: Default::default(), signature, transaction } } - /// Transaction signature. - pub const fn signature(&self) -> &Signature { - &self.signature - } - /// Transaction pub const fn transaction(&self) -> &Transaction { &self.transaction @@ -1066,12 +1061,7 @@ impl TransactionSigned { /// Transaction hash. Used to identify transaction. pub fn hash(&self) -> TxHash { - *self.hash_ref() - } - - /// Reference to transaction hash. Used to identify transaction. - pub fn hash_ref(&self) -> &TxHash { - self.hash.get_or_init(|| self.recalculate_hash()) + *self.tx_hash() } /// Recovers a list of signers from a transaction list iterator. 
@@ -1237,7 +1227,7 @@ impl SignedTransaction for TransactionSigned { type Type = TxType; fn tx_hash(&self) -> &TxHash { - self.hash_ref() + self.hash.get_or_init(|| self.recalculate_hash()) } fn signature(&self) -> &Signature { diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 8fab719947a..6056266ae0f 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -72,6 +72,7 @@ mod tests { }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, Address, PrimitiveSignature as Signature, B256, U256}; + use reth_primitives_traits::SignedTransaction; use std::str::FromStr; #[test] diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 47e70e91433..02f218d4b09 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -21,6 +21,7 @@ use reth_primitives::{ PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, }; +use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; use std::{ @@ -317,7 +318,7 @@ pub async fn maintain_transaction_pool( // find all transactions that were mined in the old chain but not in the new chain let pruned_old_transactions = old_blocks .transactions_ecrecovered() - .filter(|tx| !new_mined_transactions.contains(tx.hash_ref())) + .filter(|tx| !new_mined_transactions.contains(tx.tx_hash())) .filter_map(|tx| { if tx.is_eip4844() { // reorged blobs no longer include the blob, which is necessary for diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 2973e36f7a6..8945d713976 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -23,6 +23,7 @@ use reth_primitives::{ 
PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; +use reth_primitives_traits::SignedTransaction; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ @@ -1244,7 +1245,7 @@ impl PoolTransaction for EthPooledTransaction { /// Returns hash of the transaction. fn hash(&self) -> &TxHash { - self.transaction.hash_ref() + self.transaction.tx_hash() } /// Returns the Sender of the transaction. From d51b347c8197a463cdcca49fa1a882c3c764135a Mon Sep 17 00:00:00 2001 From: ftupas <35031356+ftupas@users.noreply.github.com> Date: Tue, 26 Nov 2024 17:24:01 +0100 Subject: [PATCH 708/970] feat: add utility trait methods to Transaction (#12704) --- .../primitives-traits/src/transaction/mod.rs | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index b67e51024bf..3a0871c99a4 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -7,6 +7,11 @@ pub mod tx_type; use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; use core::{fmt, hash::Hash}; +use alloy_consensus::constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, +}; + /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullTransaction: Transaction + MaybeCompact {} @@ -27,6 +32,35 @@ pub trait Transaction: + MaybeSerde + MaybeArbitrary { + /// Returns true if the transaction is a legacy transaction. + #[inline] + fn is_legacy(&self) -> bool { + self.ty() == LEGACY_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-2930 transaction. + #[inline] + fn is_eip2930(&self) -> bool { + self.ty() == EIP2930_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-1559 transaction. 
+ #[inline] + fn is_eip1559(&self) -> bool { + self.ty() == EIP1559_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-4844 transaction. + #[inline] + fn is_eip4844(&self) -> bool { + self.ty() == EIP4844_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-7702 transaction. + #[inline] + fn is_eip7702(&self) -> bool { + self.ty() == EIP7702_TX_TYPE_ID + } } impl Transaction for T where From 2ce741709f9c29bae2e253bda3397c36def8351d Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 26 Nov 2024 20:26:36 +0400 Subject: [PATCH 709/970] feat: relax provider bounds (#12857) --- Cargo.lock | 2 +- .../src/commands/debug_cmd/build_block.rs | 4 +- .../commands/debug_cmd/in_memory_merkle.rs | 10 +- bin/reth/src/commands/debug_cmd/merkle.rs | 9 +- .../src/commands/debug_cmd/replay_engine.rs | 9 +- crates/blockchain-tree/src/externals.rs | 16 +-- crates/chain-state/src/in_memory.rs | 112 ++++++++++-------- crates/chain-state/src/memory_overlay.rs | 7 +- crates/chain-state/src/notifications.rs | 8 +- .../cli/commands/src/stage/dump/execution.rs | 12 +- crates/cli/commands/src/stage/dump/merkle.rs | 12 +- crates/consensus/beacon/src/engine/mod.rs | 14 ++- crates/engine/tree/Cargo.toml | 2 - crates/engine/tree/src/persistence.rs | 18 +-- crates/engine/tree/src/tree/mod.rs | 2 +- crates/ethereum/node/tests/e2e/dev.rs | 6 +- crates/evm/execution-types/Cargo.toml | 4 + crates/evm/execution-types/src/chain.rs | 95 ++++++++------- crates/node/builder/src/builder/mod.rs | 8 +- crates/node/builder/src/launch/common.rs | 10 +- crates/node/builder/src/setup.rs | 6 +- crates/optimism/rpc/src/eth/block.rs | 1 - crates/primitives-traits/src/block/body.rs | 10 ++ crates/primitives-traits/src/block/mod.rs | 2 +- crates/primitives-traits/src/node.rs | 2 +- crates/primitives/src/block.rs | 26 ++-- crates/primitives/src/traits.rs | 18 ++- crates/rpc/rpc-eth-types/src/builder/ctx.rs | 8 +- crates/rpc/rpc-eth-types/src/fee_history.rs | 4 +- 
crates/rpc/rpc/src/eth/core.rs | 8 +- crates/rpc/rpc/src/eth/helpers/block.rs | 1 - crates/rpc/rpc/src/eth/pubsub.rs | 28 ++++- .../src/providers/blockchain_provider.rs | 32 +++-- .../provider/src/providers/consistent.rs | 53 ++++----- .../src/providers/database/provider.rs | 4 +- crates/storage/provider/src/providers/mod.rs | 26 ++-- .../transaction-pool/src/blobstore/tracker.rs | 12 +- 37 files changed, 365 insertions(+), 236 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5e1065fd529..976058b5d4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7270,7 +7270,6 @@ dependencies = [ "reth-payload-primitives", "reth-payload-validator", "reth-primitives", - "reth-primitives-traits", "reth-provider", "reth-prune", "reth-prune-types", @@ -7565,6 +7564,7 @@ dependencies = [ name = "reth-execution-types" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "arbitrary", diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 5fc78e884e9..dc00e07d883 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -25,7 +25,7 @@ use reth_fs_util as fs; use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use reth_primitives::{ - BlobTransaction, BlockExt, PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, + BlobTransaction, BlockExt, PooledTransactionsElement, SealedBlockFor, SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, }; use reth_provider::{ @@ -90,7 +90,7 @@ impl> Command { fn lookup_best_block>( &self, factory: ProviderFactory, - ) -> RethResult> { + ) -> RethResult>>> { let provider = factory.provider()?; let best_number = diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index dec96b50408..d592e956c20 100644 --- 
a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -19,6 +19,7 @@ use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; +use reth_node_api::{BlockTy, NodePrimitives}; use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockExt; use reth_provider::{ @@ -56,7 +57,12 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives, + >, + >( &self, config: &Config, task_executor: TaskExecutor, @@ -143,7 +149,7 @@ impl> Command { ( &block .clone() - .unseal() + .unseal::>() .with_recovered_senders() .ok_or(BlockValidationError::SenderRecoveryError)?, merkle_block_td + block.difficulty, diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 7e7579f9928..ba6fd12f895 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -17,7 +17,7 @@ use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; -use reth_node_api::BlockTy; +use reth_node_api::{BlockTy, NodePrimitives}; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider, @@ -56,7 +56,12 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives, + >, + >( &self, config: &Config, task_executor: TaskExecutor, diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 
7daead83a84..40987167391 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -18,7 +18,7 @@ use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage} use reth_fs_util as fs; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{EngineApiMessageVersion, NodeTypesWithDBAdapter}; +use reth_node_api::{EngineApiMessageVersion, NodePrimitives, NodeTypesWithDBAdapter}; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ @@ -55,7 +55,12 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives, + >, + >( &self, config: &Config, task_executor: TaskExecutor, diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index d6dc84eee48..2a825921f89 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -5,23 +5,15 @@ use reth_consensus::Consensus; use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_node_types::NodeTypesWithDB; -use reth_primitives::{EthPrimitives, StaticFileSegment}; +use reth_primitives::StaticFileSegment; use reth_provider::{ - providers::{NodeTypesForProvider, ProviderNodeTypes}, - ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, - StatsReader, + providers::ProviderNodeTypes, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, + StaticFileProviderFactory, StatsReader, }; use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; -/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within [`TreeExternals`]. 
-pub trait NodeTypesForTree: NodeTypesForProvider {} - -impl NodeTypesForTree for T where T: NodeTypesForProvider {} - -/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within [`TreeExternals`]. -pub trait TreeNodeTypes: ProviderNodeTypes + NodeTypesForTree {} -impl TreeNodeTypes for T where T: ProviderNodeTypes + NodeTypesForTree {} +pub use reth_provider::providers::{NodeTypesForTree, TreeNodeTypes}; /// A container for external components. /// diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 5bb3911f9f3..f43aae562e0 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -4,18 +4,18 @@ use crate::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, ChainInfoTracker, MemoryOverlayStateProvider, }; -use alloy_consensus::Header; -use alloy_eips::{BlockHashOrNumber, BlockNumHash}; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, Address, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, NodePrimitives, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, - TransactionMeta, TransactionSigned, + BlockWithSenders, HeaderExt, NodePrimitives, Receipts, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, TransactionMeta, }; -use reth_primitives_traits::BlockBody as _; +use reth_primitives_traits::{Block, BlockBody as _, SignedTransaction}; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; @@ -159,10 +159,8 @@ impl CanonicalInMemoryStateInner { } } -type PendingBlockAndReceipts = ( - SealedBlock, reth_primitives_traits::BodyTy>, - Vec>, -); +type 
PendingBlockAndReceipts = + (SealedBlockFor<::Block>, Vec>); /// This type is responsible for providing the blocks, receipts, and state for /// all canonical blocks not on disk yet and keeps track of the block range that @@ -172,13 +170,7 @@ pub struct CanonicalInMemoryState>, } -impl CanonicalInMemoryState -where - N: NodePrimitives< - BlockHeader = alloy_consensus::Header, - BlockBody = reth_primitives::BlockBody, - >, -{ +impl CanonicalInMemoryState { /// Create a new in-memory state with the given blocks, numbers, pending state, and optional /// finalized header. pub fn new( @@ -250,7 +242,7 @@ where /// Note: This assumes that the parent block of the pending block is canonical. pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block - let parent = self.state_by_hash(pending.block().parent_hash); + let parent = self.state_by_hash(pending.block().parent_hash()); let pending = BlockState::with_parent(pending, parent); self.inner.in_memory_state.pending.send_modify(|p| { p.replace(pending); @@ -274,14 +266,14 @@ where // we first remove the blocks from the reorged chain for block in reorged { let hash = block.block().hash(); - let number = block.block().number; + let number = block.block().number(); blocks.remove(&hash); numbers.remove(&number); } // insert the new blocks for block in new_blocks { - let parent = blocks.get(&block.block().parent_hash).cloned(); + let parent = blocks.get(&block.block().parent_hash()).cloned(); let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -341,16 +333,16 @@ where // height) let mut old_blocks = blocks .drain() - .filter(|(_, b)| b.block_ref().block().number > persisted_height) + .filter(|(_, b)| b.block_ref().block().number() > persisted_height) .map(|(_, b)| b.block.clone()) .collect::>(); // sort the blocks by number so we can insert them back in natural order (low -> high) - 
old_blocks.sort_unstable_by_key(|block| block.block().number); + old_blocks.sort_unstable_by_key(|block| block.block().number()); // re-insert the blocks in natural order and connect them to their parent blocks for block in old_blocks { - let parent = blocks.get(&block.block().parent_hash).cloned(); + let parent = blocks.get(&block.block().parent_hash()).cloned(); let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -363,7 +355,7 @@ where // also shift the pending state if it exists self.inner.in_memory_state.pending.send_modify(|p| { if let Some(p) = p.as_mut() { - p.parent = blocks.get(&p.block_ref().block.parent_hash).cloned(); + p.parent = blocks.get(&p.block_ref().block.parent_hash()).cloned(); } }); } @@ -474,7 +466,7 @@ where } /// Returns the `Header` corresponding to the pending state. - pub fn pending_header(&self) -> Option
{ + pub fn pending_header(&self) -> Option { self.pending_sealed_header().map(|sealed_header| sealed_header.unseal()) } @@ -484,7 +476,10 @@ where } /// Returns the `SealedBlockWithSenders` corresponding to the pending state. - pub fn pending_block_with_senders(&self) -> Option { + pub fn pending_block_with_senders(&self) -> Option> + where + N::SignedTx: SignedTransaction, + { self.pending_state() .and_then(|block_state| block_state.block_ref().block().clone().seal_with_senders()) } @@ -546,7 +541,10 @@ where } /// Returns a `TransactionSigned` for the given `TxHash` if found. - pub fn transaction_by_hash(&self, hash: TxHash) -> Option { + pub fn transaction_by_hash(&self, hash: TxHash) -> Option + where + N::SignedTx: Encodable2718, + { for block_state in self.canonical_chain() { if let Some(tx) = block_state .block_ref() @@ -554,7 +552,7 @@ where .body .transactions() .iter() - .find(|tx| tx.hash() == hash) + .find(|tx| tx.trie_hash() == hash) { return Some(tx.clone()) } @@ -567,7 +565,10 @@ where pub fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> Option<(TransactionSigned, TransactionMeta)> { + ) -> Option<(N::SignedTx, TransactionMeta)> + where + N::SignedTx: Encodable2718, + { for block_state in self.canonical_chain() { if let Some((index, tx)) = block_state .block_ref() @@ -576,16 +577,16 @@ where .transactions() .iter() .enumerate() - .find(|(_, tx)| tx.hash() == tx_hash) + .find(|(_, tx)| tx.trie_hash() == tx_hash) { let meta = TransactionMeta { tx_hash, index: index as u64, block_hash: block_state.hash(), - block_number: block_state.block_ref().block.number, - base_fee: block_state.block_ref().block.header.base_fee_per_gas, - timestamp: block_state.block_ref().block.timestamp, - excess_blob_gas: block_state.block_ref().block.excess_blob_gas, + block_number: block_state.block_ref().block.number(), + base_fee: block_state.block_ref().block.header.base_fee_per_gas(), + timestamp: block_state.block_ref().block.timestamp(), + excess_blob_gas: 
block_state.block_ref().block.excess_blob_gas(), }; return Some((tx.clone(), meta)) } @@ -636,14 +637,15 @@ impl BlockState { } /// Returns the block with senders for the state. - pub fn block_with_senders(&self) -> BlockWithSenders { + pub fn block_with_senders(&self) -> BlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); - BlockWithSenders::new_unchecked(block.unseal(), senders) + let (header, body) = block.split(); + BlockWithSenders::new_unchecked(N::Block::new(header.unseal(), body), senders) } /// Returns the sealed block with senders for the state. - pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { + pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); SealedBlockWithSenders { block, senders } @@ -656,13 +658,13 @@ impl BlockState { /// Returns the block number of executed block that determines the state. pub fn number(&self) -> u64 { - self.block.block().number + self.block.block().number() } /// Returns the state root after applying the executed block that determines /// the state. pub fn state_root(&self) -> B256 { - self.block.block().header.state_root + self.block.block().header.state_root() } /// Returns the `Receipts` of executed block that determines the state. @@ -748,7 +750,10 @@ impl BlockState { } /// Tries to find a transaction by [`TxHash`] in the chain ending at this block. 
- pub fn transaction_on_chain(&self, hash: TxHash) -> Option { + pub fn transaction_on_chain(&self, hash: TxHash) -> Option + where + N::SignedTx: Encodable2718, + { self.chain().find_map(|block_state| { block_state .block_ref() @@ -756,7 +761,7 @@ impl BlockState { .body .transactions() .iter() - .find(|tx| tx.hash() == hash) + .find(|tx| tx.trie_hash() == hash) .cloned() }) } @@ -765,7 +770,10 @@ impl BlockState { pub fn transaction_meta_on_chain( &self, tx_hash: TxHash, - ) -> Option<(TransactionSigned, TransactionMeta)> { + ) -> Option<(N::SignedTx, TransactionMeta)> + where + N::SignedTx: Encodable2718, + { self.chain().find_map(|block_state| { block_state .block_ref() @@ -774,16 +782,16 @@ impl BlockState { .transactions() .iter() .enumerate() - .find(|(_, tx)| tx.hash() == tx_hash) + .find(|(_, tx)| tx.trie_hash() == tx_hash) .map(|(index, tx)| { let meta = TransactionMeta { tx_hash, index: index as u64, block_hash: block_state.hash(), - block_number: block_state.block_ref().block.number, - base_fee: block_state.block_ref().block.header.base_fee_per_gas, - timestamp: block_state.block_ref().block.timestamp, - excess_blob_gas: block_state.block_ref().block.excess_blob_gas, + block_number: block_state.block_ref().block.number(), + base_fee: block_state.block_ref().block.header.base_fee_per_gas(), + timestamp: block_state.block_ref().block.timestamp(), + excess_blob_gas: block_state.block_ref().block.excess_blob_gas(), }; (tx.clone(), meta) }) @@ -795,7 +803,7 @@ impl BlockState { #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct ExecutedBlock { /// Sealed block the rest of fields refer to. - pub block: Arc, + pub block: Arc>, /// Block's senders. pub senders: Arc>, /// Block's execution outcome. @@ -809,7 +817,7 @@ pub struct ExecutedBlock { impl ExecutedBlock { /// [`ExecutedBlock`] constructor. 
pub const fn new( - block: Arc, + block: Arc>, senders: Arc>, execution_output: Arc>, hashed_state: Arc, @@ -819,7 +827,7 @@ impl ExecutedBlock { } /// Returns a reference to the executed block. - pub fn block(&self) -> &SealedBlock { + pub fn block(&self) -> &SealedBlockFor { &self.block } @@ -831,7 +839,7 @@ impl ExecutedBlock { /// Returns a [`SealedBlockWithSenders`] /// /// Note: this clones the block and senders. - pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { + pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { SealedBlockWithSenders { block: (*self.block).clone(), senders: (*self.senders).clone() } } @@ -869,7 +877,7 @@ pub enum NewCanonicalChain { }, } -impl NewCanonicalChain { +impl> NewCanonicalChain { /// Returns the length of the new chain. pub fn new_block_count(&self) -> usize { match self { @@ -922,7 +930,7 @@ impl NewCanonicalChain { /// /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least /// 1 new block. - pub fn tip(&self) -> &SealedBlock { + pub fn tip(&self) -> &SealedBlockFor { match self { Self::Commit { new } | Self::Reorg { new, .. } => { new.last().expect("non empty blocks").block() diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 8bc4ada9e8d..c84bd8c93f0 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -1,4 +1,5 @@ use super::ExecutedBlock; +use alloy_consensus::BlockHeader; use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, @@ -75,7 +76,7 @@ macro_rules! impl_state_provider { impl $($tokens)* BlockHashReader for $type { fn block_hash(&self, number: BlockNumber) -> ProviderResult> { for block in &self.in_memory { - if block.block.number == number { + if block.block.number() == number { return Ok(Some(block.block.hash())) } } @@ -92,9 +93,9 @@ macro_rules! 
impl_state_provider { let mut earliest_block_number = None; let mut in_memory_hashes = Vec::new(); for block in &self.in_memory { - if range.contains(&block.block.number) { + if range.contains(&block.block.number()) { in_memory_hashes.insert(0, block.block.hash()); - earliest_block_number = Some(block.block.number); + earliest_block_number = Some(block.block.number()); } } diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index a87d972907e..2aae56aea2a 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -1,5 +1,6 @@ //! Canonical chain state notification trait and types. +use alloy_eips::eip2718::Encodable2718; use derive_more::{Deref, DerefMut}; use reth_execution_types::{BlockReceipts, Chain}; use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; @@ -122,7 +123,7 @@ impl CanonStateNotification { /// /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least /// 1 new block. - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { match self { Self::Commit { new } | Self::Reorg { new, .. } => new.tip(), } @@ -133,7 +134,10 @@ impl CanonStateNotification { /// /// The boolean in the tuple (2nd element) denotes whether the receipt was from the reverted /// chain segment. 
- pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> { + pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> + where + N::SignedTx: Encodable2718, + { let mut receipts = Vec::new(); // get old receipts diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 19704cb1c2f..4afcdf4461e 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -7,6 +7,7 @@ use reth_db_api::{ }; use reth_db_common::DbTool; use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider}; +use reth_node_api::NodePrimitives; use reth_node_builder::NodeTypesWithDB; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ @@ -25,7 +26,10 @@ pub(crate) async fn dump_execution_stage( executor: E, ) -> eyre::Result<()> where - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + DB = Arc, + Primitives: NodePrimitives, + >, E: BlockExecutorProvider, { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; @@ -131,7 +135,9 @@ fn import_tables_with_range( /// Dry-run an unwind to FROM block, so we can get the `PlainStorageState` and /// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. -fn unwind_and_copy( +fn unwind_and_copy< + N: ProviderNodeTypes>, +>( db_tool: &DbTool, from: u64, tip_block_number: u64, @@ -168,7 +174,7 @@ fn dry_run( executor: E, ) -> eyre::Result<()> where - N: ProviderNodeTypes, + N: ProviderNodeTypes>, E: BlockExecutorProvider, { info!(target: "reth::cli", "Executing stage. 
[dry-run]"); diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index f2688c365e1..3fa0c4f0728 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -9,6 +9,7 @@ use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; +use reth_node_api::NodePrimitives; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -24,7 +25,12 @@ use reth_stages::{ }; use tracing::info; -pub(crate) async fn dump_merkle_stage>>( +pub(crate) async fn dump_merkle_stage< + N: ProviderNodeTypes< + DB = Arc, + Primitives: NodePrimitives, + >, +>( db_tool: &DbTool, from: BlockNumber, to: BlockNumber, @@ -67,7 +73,9 @@ pub(crate) async fn dump_merkle_stage } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. 
-fn unwind_and_copy( +fn unwind_and_copy< + N: ProviderNodeTypes>, +>( db_tool: &DbTool, range: (u64, u64), tip_block_number: u64, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 13195a5885d..7a894f08e1c 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -26,7 +26,7 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Head, SealedBlock, SealedHeader}; +use reth_primitives::{EthPrimitives, Head, SealedBlock, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, @@ -84,9 +84,15 @@ const MAX_INVALID_HEADERS: u32 = 512u32; pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; /// Helper trait expressing requirements for node types to be used in engine. -pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {} +pub trait EngineNodeTypes: + ProviderNodeTypes + NodeTypesWithEngine +{ +} -impl EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {} +impl EngineNodeTypes for T where + T: ProviderNodeTypes + NodeTypesWithEngine +{ +} /// Represents a pending forkchoice update. 
/// @@ -2879,7 +2885,7 @@ mod tests { block1.header.set_difficulty( MAINNET.fork(EthereumHardfork::Paris).ttd().unwrap() - U256::from(1), ); - block1 = block1.unseal().seal_slow(); + block1 = block1.unseal::().seal_slow(); let (block2, exec_result2) = data.blocks[1].clone(); let mut block2 = block2.unseal().block; block2.body.withdrawals = None; diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 5242268b175..7a71ce411eb 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -26,7 +26,6 @@ reth-payload-builder-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true -reth-primitives-traits.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true @@ -101,7 +100,6 @@ test-utils = [ "reth-evm/test-utils", "reth-network-p2p/test-utils", "reth-payload-builder/test-utils", - "reth-primitives-traits/test-utils", "reth-primitives/test-utils", "reth-provider/test-utils", "reth-prune-types", diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index e56ed30c58b..50eb6aa2803 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -2,8 +2,7 @@ use crate::metrics::PersistenceMetrics; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; -use reth_primitives::BlockBody; -use reth_primitives_traits::FullNodePrimitives; +use reth_primitives::EthPrimitives; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory, @@ -20,18 +19,9 @@ use tracing::{debug, error}; /// A helper trait with requirements for [`ProviderNodeTypes`] to be used within /// [`PersistenceService`]. 
-pub trait PersistenceNodeTypes: - ProviderNodeTypes< - Primitives: FullNodePrimitives, -> -{ -} -impl PersistenceNodeTypes for T where - T: ProviderNodeTypes< - Primitives: FullNodePrimitives, - > -{ -} +pub trait PersistenceNodeTypes: ProviderNodeTypes {} +impl PersistenceNodeTypes for T where T: ProviderNodeTypes {} + /// Writes parts of reth's in memory tree state to the database and static files. /// /// This is meant to be a spawned service that listens for various incoming persistence operations, diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 9b689d18cfe..8819cda966b 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2281,7 +2281,7 @@ where self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root"); - let executed = ExecutedBlock { + let executed: ExecutedBlock = ExecutedBlock { block: sealed_block.clone(), senders: Arc::new(block.senders), execution_output: Arc::new(ExecutionOutcome::from((output, block_number))), diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index bdf444c8109..6d09612c4e1 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,11 +1,12 @@ use std::sync::Arc; +use alloy_eips::eip2718::Encodable2718; use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; use reth::{args::DevArgs, rpc::api::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, FullNodePrimitives, NodeTypes}; use reth_node_builder::{ rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, }; @@ -46,6 +47,7 @@ async fn assert_chain_advances(node: FullNode) where N: FullNodeComponents, AddOns: RethRpcAddOns, + N::Types: 
NodeTypes, { let mut notifications = node.provider.canonical_state_stream(); @@ -64,7 +66,7 @@ where let head = notifications.next().await.unwrap(); let tx = &head.tip().transactions()[0]; - assert_eq!(tx.hash(), hash); + assert_eq!(tx.trie_hash(), hash); println!("mined transaction: {hash}"); } diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 5eb3cc38437..36b36fc0216 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -20,6 +20,7 @@ reth-trie.workspace = true revm.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true @@ -42,6 +43,7 @@ serde = [ "alloy-eips/serde", "alloy-primitives/serde", "reth-primitives-traits/serde", + "alloy-consensus/serde", "reth-trie-common/serde", "reth-trie/serde", ] @@ -51,6 +53,7 @@ serde-bincode-compat = [ "reth-trie-common/serde-bincode-compat", "serde_with", "alloy-eips/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", ] std = [ "reth-primitives/std", @@ -59,4 +62,5 @@ std = [ "revm/std", "serde?/std", "reth-primitives-traits/std", + "alloy-consensus/std", ] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index e09b87ed680..339a188166d 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -2,16 +2,17 @@ use crate::ExecutionOutcome; use alloc::{borrow::Cow, collections::BTreeMap}; -use alloy_eips::{eip1898::ForkBlock, BlockNumHash}; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip1898::ForkBlock, eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ - SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionSigned, - TransactionSignedEcRecovered, + 
transaction::SignedTransactionIntoRecoveredExt, SealedBlockFor, SealedBlockWithSenders, + SealedHeader, TransactionSignedEcRecovered, }; -use reth_primitives_traits::NodePrimitives; -use reth_trie_common::updates::TrieUpdates; +use reth_primitives_traits::{Block, BlockBody, NodePrimitives, SignedTransaction}; +use reth_trie::updates::TrieUpdates; use revm::db::BundleState; /// A chain of blocks and their final state. @@ -28,7 +29,7 @@ use revm::db::BundleState; #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Chain { /// All blocks in this chain. - blocks: BTreeMap, + blocks: BTreeMap>, /// The outcome of block execution for this chain. /// /// This field contains the state of all accounts after the execution of all blocks in this @@ -49,11 +50,11 @@ impl Chain { /// /// A chain of blocks should not be empty. pub fn new( - blocks: impl IntoIterator, + blocks: impl IntoIterator>, execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { - let blocks = blocks.into_iter().map(|b| (b.number, b)).collect::>(); + let blocks = blocks.into_iter().map(|b| (b.number(), b)).collect::>(); debug_assert!(!blocks.is_empty(), "Chain should have at least one block"); Self { blocks, execution_outcome, trie_updates } @@ -61,7 +62,7 @@ impl Chain { /// Create new Chain from a single block and its state. pub fn from_block( - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { @@ -69,17 +70,17 @@ impl Chain { } /// Get the blocks in this chain. - pub const fn blocks(&self) -> &BTreeMap { + pub const fn blocks(&self) -> &BTreeMap> { &self.blocks } /// Consumes the type and only returns the blocks in this chain. - pub fn into_blocks(self) -> BTreeMap { + pub fn into_blocks(self) -> BTreeMap> { self.blocks } /// Returns an iterator over all headers in the block with increasing block numbers. 
- pub fn headers(&self) -> impl Iterator + '_ { + pub fn headers(&self) -> impl Iterator> + '_ { self.blocks.values().map(|block| block.header.clone()) } @@ -120,12 +121,15 @@ impl Chain { } /// Returns the block with matching hash. - pub fn block(&self, block_hash: BlockHash) -> Option<&SealedBlock> { + pub fn block(&self, block_hash: BlockHash) -> Option<&SealedBlockFor> { self.block_with_senders(block_hash).map(|block| &block.block) } /// Returns the block with matching hash. - pub fn block_with_senders(&self, block_hash: BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn block_with_senders( + &self, + block_hash: BlockHash, + ) -> Option<&SealedBlockWithSenders> { self.blocks.iter().find_map(|(_num, block)| (block.hash() == block_hash).then_some(block)) } @@ -134,7 +138,7 @@ impl Chain { &self, block_number: BlockNumber, ) -> Option> { - if self.tip().number == block_number { + if self.tip().number() == block_number { return Some(self.execution_outcome.clone()) } @@ -152,14 +156,14 @@ impl Chain { /// 3. The optional trie updates. pub fn into_inner( self, - ) -> (ChainBlocks<'static>, ExecutionOutcome, Option) { + ) -> (ChainBlocks<'static, N::Block>, ExecutionOutcome, Option) { (ChainBlocks { blocks: Cow::Owned(self.blocks) }, self.execution_outcome, self.trie_updates) } /// Destructure the chain into its inner components: /// 1. A reference to the blocks contained in the chain. /// 2. A reference to the execution outcome representing the final state. - pub const fn inner(&self) -> (ChainBlocks<'_>, &ExecutionOutcome) { + pub const fn inner(&self) -> (ChainBlocks<'_, N::Block>, &ExecutionOutcome) { (ChainBlocks { blocks: Cow::Borrowed(&self.blocks) }, &self.execution_outcome) } @@ -169,14 +173,15 @@ impl Chain { } /// Returns an iterator over all blocks in the chain with increasing block number. 
- pub fn blocks_iter(&self) -> impl Iterator + '_ { + pub fn blocks_iter(&self) -> impl Iterator> + '_ { self.blocks().iter().map(|block| block.1) } /// Returns an iterator over all blocks and their receipts in the chain. pub fn blocks_and_receipts( &self, - ) -> impl Iterator>)> + '_ { + ) -> impl Iterator, &Vec>)> + '_ + { self.blocks_iter().zip(self.block_receipts_iter()) } @@ -184,7 +189,7 @@ impl Chain { #[track_caller] pub fn fork_block(&self) -> ForkBlock { let first = self.first(); - ForkBlock { number: first.number.saturating_sub(1), hash: first.parent_hash } + ForkBlock { number: first.number().saturating_sub(1), hash: first.parent_hash() } } /// Get the first block in this chain. @@ -193,7 +198,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn first(&self) -> &SealedBlockWithSenders { + pub fn first(&self) -> &SealedBlockWithSenders { self.blocks.first_key_value().expect("Chain should have at least one block").1 } @@ -203,7 +208,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 } @@ -218,7 +223,7 @@ impl Chain { /// /// If chain doesn't have any blocks. pub fn range(&self) -> RangeInclusive { - self.first().number..=self.tip().number + self.first().number()..=self.tip().number() } /// Get all receipts for the given block. @@ -230,15 +235,18 @@ impl Chain { /// Get all receipts with attachment. /// /// Attachment includes block number, block hash, transaction hash and transaction index. 
- pub fn receipts_with_attachment(&self) -> Vec> { + pub fn receipts_with_attachment(&self) -> Vec> + where + N::SignedTx: Encodable2718, + { let mut receipt_attach = Vec::with_capacity(self.blocks().len()); for ((block_num, block), receipts) in self.blocks().iter().zip(self.execution_outcome.receipts().iter()) { let mut tx_receipts = Vec::with_capacity(receipts.len()); - for (tx, receipt) in block.body.transactions.iter().zip(receipts.iter()) { + for (tx, receipt) in block.body.transactions().iter().zip(receipts.iter()) { tx_receipts.push(( - tx.hash(), + tx.trie_hash(), receipt.as_ref().expect("receipts have not been pruned").clone(), )); } @@ -252,10 +260,10 @@ impl Chain { /// This method assumes that blocks attachment to the chain has already been validated. pub fn append_block( &mut self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, execution_outcome: ExecutionOutcome, ) { - self.blocks.insert(block.number, block); + self.blocks.insert(block.number(), block); self.execution_outcome.extend(execution_outcome); self.trie_updates.take(); // reset } @@ -375,22 +383,22 @@ impl fmt::Display for DisplayBlocksChain<'_> { /// All blocks in the chain #[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct ChainBlocks<'a> { - blocks: Cow<'a, BTreeMap>, +pub struct ChainBlocks<'a, B: Block> { + blocks: Cow<'a, BTreeMap>>, } -impl ChainBlocks<'_> { +impl>> ChainBlocks<'_, B> { /// Creates a consuming iterator over all blocks in the chain with increasing block number. /// /// Note: this always yields at least one block. #[inline] - pub fn into_blocks(self) -> impl Iterator { + pub fn into_blocks(self) -> impl Iterator> { self.blocks.into_owned().into_values() } /// Creates an iterator over all blocks in the chain with increasing block number. #[inline] - pub fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator)> { self.blocks.iter() } @@ -400,7 +408,7 @@ impl ChainBlocks<'_> { /// /// Chains always have at least one block. 
#[inline] - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 } @@ -410,21 +418,21 @@ impl ChainBlocks<'_> { /// /// Chains always have at least one block. #[inline] - pub fn first(&self) -> &SealedBlockWithSenders { + pub fn first(&self) -> &SealedBlockWithSenders { self.blocks.first_key_value().expect("Chain should have at least one block").1 } /// Returns an iterator over all transactions in the chain. #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.body.transactions.iter()) + pub fn transactions(&self) -> impl Iterator::Transaction> + '_ { + self.blocks.values().flat_map(|block| block.body.transactions().iter()) } /// Returns an iterator over all transactions and their senders. #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction)> + '_ { self.blocks.values().flat_map(|block| block.transactions_with_sender()) } @@ -434,20 +442,21 @@ impl ChainBlocks<'_> { #[inline] pub fn transactions_ecrecovered( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction>> + '_ + { self.transactions_with_sender().map(|(signer, tx)| tx.clone().with_signer(*signer)) } /// Returns an iterator over all transaction hashes in the block #[inline] pub fn transaction_hashes(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.transactions().iter().map(|tx| tx.hash())) + self.blocks.values().flat_map(|block| block.transactions().iter().map(|tx| tx.trie_hash())) } } -impl IntoIterator for ChainBlocks<'_> { - type Item = (BlockNumber, SealedBlockWithSenders); - type IntoIter = std::collections::btree_map::IntoIter; +impl IntoIterator for ChainBlocks<'_, B> { + type Item = (BlockNumber, SealedBlockWithSenders); + type IntoIter = std::collections::btree_map::IntoIter>; fn into_iter(self) -> Self::IntoIter { 
#[allow(clippy::unnecessary_to_owned)] diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 56ee7d9d640..c3361273433 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -244,7 +244,7 @@ where /// Configures the types of the node. pub fn with_types(self) -> NodeBuilderWithTypes> where - T: NodeTypesWithEngine + NodeTypesForProvider, + T: NodeTypesWithEngine + NodeTypesForTree, { self.with_types_and_provider() } @@ -268,7 +268,7 @@ where node: N, ) -> NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns> where - N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -305,7 +305,7 @@ where /// Configures the types of the node. pub fn with_types(self) -> WithLaunchContext>> where - T: NodeTypesWithEngine + NodeTypesForProvider, + T: NodeTypesWithEngine + NodeTypesForTree, { WithLaunchContext { builder: self.builder.with_types(), task_executor: self.task_executor } } @@ -336,7 +336,7 @@ where NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns>, > where - N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index a1a2223a470..987839912f5 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -382,7 +382,10 @@ where pub async fn create_provider_factory(&self) -> eyre::Result> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + >, { let factory = ProviderFactory::new( self.right().clone(), @@ -449,7 
+452,10 @@ where ) -> eyre::Result, ProviderFactory>>> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + >, { let factory = self.create_provider_factory().await?; let ctx = LaunchContextWith { diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 9b453234019..ea0e6b9fe79 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -41,7 +41,8 @@ where N: ProviderNodeTypes, Client: EthBlockClient + 'static, Executor: BlockExecutorProvider, - N::Primitives: FullNodePrimitives, + N::Primitives: + FullNodePrimitives, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -89,7 +90,8 @@ where H: HeaderDownloader
+ 'static, B: BodyDownloader> + 'static, Executor: BlockExecutorProvider, - N::Primitives: FullNodePrimitives, + N::Primitives: + FullNodePrimitives, { let mut builder = Pipeline::::builder(); diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 22d26e824b3..64a55496993 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -35,7 +35,6 @@ where let block_hash = block.hash(); let excess_blob_gas = block.excess_blob_gas; let timestamp = block.timestamp; - let block = block.unseal(); let l1_block_info = reth_optimism_evm::extract_l1_info(&block.body).map_err(OpEthApiError::from)?; diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index b01ef96d6f5..44120562c17 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -3,6 +3,7 @@ use alloc::{fmt, vec::Vec}; use alloy_consensus::Transaction; +use alloy_eips::eip4895::Withdrawals; use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde}; @@ -30,9 +31,18 @@ pub trait BlockBody: /// Ordered list of signed transactions as committed in block. type Transaction: Transaction; + /// Ommer header type. + type OmmerHeader; + /// Returns reference to transactions in block. fn transactions(&self) -> &[Self::Transaction]; /// Consume the block body and return a [`Vec`] of transactions. fn into_transactions(self) -> Vec; + + /// Returns block withdrawals if any. + fn withdrawals(&self) -> Option<&Withdrawals>; + + /// Returns block ommers if any. 
+ fn ommers(&self) -> Option<&[Self::OmmerHeader]>; } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 10521075095..5b22ff590be 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -44,7 +44,7 @@ pub trait Block: type Header: BlockHeader + 'static; /// The block's body contains the transactions in the block. - type Body: BlockBody + Send + Sync + Unpin + 'static; + type Body: BlockBody + Send + Sync + Unpin + 'static; /// Create new block instance. fn new(header: Self::Header, body: Self::Body) -> Self; diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index f109fdc9b24..8081a453ca3 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -14,7 +14,7 @@ pub trait NodePrimitives: /// Block header primitive. type BlockHeader: BlockHeader; /// Block body primitive. - type BlockBody: BlockBody; + type BlockBody: BlockBody; /// Signed version of the transaction type. type SignedTx: Send + Sync + Unpin + Clone + fmt::Debug + PartialEq + Eq + MaybeSerde + 'static; /// Transaction envelope type ID. diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index ac71c791c33..5618d81bd8f 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -267,11 +267,6 @@ impl SealedBlock { } impl SealedBlock { - /// Unseal the block - pub fn unseal(self) -> Block { - Block { header: self.header.unseal(), body: self.body } - } - /// Returns an iterator over all blob transactions of the block #[inline] pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { @@ -391,6 +386,14 @@ where Ok(SealedBlockWithSenders { block: self, senders }) } + /// Unseal the block + pub fn unseal(self) -> Block + where + Block: reth_primitives_traits::Block
, + { + Block::new(self.header.unseal(), self.body) + } + /// Ensures that the transaction root in the block header is valid. /// /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure @@ -455,7 +458,7 @@ where impl reth_primitives_traits::Block for SealedBlock where H: reth_primitives_traits::BlockHeader + 'static, - B: reth_primitives_traits::BlockBody + 'static, + B: reth_primitives_traits::BlockBody + 'static, Self: Serialize + for<'a> Deserialize<'a>, { type Header = H; @@ -684,6 +687,7 @@ impl InMemorySize for BlockBody { impl reth_primitives_traits::BlockBody for BlockBody { type Transaction = TransactionSigned; + type OmmerHeader = Header; fn transactions(&self) -> &[Self::Transaction] { &self.transactions @@ -692,6 +696,14 @@ impl reth_primitives_traits::BlockBody for BlockBody { fn into_transactions(self) -> Vec { self.transactions } + + fn withdrawals(&self) -> Option<&Withdrawals> { + self.withdrawals.as_ref() + } + + fn ommers(&self) -> Option<&[Self::OmmerHeader]> { + Some(&self.ommers) + } } impl From for BlockBody { @@ -1168,7 +1180,7 @@ mod tests { fn test_default_seal() { let block: SealedBlock = SealedBlock::default(); let sealed = block.hash(); - let block = block.unseal(); + let block: Block = block.unseal(); let block = block.seal_slow(); assert_eq!(sealed, block.hash()); } diff --git a/crates/primitives/src/traits.rs b/crates/primitives/src/traits.rs index 73eabd8ec98..ec4e75c8c6d 100644 --- a/crates/primitives/src/traits.rs +++ b/crates/primitives/src/traits.rs @@ -3,8 +3,8 @@ use crate::{ BlockWithSenders, SealedBlock, }; use alloc::vec::Vec; -use alloy_eips::eip2718::Encodable2718; -use reth_primitives_traits::{Block, BlockBody, SealedHeader, SignedTransaction}; +use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; +use reth_primitives_traits::{Block, BlockBody, BlockHeader, SealedHeader, SignedTransaction}; use revm_primitives::{Address, B256}; /// Extension trait for 
[`reth_primitives_traits::Block`] implementations @@ -121,3 +121,17 @@ pub trait BlockBodyTxExt: BlockBody { } impl BlockBodyTxExt for T {} + +/// Extension trait for [`BlockHeader`] adding useful helper methods. +pub trait HeaderExt: BlockHeader { + /// TODO: remove once is released + /// + /// Returns the parent block's number and hash + /// + /// Note: for the genesis block the parent number is 0 and the parent hash is the zero hash. + fn parent_num_hash(&self) -> BlockNumHash { + BlockNumHash::new(self.number().saturating_sub(1), self.parent_hash()) + } +} + +impl HeaderExt for T {} diff --git a/crates/rpc/rpc-eth-types/src/builder/ctx.rs b/crates/rpc/rpc-eth-types/src/builder/ctx.rs index 66f85f87bf6..db2beb4a454 100644 --- a/crates/rpc/rpc-eth-types/src/builder/ctx.rs +++ b/crates/rpc/rpc-eth-types/src/builder/ctx.rs @@ -42,8 +42,12 @@ where where Provider: ChainSpecProvider + 'static, Tasks: TaskSpawner, - Events: - CanonStateSubscriptions>, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { let fee_history_cache = FeeHistoryCache::new(self.cache.clone(), self.config.fee_history_cache); diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index e01578661f3..922c3f9d474 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -212,7 +212,7 @@ pub async fn fee_history_cache_new_blocks_task( ) where St: Stream> + Unpin + 'static, Provider: BlockReaderIdExt + ChainSpecProvider + 'static, - N: NodePrimitives, + N: NodePrimitives, { // We're listening for new blocks emitted when the node is in live sync. 
// If the node transitions to stage sync, we need to fetch the missing blocks @@ -249,7 +249,7 @@ pub async fn fee_history_cache_new_blocks_task( break; }; - let committed = event .committed(); + let committed = event.committed(); let (blocks, receipts): (Vec<_>, Vec<_>) = committed .blocks_and_receipts() .map(|(block, receipts)| { diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index e4efd64f2dd..34500b370e6 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -103,8 +103,12 @@ where ) -> Self where Tasks: TaskSpawner + Clone + 'static, - Events: - CanonStateSubscriptions>, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { let blocking_task_pool = BlockingTaskPool::build().expect("failed to build blocking task pool"); diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index bc1e9344799..f6aae34b961 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -32,7 +32,6 @@ where let block_hash = block.hash(); let excess_blob_gas = block.excess_blob_gas; let timestamp = block.timestamp; - let block = block.unseal(); return block .body diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 2ff627f737e..8ad809b8b18 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::TxHash; use alloy_rpc_types_eth::{ pubsub::{ @@ -85,8 +86,13 @@ impl EthPubSubApiServer where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, - Events: CanonStateSubscriptions> - + Clone + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + SignedTx: Encodable2718, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + 
> + Clone + 'static, Network: NetworkInfo + Clone + 'static, Eth: TransactionCompat + 'static, @@ -120,8 +126,13 @@ async fn handle_accepted( where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, - Events: CanonStateSubscriptions> - + Clone + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + SignedTx: Encodable2718, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + > + Clone + 'static, Network: NetworkInfo + Clone + 'static, Eth: TransactionCompat, @@ -338,8 +349,13 @@ where impl EthPubSubInner where Provider: BlockReader + EvmEnvProvider + 'static, - Events: CanonStateSubscriptions> - + 'static, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + SignedTx: Encodable2718, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + > + 'static, Network: NetworkInfo + 'static, Pool: 'static, { diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 26a652e7e67..24a4892c6be 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -57,7 +57,7 @@ pub struct BlockchainProvider2 { pub(crate) database: ProviderFactory, /// Tracks the chain info wrt forkchoice updates and in memory canonical /// state. - pub(crate) canonical_in_memory_state: CanonicalInMemoryState, + pub(crate) canonical_in_memory_state: CanonicalInMemoryState, } impl Clone for BlockchainProvider2 { @@ -117,7 +117,7 @@ impl BlockchainProvider2 { } /// Gets a clone of `canonical_in_memory_state`. - pub fn canonical_in_memory_state(&self) -> CanonicalInMemoryState { + pub fn canonical_in_memory_state(&self) -> CanonicalInMemoryState { self.canonical_in_memory_state.clone() } @@ -132,8 +132,8 @@ impl BlockchainProvider2 { /// This uses a given [`BlockState`] to initialize a state provider for that block. 
fn block_state_provider( &self, - state: &BlockState, - ) -> ProviderResult { + state: &BlockState, + ) -> ProviderResult> { let anchor_hash = state.anchor().hash; let latest_historical = self.database.history_by_block_hash(anchor_hash)?; Ok(state.state_provider(latest_historical)) @@ -647,7 +647,7 @@ impl StateProviderFactory for BlockchainProvider2 { } } -impl CanonChainTracker for BlockchainProvider2 +impl CanonChainTracker for BlockchainProvider2 where Self: BlockReader, { @@ -721,7 +721,7 @@ impl> CanonStateSubscriptions } } -impl ForkChoiceSubscriptions for BlockchainProvider2 { +impl ForkChoiceSubscriptions for BlockchainProvider2 { fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.canonical_in_memory_state.subscribe_safe_block(); ForkChoiceNotifications(receiver) @@ -2347,7 +2347,7 @@ mod tests { (block_range, |block: &SealedBlock| block.clone().unseal()), (block_with_senders_range, |block: &SealedBlock| block .clone() - .unseal() + .unseal::() .with_senders_unchecked(vec![])), (sealed_block_with_senders_range, |block: &SealedBlock| block .clone() @@ -2542,7 +2542,7 @@ mod tests { block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), - block.clone().unseal().with_recovered_senders() + block.clone().unseal::().with_recovered_senders() ), (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) ), @@ -2551,7 +2551,7 @@ mod tests { block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), - block.clone().unseal().with_recovered_senders() + block.clone().unseal::().with_recovered_senders() ), (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) ), @@ -2561,7 +2561,12 @@ mod tests { |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), Some( - 
block.clone().unseal().with_recovered_senders().unwrap().seal(block.hash()) + block + .clone() + .unseal::() + .with_recovered_senders() + .unwrap() + .seal(block.hash()) ) ), (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) @@ -2572,7 +2577,12 @@ mod tests { |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), Some( - block.clone().unseal().with_recovered_senders().unwrap().seal(block.hash()) + block + .clone() + .unseal::() + .with_recovered_senders() + .unwrap() + .seal(block.hash()) ) ), (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index b788a954134..86921a7f56e 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -8,6 +8,7 @@ use crate::{ }; use alloy_consensus::Header; use alloy_eips::{ + eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, }; @@ -48,13 +49,14 @@ use tracing::trace; /// CAUTION: Avoid holding this provider for too long or the inner database transaction will /// time-out. #[derive(Debug)] +#[doc(hidden)] // triggers ICE for `cargo docs` pub struct ConsistentProvider { /// Storage provider. storage_provider: as DatabaseProviderFactory>::Provider, /// Head block at time of [`Self`] creation - head_block: Option>, + head_block: Option>>, /// In-memory canonical state. This is not a snapshot, and can change! Use with caution. - canonical_in_memory_state: CanonicalInMemoryState, + canonical_in_memory_state: CanonicalInMemoryState, } impl ConsistentProvider { @@ -65,7 +67,7 @@ impl ConsistentProvider { /// view of memory and database. 
pub fn new( storage_provider_factory: ProviderFactory, - state: CanonicalInMemoryState, + state: CanonicalInMemoryState, ) -> ProviderResult { // Each one provides a snapshot at the time of instantiation, but its order matters. // @@ -307,7 +309,7 @@ impl ConsistentProvider { RangeInclusive, &mut P, ) -> ProviderResult>, - G: Fn(&BlockState, &mut P) -> Option, + G: Fn(&BlockState, &mut P) -> Option, P: FnMut(&T) -> bool, { // Each one provides a snapshot at the time of instantiation, but its order matters. @@ -399,8 +401,8 @@ impl ConsistentProvider { /// This uses a given [`BlockState`] to initialize a state provider for that block. fn block_state_provider_ref( &self, - state: &BlockState, - ) -> ProviderResult> { + state: &BlockState, + ) -> ProviderResult> { let anchor_hash = state.anchor().hash; let latest_historical = self.history_by_block_hash_ref(anchor_hash)?; let in_memory = state.chain().map(|block_state| block_state.block()).collect(); @@ -423,7 +425,7 @@ impl ConsistentProvider { &DatabaseProviderRO, RangeInclusive, ) -> ProviderResult>, - M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, + M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, { let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); let provider = &self.storage_provider; @@ -445,7 +447,7 @@ impl ConsistentProvider { let (start, end) = self.convert_range_bounds(range, || { in_mem_chain .iter() - .map(|b| b.block_ref().block().body.transactions.len() as u64) + .map(|b| b.block_ref().block().body.transactions().len() as u64) .sum::() + last_block_body_index.last_tx_num() }); @@ -477,7 +479,7 @@ impl ConsistentProvider { // Iterate from the lowest block to the highest in-memory chain for block_state in in_mem_chain.iter().rev() { - let block_tx_count = block_state.block_ref().block().body.transactions.len(); + let block_tx_count = block_state.block_ref().block().body.transactions().len(); let remaining = (tx_range.end() - tx_range.start() + 1) as usize; // If 
the transaction range start is equal or higher than the next block first @@ -519,7 +521,7 @@ impl ConsistentProvider { ) -> ProviderResult> where S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, - M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, + M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, { let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); let provider = &self.storage_provider; @@ -551,10 +553,10 @@ impl ConsistentProvider { let executed_block = block_state.block_ref(); let block = executed_block.block(); - for tx_index in 0..block.body.transactions.len() { + for tx_index in 0..block.body.transactions().len() { match id { HashOrNumber::Hash(tx_hash) => { - if tx_hash == block.body.transactions[tx_index].hash() { + if tx_hash == block.body.transactions()[tx_index].trie_hash() { return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) } } @@ -586,7 +588,7 @@ impl ConsistentProvider { ) -> ProviderResult where S: FnOnce(&DatabaseProviderRO) -> ProviderResult, - M: Fn(&BlockState) -> ProviderResult, + M: Fn(&BlockState) -> ProviderResult, { if let Some(Some(block_state)) = self.head_block.as_ref().map(|b| b.block_on_chain(id)) { return fetch_from_block_state(block_state) @@ -839,7 +841,7 @@ impl BlockReader for ConsistentProvider { return Ok(Some(Vec::new())) } - Ok(Some(block_state.block_ref().block().body.ommers.clone())) + Ok(block_state.block_ref().block().body.ommers().map(|o| o.to_vec())) }, ) } @@ -865,7 +867,7 @@ impl BlockReader for ConsistentProvider { // Iterate from the lowest block in memory until our target block for state in block_state.chain().collect::>().into_iter().rev() { - let block_tx_count = state.block_ref().block.body.transactions.len() as u64; + let block_tx_count = state.block_ref().block.body.transactions().len() as u64; if state.block_ref().block().number == number { stored_indices.tx_count = block_tx_count; } else { @@ -962,7 +964,7 @@ impl TransactionsProvider for 
ConsistentProvider { .block_ref() .block() .body - .transactions + .transactions() .get(tx_index) .cloned() .map(Into::into)) @@ -982,7 +984,7 @@ impl TransactionsProvider for ConsistentProvider { .block_ref() .block() .body - .transactions + .transactions() .get(tx_index) .cloned() .map(Into::into)) @@ -1050,11 +1052,7 @@ impl TransactionsProvider for ConsistentProvider { range, |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), |index_range, block_state| { - Ok(block_state.block_ref().block().body.transactions[index_range] - .iter() - .cloned() - .map(Into::into) - .collect()) + Ok(block_state.block_ref().block().body.transactions()[index_range].to_vec()) }, ) } @@ -1098,12 +1096,13 @@ impl ReceiptProvider for ConsistentProvider { // assuming 1:1 correspondence between transactions and receipts debug_assert_eq!( - block.body.transactions.len(), + block.body.transactions().len(), receipts.len(), "Mismatch between transaction and receipt count" ); - if let Some(tx_index) = block.body.transactions.iter().position(|tx| tx.hash() == hash) + if let Some(tx_index) = + block.body.transactions().iter().position(|tx| tx.trie_hash() == hash) { // safe to use tx_index for receipts due to 1:1 correspondence return Ok(receipts.get(tx_index).cloned()); @@ -1181,7 +1180,7 @@ impl WithdrawalsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.withdrawals_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()), + |block_state| Ok(block_state.block_ref().block().body.withdrawals().cloned()), ) } @@ -1196,8 +1195,8 @@ impl WithdrawalsProvider for ConsistentProvider { .block_ref() .block() .body - .withdrawals - .clone() + .withdrawals() + .cloned() .and_then(|mut w| w.pop())) }, ) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 1e64b4f8491..e50f9760d6b 100644 --- 
a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -47,7 +47,7 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; use reth_node_types::{BlockTy, BodyTy, NodeTypes, TxTy}; use reth_primitives::{ - Account, Block, BlockExt, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, + Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSignedNoHash, }; @@ -1229,7 +1229,7 @@ impl BlockReader for DatabaseProvid .pop() .ok_or(ProviderError::InvalidStorageOutput)?; - return Ok(Some(Block { header, body })) + return Ok(Some(Self::Block::new(header, body))) } } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 2b8a9d4ec4d..ab15093ac5e 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -21,12 +21,13 @@ use reth_blockchain_tree_api::{ }; use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubscriptions}; use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_node_types::{BlockTy, FullNodePrimitives, NodeTypes, NodeTypesWithDB, TxTy}; use reth_primitives::{ - Account, BlockWithSenders, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, + Account, BlockWithSenders, EthPrimitives, Receipt, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -77,10 +78,8 @@ where ChainSpec: EthereumHardforks, Storage: ChainStorage, 
Primitives: FullNodePrimitives< - SignedTx = TransactionSigned, + SignedTx: Value, BlockHeader = alloy_consensus::Header, - BlockBody = reth_primitives::BlockBody, - Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, >, >, @@ -92,10 +91,8 @@ impl NodeTypesForProvider for T where ChainSpec: EthereumHardforks, Storage: ChainStorage, Primitives: FullNodePrimitives< - SignedTx = TransactionSigned, + SignedTx: Value, BlockHeader = alloy_consensus::Header, - BlockBody = reth_primitives::BlockBody, - Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, >, > @@ -108,9 +105,18 @@ where Self: NodeTypesForProvider + NodeTypesWithDB, { } - impl ProviderNodeTypes for T where T: NodeTypesForProvider + NodeTypesWithDB {} +/// A helper trait with requirements for [`NodeTypesForProvider`] to be used within legacy +/// blockchain tree. +pub trait NodeTypesForTree: NodeTypesForProvider {} +impl NodeTypesForTree for T where T: NodeTypesForProvider {} + +/// Helper trait with requirements for [`ProviderNodeTypes`] to be used within legacy blockchain +/// tree. +pub trait TreeNodeTypes: ProviderNodeTypes + NodeTypesForTree {} +impl TreeNodeTypes for T where T: ProviderNodeTypes + NodeTypesForTree {} + /// The main type for interacting with the blockchain. /// /// This type serves as the main entry point for interacting with the blockchain and provides data @@ -342,7 +348,7 @@ impl BlockIdReader for BlockchainProvider { } } -impl BlockReader for BlockchainProvider { +impl BlockReader for BlockchainProvider { type Block = BlockTy; fn find_block_by_hash( diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index b3670496b5a..3fdcbe8b4ea 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -1,8 +1,9 @@ //! Support for maintaining the blob pool. 
+use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{BlockNumber, B256}; use reth_execution_types::ChainBlocks; -use reth_primitives_traits::BlockBody as _; +use reth_primitives_traits::{Block, BlockBody, SignedTransaction, TxType}; use std::collections::BTreeMap; /// The type that is used to track canonical blob transactions. @@ -38,14 +39,17 @@ impl BlobStoreCanonTracker { /// /// Note: In case this is a chain that's part of a reorg, this replaces previously tracked /// blocks. - pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_>) { + pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_, B>) + where + B: Block>, + { let blob_txs = blocks.iter().map(|(num, block)| { let iter = block .body .transactions() .iter() - .filter(|tx| tx.transaction.is_eip4844()) - .map(|tx| tx.hash()); + .filter(|tx| tx.tx_type().is_eip4844()) + .map(|tx| tx.trie_hash()); (*num, iter) }); self.add_blocks(blob_txs); From 5dc914be804967e91bc6a3adad3aa8c4cc66c0bb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 17:38:12 +0100 Subject: [PATCH 710/970] chore: make cargo t compile for reth-trie-common (#12884) --- crates/trie/common/Cargo.toml | 13 ++++++++----- crates/trie/common/src/hash_builder/state.rs | 2 +- crates/trie/common/src/nibbles.rs | 4 ++-- crates/trie/common/src/proofs.rs | 6 +++--- crates/trie/common/src/storage.rs | 2 +- crates/trie/common/src/updates.rs | 18 +++++++++--------- 6 files changed, 24 insertions(+), 21 deletions(-) diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 29993ab13dd..8b0d930b0c2 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -31,7 +31,6 @@ nybbles = { workspace = true, features = ["rlp"] } # `serde` feature serde = { workspace = true, optional = true } -# `serde-bincode-compat` feature serde_with = { workspace = true, optional = true } # `test-utils` feature @@ -40,16 +39,19 @@ plain_hasher = { version = "0.2", optional = true } 
arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] +reth-primitives-traits = { workspace = true, features = ["serde"] } alloy-primitives = { workspace = true, features = ["getrandom"] } -alloy-trie = { workspace = true, features = ["arbitrary"] } +alloy-trie = { workspace = true, features = ["arbitrary", "serde"] } hash-db = "=0.15.2" plain_hasher = "0.2" -serde_json.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true criterion.workspace = true bincode.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_with.workspace = true [features] serde = [ @@ -64,9 +66,10 @@ serde = [ "reth-codecs/serde" ] serde-bincode-compat = [ - "serde_with", + "serde", "reth-primitives-traits/serde-bincode-compat", - "alloy-consensus/serde-bincode-compat" + "alloy-consensus/serde-bincode-compat", + "dep:serde_with" ] test-utils = [ "dep:plain_hasher", diff --git a/crates/trie/common/src/hash_builder/state.rs b/crates/trie/common/src/hash_builder/state.rs index ee2d1d00c01..ec6b102d44e 100644 --- a/crates/trie/common/src/hash_builder/state.rs +++ b/crates/trie/common/src/hash_builder/state.rs @@ -7,7 +7,7 @@ use reth_codecs::Compact; /// The hash builder state for storing in the database. /// Check the `reth-trie` crate for more info on hash builder. 
#[derive(Debug, Clone, PartialEq, Eq, Default)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( feature = "arbitrary", derive(arbitrary::Arbitrary), diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index 402ba811069..2d4e34b3e3b 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -6,7 +6,7 @@ pub use nybbles::Nibbles; /// The representation of nibbles of the merkle trie stored in the database. #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, derive_more::Index)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibbles(pub Nibbles); @@ -63,7 +63,7 @@ impl Compact for StoredNibbles { /// The representation of nibbles of the merkle trie stored in the database. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Deref)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibblesSubKey(pub Nibbles); diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index eabc3a165c7..1bc1a1a082f 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -171,8 +171,8 @@ impl StorageMultiProof { /// The merkle proof with the relevant account info. 
#[derive(Clone, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), serde(rename_all = "camelCase"))] pub struct AccountProof { /// The address associated with the account. pub address: Address, @@ -228,7 +228,7 @@ impl AccountProof { /// The merkle proof of the storage entry. #[derive(Clone, PartialEq, Eq, Default, Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct StorageProof { /// The raw storage key. pub key: B256, diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs index 07cfde916b4..cf2945d9101 100644 --- a/crates/trie/common/src/storage.rs +++ b/crates/trie/common/src/storage.rs @@ -3,7 +3,7 @@ use reth_codecs::Compact; /// Account storage trie node. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct StorageTrieEntry { /// The nibbles of the intermediate node pub nibbles: StoredNibblesSubKey, diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 76aa37a4778..4e780a853ba 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -4,13 +4,13 @@ use std::collections::{HashMap, HashSet}; /// The aggregation of trie updates. 
#[derive(PartialEq, Eq, Clone, Default, Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct TrieUpdates { /// Collection of updated intermediate account nodes indexed by full path. - #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))] + #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_map"))] pub account_nodes: HashMap, /// Collection of removed intermediate account nodes indexed by full path. - #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))] + #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_set"))] pub removed_nodes: HashSet, /// Collection of updated storage tries indexed by the hashed address. pub storage_tries: HashMap, @@ -112,15 +112,15 @@ impl TrieUpdates { /// Trie updates for storage trie of a single account. #[derive(PartialEq, Eq, Clone, Default, Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct StorageTrieUpdates { /// Flag indicating whether the trie was deleted. pub is_deleted: bool, /// Collection of updated storage trie nodes. - #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))] + #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_map"))] pub storage_nodes: HashMap, /// Collection of removed storage trie nodes. - #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))] + #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_set"))] pub removed_nodes: HashSet, } @@ -225,7 +225,7 @@ impl StorageTrieUpdates { /// hex-encoded packed representation. /// /// This also sorts the set before serializing. 
-#[cfg(feature = "serde")] +#[cfg(any(test, feature = "serde"))] mod serde_nibbles_set { use crate::Nibbles; use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; @@ -261,7 +261,7 @@ mod serde_nibbles_set { /// hex-encoded packed representation. /// /// This also sorts the map's keys before encoding and serializing. -#[cfg(feature = "serde")] +#[cfg(any(test, feature = "serde"))] mod serde_nibbles_map { use crate::Nibbles; use alloy_primitives::hex; @@ -403,7 +403,7 @@ fn exclude_empty_from_pair( } /// Bincode-compatible trie updates type serde implementations. -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +#[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { use crate::{BranchNodeCompact, Nibbles}; use alloy_primitives::B256; From 83af493179393c81dc2c48374306f2b88cc075db Mon Sep 17 00:00:00 2001 From: frisitano <35734660+frisitano@users.noreply.github.com> Date: Wed, 27 Nov 2024 00:48:32 +0800 Subject: [PATCH 711/970] Introduce StateCommitment in StateProviders (#12602) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/stages/stages/src/stages/execution.rs | 7 ++-- .../provider/src/providers/consistent.rs | 2 +- .../src/providers/database/provider.rs | 17 +++++---- .../src/providers/state/historical.rs | 35 ++++++++++++------- .../provider/src/providers/state/latest.rs | 24 +++++++++---- crates/storage/storage-api/Cargo.toml | 1 + crates/storage/storage-api/src/state.rs | 7 ++++ 8 files changed, 65 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 976058b5d4b..2b754e8af29 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9230,6 +9230,7 @@ dependencies = [ "reth-stages-types", "reth-storage-errors", "reth-trie", + "reth-trie-db", ] [[package]] diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index e1c25c7d5fa..f7ee52fbfe3 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ 
b/crates/stages/stages/src/stages/execution.rs @@ -16,8 +16,8 @@ use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimit use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, BlockHashReader, BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, StateChangeWriter, StateWriter, StaticFileProviderFactory, - StatsReader, StorageLocation, TransactionVariant, + OriginalValuesKnown, ProviderError, StateChangeWriter, StateCommitmentProvider, StateWriter, + StaticFileProviderFactory, StatsReader, StorageLocation, TransactionVariant, }; use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; @@ -180,7 +180,8 @@ where + StatsReader + StateChangeWriter + BlockHashReader - + StateWriter, + + StateWriter + + StateCommitmentProvider, { /// Return the id of the stage fn id(&self) -> StageId { diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 86921a7f56e..64a940190db 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -115,7 +115,7 @@ impl ConsistentProvider { Ok(self.block_state_provider_ref(state)?.boxed()) } else { trace!(target: "providers::blockchain", "Using database state for latest state provider"); - self.storage_provider.latest() + Ok(self.storage_provider.latest()) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index e50f9760d6b..750a186504a 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -15,9 +15,10 @@ use crate::{ HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, 
PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, - StateChangeWriter, StateProviderBox, StateReader, StateWriter, StaticFileProviderFactory, - StatsReader, StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + StateChangeWriter, StateCommitmentProvider, StateProviderBox, StateReader, StateWriter, + StaticFileProviderFactory, StatsReader, StorageLocation, StorageReader, StorageTrieWriter, + TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, + WithdrawalsProvider, }; use alloy_consensus::Header; use alloy_eips::{ @@ -157,10 +158,10 @@ impl DatabaseProvider { } impl DatabaseProvider { - /// State provider for latest block - pub fn latest<'a>(&'a self) -> ProviderResult> { + /// State provider for latest state + pub fn latest<'a>(&'a self) -> Box { trace!(target: "providers::db", "Returning latest state provider"); - Ok(Box::new(LatestStateProviderRef::new(self))) + Box::new(LatestStateProviderRef::new(self)) } /// Storage provider for state at that given block hash @@ -378,6 +379,10 @@ impl TryIntoHistoricalStateProvider for Databa } } +impl StateCommitmentProvider for DatabaseProvider { + type StateCommitment = N::StateCommitment; +} + impl DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. 
// #[cfg(any(test, feature = "test-utils"))] diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index ca036844f65..ad36a4a5ab3 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -15,7 +15,9 @@ use reth_db_api::{ transaction::DbTx, }; use reth_primitives::{Account, Bytecode}; -use reth_storage_api::{BlockNumReader, DBProvider, StateProofProvider, StorageRootProvider}; +use reth_storage_api::{ + BlockNumReader, DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, +}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ proof::{Proof, StorageProof}, @@ -59,7 +61,9 @@ pub enum HistoryInfo { MaybeInPlainState, } -impl<'b, Provider: DBProvider + BlockNumReader> HistoricalStateProviderRef<'b, Provider> { +impl<'b, Provider: DBProvider + BlockNumReader + StateCommitmentProvider> + HistoricalStateProviderRef<'b, Provider> +{ /// Create new `StateProvider` for historical block number pub fn new(provider: &'b Provider, block_number: BlockNumber) -> Self { Self { provider, block_number, lowest_available_blocks: Default::default() } @@ -240,7 +244,7 @@ impl HistoricalStateProviderRef<'_, Provi } } -impl AccountReader +impl AccountReader for HistoricalStateProviderRef<'_, Provider> { /// Get basic account information. @@ -281,7 +285,7 @@ impl BlockHashReader } } -impl StateRootProvider +impl StateRootProvider for HistoricalStateProviderRef<'_, Provider> { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { @@ -317,7 +321,7 @@ impl StateRootProvider } } -impl StorageRootProvider +impl StorageRootProvider for HistoricalStateProviderRef<'_, Provider> { fn storage_root( @@ -356,7 +360,7 @@ impl StorageRootProvider } } -impl StateProofProvider +impl StateProofProvider for HistoricalStateProviderRef<'_, Provider> { /// Get account and storage proofs. 
@@ -390,8 +394,8 @@ impl StateProofProvider } } -impl StateProvider - for HistoricalStateProviderRef<'_, Provider> +impl + StateProvider for HistoricalStateProviderRef<'_, Provider> { /// Get storage. fn storage( @@ -441,7 +445,9 @@ pub struct HistoricalStateProvider { lowest_available_blocks: LowestAvailableBlocks, } -impl HistoricalStateProvider { +impl + HistoricalStateProvider +{ /// Create new `StateProvider` for historical block number pub fn new(provider: Provider, block_number: BlockNumber) -> Self { Self { provider, block_number, lowest_available_blocks: Default::default() } @@ -477,7 +483,7 @@ impl HistoricalStateProvider { } // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader]); +delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + StateCommitmentProvider]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. 
@@ -521,7 +527,10 @@ mod tests { transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, StorageEntry}; - use reth_storage_api::{BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory}; + use reth_storage_api::{ + BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory, + StateCommitmentProvider, + }; use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); @@ -530,7 +539,9 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_historical_state_provider() { + const fn assert_historical_state_provider< + T: DBProvider + BlockNumReader + BlockHashReader + StateCommitmentProvider, + >() { assert_state_provider::>(); } diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 67dd1e74471..a2ec4972d10 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -9,7 +9,9 @@ use alloy_primitives::{ use reth_db::tables; use reth_db_api::{cursor::DbDupCursorRO, transaction::DbTx}; use reth_primitives::{Account, Bytecode}; -use reth_storage_api::{DBProvider, StateProofProvider, StorageRootProvider}; +use reth_storage_api::{ + DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, +}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ proof::{Proof, StorageProof}, @@ -62,7 +64,9 @@ impl BlockHashReader for LatestStateProviderRef<'_, P } } -impl StateRootProvider for LatestStateProviderRef<'_, Provider> { +impl StateRootProvider + for LatestStateProviderRef<'_, Provider> +{ fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { StateRoot::overlay_root(self.tx(), hashed_state) .map_err(|err| ProviderError::Database(err.into())) @@ -90,7 +94,9 @@ impl StateRootProvider for LatestStateProviderRef<'_, Prov } } -impl 
StorageRootProvider for LatestStateProviderRef<'_, Provider> { +impl StorageRootProvider + for LatestStateProviderRef<'_, Provider> +{ fn storage_root( &self, address: Address, @@ -121,7 +127,9 @@ impl StorageRootProvider for LatestStateProviderRef<'_, Pr } } -impl StateProofProvider for LatestStateProviderRef<'_, Provider> { +impl StateProofProvider + for LatestStateProviderRef<'_, Provider> +{ fn proof( &self, input: TrieInput, @@ -149,7 +157,7 @@ impl StateProofProvider for LatestStateProviderRef<'_, Pro } } -impl StateProvider +impl StateProvider for LatestStateProviderRef<'_, Provider> { /// Get storage. @@ -191,7 +199,7 @@ impl LatestStateProvider { } // Delegates all provider impls to [LatestStateProviderRef] -delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader]); +delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader + StateCommitmentProvider]); #[cfg(test)] mod tests { @@ -199,7 +207,9 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_latest_state_provider() { + const fn assert_latest_state_provider< + T: DBProvider + BlockHashReader + StateCommitmentProvider, + >() { assert_state_provider::>(); } } diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index c059eb0d6e9..ba2ccf1b157 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -23,6 +23,7 @@ reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie.workspace = true +reth-trie-db.workspace = true reth-db.workspace = true # ethereum diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 3174489fc4a..0cb26d30743 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -8,6 +8,7 @@ use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue 
use auto_impl::auto_impl; use reth_primitives::Bytecode; use reth_storage_errors::provider::ProviderResult; +use reth_trie_db::StateCommitment; /// Type alias of boxed [`StateProvider`]. pub type StateProviderBox = Box; @@ -81,6 +82,12 @@ pub trait StateProvider: } } +/// Trait implemented for database providers that can provide the [`StateCommitment`] type. +pub trait StateCommitmentProvider { + /// The [`StateCommitment`] type that can be used to perform state commitment operations. + type StateCommitment: StateCommitment; +} + /// Trait implemented for database providers that can be converted into a historical state provider. pub trait TryIntoHistoricalStateProvider { /// Returns a historical [`StateProvider`] indexed by the given historic block number. From 38cf6c900ddd2a2266acf01c97abc159db314a07 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 26 Nov 2024 22:26:22 +0400 Subject: [PATCH 712/970] refactor: improve state writing functions for db provider (#12885) --- crates/stages/stages/src/stages/execution.rs | 2 +- .../src/providers/database/provider.rs | 112 ++---------------- crates/storage/provider/src/traits/state.rs | 11 +- 3 files changed, 18 insertions(+), 107 deletions(-) diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index f7ee52fbfe3..3c31dea91f9 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -411,7 +411,7 @@ where // Unwind account and storage changesets, as well as receipts. // // This also updates `PlainStorageState` and `PlainAccountState`. - let bundle_state_with_receipts = provider.take_state(range.clone())?; + let bundle_state_with_receipts = provider.take_state_above(unwind_to)?; // Prepare the input for post unwind commit hook, where an `ExExNotification` will be sent. 
if self.exex_manager_handle.has_exexs() { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 750a186504a..8e4d22067df 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -15,7 +15,7 @@ use crate::{ HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, - StateChangeWriter, StateCommitmentProvider, StateProviderBox, StateReader, StateWriter, + StateChangeWriter, StateCommitmentProvider, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, @@ -679,94 +679,6 @@ impl DatabaseProvider { }) } - /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. - /// - /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the - /// transaction ids. - /// 2. Iterate over the [`StorageChangeSets`][tables::StorageChangeSets] table and the - /// [`AccountChangeSets`][tables::AccountChangeSets] tables in reverse order to reconstruct - /// the changesets. - /// - In order to have both the old and new values in the changesets, we also access the - /// plain state tables. - /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, - /// we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the plain state - /// 3. Save the old value to the local state - /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we - /// have seen before we: - /// 1. Take the old value from the changeset - /// 2. 
Take the new value from the local state - /// 3. Set the local state to the value in the changeset - /// - /// If the range is empty, or there are no blocks for the given range, then this returns `None`. - pub fn get_state( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - if range.is_empty() { - return Ok(None) - } - let start_block_number = *range.start(); - - // We are not removing block meta as it is used to get block changesets. - let block_bodies = self.get::(range.clone())?; - - // get transaction receipts - let Some(from_transaction_num) = block_bodies.first().map(|bodies| bodies.1.first_tx_num()) - else { - return Ok(None) - }; - let Some(to_transaction_num) = block_bodies.last().map(|bodies| bodies.1.last_tx_num()) - else { - return Ok(None) - }; - - let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = self.get::(storage_range)?; - let account_changeset = self.get::(range)?; - - // This is not working for blocks that are not at tip. as plain state is not the last - // state of end range. We should rename the functions or add support to access - // History state. Accessing history state can be tricky but we are not gaining - // anything. - let mut plain_accounts_cursor = self.tx.cursor_read::()?; - let mut plain_storage_cursor = self.tx.cursor_dup_read::()?; - - let (state, reverts) = self.populate_bundle_state( - account_changeset, - storage_changeset, - &mut plain_accounts_cursor, - &mut plain_storage_cursor, - )?; - - // iterate over block body and create ExecutionResult - let mut receipt_iter = - self.get::(from_transaction_num..=to_transaction_num)?.into_iter(); - - let mut receipts = Vec::with_capacity(block_bodies.len()); - // loop break if we are at the end of the blocks. 
- for (_, block_body) in block_bodies { - let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - if let Some((_, receipt)) = receipt_iter.next() { - block_receipts.push(Some(receipt)); - } - } - receipts.push(block_receipts); - } - - Ok(Some(ExecutionOutcome::new_init( - state, - reverts, - Vec::new(), - receipts.into(), - start_block_number, - Vec::new(), - ))) - } - /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the /// [`PlainAccountState`] and [`PlainStorageState`] tables, based on the given storage and /// account changesets. @@ -2039,9 +1951,11 @@ impl StateChangeWriter /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. Set the local state to the value in the changeset - fn remove_state(&self, range: RangeInclusive) -> ProviderResult<()> { + fn remove_state_above(&self, block: BlockNumber) -> ProviderResult<()> { + let range = block + 1..=self.last_block_number()?; + if range.is_empty() { - return Ok(()) + return Ok(()); } // We are not removing block meta as it is used to get block changesets. @@ -2131,7 +2045,9 @@ impl StateChangeWriter /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. 
Set the local state to the value in the changeset - fn take_state(&self, range: RangeInclusive) -> ProviderResult { + fn take_state_above(&self, block: BlockNumber) -> ProviderResult { + let range = block + 1..=self.last_block_number()?; + if range.is_empty() { return Ok(ExecutionOutcome::default()) } @@ -2672,12 +2588,6 @@ impl HistoryWriter for DatabaseProvi } } -impl StateReader for DatabaseProvider { - fn get_state(&self, block: BlockNumber) -> ProviderResult> { - self.get_state(block..=block) - } -} - impl BlockExecutionWriter for DatabaseProvider { @@ -2691,7 +2601,7 @@ impl BlockExecu self.unwind_trie_state_range(range.clone())?; // get execution res - let execution_state = self.take_state(range.clone())?; + let execution_state = self.take_state_above(block)?; let blocks = self.sealed_block_with_senders_range(range)?; @@ -2712,10 +2622,10 @@ impl BlockExecu ) -> ProviderResult<()> { let range = block + 1..=self.last_block_number()?; - self.unwind_trie_state_range(range.clone())?; + self.unwind_trie_state_range(range)?; // remove execution res - self.remove_state(range)?; + self.remove_state_above(block)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index ec189a95e3d..057d3a19a7e 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -6,7 +6,6 @@ use revm::db::{ states::{PlainStateReverts, StateChangeset}, OriginalValuesKnown, }; -use std::ops::RangeInclusive; use super::StorageLocation; @@ -39,9 +38,11 @@ pub trait StateChangeWriter { /// Writes the hashed state changes to the database fn write_hashed_state(&self, hashed_state: &HashedPostStateSorted) -> ProviderResult<()>; - /// Remove the block range of state. 
- fn remove_state(&self, range: RangeInclusive) -> ProviderResult<()>; + /// Remove the block range of state above the given block. The state of the passed block is not + /// removed. + fn remove_state_above(&self, block: BlockNumber) -> ProviderResult<()>; - /// Take the block range of state, recreating the [`ExecutionOutcome`]. - fn take_state(&self, range: RangeInclusive) -> ProviderResult; + /// Take the block range of state, recreating the [`ExecutionOutcome`]. The state of the passed + /// block is not removed. + fn take_state_above(&self, block: BlockNumber) -> ProviderResult; } From e8d63e4a0b060e1f8327059da78b5d0941aa6733 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 20:32:54 +0100 Subject: [PATCH 713/970] chore: fix unused warning (#12888) --- crates/evm/execution-types/Cargo.toml | 9 +++++---- crates/evm/execution-types/src/chain.rs | 2 +- crates/evm/execution-types/src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 36b36fc0216..c7fbad673db 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -14,7 +14,7 @@ workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-execution-errors.workspace = true -reth-trie-common.workspace = true +reth-trie-common = { workspace = true, optional = true } reth-trie.workspace = true revm.workspace = true @@ -43,14 +43,15 @@ serde = [ "alloy-eips/serde", "alloy-primitives/serde", "reth-primitives-traits/serde", - "alloy-consensus/serde", - "reth-trie-common/serde", + "alloy-consensus/serde", "reth-trie/serde", + "reth-trie-common?/serde" ] serde-bincode-compat = [ + "serde", + "reth-trie-common/serde-bincode-compat", "reth-primitives/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", - "reth-trie-common/serde-bincode-compat", "serde_with", "alloy-eips/serde-bincode-compat", 
"alloy-consensus/serde-bincode-compat", diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 339a188166d..1767a7f43f6 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -520,7 +520,7 @@ pub enum ChainSplit { } /// Bincode-compatible [`Chain`] serde implementation. -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +#[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { use crate::ExecutionOutcome; use alloc::borrow::Cow; diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index f98ebfe73a5..fb872cd596e 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -26,7 +26,7 @@ pub use execution_outcome::*; /// all fields are serialized. /// /// Read more: -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +#[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { pub use super::chain::serde_bincode_compat::*; } From 02f3427dae29d66ba1eac49265f7b450ee613982 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 26 Nov 2024 23:25:42 +0100 Subject: [PATCH 714/970] feat: introduce networkprimitives in transition fetcher (#12889) --- .../net/network/src/transactions/fetcher.rs | 43 ++++++++++--------- crates/net/network/src/transactions/mod.rs | 12 +++--- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 0833f677409..180a619fff9 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -45,6 +45,7 @@ use reth_eth_wire::{ DedupPayload, EthVersion, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, PartiallyValidData, RequestTxHashes, ValidAnnouncementData, }; +use reth_eth_wire_types::{EthNetworkPrimitives, NetworkPrimitives}; use 
reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; use reth_network_peers::PeerId; @@ -68,7 +69,7 @@ use validation::FilterOutcome; /// new requests on announced hashes. #[derive(Debug)] #[pin_project] -pub struct TransactionFetcher { +pub struct TransactionFetcher { /// All peers with to which a [`GetPooledTransactions`] request is inflight. pub active_peers: LruMap, /// All currently active [`GetPooledTransactions`] requests. @@ -77,7 +78,7 @@ pub struct TransactionFetcher { /// It's disjoint from the set of hashes which are awaiting an idle fallback peer in order to /// be fetched. #[pin] - pub inflight_requests: FuturesUnordered, + pub inflight_requests: FuturesUnordered>, /// Hashes that are awaiting an idle fallback peer so they can be fetched. /// /// This is a subset of all hashes in the fetcher, and is disjoint from the set of hashes for @@ -93,9 +94,7 @@ pub struct TransactionFetcher { metrics: TransactionFetcherMetrics, } -// === impl TransactionFetcher === - -impl TransactionFetcher { +impl TransactionFetcher { /// Removes the peer from the active set. pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) { self.active_peers.remove(peer_id); @@ -429,7 +428,7 @@ impl TransactionFetcher { /// the request by checking the transactions seen by the peer against the buffer. 
pub fn on_fetch_pending_hashes( &mut self, - peers: &HashMap, + peers: &HashMap>, has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool, ) { let init_capacity_req = approx_capacity_get_pooled_transactions_req_eth68(&self.info); @@ -632,7 +631,7 @@ impl TransactionFetcher { pub fn request_transactions_from_peer( &mut self, new_announced_hashes: RequestTxHashes, - peer: &PeerMetadata, + peer: &PeerMetadata, ) -> Option { let peer_id: PeerId = peer.request_tx.peer_id; let conn_eth_version = peer.version; @@ -896,7 +895,9 @@ impl TransactionFetcher { approx_capacity_get_pooled_transactions_req_eth66() } } +} +impl TransactionFetcher { /// Processes a resolved [`GetPooledTransactions`] request. Queues the outcome as a /// [`FetchEvent`], which will then be streamed by /// [`TransactionsManager`](super::TransactionsManager). @@ -1044,7 +1045,7 @@ impl Stream for TransactionFetcher { } } -impl Default for TransactionFetcher { +impl Default for TransactionFetcher { fn default() -> Self { Self { active_peers: LruMap::new(DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS), @@ -1091,13 +1092,13 @@ impl TxFetchMetadata { /// Represents possible events from fetching transactions. #[derive(Debug)] -pub enum FetchEvent { +pub enum FetchEvent { /// Triggered when transactions are successfully fetched. TransactionsFetched { /// The ID of the peer from which transactions were fetched. peer_id: PeerId, /// The transactions that were fetched, if available. - transactions: PooledTransactions, + transactions: PooledTransactions, }, /// Triggered when there is an error in fetching transactions. FetchError { @@ -1115,22 +1116,22 @@ pub enum FetchEvent { /// An inflight request for [`PooledTransactions`] from a peer. 
#[derive(Debug)] -pub struct GetPooledTxRequest { +pub struct GetPooledTxRequest { peer_id: PeerId, /// Transaction hashes that were requested, for cleanup purposes requested_hashes: RequestTxHashes, - response: oneshot::Receiver>, + response: oneshot::Receiver>>, } /// Upon reception of a response, a [`GetPooledTxRequest`] is deconstructed to form a /// [`GetPooledTxResponse`]. #[derive(Debug)] -pub struct GetPooledTxResponse { +pub struct GetPooledTxResponse { peer_id: PeerId, /// Transaction hashes that were requested, for cleanup purposes, since peer may only return a /// subset of requested hashes. requested_hashes: RequestTxHashes, - result: Result, RecvError>, + result: Result>, RecvError>, } /// Stores the response receiver made by sending a [`GetPooledTransactions`] request to a peer's @@ -1138,24 +1139,24 @@ pub struct GetPooledTxResponse { #[must_use = "futures do nothing unless polled"] #[pin_project::pin_project] #[derive(Debug)] -pub struct GetPooledTxRequestFut { +pub struct GetPooledTxRequestFut { #[pin] - inner: Option, + inner: Option>, } -impl GetPooledTxRequestFut { +impl GetPooledTxRequestFut { #[inline] const fn new( peer_id: PeerId, requested_hashes: RequestTxHashes, - response: oneshot::Receiver>, + response: oneshot::Receiver>>, ) -> Self { Self { inner: Some(GetPooledTxRequest { peer_id, requested_hashes, response }) } } } -impl Future for GetPooledTxRequestFut { - type Output = GetPooledTxResponse; +impl Future for GetPooledTxRequestFut { + type Output = GetPooledTxResponse; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut req = self.as_mut().project().inner.take().expect("polled after completion"); @@ -1372,7 +1373,7 @@ mod test { // RIG TEST - let tx_fetcher = &mut TransactionFetcher::default(); + let tx_fetcher = &mut TransactionFetcher::::default(); let eth68_hashes = [ B256::from_slice(&[1; 32]), diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 
ff76a6d2921..d533aee102b 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -212,7 +212,7 @@ pub struct TransactionsManager>>, /// Transaction fetcher to handle inflight and missing transaction requests. - transaction_fetcher: TransactionFetcher, + transaction_fetcher: TransactionFetcher, /// All currently pending transactions grouped by peers. /// /// This way we can track incoming transactions and prevent multiple pool imports for the same @@ -235,7 +235,7 @@ pub struct TransactionsManager, /// All the connected peers. - peers: HashMap, + peers: HashMap>, /// Send half for the command channel. /// /// This is kept so that a new [`TransactionsHandle`] can be created at any time. @@ -1731,23 +1731,23 @@ impl TransactionSource { /// Tracks a single peer in the context of [`TransactionsManager`]. #[derive(Debug)] -pub struct PeerMetadata { +pub struct PeerMetadata { /// Optimistically keeps track of transactions that we know the peer has seen. Optimistic, in /// the sense that transactions are preemptively marked as seen by peer when they are sent to /// the peer. seen_transactions: LruCache, /// A communication channel directly to the peer's session task. - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, /// negotiated version of the session. version: EthVersion, /// The peer's client version. client_version: Arc, } -impl PeerMetadata { +impl PeerMetadata { /// Returns a new instance of [`PeerMetadata`]. 
fn new( - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, version: EthVersion, client_version: Arc, max_transactions_seen_by_peer: u32, From 6b3c7c651e1299a830987b686bc9beacc553757b Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:50:32 +0700 Subject: [PATCH 715/970] fix(ci): warn instead of failing no tests (#12893) --- .github/workflows/unit.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index e89ad903d80..11ef24b5f1b 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -61,7 +61,8 @@ jobs: - name: Run tests run: | cargo nextest run \ - ${{ matrix.args }} --workspace --exclude ef-tests \ + ${{ matrix.args }} --workspace \ + --exclude ef-tests --no-tests=warn \ --partition hash:${{ matrix.partition }}/2 \ -E "!kind(test)" From acfcfbdcf3ed9c31016cf6fe697f818de74e0312 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Wed, 27 Nov 2024 15:08:19 +0700 Subject: [PATCH 716/970] perf: minimize clones when saving blocks (#12870) --- crates/engine/tree/src/persistence.rs | 2 +- .../src/providers/blockchain_provider.rs | 2 +- crates/storage/provider/src/writer/mod.rs | 26 +++++++++---------- 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 50eb6aa2803..950310b170f 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -147,7 +147,7 @@ impl PersistenceService { let provider_rw = self.provider.database_provider_rw()?; let static_file_provider = self.provider.static_file_provider(); - UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(&blocks)?; + UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(blocks)?; UnifiedStorageWriter::commit(provider_rw)?; } 
self.metrics.save_blocks_duration_seconds.record(start_time.elapsed()); diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 24a4892c6be..1dd1e47ec6a 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1009,7 +1009,7 @@ mod tests { // Push to disk let provider_rw = hook_provider.database_provider_rw().unwrap(); UnifiedStorageWriter::from(&provider_rw, &hook_provider.static_file_provider()) - .save_blocks(&[lowest_memory_block]) + .save_blocks(vec![lowest_memory_block]) .unwrap(); UnifiedStorageWriter::commit(provider_rw).unwrap(); diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index a88234ba305..d4d5116de97 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -10,6 +10,7 @@ use reth_primitives::StaticFileSegment; use reth_storage_api::{DBProvider, StageCheckpointWriter, TransactionsProviderExt}; use reth_storage_errors::writer::UnifiedStorageWriterError; use revm::db::OriginalValuesKnown; +use std::sync::Arc; use tracing::debug; /// [`UnifiedStorageWriter`] is responsible for managing the writing to storage with both database @@ -130,7 +131,7 @@ where + StaticFileProviderFactory, { /// Writes executed blocks and receipts to storage. 
- pub fn save_blocks(&self, blocks: &[ExecutedBlock]) -> ProviderResult<()> { + pub fn save_blocks(&self, blocks: Vec) -> ProviderResult<()> { if blocks.is_empty() { debug!(target: "provider::storage_writer", "Attempted to write empty block range"); return Ok(()) @@ -138,13 +139,13 @@ where // NOTE: checked non-empty above let first_block = blocks.first().unwrap().block(); - let last_block = blocks.last().unwrap().block().clone(); + let last_block = blocks.last().unwrap().block(); let first_number = first_block.number; let last_block_number = last_block.number; debug!(target: "provider::storage_writer", block_count = %blocks.len(), "Writing blocks and execution data to storage"); - // TODO: remove all the clones and do performant / batched writes for each type of object + // TODO: Do performant / batched writes for each type of object // instead of a loop over all blocks, // meaning: // * blocks @@ -153,27 +154,24 @@ where // * trie updates (cannot naively extend, need helper) // * indices (already done basically) // Insert the blocks - for block in blocks { - let sealed_block = - block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); + for ExecutedBlock { block, senders, execution_output, hashed_state, trie } in blocks { + let sealed_block = Arc::unwrap_or_clone(block) + .try_with_senders_unchecked(Arc::unwrap_or_clone(senders)) + .unwrap(); self.database().insert_block(sealed_block, StorageLocation::Both)?; // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. 
- let execution_outcome = block.execution_outcome().clone(); self.database().write_to_storage( - execution_outcome, + Arc::unwrap_or_clone(execution_output), OriginalValuesKnown::No, StorageLocation::StaticFiles, )?; // insert hashes and intermediate merkle nodes - { - let trie_updates = block.trie_updates().clone(); - let hashed_state = block.hashed_state(); - self.database().write_hashed_state(&hashed_state.clone().into_sorted())?; - self.database().write_trie_updates(&trie_updates)?; - } + self.database() + .write_hashed_state(&Arc::unwrap_or_clone(hashed_state).into_sorted())?; + self.database().write_trie_updates(&trie)?; } // update history indices From 3b8c661ad4e7e7a5b1656d5bebe8073755903b75 Mon Sep 17 00:00:00 2001 From: Pelle <78560773+PelleKrab@users.noreply.github.com> Date: Wed, 27 Nov 2024 01:42:39 -0700 Subject: [PATCH 717/970] added generic header (#12897) Co-authored-by: router --- crates/chain-state/src/notifications.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 2aae56aea2a..c4e0415436a 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -155,7 +155,9 @@ impl CanonStateNotification { /// Wrapper around a broadcast receiver that receives fork choice notifications. #[derive(Debug, Deref, DerefMut)] -pub struct ForkChoiceNotifications(pub watch::Receiver>); +pub struct ForkChoiceNotifications( + pub watch::Receiver>>, +); /// A trait that allows to register to fork choice related events /// and get notified when a new fork choice is available. 
From 51afa4cdc94fc29909cb2b6eda1c30c11e102c00 Mon Sep 17 00:00:00 2001 From: Z <12710516+zitup@users.noreply.github.com> Date: Wed, 27 Nov 2024 17:54:39 +0800 Subject: [PATCH 718/970] chore(sdk): Add MaybeArbitrary to all ATs on NodePrimitives (#12847) --- crates/primitives-traits/src/node.rs | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 8081a453ca3..4adf258177e 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -2,7 +2,7 @@ use core::fmt; use crate::{ Block, BlockBody, BlockHeader, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, - FullSignedTx, FullTxType, MaybeSerde, + FullSignedTx, FullTxType, MaybeArbitrary, MaybeSerde, }; /// Configures all the primitive types of the node. @@ -16,9 +16,27 @@ pub trait NodePrimitives: /// Block body primitive. type BlockBody: BlockBody; /// Signed version of the transaction type. - type SignedTx: Send + Sync + Unpin + Clone + fmt::Debug + PartialEq + Eq + MaybeSerde + 'static; + type SignedTx: Send + + Sync + + Unpin + + Clone + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + MaybeArbitrary + + 'static; /// Transaction envelope type ID. - type TxType: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static; + type TxType: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeArbitrary + + 'static; /// A receipt. type Receipt: Send + Sync @@ -29,6 +47,7 @@ pub trait NodePrimitives: + PartialEq + Eq + MaybeSerde + + MaybeArbitrary + 'static; } /// Helper trait that sets trait bounds on [`NodePrimitives`]. 
From b33757fcbe35a80cd782c1a57d629a004ef1b9a9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 27 Nov 2024 12:31:24 +0100 Subject: [PATCH 719/970] feat: extends engine validator (#12900) --- Cargo.lock | 4 + crates/engine/primitives/Cargo.toml | 2 + crates/engine/primitives/src/lib.rs | 45 ++++++- crates/ethereum/engine-primitives/Cargo.toml | 1 + crates/ethereum/engine-primitives/src/lib.rs | 27 +++- crates/optimism/node/Cargo.toml | 1 + crates/optimism/node/src/engine.rs | 122 +++++++++++-------- crates/payload/primitives/src/error.rs | 20 ++- crates/payload/primitives/src/lib.rs | 5 +- examples/custom-engine-types/src/main.rs | 58 +++++++-- 10 files changed, 216 insertions(+), 69 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b754e8af29..9ed5ce894bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7194,6 +7194,7 @@ dependencies = [ name = "reth-engine-primitives" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-engine", "futures", @@ -7202,6 +7203,7 @@ dependencies = [ "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", + "reth-primitives-traits", "reth-trie", "serde", "thiserror 1.0.69", @@ -7431,6 +7433,7 @@ dependencies = [ "reth-chainspec", "reth-engine-primitives", "reth-payload-primitives", + "reth-payload-validator", "reth-primitives", "reth-rpc-types-compat", "serde", @@ -8348,6 +8351,7 @@ dependencies = [ "reth-optimism-rpc", "reth-payload-builder", "reth-payload-util", + "reth-payload-validator", "reth-primitives", "reth-provider", "reth-revm", diff --git a/crates/engine/primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml index 42cbd932d45..2da1be9c928 100644 --- a/crates/engine/primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -16,11 +16,13 @@ reth-execution-types.workspace = true reth-payload-primitives.workspace = true reth-payload-builder-primitives.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true 
reth-trie.workspace = true reth-errors.workspace = true # alloy alloy-primitives.workspace = true +alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true # async diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 3429edc2867..89fb7459b7d 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -9,6 +9,9 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod error; + +use alloy_consensus::BlockHeader; +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; pub use error::BeaconOnNewPayloadError; mod forkchoice; @@ -24,6 +27,9 @@ pub use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, }; +use reth_payload_primitives::{InvalidPayloadAttributesError, PayloadAttributes}; +use reth_primitives::SealedBlockFor; +use reth_primitives_traits::Block; use serde::{de::DeserializeOwned, ser::Serialize}; /// This type defines the versioned types of the engine API. @@ -74,8 +80,11 @@ pub trait EngineTypes: + 'static; } -/// Type that validates the payloads sent to the engine. +/// Type that validates the payloads processed by the engine. pub trait EngineValidator: Clone + Send + Sync + Unpin + 'static { + /// The block type used by the engine. + type Block: Block; + /// Validates the presence or exclusion of fork-specific fields based on the payload attributes /// and the message version. fn validate_version_specific_fields( @@ -90,4 +99,38 @@ pub trait EngineValidator: Clone + Send + Sync + Unpin + 'st version: EngineApiMessageVersion, attributes: &::PayloadAttributes, ) -> Result<(), EngineObjectValidationError>; + + /// Ensures that the given payload does not violate any consensus rules that concern the block's + /// layout. + /// + /// This function must convert the payload into the executable block and pre-validate its + /// fields. 
+ /// + /// Implementers should ensure that the checks are done in the order that conforms with the + /// engine-API specification. + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError>; + + /// Validates the payload attributes with respect to the header. + /// + /// By default, this enforces that the payload attributes timestamp is greater than the + /// timestamp according to: + /// > 7. Client software MUST ensure that payloadAttributes.timestamp is greater than + /// > timestamp + /// > of a block referenced by forkchoiceState.headBlockHash. + /// + /// See also [engine api spec](https://github.com/ethereum/execution-apis/tree/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine) + fn validate_payload_attributes_against_header( + &self, + attr: &::PayloadAttributes, + header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + if attr.timestamp() <= header.timestamp() { + return Err(InvalidPayloadAttributesError::InvalidTimestamp); + } + Ok(()) + } } diff --git a/crates/ethereum/engine-primitives/Cargo.toml b/crates/ethereum/engine-primitives/Cargo.toml index e9bcd425686..f019f6e5f2a 100644 --- a/crates/ethereum/engine-primitives/Cargo.toml +++ b/crates/ethereum/engine-primitives/Cargo.toml @@ -16,6 +16,7 @@ reth-chainspec.workspace = true reth-primitives.workspace = true reth-engine-primitives.workspace = true reth-payload-primitives.workspace = true +reth-payload-validator.workspace = true reth-rpc-types-compat.workspace = true alloy-rlp.workspace = true reth-chain-state.workspace = true diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 5addf2a18c5..beefd54ca05 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -11,6 +11,7 @@ mod payload; use std::sync::Arc; +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; pub 
use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, @@ -22,6 +23,8 @@ use reth_payload_primitives::{ validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, }; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, SealedBlock}; /// The types used in the default mainnet ethereum beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] @@ -63,13 +66,19 @@ impl PayloadTypes for EthPayloadTypes { /// Validator for the ethereum engine API. #[derive(Debug, Clone)] pub struct EthereumEngineValidator { - chain_spec: Arc, + inner: ExecutionPayloadValidator, } impl EthereumEngineValidator { /// Instantiates a new validator. pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + Self { inner: ExecutionPayloadValidator::new(chain_spec) } + } + + /// Returns the chain spec used by the validator. 
+ #[inline] + fn chain_spec(&self) -> &ChainSpec { + self.inner.chain_spec() } } @@ -77,12 +86,14 @@ impl EngineValidator for EthereumEngineValidator where Types: EngineTypes, { + type Block = Block; + fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, EthPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, payload_or_attrs) + validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } fn ensure_well_formed_attributes( @@ -90,6 +101,14 @@ where version: EngineApiMessageVersion, attributes: &EthPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into()) + validate_version_specific_fields(self.chain_spec(), version, attributes.into()) + } + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result { + self.inner.ensure_well_formed_payload(payload, sidecar) } } diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index fbc055a82e9..f11eb357832 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -18,6 +18,7 @@ reth-engine-local.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-util.workspace = true +reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-consensus.workspace = true reth-node-api.workspace = true diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index dd4d0c13f24..57b76b904bd 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -1,6 +1,7 @@ -use std::sync::Arc; - -use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1}; +use alloy_rpc_types_engine::{ + ExecutionPayload, ExecutionPayloadEnvelopeV2, 
ExecutionPayloadSidecar, ExecutionPayloadV1, + PayloadError, +}; use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpPayloadAttributes, }; @@ -16,6 +17,9 @@ use reth_node_api::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::{OpHardfork, OpHardforks}; use reth_optimism_payload_builder::{OpBuiltPayload, OpPayloadBuilderAttributes}; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, SealedBlockFor}; +use std::sync::Arc; /// The types used in the optimism beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] @@ -57,76 +61,42 @@ impl PayloadTypes for OpPayloadTypes { /// Validator for Optimism engine API. #[derive(Debug, Clone)] pub struct OpEngineValidator { - chain_spec: Arc, + inner: ExecutionPayloadValidator, } impl OpEngineValidator { /// Instantiates a new validator. pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + Self { inner: ExecutionPayloadValidator::new(chain_spec) } } -} - -/// Validates the presence of the `withdrawals` field according to the payload timestamp. -/// -/// After Canyon, withdrawals field must be [Some]. 
-/// Before Canyon, withdrawals field must be [None]; -/// -/// Canyon activates the Shanghai EIPs, see the Canyon specs for more details: -/// -pub fn validate_withdrawals_presence( - chain_spec: &ChainSpec, - version: EngineApiMessageVersion, - message_validation_kind: MessageValidationKind, - timestamp: u64, - has_withdrawals: bool, -) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp); - - match version { - EngineApiMessageVersion::V1 => { - if has_withdrawals { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) - } - if is_shanghai { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) - } - } - EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { - if is_shanghai && !has_withdrawals { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) - } - if !is_shanghai && has_withdrawals { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)) - } - } - }; - Ok(()) + /// Returns the chain spec used by the validator. 
+ #[inline] + fn chain_spec(&self) -> &OpChainSpec { + self.inner.chain_spec() + } } impl EngineValidator for OpEngineValidator where Types: EngineTypes, { + type Block = Block; + fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, OpPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { validate_withdrawals_presence( - &self.chain_spec, + self.chain_spec(), version, payload_or_attrs.message_validation_kind(), payload_or_attrs.timestamp(), payload_or_attrs.withdrawals().is_some(), )?; validate_parent_beacon_block_root_presence( - &self.chain_spec, + self.chain_spec(), version, payload_or_attrs.message_validation_kind(), payload_or_attrs.timestamp(), @@ -139,7 +109,7 @@ where version: EngineApiMessageVersion, attributes: &OpPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into())?; + validate_version_specific_fields(self.chain_spec(), version, attributes.into())?; if attributes.gas_limit.is_none() { return Err(EngineObjectValidationError::InvalidParams( @@ -147,7 +117,9 @@ where )) } - if self.chain_spec.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp) + if self + .chain_spec() + .is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp) { let (elasticity, denominator) = attributes.decode_eip_1559_params().ok_or_else(|| { @@ -164,6 +136,56 @@ where Ok(()) } + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } +} + +/// Validates the presence of the `withdrawals` field according to the payload timestamp. +/// +/// After Canyon, withdrawals field must be [Some]. 
+/// Before Canyon, withdrawals field must be [None]; +/// +/// Canyon activates the Shanghai EIPs, see the Canyon specs for more details: +/// +pub fn validate_withdrawals_presence( + chain_spec: &ChainSpec, + version: EngineApiMessageVersion, + message_validation_kind: MessageValidationKind, + timestamp: u64, + has_withdrawals: bool, +) -> Result<(), EngineObjectValidationError> { + let is_shanghai = chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp); + + match version { + EngineApiMessageVersion::V1 => { + if has_withdrawals { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) + } + if is_shanghai { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) + } + } + EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { + if is_shanghai && !has_withdrawals { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) + } + if !is_shanghai && has_withdrawals { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)) + } + } + }; + + Ok(()) } #[cfg(test)] diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index d2e57da5791..ffe4e027e96 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -1,6 +1,7 @@ //! Error types emitted by types or implementations of this crate. use alloy_primitives::B256; +use alloy_rpc_types_engine::ForkchoiceUpdateError; use reth_errors::{ProviderError, RethError}; use revm_primitives::EVMError; use tokio::sync::oneshot; @@ -53,7 +54,7 @@ impl From for PayloadBuilderError { } } -/// Thrown when the payload or attributes are known to be invalid before processing. +/// Thrown when the payload or attributes are known to be invalid __before__ processing. 
/// /// This is used mainly for /// [`validate_version_specific_fields`](crate::validate_version_specific_fields), which validates @@ -115,3 +116,20 @@ impl EngineObjectValidationError { Self::InvalidParams(Box::new(error)) } } + +/// Thrown when validating the correctness of a payload attributes object. +#[derive(thiserror::Error, Debug)] +pub enum InvalidPayloadAttributesError { + /// Thrown if the timestamp of the payload attributes is invalid according to the engine specs. + #[error("invalid payload attributes timestamp")] + InvalidTimestamp, + /// Another type of error that is not covered by the above variants. + #[error("Invalid params: {0}")] + InvalidParams(#[from] Box), +} + +impl From for ForkchoiceUpdateError { + fn from(_: InvalidPayloadAttributesError) -> Self { + Self::UpdatedInvalidPayloadAttributes + } +} diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 0ff4810b864..523e6fb057a 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -9,7 +9,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod error; -pub use error::{EngineObjectValidationError, PayloadBuilderError, VersionSpecificValidationError}; +pub use error::{ + EngineObjectValidationError, InvalidPayloadAttributesError, PayloadBuilderError, + VersionSpecificValidationError, +}; /// Contains traits to abstract over payload attributes types and default implementations of the /// [`PayloadAttributes`] trait for ethereum mainnet and optimism types.
diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index cde68ca6d8e..f9ac5c23865 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -17,11 +17,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] -use std::{convert::Infallible, sync::Arc}; - -use serde::{Deserialize, Serialize}; -use thiserror::Error; - use alloy_eips::eip4895::Withdrawals; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; @@ -33,7 +28,7 @@ use alloy_rpc_types::{ Withdrawal, }; use reth::{ - api::PayloadTypes, + api::{InvalidPayloadAttributesError, PayloadTypes}, builder::{ components::{ComponentsBuilder, PayloadServiceBuilder}, node::{NodeTypes, NodeTypesWithEngine}, @@ -42,9 +37,13 @@ use reth::{ PayloadBuilderConfig, }, network::NetworkHandle, - primitives::EthPrimitives, + payload::ExecutionPayloadValidator, + primitives::{Block, EthPrimitives, SealedBlockFor}, providers::{CanonStateSubscriptions, EthStorage, StateProviderFactory}, - rpc::eth::EthApi, + rpc::{ + eth::EthApi, + types::engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}, + }, tasks::TaskManager, transaction_pool::TransactionPool, }; @@ -72,6 +71,9 @@ use reth_payload_builder::{ }; use reth_tracing::{RethTracer, Tracer}; use reth_trie_db::MerklePatriciaTrie; +use serde::{Deserialize, Serialize}; +use std::{convert::Infallible, sync::Arc}; +use thiserror::Error; /// A custom payload attributes type. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -171,19 +173,34 @@ impl EngineTypes for CustomEngineTypes { /// Custom engine validator #[derive(Debug, Clone)] pub struct CustomEngineValidator { - chain_spec: Arc, + inner: ExecutionPayloadValidator, +} + +impl CustomEngineValidator { + /// Instantiates a new validator. + pub const fn new(chain_spec: Arc) -> Self { + Self { inner: ExecutionPayloadValidator::new(chain_spec) } + } + + /// Returns the chain spec used by the validator. 
+ #[inline] + fn chain_spec(&self) -> &ChainSpec { + self.inner.chain_spec() + } } impl EngineValidator for CustomEngineValidator where T: EngineTypes, { + type Block = Block; + fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, T::PayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, payload_or_attrs) + validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } fn ensure_well_formed_attributes( @@ -191,7 +208,7 @@ where version: EngineApiMessageVersion, attributes: &T::PayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into())?; + validate_version_specific_fields(self.chain_spec(), version, attributes.into())?; // custom validation logic - ensure that the custom field is not zero if attributes.custom == 0 { @@ -202,6 +219,23 @@ where Ok(()) } + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } + + fn validate_payload_attributes_against_header( + &self, + _attr: &::PayloadAttributes, + _header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + // skip default timestamp validation + Ok(()) + } } /// Custom engine validator builder @@ -218,7 +252,7 @@ where type Validator = CustomEngineValidator; async fn build(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - Ok(CustomEngineValidator { chain_spec: ctx.config.chain.clone() }) + Ok(CustomEngineValidator::new(ctx.config.chain.clone())) } } From a0e2961d730998ee825ef540f3b290dfb3c970b7 Mon Sep 17 00:00:00 2001 From: Joseph Zhao <65984904+programskillforverification@users.noreply.github.com> Date: Wed, 27 Nov 2024 19:40:00 +0800 Subject: [PATCH 720/970] chore: make BeaconConsensusEngineEvent generic over 
data primitives (#12831) Co-authored-by: Emilia Hane --- crates/consensus/beacon/src/engine/event.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index b76b85374cd..b503e1e102a 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -1,7 +1,8 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; use reth_engine_primitives::ForkchoiceStatus; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlock, SealedHeader}; use std::{ fmt::{Display, Formatter, Result}, sync::Arc, @@ -10,23 +11,23 @@ use std::{ /// Events emitted by [`crate::BeaconConsensusEngine`]. #[derive(Clone, Debug)] -pub enum BeaconConsensusEngineEvent { +pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. - ForkBlockAdded(Arc, Duration), + ForkBlockAdded(Arc>, Duration), /// A block was added to the canonical chain, and the elapsed time validating the block - CanonicalBlockAdded(Arc, Duration), + CanonicalBlockAdded(Arc>, Duration), /// A canonical chain was committed, and the elapsed time committing the data - CanonicalChainCommitted(Box, Duration), + CanonicalChainCommitted(Box>, Duration), /// The consensus engine is involved in live sync, and has specific progress LiveSyncProgress(ConsensusEngineLiveSyncProgress), } -impl BeaconConsensusEngineEvent { +impl BeaconConsensusEngineEvent { /// Returns the canonical header if the event is a /// [`BeaconConsensusEngineEvent::CanonicalChainCommitted`]. 
- pub const fn canonical_header(&self) -> Option<&SealedHeader> { + pub const fn canonical_header(&self) -> Option<&SealedHeader> { match self { Self::CanonicalChainCommitted(header, _) => Some(header), _ => None, @@ -34,7 +35,10 @@ impl BeaconConsensusEngineEvent { } } -impl Display for BeaconConsensusEngineEvent { +impl Display for BeaconConsensusEngineEvent +where + N: NodePrimitives, +{ fn fmt(&self, f: &mut Formatter<'_>) -> Result { match self { Self::ForkchoiceUpdated(state, status) => { From 42c24f07d965f977303471ec92d78aa8ecbce96d Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 27 Nov 2024 15:43:38 +0400 Subject: [PATCH 721/970] refactor: unify code paths for receipts removal (#12887) --- crates/stages/stages/src/stages/bodies.rs | 46 +--- crates/stages/stages/src/stages/execution.rs | 218 ++++++++---------- crates/stages/stages/src/stages/utils.rs | 41 +++- .../src/providers/database/provider.rs | 98 ++++++-- crates/storage/provider/src/traits/block.rs | 18 +- crates/storage/provider/src/traits/state.rs | 12 +- crates/storage/provider/src/writer/mod.rs | 23 +- 7 files changed, 250 insertions(+), 206 deletions(-) diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index c2de9292402..c1fde11c235 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -8,15 +8,13 @@ use reth_codecs::Compact; use reth_primitives_traits::{Block, BlockBody}; use tracing::*; -use alloy_primitives::TxNumber; use reth_db::{tables, transaction::DbTx}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTxMut}; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, BlockWriter, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, - StorageLocation, + providers::StaticFileWriter, BlockReader, BlockWriter, 
DBProvider, ProviderError, + StaticFileProviderFactory, StatsReader, StorageLocation, }; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, @@ -24,6 +22,8 @@ use reth_stages_api::{ }; use reth_storage_errors::provider::ProviderResult; +use super::missing_static_data_error; + /// The body stage downloads block bodies. /// /// The body stage downloads block bodies for all block headers stored locally in storage. @@ -128,6 +128,7 @@ impl BodyStage { next_static_file_tx_num.saturating_sub(1), &static_file_provider, provider, + StaticFileSegment::Transactions, )?) } } else { @@ -135,6 +136,7 @@ impl BodyStage { next_static_file_tx_num.saturating_sub(1), &static_file_provider, provider, + StaticFileSegment::Transactions, )?) } } @@ -242,42 +244,6 @@ where } } -/// Called when database is ahead of static files. Attempts to find the first block we are missing -/// transactions for. -fn missing_static_data_error( - last_tx_num: TxNumber, - static_file_provider: &StaticFileProvider, - provider: &Provider, -) -> Result -where - Provider: BlockReader + StaticFileProviderFactory, -{ - let mut last_block = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Transactions) - .unwrap_or_default(); - - // To be extra safe, we make sure that the last tx num matches the last block from its indices. - // If not, get it. - loop { - if let Some(indices) = provider.block_body_indices(last_block)? { - if indices.last_tx_num() <= last_tx_num { - break - } - } - if last_block == 0 { - break - } - last_block -= 1; - } - - let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - - Ok(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Transactions, - }) -} - // TODO(alexey): ideally, we want to measure Bodies stage progress in bytes, but it's hard to know // beforehand how many bytes we need to download. 
So the good solution would be to measure the // progress in gas as a proxy to size. Execution stage uses a similar approach. diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 3c31dea91f9..297130c34cb 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -14,7 +14,7 @@ use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; use reth_primitives::{SealedHeader, StaticFileSegment}; use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimitives}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, + providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateChangeWriter, StateCommitmentProvider, StateWriter, StaticFileProviderFactory, StatsReader, StorageLocation, TransactionVariant, @@ -35,6 +35,8 @@ use std::{ }; use tracing::*; +use super::missing_static_data_error; + /// The execution stage executes all transactions and /// update history indexes. /// @@ -169,6 +171,88 @@ impl ExecutionStage { } Ok(prune_modes) } + + /// Performs consistency check on static files. + /// + /// This function compares the highest receipt number recorded in the database with that in the + /// static file to detect any discrepancies due to unexpected shutdowns or database rollbacks. + /// **If the height in the static file is higher**, it rolls back (unwinds) the static file. + /// **Conversely, if the height in the database is lower**, it triggers a rollback in the + /// database (by returning [`StageError`]) until the heights in both the database and static + /// file match. 
+ fn ensure_consistency( + &self, + provider: &Provider, + checkpoint: u64, + unwind_to: Option, + ) -> Result<(), StageError> + where + Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, + { + // If there's any receipts pruning configured, receipts are written directly to database and + // inconsistencies are expected. + if self.prune_modes.has_receipts_pruning() { + return Ok(()) + } + + // Get next expected receipt number + let tx = provider.tx_ref(); + let next_receipt_num = tx + .cursor_read::()? + .seek_exact(checkpoint)? + .map(|(_, value)| value.next_tx_num()) + .unwrap_or(0); + + let static_file_provider = provider.static_file_provider(); + + // Get next expected receipt number in static files + let next_static_file_receipt_num = static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Receipts) + .map(|num| num + 1) + .unwrap_or(0); + + // Check if we had any unexpected shutdown after committing to static files, but + // NOT committing to database. + match next_static_file_receipt_num.cmp(&next_receipt_num) { + // It can be equal when it's a chain of empty blocks, but we still need to update the + // last block in the range. + Ordering::Greater | Ordering::Equal => { + let mut static_file_producer = + static_file_provider.latest_writer(StaticFileSegment::Receipts)?; + static_file_producer + .prune_receipts(next_static_file_receipt_num - next_receipt_num, checkpoint)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()?; + } + Ordering::Less => { + // If we are already in the process of unwind, this might be fine because we will + // fix the inconsistency right away. + if let Some(unwind_to) = unwind_to { + let next_receipt_num_after_unwind = provider + .tx_ref() + .get::(unwind_to)?
+ .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?; + + if next_receipt_num_after_unwind > next_static_file_receipt_num { + // This means we need a deeper unwind. + } else { + return Ok(()) + } + } + + return Err(missing_static_data_error( + next_static_file_receipt_num.saturating_sub(1), + &static_file_provider, + provider, + StaticFileSegment::Receipts, + )?) + } + } + + Ok(()) + } } impl Stage for ExecutionStage @@ -209,20 +293,7 @@ where let prune_modes = self.adjust_prune_modes(provider, start_block, max_block)?; let static_file_provider = provider.static_file_provider(); - // We only use static files for Receipts, if there is no receipt pruning of any kind. - let write_receipts_to = if self.prune_modes.receipts.is_none() && - self.prune_modes.receipts_log_filter.is_empty() - { - debug!(target: "sync::stages::execution", start = start_block, "Preparing static file producer"); - let mut producer = - prepare_static_file_producer(provider, &static_file_provider, start_block)?; - // Since there might be a database <-> static file inconsistency (read - // `prepare_static_file_producer` for context), we commit the change straight away. - producer.commit()?; - StorageLocation::StaticFiles - } else { - StorageLocation::Database - }; + self.ensure_consistency(provider, input.checkpoint().block_number, None)?; let db = StateProviderDatabase(LatestStateProviderRef::new(provider)); let mut executor = self.executor_provider.batch_executor(db); @@ -361,7 +432,7 @@ where let time = Instant::now(); // write output - provider.write_to_storage(state, OriginalValuesKnown::Yes, write_receipts_to)?; + provider.write_to_storage(state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?; let db_write_duration = time.elapsed(); debug!( @@ -408,10 +479,13 @@ where }) } + self.ensure_consistency(provider, input.checkpoint.block_number, Some(unwind_to))?; + // Unwind account and storage changesets, as well as receipts. 
// // This also updates `PlainStorageState` and `PlainAccountState`. - let bundle_state_with_receipts = provider.take_state_above(unwind_to)?; + let bundle_state_with_receipts = + provider.take_state_above(unwind_to, StorageLocation::Both)?; // Prepare the input for post unwind commit hook, where an `ExExNotification` will be sent. if self.exex_manager_handle.has_exexs() { @@ -432,25 +506,6 @@ where } } - let static_file_provider = provider.static_file_provider(); - - // Unwind all receipts for transactions in the block range - if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() { - // We only use static files for Receipts, if there is no receipt pruning of any kind. - - // prepare_static_file_producer does a consistency check that will unwind static files - // if the expected highest receipt in the files is higher than the database. - // Which is essentially what happens here when we unwind this stage. - let _static_file_producer = - prepare_static_file_producer(provider, &static_file_provider, *range.start())?; - } else { - // If there is any kind of receipt pruning/filtering we use the database, since static - // files do not support filters. - // - // If we hit this case, the receipts have already been unwound by the call to - // `take_state`. - } - // Update the checkpoint. let mut stage_checkpoint = input.checkpoint.execution_stage_checkpoint(); if let Some(stage_checkpoint) = stage_checkpoint.as_mut() { @@ -576,85 +631,6 @@ fn calculate_gas_used_from_headers( Ok(gas_total) } -/// Returns a `StaticFileProviderRWRefMut` static file producer after performing a consistency -/// check. -/// -/// This function compares the highest receipt number recorded in the database with that in the -/// static file to detect any discrepancies due to unexpected shutdowns or database rollbacks. **If -/// the height in the static file is higher**, it rolls back (unwinds) the static file. 
-/// **Conversely, if the height in the database is lower**, it triggers a rollback in the database -/// (by returning [`StageError`]) until the heights in both the database and static file match. -fn prepare_static_file_producer<'a, 'b, Provider>( - provider: &'b Provider, - static_file_provider: &'a StaticFileProvider, - start_block: u64, -) -> Result, StageError> -where - Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, - 'b: 'a, -{ - // Get next expected receipt number - let tx = provider.tx_ref(); - let next_receipt_num = tx - .cursor_read::()? - .seek_exact(start_block)? - .map(|(_, value)| value.first_tx_num) - .unwrap_or(0); - - // Get next expected receipt number in static files - let next_static_file_receipt_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Receipts) - .map(|num| num + 1) - .unwrap_or(0); - - let mut static_file_producer = - static_file_provider.get_writer(start_block, StaticFileSegment::Receipts)?; - - // Check if we had any unexpected shutdown after committing to static files, but - // NOT committing to database. - match next_static_file_receipt_num.cmp(&next_receipt_num) { - // It can be equal when it's a chain of empty blocks, but we still need to update the last - // block in the range. - Ordering::Greater | Ordering::Equal => static_file_producer.prune_receipts( - next_static_file_receipt_num - next_receipt_num, - start_block.saturating_sub(1), - )?, - Ordering::Less => { - let mut last_block = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Receipts) - .unwrap_or(0); - - let last_receipt_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Receipts) - .unwrap_or(0); - - // To be extra safe, we make sure that the last receipt num matches the last block from - // its indices. If not, get it. - loop { - if let Some(indices) = provider.block_body_indices(last_block)? 
{ - if indices.last_tx_num() <= last_receipt_num { - break - } - } - if last_block == 0 { - break - } - last_block -= 1; - } - - let missing_block = - Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - - return Err(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Receipts, - }) - } - } - - Ok(static_file_producer) -} - #[cfg(test)] mod tests { use super::*; @@ -900,7 +876,7 @@ mod tests { // Tests node with database and node with static files for mut mode in modes { - let provider = factory.database_provider_rw().unwrap(); + let mut provider = factory.database_provider_rw().unwrap(); if let Some(mode) = &mut mode { // Simulating a full node where we write receipts to database @@ -909,6 +885,7 @@ mod tests { let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let output = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); @@ -973,9 +950,10 @@ mod tests { "Post changed of a account" ); - let provider = factory.database_provider_rw().unwrap(); + let mut provider = factory.database_provider_rw().unwrap(); let mut stage = stage(); - stage.prune_modes = mode.unwrap_or_default(); + stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.unwrap_or_default()); let _result = stage .unwind( @@ -1050,6 +1028,7 @@ mod tests { // Test Execution let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let result = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); @@ -1057,7 +1036,8 @@ mod tests { // Test Unwind provider = factory.database_provider_rw().unwrap(); let mut stage = stage(); - stage.prune_modes = mode.unwrap_or_default(); + stage.prune_modes = mode.clone().unwrap_or_default(); + 
provider.set_prune_modes(mode.clone().unwrap_or_default()); let result = stage .unwind( diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index caf039faca1..5aa1f3f880c 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -1,5 +1,5 @@ //! Utils for `stages`. -use alloy_primitives::BlockNumber; +use alloy_primitives::{BlockNumber, TxNumber}; use reth_config::config::EtlConfig; use reth_db::BlockNumberList; use reth_db_api::{ @@ -10,7 +10,11 @@ use reth_db_api::{ DatabaseError, }; use reth_etl::Collector; -use reth_provider::DBProvider; +use reth_primitives::StaticFileSegment; +use reth_provider::{ + providers::StaticFileProvider, BlockReader, DBProvider, ProviderError, + StaticFileProviderFactory, +}; use reth_stages_api::StageError; use std::{collections::HashMap, hash::Hash, ops::RangeBounds}; use tracing::info; @@ -244,3 +248,36 @@ impl LoadMode { matches!(self, Self::Flush) } } + +/// Called when database is ahead of static files. Attempts to find the first block we are missing +/// transactions for. +pub(crate) fn missing_static_data_error( + last_tx_num: TxNumber, + static_file_provider: &StaticFileProvider, + provider: &Provider, + segment: StaticFileSegment, +) -> Result +where + Provider: BlockReader + StaticFileProviderFactory, +{ + let mut last_block = + static_file_provider.get_highest_static_file_block(segment).unwrap_or_default(); + + // To be extra safe, we make sure that the last tx num matches the last block from its indices. + // If not, get it. + loop { + if let Some(indices) = provider.block_body_indices(last_block)? 
{ + if indices.last_tx_num() <= last_tx_num { + break + } + } + if last_block == 0 { + break + } + last_block -= 1; + } + + let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); + + Ok(StageError::MissingStaticFileData { block: missing_block, segment }) +} diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8e4d22067df..33c15280d7d 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -206,6 +206,12 @@ impl DatabaseProvider { Ok(Box::new(state_provider)) } + + #[cfg(feature = "test-utils")] + /// Sets the prune modes for provider. + pub fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.prune_modes = prune_modes; + } } impl NodePrimitivesProvider for DatabaseProvider { @@ -335,6 +341,34 @@ impl DatabaseProvider ProviderResult<()> { + if remove_from.database() { + // iterate over block body and remove receipts + self.remove::(from_tx..)?; + } + + if remove_from.static_files() && !self.prune_modes.has_receipts_pruning() { + let static_file_receipt_num = + self.static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts); + + let to_delete = static_file_receipt_num + .map(|static_num| (static_num + 1).saturating_sub(from_tx)) + .unwrap_or_default(); + + self.static_file_provider + .latest_writer(StaticFileSegment::Receipts)? + .prune_receipts(to_delete, last_block)?; + } + + Ok(()) + } } impl TryIntoHistoricalStateProvider for DatabaseProvider { @@ -1951,7 +1985,11 @@ impl StateChangeWriter /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. 
Set the local state to the value in the changeset - fn remove_state_above(&self, block: BlockNumber) -> ProviderResult<()> { + fn remove_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult<()> { let range = block + 1..=self.last_block_number()?; if range.is_empty() { @@ -1964,8 +2002,6 @@ impl StateChangeWriter // get transaction receipts let from_transaction_num = block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); - let to_transaction_num = - block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); let storage_range = BlockNumberAddress::range(range.clone()); @@ -2018,8 +2054,7 @@ impl StateChangeWriter } } - // iterate over block body and remove receipts - self.remove::(from_transaction_num..=to_transaction_num)?; + self.remove_receipts_from(from_transaction_num, block, remove_receipts_from)?; Ok(()) } @@ -2045,7 +2080,11 @@ impl StateChangeWriter /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. 
Set the local state to the value in the changeset - fn take_state_above(&self, block: BlockNumber) -> ProviderResult { + fn take_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult { let range = block + 1..=self.last_block_number()?; if range.is_empty() { @@ -2115,22 +2154,45 @@ impl StateChangeWriter } } - // iterate over block body and create ExecutionResult - let mut receipt_iter = - self.take::(from_transaction_num..=to_transaction_num)?.into_iter(); + // Collect receipts into tuples (tx_num, receipt) to correctly handle pruned receipts + let mut receipts_iter = self + .static_file_provider + .get_range_with_static_file_or_database( + StaticFileSegment::Receipts, + from_transaction_num..to_transaction_num + 1, + |static_file, range, _| { + static_file + .receipts_by_tx_range(range.clone()) + .map(|r| range.into_iter().zip(r).collect()) + }, + |range, _| { + self.tx + .cursor_read::()? + .walk_range(range)? + .map(|r| r.map_err(Into::into)) + .collect() + }, + |_| true, + )? + .into_iter() + .peekable(); let mut receipts = Vec::with_capacity(block_bodies.len()); // loop break if we are at the end of the blocks. 
for (_, block_body) in block_bodies { let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - if let Some((_, receipt)) = receipt_iter.next() { - block_receipts.push(Some(receipt)); + for num in block_body.tx_num_range() { + if receipts_iter.peek().is_some_and(|(n, _)| *n == num) { + block_receipts.push(receipts_iter.next().map(|(_, r)| r)); + } else { + block_receipts.push(None); } } receipts.push(block_receipts); } + self.remove_receipts_from(from_transaction_num, block, remove_receipts_from)?; + Ok(ExecutionOutcome::new_init( state, reverts, @@ -2594,20 +2656,20 @@ impl BlockExecu fn take_block_and_execution_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult> { let range = block + 1..=self.last_block_number()?; self.unwind_trie_state_range(range.clone())?; // get execution res - let execution_state = self.take_state_above(block)?; + let execution_state = self.take_state_above(block, remove_from)?; let blocks = self.sealed_block_with_senders_range(range)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. - self.remove_blocks_above(block, remove_transactions_from)?; + self.remove_blocks_above(block, remove_from)?; // Update pipeline progress self.update_pipeline_stages(block, true)?; @@ -2618,18 +2680,18 @@ impl BlockExecu fn remove_block_and_execution_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult<()> { let range = block + 1..=self.last_block_number()?; self.unwind_trie_state_range(range)?; // remove execution res - self.remove_state_above(block)?; + self.remove_state_above(block, remove_from)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. 
- self.remove_blocks_above(block, remove_transactions_from)?; + self.remove_blocks_above(block, remove_from)?; // Update pipeline progress self.update_pipeline_stages(block, true)?; diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index be4042fe28f..6d7e576124a 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -37,19 +37,25 @@ pub trait BlockExecutionWriter: /// Take all of the blocks above the provided number and their execution result /// /// The passed block number will stay in the database. + /// + /// Accepts [`StorageLocation`] specifying from where should transactions and receipts be + /// removed. fn take_block_and_execution_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult>; /// Remove all of the blocks above the provided number and their execution result /// /// The passed block number will stay in the database. + /// + /// Accepts [`StorageLocation`] specifying from where should transactions and receipts be + /// removed. 
fn remove_block_and_execution_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult<()>; } @@ -57,17 +63,17 @@ impl BlockExecutionWriter for &T { fn take_block_and_execution_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult> { - (*self).take_block_and_execution_above(block, remove_transactions_from) + (*self).take_block_and_execution_above(block, remove_from) } fn remove_block_and_execution_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult<()> { - (*self).remove_block_and_execution_above(block, remove_transactions_from) + (*self).remove_block_and_execution_above(block, remove_from) } } diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 057d3a19a7e..2e46e285070 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -40,9 +40,17 @@ pub trait StateChangeWriter { /// Remove the block range of state above the given block. The state of the passed block is not /// removed. - fn remove_state_above(&self, block: BlockNumber) -> ProviderResult<()>; + fn remove_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult<()>; /// Take the block range of state, recreating the [`ExecutionOutcome`]. The state of the passed /// block is not removed. 
- fn take_state_above(&self, block: BlockNumber) -> ProviderResult; + fn take_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult; } diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index d4d5116de97..c0eeb64b8a2 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -189,25 +189,16 @@ where /// database and static files. This is exclusive, i.e., it only removes blocks above /// `block_number`, and does not remove `block_number`. pub fn remove_blocks_above(&self, block_number: u64) -> ProviderResult<()> { + // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block + debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number"); + self.database().remove_block_and_execution_above(block_number, StorageLocation::Both)?; + // Get highest static file block for the total block range let highest_static_file_block = self .static_file() .get_highest_static_file_block(StaticFileSegment::Headers) .expect("todo: error handling, headers should exist"); - // Get the total txs for the block range, so we have the correct number of columns for - // receipts and transactions - // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block - let tx_range = self - .database() - .transaction_range_by_block_range(block_number + 1..=highest_static_file_block)?; - // We are using end + 1 - start here because the returned range is inclusive. 
- let total_txs = (tx_range.end() + 1).saturating_sub(*tx_range.start()); - - // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block - debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number"); - self.database().remove_block_and_execution_above(block_number, StorageLocation::Both)?; - // IMPORTANT: we use `highest_static_file_block.saturating_sub(block_number)` to make sure // we remove only what is ABOVE the block. // @@ -218,12 +209,6 @@ where .get_writer(block_number, StaticFileSegment::Headers)? .prune_headers(highest_static_file_block.saturating_sub(block_number))?; - if !self.database().prune_modes_ref().has_receipts_pruning() { - self.static_file() - .get_writer(block_number, StaticFileSegment::Receipts)? - .prune_receipts(total_txs, block_number)?; - } - Ok(()) } } From db9b86a7d6e6f23b0008f7a70dc6c1d8cb9d7214 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 27 Nov 2024 12:56:33 +0100 Subject: [PATCH 722/970] Upcast trait bound on `BlockBody::Transaction` to `SignedTransaction` (#12903) --- crates/primitives-traits/src/block/body.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 44120562c17..76bf916add9 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -2,10 +2,9 @@ use alloc::{fmt, vec::Vec}; -use alloy_consensus::Transaction; use alloy_eips::eip4895::Withdrawals; -use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde}; +use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, SignedTransaction}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullBlockBody: BlockBody {} @@ -29,7 +28,7 @@ pub trait BlockBody: + MaybeArbitrary { /// Ordered list of signed transactions as committed in block. 
- type Transaction: Transaction; + type Transaction: SignedTransaction; /// Ommer header type. type OmmerHeader; From b62929c3908ae7c8020f46f4e38d5a454712f46b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 27 Nov 2024 14:40:48 +0100 Subject: [PATCH 723/970] feat: add engine validator addon (#12905) --- crates/e2e-test-utils/src/lib.rs | 10 ++++--- crates/node/builder/src/launch/engine.rs | 5 ++-- crates/node/builder/src/rpc.rs | 34 +++++++++++++++++++----- crates/optimism/node/src/node.rs | 30 ++++++++++++++------- 4 files changed, 57 insertions(+), 22 deletions(-) diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 51951bd4f52..f4939f2c011 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -16,9 +16,10 @@ use reth_chainspec::EthChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_node_builder::{ - components::NodeComponentsBuilder, rpc::RethRpcAddOns, EngineNodeLauncher, - FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodeTypesWithDBAdapter, - NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, + components::NodeComponentsBuilder, + rpc::{EngineValidatorAddOn, RethRpcAddOns}, + EngineNodeLauncher, FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, + NodeTypesWithDBAdapter, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, }; use reth_provider::providers::{BlockchainProvider, BlockchainProvider2, NodeTypesForProvider}; use tracing::{span, Level}; @@ -131,7 +132,8 @@ where Network: PeersHandleProvider, >, >, - N::AddOns: RethRpcAddOns>>>, + N::AddOns: RethRpcAddOns>>> + + EngineValidatorAddOn>>>, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 842eae43581..430ca31a5b1 100644 --- 
a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -40,7 +40,7 @@ use tokio_stream::wrappers::UnboundedReceiverStream; use crate::{ common::{Attached, LaunchContextWith, WithConfigs}, hooks::NodeHooks, - rpc::{RethRpcAddOns, RpcHandle}, + rpc::{EngineValidatorAddOn, RethRpcAddOns, RpcHandle}, setup::build_networked_pipeline, AddOns, AddOnsContext, ExExLauncher, FullNode, LaunchContext, LaunchNode, NodeAdapter, NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter, @@ -74,7 +74,8 @@ where ProviderNodeTypes + NodeTypesWithEngine + PersistenceNodeTypes, T: FullNodeTypes>, CB: NodeComponentsBuilder, - AO: RethRpcAddOns>, + AO: RethRpcAddOns> + + EngineValidatorAddOn>, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 6e0be36c20e..2eae77f8d83 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -575,13 +575,35 @@ impl>> EthApi } } +/// Helper trait that provides the validator for the engine API +pub trait EngineValidatorAddOn: Send { + /// The Validator type to use for the engine API. + type Validator: EngineValidator<::Engine>; + + /// Creates the engine validator for an engine API based node. + fn engine_validator( + &self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future>; +} + +impl EngineValidatorAddOn for RpcAddOns +where + N: FullNodeComponents, + EthApi: EthApiTypes, + EV: EngineValidatorBuilder, +{ + type Validator = EV::Validator; + + async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + self.engine_validator_builder.clone().build(ctx).await + } +} + /// A type that knows how to build the engine validator. -pub trait EngineValidatorBuilder: Send { +pub trait EngineValidatorBuilder: Send + Clone { /// The consensus implementation to build. 
- type Validator: EngineValidator<::Engine> - + Clone - + Unpin - + 'static; + type Validator: EngineValidator<::Engine>; /// Creates the engine validator. fn build( @@ -595,7 +617,7 @@ where Node: FullNodeComponents, Validator: EngineValidator<::Engine> + Clone + Unpin + 'static, - F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send, + F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Clone, Fut: Future> + Send, { type Validator = Validator; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index b3036722d79..d6cd47cf2af 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -1,7 +1,11 @@ //! Optimism Node types config. -use std::sync::Arc; - +use crate::{ + args::RollupArgs, + engine::OpEngineValidator, + txpool::{OpTransactionPool, OpTransactionValidator}, + OpEngineTypes, +}; use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; @@ -17,7 +21,7 @@ use reth_node_builder::{ PayloadServiceBuilder, PoolBuilder, PoolBuilderConfigOverrides, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - rpc::{EngineValidatorBuilder, RethRpcAddOns, RpcAddOns, RpcHandle}, + rpc::{EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcAddOns, RpcHandle}, BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, }; use reth_optimism_chainspec::OpChainSpec; @@ -42,13 +46,7 @@ use reth_transaction_pool::{ TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; - -use crate::{ - args::RollupArgs, - engine::OpEngineValidator, - txpool::{OpTransactionPool, OpTransactionValidator}, - OpEngineTypes, -}; +use std::sync::Arc; /// Storage implementation for Optimism. 
#[derive(Debug, Default, Clone)] @@ -260,6 +258,18 @@ where } } +impl EngineValidatorAddOn for OpAddOns +where + N: FullNodeComponents>, + OpEngineValidator: EngineValidator<::Engine>, +{ + type Validator = OpEngineValidator; + + async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + OpEngineValidatorBuilder::default().build(ctx).await + } +} + /// A regular optimism evm and executor builder. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] From 533b555f870f6d6a8a6fb6b5be24c332359c738e Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 27 Nov 2024 18:02:25 +0400 Subject: [PATCH 724/970] feat: add `Receipt` AT to `ReceiptProvider` (#12890) --- crates/e2e-test-utils/src/rpc.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 2 +- crates/net/network/src/config.rs | 6 ++- crates/net/network/src/eth_requests.rs | 6 ++- crates/net/network/src/test_utils/testnet.rs | 16 +++++-- crates/node/builder/src/builder/mod.rs | 6 ++- crates/node/types/src/lib.rs | 3 ++ crates/optimism/rpc/src/eth/pending_block.rs | 6 ++- crates/optimism/rpc/src/eth/receipt.rs | 5 ++- crates/optimism/rpc/src/eth/transaction.rs | 2 +- crates/rpc/rpc-builder/src/eth.rs | 2 +- crates/rpc/rpc-builder/src/lib.rs | 45 +++++++++++++------ crates/rpc/rpc-eth-api/src/helpers/block.rs | 8 ++-- .../rpc-eth-api/src/helpers/pending_block.rs | 13 ++++-- crates/rpc/rpc-eth-api/src/helpers/receipt.rs | 10 ++--- .../rpc-eth-api/src/helpers/transaction.rs | 7 +-- crates/rpc/rpc-eth-api/src/types.rs | 14 +++--- crates/rpc/rpc-eth-types/src/cache/mod.rs | 6 +-- crates/rpc/rpc/src/debug.rs | 2 +- crates/rpc/rpc/src/eth/core.rs | 2 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 6 ++- crates/rpc/rpc/src/eth/helpers/receipt.rs | 7 ++- .../src/providers/blockchain_provider.rs | 26 +++++++---- .../provider/src/providers/consistent.rs | 27 ++++++----- .../provider/src/providers/database/mod.rs | 20 +++++---- .../src/providers/database/provider.rs | 23 ++++++---- 
crates/storage/provider/src/providers/mod.rs | 15 ++++--- .../provider/src/providers/static_file/jar.rs | 23 ++++++---- .../src/providers/static_file/manager.rs | 29 +++++++----- .../storage/provider/src/test_utils/mock.rs | 4 ++ .../storage/provider/src/test_utils/noop.rs | 1 + crates/storage/provider/src/traits/block.rs | 8 +++- crates/storage/provider/src/traits/full.rs | 17 ++++--- crates/storage/storage-api/src/block.rs | 10 ++--- crates/storage/storage-api/src/receipts.rs | 22 +++++---- .../storage/storage-api/src/transactions.rs | 5 ++- examples/db-access/src/main.rs | 4 +- 37 files changed, 267 insertions(+), 143 deletions(-) diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index a57861d2b14..0006989d316 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -26,7 +26,7 @@ where Node: FullNodeComponents< Types: NodeTypes< ChainSpec: EthereumHardforks, - Primitives: NodePrimitives, + Primitives: NodePrimitives, >, >, EthApi: EthApiSpec + EthTransactions + TraceExt, diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 8819cda966b..7955792ade1 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -539,7 +539,7 @@ where P: DatabaseProviderFactory + BlockReader + StateProviderFactory - + StateReader + + StateReader + Clone + 'static,

::Provider: BlockReader, diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 2e8f9f4cc7a..a7d8a98fae6 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -147,7 +147,11 @@ where impl NetworkConfig where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Clone + + Unpin + + 'static, { /// Starts the networking stack given a [`NetworkConfig`] and returns a handle to the network. pub async fn start_network(self) -> Result { diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index c0d22f97da8..bb45507bdbd 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -80,7 +80,7 @@ impl EthRequestHandler { impl EthRequestHandler where - C: BlockReader + HeaderProvider + ReceiptProvider, + C: BlockReader + HeaderProvider + ReceiptProvider, { /// Returns the list of requested headers fn get_headers_response(&self, request: GetBlockHeaders) -> Vec

{ @@ -224,7 +224,9 @@ where /// This should be spawned or used as part of `tokio::select!`. impl Future for EthRequestHandler where - C: BlockReader + HeaderProvider + Unpin, + C: BlockReader + + HeaderProvider + + Unpin, { type Output = (); diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 34c08f637ba..9801ecf9293 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -194,7 +194,11 @@ where impl Testnet where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Clone + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, { /// Spawns the testnet to a separate task @@ -253,7 +257,10 @@ impl fmt::Debug for Testnet { impl Future for Testnet where - C: BlockReader + HeaderProvider + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, { type Output = (); @@ -448,7 +455,10 @@ where impl Future for Peer where - C: BlockReader + HeaderProvider + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, { type Output = (); diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index c3361273433..06d5294d800 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -651,7 +651,8 @@ impl BuilderContext { pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle where Pool: TransactionPool + Unpin + 'static, - Node::Provider: BlockReader, + Node::Provider: + BlockReader, { self.start_network_with(builder, pool, Default::default()) } @@ -670,7 +671,8 @@ impl BuilderContext { ) -> NetworkHandle where Pool: TransactionPool + Unpin + 'static, - Node::Provider: BlockReader, + Node::Provider: + BlockReader, { let (handle, network, txpool, eth) = builder .transactions(pool, 
tx_config) diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 40d0defe24e..c0d266e5775 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -244,3 +244,6 @@ pub type BodyTy = <::Primitives as NodePrimitives>::BlockBody /// Helper adapter type for accessing [`NodePrimitives::SignedTx`] on [`NodeTypes`]. pub type TxTy = <::Primitives as NodePrimitives>::SignedTx; + +/// Helper adapter type for accessing [`NodePrimitives::Receipt`] on [`NodeTypes`]. +pub type ReceiptTy = <::Primitives as NodePrimitives>::Receipt; diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 0319e15c81e..98ea65778d8 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -24,8 +24,10 @@ impl LoadPendingBlock for OpEthApi where Self: SpawnBlocking, N: RpcNodeCore< - Provider: BlockReaderIdExt - + EvmEnvProvider + Provider: BlockReaderIdExt< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool, diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 5064c9ed5cf..e803ea21019 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -11,7 +11,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OpHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; -use reth_provider::{ChainSpecProvider, TransactionsProvider}; +use reth_provider::{ChainSpecProvider, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; @@ -21,7 +21,8 @@ impl LoadReceipt for OpEthApi where Self: Send + Sync, N: FullNodeComponents>, - Self::Provider: 
TransactionsProvider, + Self::Provider: + TransactionsProvider + ReceiptProvider, { async fn build_transaction_receipt( &self, diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 19bcd31dacc..3202dc46ad1 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -74,7 +74,7 @@ where impl TransactionCompat for OpEthApi where - N: FullNodeComponents, + N: FullNodeComponents>, { type Transaction = Transaction; type Error = OpEthApiError; diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index be25236ff81..59b3ef870fe 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -29,7 +29,7 @@ pub struct EthHandlers { impl EthHandlers where Provider: StateProviderFactory - + BlockReader + + BlockReader + EvmEnvProvider + Clone + Unpin diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ddfe173ee1c..8f5c84835aa 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -37,8 +37,11 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: FullRpcProvider -//! + AccountReader +//! Provider: FullRpcProvider< +//! Transaction = TransactionSigned, +//! Block = reth_primitives::Block, +//! Receipt = reth_primitives::Receipt, +//! > + AccountReader //! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, @@ -114,8 +117,11 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: FullRpcProvider -//! + AccountReader +//! Provider: FullRpcProvider< +//! Transaction = TransactionSigned, +//! Block = reth_primitives::Block, +//! Receipt = reth_primitives::Receipt, +//! > + AccountReader //! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! 
Network: NetworkInfo + Peers + Clone + 'static, @@ -195,7 +201,7 @@ use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_primitives::EthPrimitives; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, FullRpcProvider, StateProviderFactory, + EvmEnvProvider, FullRpcProvider, ReceiptProvider, StateProviderFactory, }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, @@ -263,7 +269,9 @@ pub async fn launch, ) -> Result where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider + + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, @@ -646,7 +654,10 @@ where EngineT: EngineTypes, EngineApi: EngineApiServer, EthApi: FullEthApiServer, - Provider: BlockReader::Block>, + Provider: BlockReader< + Block = ::Block, + Receipt = ::Receipt, + >, { let Self { provider, @@ -722,7 +733,7 @@ where ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, - Provider: BlockReader, + Provider: BlockReader, { let Self { provider, @@ -757,7 +768,10 @@ where ) -> TransportRpcModules<()> where EthApi: FullEthApiServer, - Provider: BlockReader::Block>, + Provider: BlockReader< + Block = ::Block, + Receipt = ::Receipt, + >, { let mut modules = TransportRpcModules::default(); @@ -916,7 +930,7 @@ impl RpcRegistryInner where Provider: StateProviderFactory - + BlockReader + + BlockReader + EvmEnvProvider + Clone + Unpin @@ -1125,7 +1139,10 @@ where pub fn register_debug(&mut self) -> &mut Self where EthApi: EthApiSpec + EthTransactions + TraceExt, - Provider: BlockReader::Block>, + Provider: BlockReader< + Block = ::Block, + Receipt = reth_primitives::Receipt, + >, { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); @@ -1279,8 +1296,10 @@ where impl 
RpcRegistryInner where - Provider: FullRpcProvider::Block> - + AccountReader + Provider: FullRpcProvider< + Block = ::Block, + Receipt = ::Receipt, + > + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index c78c7c59876..cce0aa01b01 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -7,8 +7,10 @@ use alloy_eips::BlockId; use alloy_rpc_types_eth::{Block, Header, Index}; use futures::Future; use reth_node_api::BlockBody; -use reth_primitives::{Receipt, SealedBlockFor, SealedBlockWithSenders}; -use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; +use reth_primitives::{SealedBlockFor, SealedBlockWithSenders}; +use reth_provider::{ + BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider, ProviderReceipt, +}; use reth_rpc_types_compat::block::from_block; use crate::{ @@ -24,7 +26,7 @@ pub type BlockReceiptsResult = Result>>, E>; pub type BlockAndReceiptsResult = Result< Option<( SealedBlockFor<<::Provider as BlockReader>::Block>, - Arc>, + Arc::Provider>>>, )>, ::Error, >; diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index c166c31d755..36ba2c1e84e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -22,7 +22,7 @@ use reth_primitives::{ }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, - ReceiptProvider, StateProviderFactory, + ProviderReceipt, ReceiptProvider, StateProviderFactory, }; use reth_revm::{ database::StateProviderDatabase, @@ -45,8 +45,10 @@ use tracing::debug; pub trait LoadPendingBlock: EthApiTypes + RpcNodeCore< - Provider: BlockReaderIdExt - + EvmEnvProvider + Provider: BlockReaderIdExt< + Block 
= reth_primitives::Block, + Receipt = reth_primitives::Receipt, + > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool, @@ -119,7 +121,10 @@ pub trait LoadPendingBlock: &self, ) -> impl Future< Output = Result< - Option<(SealedBlockWithSenders<::Block>, Vec)>, + Option<( + SealedBlockWithSenders<::Block>, + Vec>, + )>, Self::Error, >, > + Send diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 7e1992017d8..f663c5863b5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -2,8 +2,8 @@ //! loads receipt data w.r.t. network. use futures::Future; -use reth_primitives::{Receipt, TransactionMeta}; -use reth_provider::TransactionsProvider; +use reth_primitives::TransactionMeta; +use reth_provider::{ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider}; use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; @@ -11,13 +11,13 @@ use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. pub trait LoadReceipt: - EthApiTypes + RpcNodeCoreExt + Send + Sync + EthApiTypes + RpcNodeCoreExt + Send + Sync { /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. 
fn build_transaction_receipt( &self, - tx: ::Transaction, + tx: ProviderTx, meta: TransactionMeta, - receipt: Receipt, + receipt: ProviderReceipt, ) -> impl Future, Self::Error>> + Send; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 3b6fc837c40..6ad8f8fd6ec 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -8,9 +8,10 @@ use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; -use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned}; +use reth_primitives::{SealedBlockWithSenders, TransactionMeta, TransactionSigned}; use reth_provider::{ - BlockNumReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, + BlockNumReader, BlockReaderIdExt, ProviderReceipt, ProviderTx, ReceiptProvider, + TransactionsProvider, }; use reth_rpc_eth_types::{ utils::{binary_search, recover_raw_transaction}, @@ -159,7 +160,7 @@ pub trait EthTransactions: LoadTransaction { hash: TxHash, ) -> impl Future< Output = Result< - Option<(ProviderTx, TransactionMeta, Receipt)>, + Option<(ProviderTx, TransactionMeta, ProviderReceipt)>, Self::Error, >, > + Send diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 994f9ac884d..2bac068483c 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -8,7 +8,7 @@ use std::{ use alloy_network::Network; use alloy_rpc_types_eth::Block; use reth_primitives::TransactionSigned; -use reth_provider::TransactionsProvider; +use reth_provider::{ReceiptProvider, TransactionsProvider}; use reth_rpc_types_compat::TransactionCompat; use crate::{AsEthApiError, FromEthApiError, FromEvmError, RpcNodeCore}; @@ -47,8 +47,10 @@ pub type RpcError = 
::Error; /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. pub trait FullEthApiTypes where - Self: RpcNodeCore> - + EthApiTypes< + Self: RpcNodeCore< + Provider: TransactionsProvider + + ReceiptProvider, + > + EthApiTypes< TransactionCompat: TransactionCompat< ::Transaction, Transaction = RpcTransaction, @@ -59,8 +61,10 @@ where } impl FullEthApiTypes for T where - T: RpcNodeCore> - + EthApiTypes< + T: RpcNodeCore< + Provider: TransactionsProvider + + ReceiptProvider, + > + EthApiTypes< TransactionCompat: TransactionCompat< ::Transaction, Transaction = RpcTransaction, diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 9e83e323c1a..70c8b1a4f54 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -106,7 +106,7 @@ impl EthStateCache { ) -> Self where Provider: StateProviderFactory - + BlockReader + + BlockReader + EvmEnvProvider + Clone + Unpin @@ -128,7 +128,7 @@ impl EthStateCache { ) -> Self where Provider: StateProviderFactory - + BlockReader + + BlockReader + EvmEnvProvider + Clone + Unpin @@ -348,7 +348,7 @@ where impl Future for EthStateCacheService where Provider: StateProviderFactory - + BlockReader + + BlockReader + EvmEnvProvider + Clone + Unpin diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index f16faddbfff..9fc1be93a2f 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -794,7 +794,7 @@ where #[async_trait] impl DebugApiServer for DebugApi where - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider + StateProviderFactory diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 34500b370e6..b6b37c9f393 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -438,7 +438,7 @@ mod tests { use crate::EthApi; fn build_test_eth_api< - P: BlockReaderIdExt + P: BlockReaderIdExt 
+ BlockReader + ChainSpecProvider + EvmEnvProvider diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 23e5f671dbe..a67522ce032 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -18,8 +18,10 @@ impl LoadPendingBlock where Self: SpawnBlocking + RpcNodeCore< - Provider: BlockReaderIdExt - + EvmEnvProvider + Provider: BlockReaderIdExt< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool, diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 13b0dab2593..ae723fc5314 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,7 +1,7 @@ //! Builds an RPC receipt response w.r.t. data layout of network. use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_provider::TransactionsProvider; +use reth_provider::{ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; @@ -9,7 +9,10 @@ use crate::EthApi; impl LoadReceipt for EthApi where - Self: RpcNodeCoreExt>, + Self: RpcNodeCoreExt< + Provider: TransactionsProvider + + ReceiptProvider, + >, { async fn build_transaction_receipt( &self, diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 1dd1e47ec6a..37d984e6774 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -25,7 +25,7 @@ use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use 
reth_execution_types::ExecutionOutcome; -use reth_node_types::{BlockTy, NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, @@ -288,7 +288,7 @@ impl BlockReader for BlockchainProvider2 { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -411,28 +411,33 @@ impl TransactionsProvider for BlockchainProvider2 { } impl ReceiptProvider for BlockchainProvider2 { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.consistent_provider()?.receipt(id) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.consistent_provider()?.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.consistent_provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.consistent_provider()?.receipts_by_tx_range(range) } } impl ReceiptProviderIdExt for BlockchainProvider2 { - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { self.consistent_provider()?.receipts_by_block_id(block) } } @@ -759,6 +764,8 @@ impl AccountReader for BlockchainProvider2 { } impl StateReader for BlockchainProvider2 { + type Receipt = ReceiptTy; + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. 
/// /// If data for the block does not exist, this will return [`None`]. @@ -768,7 +775,10 @@ impl StateReader for BlockchainProvider2 { /// inconsistent. Currently this can safely be called within the blockchain tree thread, /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the /// first place. - fn get_state(&self, block: BlockNumber) -> ProviderResult> { + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>> { StateReader::get_state(&self.consistent_provider()?, block) } } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 64a940190db..cf473a1fbff 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -19,10 +19,10 @@ use reth_db::models::BlockNumberAddress; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; -use reth_node_types::{BlockTy, TxTy}; +use reth_node_types::{BlockTy, ReceiptTy, TxTy}; use reth_primitives::{ - Account, BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, - StorageEntry, TransactionMeta, + Account, BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, + TransactionMeta, }; use reth_primitives_traits::{Block, BlockBody}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; @@ -151,7 +151,7 @@ impl ConsistentProvider { pub fn get_state( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>>> { if range.is_empty() { return Ok(None) } @@ -828,7 +828,7 @@ impl BlockReader for ConsistentProvider { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -1078,7 +1078,9 @@ impl TransactionsProvider for ConsistentProvider { } 
impl ReceiptProvider for ConsistentProvider { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.receipt(id), @@ -1088,7 +1090,7 @@ impl ReceiptProvider for ConsistentProvider { ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { for block_state in self.head_block.iter().flat_map(|b| b.chain()) { let executed_block = block_state.block_ref(); let block = executed_block.block(); @@ -1112,7 +1114,10 @@ impl ReceiptProvider for ConsistentProvider { self.storage_provider.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( block, |db_provider| db_provider.receipts_by_block(block), @@ -1123,7 +1128,7 @@ impl ReceiptProvider for ConsistentProvider { fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_in_memory_or_storage_by_tx_range( range, |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), @@ -1135,7 +1140,7 @@ impl ReceiptProvider for ConsistentProvider { } impl ReceiptProviderIdExt for ConsistentProvider { - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { BlockId::Hash(rpc_block_hash) => { let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; @@ -1496,6 +1501,8 @@ impl AccountReader for ConsistentProvider { } impl StateReader for ConsistentProvider { + type Receipt = ReceiptTy; + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. /// /// If data for the block does not exist, this will return [`None`]. 
diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index e033803680a..3c22a1a73a2 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -19,10 +19,10 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::{BlockTy, NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ - BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, + BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, + TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -381,7 +381,7 @@ impl BlockReader for ProviderFactory { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { self.provider()?.pending_block_and_receipts() } @@ -508,7 +508,8 @@ impl TransactionsProvider for ProviderFactory { } impl ReceiptProvider for ProviderFactory { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, id, @@ -517,18 +518,21 @@ impl ReceiptProvider for ProviderFactory { ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.provider()?.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { 
self.provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Receipts, to_range(range), diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 33c15280d7d..56949201752 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -46,11 +46,11 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; -use reth_node_types::{BlockTy, BodyTy, NodeTypes, TxTy}; +use reth_node_types::{BlockTy, BodyTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives::{ - Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, - SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, - TransactionMeta, TransactionSignedNoHash, + Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, + TransactionSignedNoHash, }; use reth_primitives_traits::{Block as _, BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; @@ -1199,7 +1199,7 @@ impl BlockReader for DatabaseProvid fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(None) } @@ -1529,7 +1529,9 @@ impl TransactionsProvider for Datab } impl ReceiptProvider for DatabaseProvider { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, id, @@ -1538,7 +1540,7 @@ impl 
ReceiptProvider for DatabasePr ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { self.receipt(id) } else { @@ -1546,7 +1548,10 @@ impl ReceiptProvider for DatabasePr } } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { if let Some(number) = self.convert_hash_or_number(block)? { if let Some(body) = self.block_body_indices(number)? { let tx_range = body.tx_num_range(); @@ -1563,7 +1568,7 @@ impl ReceiptProvider for DatabasePr fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Receipts, to_range(range), diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index ab15093ac5e..92c94952a34 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -24,7 +24,7 @@ use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::{BlockTy, FullNodePrimitives, NodeTypes, NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, FullNodePrimitives, NodeTypes, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ Account, BlockWithSenders, EthPrimitives, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, TransactionMeta, @@ -516,22 +516,27 @@ impl TransactionsProvider for BlockchainProvider { } impl ReceiptProvider for BlockchainProvider { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.database.receipt(id) } - fn receipt_by_hash(&self, hash: 
TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.database.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.database.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.receipts_by_tx_range(range) } } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index e04d46312f6..659b093d9d6 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -18,7 +18,7 @@ use reth_db::{ table::Decompress, }; use reth_node_types::NodePrimitives; -use reth_primitives::{transaction::recover_signers, Receipt, SealedHeader, TransactionMeta}; +use reth_primitives::{transaction::recover_signers, SealedHeader, TransactionMeta}; use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ @@ -300,14 +300,16 @@ impl> TransactionsPr } } -impl> ReceiptProvider - for StaticFileJarProvider<'_, N> +impl> + ReceiptProvider for StaticFileJarProvider<'_, N> { - fn receipt(&self, num: TxNumber) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) + type Receipt = N::Receipt; + + fn receipt(&self, num: TxNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(tx_static_file) = &self.auxiliary_jar { if let Some(num) = tx_static_file.transaction_id(hash)? 
{ return self.receipt(num) @@ -316,7 +318,10 @@ impl> ReceiptProvide Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { // Related to indexing tables. StaticFile should get the tx_range and call static file // provider with `receipt()` instead for each Err(ProviderError::UnsupportedProvider) @@ -325,13 +330,13 @@ impl> ReceiptProvide fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut receipts = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(tx) = cursor.get_one::>(num.into())? { + if let Some(tx) = cursor.get_one::>(num.into())? { receipts.push(tx) } } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 34f3b91f627..2f32edcc294 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -38,8 +38,8 @@ use reth_primitives::{ DEFAULT_BLOCKS_PER_STATIC_FILE, }, transaction::recover_signers, - BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSignedNoHash, + BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, + TransactionMeta, TransactionSignedNoHash, }; use reth_primitives_traits::SignedTransaction; use reth_stages_types::{PipelineTarget, StageId}; @@ -1341,10 +1341,12 @@ impl BlockHashReader for StaticFileProvider { } } -impl> ReceiptProvider +impl> ReceiptProvider for StaticFileProvider { - fn receipt(&self, num: TxNumber) -> ProviderResult> { + type Receipt = N::Receipt; + + fn receipt(&self, num: TxNumber) -> ProviderResult> { 
self.get_segment_provider_from_transaction(StaticFileSegment::Receipts, num, None) .and_then(|provider| provider.receipt(num)) .or_else(|err| { @@ -1356,31 +1358,36 @@ impl> ReceiptProvider }) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(num) = self.transaction_id(hash)? { return self.receipt(num) } Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { unreachable!() } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Receipts, to_range(range), - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } } -impl> TransactionsProviderExt for StaticFileProvider { +impl> TransactionsProviderExt + for StaticFileProvider +{ fn transaction_hashes_by_range( &self, tx_range: Range, @@ -1575,7 +1582,7 @@ impl BlockNumReader for StaticFileProvider { } } -impl> BlockReader for StaticFileProvider { +impl> BlockReader for StaticFileProvider { type Block = N::Block; fn find_block_by_hash( @@ -1606,7 +1613,7 @@ impl> BlockReader for StaticFileProvider< fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index cfee10f9e38..12c0330ac0e 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -379,6 +379,8 @@ impl TransactionsProvider for MockEthProvider { } impl ReceiptProvider for MockEthProvider { + type Receipt = Receipt; + fn receipt(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } @@ -828,6 
+830,8 @@ impl ChangeSetReader for MockEthProvider { } impl StateReader for MockEthProvider { + type Receipt = Receipt; + fn get_state(&self, _block: BlockNumber) -> ProviderResult> { Ok(None) } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 7f427b9305a..ff6b3fccbe1 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -260,6 +260,7 @@ impl TransactionsProvider for NoopProvider { } impl ReceiptProvider for NoopProvider { + type Receipt = Receipt; fn receipt(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 6d7e576124a..e7669b0eade 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -80,8 +80,14 @@ impl BlockExecutionWriter for &T { /// This just receives state, or [`ExecutionOutcome`], from the provider #[auto_impl::auto_impl(&, Arc, Box)] pub trait StateReader: Send + Sync { + /// Receipt type in [`ExecutionOutcome`]. + type Receipt: Send + Sync; + /// Get the [`ExecutionOutcome`] for the given block - fn get_state(&self, block: BlockNumber) -> ProviderResult>; + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>>; } /// Block Writer diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index cb2cbe3438f..0d28f83739b 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -7,19 +7,21 @@ use crate::{ }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_chainspec::EthereumHardforks; -use reth_node_types::{BlockTy, NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_storage_api::NodePrimitivesProvider; /// Helper trait to unify all provider traits for simplicity. 
pub trait FullProvider: DatabaseProviderFactory - + StaticFileProviderFactory - + BlockReaderIdExt, Block = BlockTy> + + NodePrimitivesProvider + + StaticFileProviderFactory + + BlockReaderIdExt, Block = BlockTy, Receipt = ReceiptTy> + AccountReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + ChangeSetReader - + CanonStateSubscriptions + + CanonStateSubscriptions + ForkChoiceSubscriptions + StageCheckpointReader + Clone @@ -30,14 +32,15 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory - + StaticFileProviderFactory - + BlockReaderIdExt, Block = BlockTy> + + NodePrimitivesProvider + + StaticFileProviderFactory + + BlockReaderIdExt, Block = BlockTy, Receipt = ReceiptTy> + AccountReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + ChangeSetReader - + CanonStateSubscriptions + + CanonStateSubscriptions + ForkChoiceSubscriptions + StageCheckpointReader + Clone diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 494a7e5aa41..204e9027da2 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -6,9 +6,7 @@ use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; -use reth_primitives::{ - BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, -}; +use reth_primitives::{BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader}; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; @@ -95,7 +93,7 @@ pub trait BlockReader: #[allow(clippy::type_complexity)] fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>>; + ) -> ProviderResult, Vec)>>; /// Returns the ommers/uncle headers of the given block from the database. 
/// @@ -186,7 +184,7 @@ impl BlockReader for std::sync::Arc { } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { @@ -255,7 +253,7 @@ impl BlockReader for &T { } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs index bd6b978e375..67257cce67c 100644 --- a/crates/storage/storage-api/src/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -1,33 +1,38 @@ use crate::BlockIdReader; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{TxHash, TxNumber}; -use reth_primitives::Receipt; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; -/// Client trait for fetching [Receipt] data . +/// Client trait for fetching receipt data. #[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProvider: Send + Sync { + /// The receipt type. + type Receipt: Send + Sync; + /// Get receipt by transaction number /// /// Returns `None` if the transaction is not found. - fn receipt(&self, id: TxNumber) -> ProviderResult>; + fn receipt(&self, id: TxNumber) -> ProviderResult>; /// Get receipt by transaction hash. /// /// Returns `None` if the transaction is not found. - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult>; + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult>; /// Get receipts by block num or hash. /// /// Returns `None` if the block is not found. - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>>; + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>>; /// Get receipts by tx range. 
fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult>; + ) -> ProviderResult>; } /// Trait extension for `ReceiptProvider`, for types that implement `BlockId` conversion. @@ -40,10 +45,9 @@ pub trait ReceiptProvider: Send + Sync { /// so this trait can only be implemented for types that implement `BlockIdReader`. The /// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and /// retrieving the receipts should be done using the type's `ReceiptProvider` methods. -#[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { /// Get receipt by block id - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { let id = match block { BlockId::Hash(hash) => BlockHashOrNumber::Hash(hash.block_hash), BlockId::Number(num_tag) => { @@ -64,7 +68,7 @@ pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { fn receipts_by_number_or_tag( &self, number_or_tag: BlockNumberOrTag, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.receipts_by_block_id(number_or_tag.into()) } } diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index ca2bcaeb469..3bb20b7e161 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,4 +1,4 @@ -use crate::{BlockNumReader, BlockReader}; +use crate::{BlockNumReader, BlockReader, ReceiptProvider}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; use reth_primitives::TransactionMeta; @@ -84,6 +84,9 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { /// A helper type alias to access [`TransactionsProvider::Transaction`]. pub type ProviderTx

=

::Transaction; +/// A helper type alias to access [`ReceiptProvider::Receipt`]. +pub type ProviderReceipt

=

::Receipt; + /// Client trait for fetching additional transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProviderExt: BlockReader + Send + Sync { diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 1fbf833293d..9f95fb51d91 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -166,7 +166,9 @@ fn block_provider_example>( /// The `ReceiptProvider` allows querying the receipts tables. fn receipts_provider_example< - T: ReceiptProvider + TransactionsProvider + HeaderProvider, + T: ReceiptProvider + + TransactionsProvider + + HeaderProvider, >( provider: T, ) -> eyre::Result<()> { From 1131bdecc31cacd31f58552e365eaf670c83fc0a Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 27 Nov 2024 15:14:29 +0100 Subject: [PATCH 725/970] feat(engine): proof fetching on state update for StateRootTask (#12458) Co-authored-by: Roman Krasiuk Co-authored-by: Alexey Shekhirin --- Cargo.lock | 5 + crates/engine/tree/Cargo.toml | 11 + .../tree/benches/state_root_from_proofs.rs | 81 ++ crates/engine/tree/src/tree/mod.rs | 1 + crates/engine/tree/src/tree/root.rs | 802 ++++++++++++++++-- crates/trie/parallel/src/proof.rs | 8 +- crates/trie/parallel/src/root.rs | 5 + crates/trie/trie/src/input.rs | 2 +- 8 files changed, 842 insertions(+), 73 deletions(-) create mode 100644 crates/engine/tree/benches/state_root_from_proofs.rs diff --git a/Cargo.lock b/Cargo.lock index 9ed5ce894bd..01e53bb4632 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7253,6 +7253,8 @@ dependencies = [ "crossbeam-channel", "futures", "metrics", + "rand 0.8.5", + "rayon", "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", @@ -7264,6 +7266,7 @@ dependencies = [ "reth-errors", "reth-ethereum-engine-primitives", "reth-evm", + "reth-execution-errors", "reth-exex-types", "reth-metrics", "reth-network-p2p", @@ -7281,8 +7284,10 @@ dependencies = [ "reth-stages-api", "reth-static-file", "reth-tasks", + 
"reth-testing-utils", "reth-tracing", "reth-trie", + "reth-trie-db", "reth-trie-parallel", "reth-trie-sparse", "revm-primitives", diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 7a71ce411eb..01d7e7e2024 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -21,6 +21,7 @@ reth-consensus.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true reth-evm.workspace = true +reth-execution-errors.workspace = true reth-network-p2p.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-builder.workspace = true @@ -32,6 +33,7 @@ reth-prune.workspace = true reth-revm.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true +reth-trie-db.workspace = true reth-trie-parallel.workspace = true reth-trie-sparse.workspace = true reth-trie.workspace = true @@ -55,6 +57,7 @@ metrics.workspace = true reth-metrics = { workspace = true, features = ["common"] } # misc +rayon.workspace = true tracing.workspace = true # optional deps for test-utils @@ -77,6 +80,7 @@ reth-prune.workspace = true reth-rpc-types-compat.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } reth-static-file.workspace = true +reth-testing-utils.workspace = true reth-tracing.workspace = true # alloy @@ -85,11 +89,16 @@ alloy-rlp.workspace = true assert_matches.workspace = true criterion.workspace = true crossbeam-channel = "0.5.13" +rand.workspace = true [[bench]] name = "channel_perf" harness = false +[[bench]] +name = "state_root_from_proofs" +harness = false + [features] test-utils = [ "reth-blockchain-tree/test-utils", @@ -110,4 +119,6 @@ test-utils = [ "reth-static-file", "reth-tracing", "reth-trie/test-utils", + "reth-prune-types?/test-utils", + "reth-trie-db/test-utils", ] diff --git a/crates/engine/tree/benches/state_root_from_proofs.rs b/crates/engine/tree/benches/state_root_from_proofs.rs new file mode 100644 index 00000000000..4c8e85696ea --- 
/dev/null +++ b/crates/engine/tree/benches/state_root_from_proofs.rs @@ -0,0 +1,81 @@ +#![allow(missing_docs)] + +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use reth_engine_tree::tree::calculate_state_root_from_proofs; +use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory}; +use reth_trie::{ + updates::TrieUpdatesSorted, HashedPostState, HashedPostStateSorted, HashedStorage, MultiProof, +}; +use revm_primitives::{ + keccak256, Account, AccountInfo, AccountStatus, Address, EvmStorage, EvmStorageSlot, HashMap, + HashSet, B256, U256, +}; + +fn create_test_state(size: usize) -> (HashMap>, HashedPostState) { + let mut state = HashedPostState::default(); + let mut targets = HashMap::default(); + + for i in 0..size { + let address = Address::random(); + let hashed_address = keccak256(address); + + // Create account + let info = AccountInfo { + balance: U256::from(100 + i), + nonce: i as u64, + code_hash: B256::random(), + code: Default::default(), + }; + + // Create storage with multiple slots + let mut storage = EvmStorage::default(); + let mut slots = HashSet::default(); + for j in 0..100 { + let slot = U256::from(j); + let value = U256::from(100 + j); + storage.insert(slot, EvmStorageSlot::new(value)); + slots.insert(keccak256(B256::from(slot))); + } + + let account = Account { info, storage: storage.clone(), status: AccountStatus::Loaded }; + + state.accounts.insert(hashed_address, Some(account.info.into())); + state.storages.insert( + hashed_address, + HashedStorage::from_iter( + false, + storage.into_iter().map(|(k, v)| (keccak256(B256::from(k)), v.present_value)), + ), + ); + targets.insert(hashed_address, slots); + } + + (targets, state) +} + +fn bench_state_root_collection(c: &mut Criterion) { + let factory = create_test_provider_factory(); + let view = ConsistentDbView::new(factory, None); + + let mut group = c.benchmark_group("state_root_collection"); + for size in &[10, 100, 1000] { + let 
(_targets, state) = create_test_state(*size); + let multiproof = MultiProof::default(); + + group.bench_with_input(format!("size_{}", size), size, |b, _| { + b.iter(|| { + black_box(calculate_state_root_from_proofs( + view.clone(), + &TrieUpdatesSorted::default(), + &HashedPostStateSorted::default(), + multiproof.clone(), + state.clone(), + )) + }); + }); + } + group.finish(); +} + +criterion_group!(benches, bench_state_root_collection); +criterion_main!(benches); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 7955792ade1..a2680563925 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -76,6 +76,7 @@ pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::InvalidBlockHook; +pub use root::calculate_state_root_from_proofs; mod root; diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 27f835ec754..32bfbf68604 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,23 +1,35 @@ //! State root task related functionality. 
-use alloy_primitives::map::FbHashMap; +use alloy_primitives::map::{DefaultHashBuilder, FbHashMap, FbHashSet, HashMap, HashSet}; use alloy_rlp::{BufMut, Encodable}; -use reth_provider::providers::ConsistentDbView; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; +use reth_errors::ProviderResult; +use reth_execution_errors::TrieWitnessError; +use reth_provider::{ + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, +}; use reth_trie::{ - updates::TrieUpdates, HashedPostState, MultiProof, Nibbles, TrieAccount, TrieInput, - EMPTY_ROOT_HASH, + hashed_cursor::HashedPostStateCursorFactory, + proof::Proof, + trie_cursor::InMemoryTrieCursorFactory, + updates::{TrieUpdates, TrieUpdatesSorted}, + witness::{next_root_from_proofs, target_nodes}, + HashedPostState, HashedPostStateSorted, HashedStorage, MultiProof, Nibbles, TrieAccount, + TrieInput, EMPTY_ROOT_HASH, }; +use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseProof, DatabaseTrieCursorFactory}; use reth_trie_parallel::root::ParallelStateRootError; use reth_trie_sparse::{SparseStateTrie, SparseStateTrieResult}; -use revm_primitives::{map::FbHashSet, EvmState, B256}; +use revm_primitives::{keccak256, EvmState, B256}; use std::{ + collections::BTreeMap, sync::{ - mpsc::{self, Receiver, RecvError}, + mpsc::{self, Receiver, RecvError, Sender}, Arc, }, time::{Duration, Instant}, }; -use tracing::debug; +use tracing::{debug, error, trace}; /// The level below which the sparse trie hashes are calculated in [`update_sparse_trie`]. 
const SPARSE_TRIE_INCREMENTAL_LEVEL: usize = 2; @@ -56,7 +68,7 @@ pub(crate) struct StateRootConfig { } /// Wrapper for std channel receiver to maintain compatibility with `UnboundedReceiverStream` -#[allow(dead_code)] +#[derive(Debug)] pub(crate) struct StdReceiverStream { rx: Receiver, } @@ -72,6 +84,92 @@ impl StdReceiverStream { } } +/// Messages used internally by the state root task +#[derive(Debug)] +#[allow(dead_code)] +pub(crate) enum StateRootMessage { + /// New state update from transaction execution + StateUpdate(EvmState), + /// Proof calculation completed for a specific state update + ProofCalculated { + /// The calculated proof + proof: MultiProof, + /// The index of this proof in the sequence of state updates + sequence_number: u64, + }, + /// State root calculation completed + RootCalculated { + /// The calculated state root + root: B256, + /// The trie updates produced during calculation + updates: TrieUpdates, + /// Time taken to calculate the root + elapsed: Duration, + }, +} + +/// Handle to track proof calculation ordering +#[derive(Debug, Default)] +pub(crate) struct ProofSequencer { + /// The next proof sequence number to be produced. + next_sequence: u64, + /// The next sequence number expected to be delivered. 
+ next_to_deliver: u64, + /// Buffer for out-of-order proofs + pending_proofs: BTreeMap, +} + +impl ProofSequencer { + /// Creates a new proof sequencer + pub(crate) fn new() -> Self { + Self::default() + } + + /// Gets the next sequence number and increments the counter + pub(crate) fn next_sequence(&mut self) -> u64 { + let seq = self.next_sequence; + self.next_sequence += 1; + seq + } + + /// Adds a proof and returns all sequential proofs if we have a continuous sequence + pub(crate) fn add_proof(&mut self, sequence: u64, proof: MultiProof) -> Vec { + if sequence >= self.next_to_deliver { + self.pending_proofs.insert(sequence, proof); + } + + // return early if we don't have the next expected proof + if !self.pending_proofs.contains_key(&self.next_to_deliver) { + return Vec::new() + } + + let mut consecutive_proofs = Vec::with_capacity(self.pending_proofs.len()); + let mut current_sequence = self.next_to_deliver; + + // keep collecting proofs as long as we have consecutive sequence numbers + while let Some(proof) = self.pending_proofs.remove(¤t_sequence) { + consecutive_proofs.push(proof); + current_sequence += 1; + + // if we don't have the next number, stop collecting + if !self.pending_proofs.contains_key(¤t_sequence) { + break; + } + } + + if !consecutive_proofs.is_empty() { + self.next_to_deliver += consecutive_proofs.len() as u64; + } + + consecutive_proofs + } + + /// Returns true if we still have pending proofs + pub(crate) fn has_pending(&self) -> bool { + !self.pending_proofs.is_empty() + } +} + /// Standalone task that receives a transaction state stream and updates relevant /// data structures to calculate state root. /// @@ -80,25 +178,41 @@ impl StdReceiverStream { /// fetches the proofs for relevant accounts from the database and reveal them /// to the tree. /// Then it updates relevant leaves according to the result of the transaction. -#[allow(dead_code)] +#[derive(Debug)] pub(crate) struct StateRootTask { - /// Incoming state updates. 
- state_stream: StdReceiverStream, - /// Task configuration. + /// Receiver for state root related messages + rx: Receiver, + /// Sender for state root related messages + tx: Sender, + /// Task configuration config: StateRootConfig, + /// Current state + state: HashedPostState, + /// Proof sequencing handler + proof_sequencer: ProofSequencer, + /// Whether we're currently calculating a root + calculating_root: bool, } #[allow(dead_code)] impl StateRootTask where - Factory: Send + 'static, + Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, { - /// Creates a new `StateRootTask`. - pub(crate) const fn new( + /// Creates a new state root task with the unified message channel + pub(crate) fn new( config: StateRootConfig, - state_stream: StdReceiverStream, + tx: Sender, + rx: Receiver, ) -> Self { - Self { config, state_stream } + Self { + config, + rx, + tx, + state: Default::default(), + proof_sequencer: ProofSequencer::new(), + calculating_root: false, + } } /// Spawns the state root task and returns a handle to await its result. @@ -118,31 +232,389 @@ where /// Handles state updates. fn on_state_update( - _view: &reth_provider::providers::ConsistentDbView, - _input: &std::sync::Arc, - _state: EvmState, + view: ConsistentDbView, + input: Arc, + update: EvmState, + state: &mut HashedPostState, + proof_sequence_number: u64, + state_root_message_sender: Sender, ) { - // Default implementation of state update handling - // TODO: calculate hashed state update and dispatch proof gathering for it. 
+ let mut hashed_state_update = HashedPostState::default(); + for (address, account) in update { + if account.is_touched() { + let hashed_address = keccak256(address); + + let destroyed = account.is_selfdestructed(); + hashed_state_update.accounts.insert( + hashed_address, + if destroyed || account.is_empty() { None } else { Some(account.info.into()) }, + ); + + if destroyed || !account.storage.is_empty() { + let storage = account.storage.into_iter().filter_map(|(slot, value)| { + value + .is_changed() + .then(|| (keccak256(B256::from(slot)), value.present_value)) + }); + hashed_state_update + .storages + .insert(hashed_address, HashedStorage::from_iter(destroyed, storage)); + } + } + } + + // Dispatch proof gathering for this state update + let targets = hashed_state_update + .accounts + .keys() + .filter(|hashed_address| { + !state.accounts.contains_key(*hashed_address) && + !state.storages.contains_key(*hashed_address) + }) + .map(|hashed_address| (*hashed_address, HashSet::default())) + .chain(hashed_state_update.storages.iter().map(|(hashed_address, storage)| { + (*hashed_address, storage.storage.keys().copied().collect()) + })) + .collect::>(); + + rayon::spawn(move || { + let provider = match view.provider_ro() { + Ok(provider) => provider, + Err(error) => { + error!(target: "engine::root", ?error, "Could not get provider"); + return; + } + }; + + // TODO: replace with parallel proof + let result = + Proof::overlay_multiproof(provider.tx_ref(), input.as_ref().clone(), targets); + match result { + Ok(proof) => { + let _ = state_root_message_sender.send(StateRootMessage::ProofCalculated { + proof, + sequence_number: proof_sequence_number, + }); + } + Err(e) => { + error!(target: "engine::root", error = ?e, "Could not calculate multiproof"); + } + } + }); + + state.extend(hashed_state_update); + } + + /// Handler for new proof calculated, aggregates all the existing sequential proofs. 
+ fn on_proof(&mut self, proof: MultiProof, sequence_number: u64) -> Option { + let ready_proofs = self.proof_sequencer.add_proof(sequence_number, proof); + + if ready_proofs.is_empty() { + None + } else { + // combine all ready proofs into one + ready_proofs.into_iter().reduce(|mut acc, proof| { + acc.extend(proof); + acc + }) + } + } + + /// Spawns root calculation with the current state and proofs + fn spawn_root_calculation(&mut self, multiproof: MultiProof) { + if self.calculating_root { + return; + } + self.calculating_root = true; + + trace!( + target: "engine::root", + account_proofs = multiproof.account_subtree.len(), + storage_proofs = multiproof.storages.len(), + "Spawning root calculation" + ); + + let tx = self.tx.clone(); + let view = self.config.consistent_view.clone(); + let input = self.config.input.clone(); + let state = self.state.clone(); + + rayon::spawn(move || { + let result = calculate_state_root_from_proofs( + view, + &input.nodes.clone().into_sorted(), + &input.state.clone().into_sorted(), + multiproof, + state, + ); + match result { + Ok((root, updates, elapsed)) => { + trace!( + target: "engine::root", + %root, + ?elapsed, + "Root calculation completed, sending result" + ); + let _ = tx.send(StateRootMessage::RootCalculated { root, updates, elapsed }); + } + Err(e) => { + error!(target: "engine::root", error = ?e, "Could not calculate state root"); + } + } + }); + } + + fn run(mut self) -> StateRootResult { + let mut current_multiproof = MultiProof::default(); + let mut trie_updates = TrieUpdates::default(); + let mut current_root: B256; + let mut updates_received = 0; + let mut proofs_processed = 0; + let mut roots_calculated = 0; + + loop { + match self.rx.recv() { + Ok(message) => match message { + StateRootMessage::StateUpdate(update) => { + updates_received += 1; + trace!( + target: "engine::root", + len = update.len(), + total_updates = updates_received, + "Received new state update" + ); + Self::on_state_update( + 
self.config.consistent_view.clone(), + self.config.input.clone(), + update, + &mut self.state, + self.proof_sequencer.next_sequence(), + self.tx.clone(), + ); + } + StateRootMessage::ProofCalculated { proof, sequence_number } => { + proofs_processed += 1; + trace!( + target: "engine::root", + sequence = sequence_number, + total_proofs = proofs_processed, + "Processing calculated proof" + ); + + if let Some(combined_proof) = self.on_proof(proof, sequence_number) { + if self.calculating_root { + current_multiproof.extend(combined_proof); + } else { + self.spawn_root_calculation(combined_proof); + } + } + } + StateRootMessage::RootCalculated { root, updates, elapsed } => { + roots_calculated += 1; + trace!( + target: "engine::root", + %root, + ?elapsed, + roots_calculated, + proofs = proofs_processed, + updates = updates_received, + "Computed intermediate root" + ); + current_root = root; + trie_updates.extend(updates); + self.calculating_root = false; + + let has_new_proofs = !current_multiproof.account_subtree.is_empty() || + !current_multiproof.storages.is_empty(); + let all_proofs_received = proofs_processed >= updates_received; + let no_pending = !self.proof_sequencer.has_pending(); + + trace!( + target: "engine::root", + has_new_proofs, + all_proofs_received, + no_pending, + "State check" + ); + + // only spawn new calculation if we have accumulated new proofs + if has_new_proofs { + trace!( + target: "engine::root", + account_proofs = current_multiproof.account_subtree.len(), + storage_proofs = current_multiproof.storages.len(), + "Spawning subsequent root calculation" + ); + self.spawn_root_calculation(std::mem::take(&mut current_multiproof)); + } else if all_proofs_received && no_pending { + debug!( + target: "engine::root", + total_updates = updates_received, + total_proofs = proofs_processed, + roots_calculated, + "All proofs processed, ending calculation" + ); + return Ok((current_root, trie_updates)); + } + } + }, + Err(_) => { + // this means our 
internal message channel is closed, which shouldn't happen + // in normal operation since we hold both ends + error!( + target: "engine::root", + "Internal message channel closed unexpectedly" + ); + return Err(ParallelStateRootError::Other( + "Internal message channel closed unexpectedly".into(), + )); + } + } + } } } -#[allow(dead_code)] -impl StateRootTask +/// Calculate state root from proofs. +pub fn calculate_state_root_from_proofs( + view: ConsistentDbView, + input_nodes_sorted: &TrieUpdatesSorted, + input_state_sorted: &HashedPostStateSorted, + multiproof: MultiProof, + state: HashedPostState, +) -> ProviderResult<(B256, TrieUpdates, Duration)> where - Factory: Send + 'static, + Factory: DatabaseProviderFactory + Clone, { - fn run(self) -> StateRootResult { - while let Ok(state) = self.state_stream.recv() { - Self::on_state_update(&self.config.consistent_view, &self.config.input, state); - } + let started_at = Instant::now(); - // TODO: - // * keep track of proof calculation - // * keep track of intermediate root computation - // * return final state root result - Ok((B256::default(), TrieUpdates::default())) - } + let proof_targets: HashMap> = state + .accounts + .keys() + .map(|hashed_address| (*hashed_address, HashSet::default())) + .chain(state.storages.iter().map(|(hashed_address, storage)| { + (*hashed_address, storage.storage.keys().copied().collect()) + })) + .collect(); + + let account_trie_nodes = proof_targets + .into_par_iter() + .map_init( + || view.provider_ro().unwrap(), + |provider_ro, (hashed_address, hashed_slots)| { + // Gather and record storage trie nodes for this account. 
+ let mut storage_trie_nodes = BTreeMap::default(); + let storage = state.storages.get(&hashed_address); + for hashed_slot in hashed_slots { + let slot_key = Nibbles::unpack(hashed_slot); + let slot_value = storage + .and_then(|s| s.storage.get(&hashed_slot)) + .filter(|v| !v.is_zero()) + .map(|v| alloy_rlp::encode_fixed_size(v).to_vec()); + let proof = multiproof + .storages + .get(&hashed_address) + .map(|proof| { + proof + .subtree + .iter() + .filter(|e| slot_key.starts_with(e.0)) + .collect::>() + }) + .unwrap_or_default(); + storage_trie_nodes.extend(target_nodes( + slot_key.clone(), + slot_value, + None, + proof, + )?); + } + + let storage_root = next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { + // Right pad the target with 0s. + let mut padded_key = key.pack(); + padded_key.resize(32, 0); + let mut targets = HashMap::with_hasher(DefaultHashBuilder::default()); + let mut slots = HashSet::with_hasher(DefaultHashBuilder::default()); + slots.insert(B256::from_slice(&padded_key)); + targets.insert(hashed_address, slots); + let proof = Proof::new( + InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + input_nodes_sorted, + ), + HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + input_state_sorted, + ), + ) + .multiproof(targets) + .unwrap(); + + // The subtree only contains the proof for a single target. + let node = proof + .storages + .get(&hashed_address) + .and_then(|storage_multiproof| storage_multiproof.subtree.get(&key)) + .cloned() + .ok_or(TrieWitnessError::MissingTargetNode(key))?; + Ok(node) + })?; + + // Gather and record account trie nodes. 
+ let account = state + .accounts + .get(&hashed_address) + .ok_or(TrieWitnessError::MissingAccount(hashed_address))?; + let value = (account.is_some() || storage_root != EMPTY_ROOT_HASH).then(|| { + let mut encoded = Vec::with_capacity(128); + TrieAccount::from((account.unwrap_or_default(), storage_root)) + .encode(&mut encoded as &mut dyn BufMut); + encoded + }); + let key = Nibbles::unpack(hashed_address); + let proof = multiproof.account_subtree.iter().filter(|e| key.starts_with(e.0)); + target_nodes(key.clone(), value, None, proof) + }, + ) + .try_reduce(BTreeMap::new, |mut acc, map| { + acc.extend(map.into_iter()); + Ok(acc) + })?; + + let provider_ro = view.provider_ro()?; + + let state_root = next_root_from_proofs(account_trie_nodes, |key: Nibbles| { + // Right pad the target with 0s. + let mut padded_key = key.pack(); + padded_key.resize(32, 0); + let mut targets = HashMap::with_hasher(DefaultHashBuilder::default()); + targets.insert( + B256::from_slice(&padded_key), + HashSet::with_hasher(DefaultHashBuilder::default()), + ); + let proof = Proof::new( + InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + input_nodes_sorted, + ), + HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + input_state_sorted, + ), + ) + .multiproof(targets) + .unwrap(); + + // The subtree only contains the proof for a single target. 
+ let node = proof + .account_subtree + .get(&key) + .cloned() + .ok_or(TrieWitnessError::MissingTargetNode(key))?; + Ok(node) + })?; + + Ok((state_root, Default::default(), started_at.elapsed())) } /// Updates the sparse trie with the given proofs and state, and returns the updated trie and the @@ -216,56 +688,250 @@ fn update_sparse_trie( #[cfg(test)] mod tests { use super::*; - use reth_provider::{providers::ConsistentDbView, test_utils::MockEthProvider}; - use reth_trie::TrieInput; + use reth_primitives::{Account as RethAccount, StorageEntry}; + use reth_provider::{ + providers::ConsistentDbView, test_utils::create_test_provider_factory, HashingWriter, + }; + use reth_testing_utils::generators::{self, Rng}; + use reth_trie::{test_utils::state_root, TrieInput}; use revm_primitives::{ - Account, AccountInfo, AccountStatus, Address, EvmState, EvmStorage, EvmStorageSlot, - HashMap, B256, U256, + Account as RevmAccount, AccountInfo, AccountStatus, Address, EvmState, EvmStorageSlot, + HashMap, B256, KECCAK_EMPTY, U256, }; use std::sync::Arc; - fn create_mock_config() -> StateRootConfig { - let factory = MockEthProvider::default(); - let view = ConsistentDbView::new(factory, None); - let input = Arc::new(TrieInput::default()); - StateRootConfig { consistent_view: view, input } - } - - fn create_mock_state() -> revm_primitives::EvmState { - let mut state_changes: EvmState = HashMap::default(); - let storage = EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); - let account = Account { - info: AccountInfo { - balance: U256::from(100), - nonce: 10, - code_hash: B256::random(), - code: Default::default(), + fn convert_revm_to_reth_account(revm_account: &RevmAccount) -> RethAccount { + RethAccount { + balance: revm_account.info.balance, + nonce: revm_account.info.nonce, + bytecode_hash: if revm_account.info.code_hash == KECCAK_EMPTY { + None + } else { + Some(revm_account.info.code_hash) }, - storage, - status: AccountStatus::Loaded, - }; + } + } 
- let address = Address::random(); - state_changes.insert(address, account); + fn create_mock_state_updates(num_accounts: usize, updates_per_account: usize) -> Vec { + let mut rng = generators::rng(); + let all_addresses: Vec

= (0..num_accounts).map(|_| rng.gen()).collect(); + let mut updates = Vec::new(); + + for _ in 0..updates_per_account { + let num_accounts_in_update = rng.gen_range(1..=num_accounts); + let mut state_update = EvmState::default(); + + let selected_addresses = &all_addresses[0..num_accounts_in_update]; + + for &address in selected_addresses { + let mut storage = HashMap::default(); + if rng.gen_bool(0.7) { + for _ in 0..rng.gen_range(1..10) { + let slot = U256::from(rng.gen::()); + storage.insert( + slot, + EvmStorageSlot::new_changed(U256::ZERO, U256::from(rng.gen::())), + ); + } + } + + let account = RevmAccount { + info: AccountInfo { + balance: U256::from(rng.gen::()), + nonce: rng.gen::(), + code_hash: KECCAK_EMPTY, + code: Some(Default::default()), + }, + storage, + status: AccountStatus::Touched, + }; + + state_update.insert(address, account); + } + + updates.push(state_update); + } - state_changes + updates } #[test] fn test_state_root_task() { - let config = create_mock_config(); + reth_tracing::init_test_tracing(); + + let factory = create_test_provider_factory(); let (tx, rx) = std::sync::mpsc::channel(); - let stream = StdReceiverStream::new(rx); - let task = StateRootTask::new(config, stream); + let state_updates = create_mock_state_updates(10, 10); + let mut hashed_state = HashedPostState::default(); + let mut accumulated_state: HashMap)> = + HashMap::default(); + + { + let provider_rw = factory.provider_rw().expect("failed to get provider"); + + for update in &state_updates { + let account_updates = update.iter().map(|(address, account)| { + (*address, Some(convert_revm_to_reth_account(account))) + }); + provider_rw + .insert_account_for_hashing(account_updates) + .expect("failed to insert accounts"); + + let storage_updates = update.iter().map(|(address, account)| { + let storage_entries = account.storage.iter().map(|(slot, value)| { + StorageEntry { key: B256::from(*slot), value: value.present_value } + }); + (*address, storage_entries) + }); + 
provider_rw + .insert_storage_for_hashing(storage_updates) + .expect("failed to insert storage"); + } + provider_rw.commit().expect("failed to commit changes"); + } + + for update in &state_updates { + for (address, account) in update { + let hashed_address = keccak256(*address); + + if account.is_touched() { + let destroyed = account.is_selfdestructed(); + hashed_state.accounts.insert( + hashed_address, + if destroyed || account.is_empty() { + None + } else { + Some(account.info.clone().into()) + }, + ); + + if destroyed || !account.storage.is_empty() { + let storage = account + .storage + .iter() + .filter(|&(_slot, value)| (!destroyed && value.is_changed())) + .map(|(slot, value)| { + (keccak256(B256::from(*slot)), value.present_value) + }); + hashed_state + .storages + .insert(hashed_address, HashedStorage::from_iter(destroyed, storage)); + } + } + + let storage: HashMap = account + .storage + .iter() + .map(|(k, v)| (B256::from(*k), v.present_value)) + .collect(); + + let entry = accumulated_state.entry(*address).or_default(); + entry.0 = convert_revm_to_reth_account(account); + entry.1.extend(storage); + } + } + + let config = StateRootConfig { + consistent_view: ConsistentDbView::new(factory, None), + input: Arc::new(TrieInput::from_state(hashed_state)), + }; + let task = StateRootTask::new(config, tx.clone(), rx); let handle = task.spawn(); - for _ in 0..10 { - tx.send(create_mock_state()).expect("failed to send state"); + for update in state_updates { + tx.send(StateRootMessage::StateUpdate(update)).expect("failed to send state"); } drop(tx); - let result = handle.wait_for_result(); - assert!(result.is_ok(), "sync block execution failed"); + let (root_from_task, _) = handle.wait_for_result().expect("task failed"); + let root_from_base = state_root(accumulated_state); + + assert_eq!( + root_from_task, root_from_base, + "State root mismatch: task={root_from_task:?}, base={root_from_base:?}" + ); + } + + #[test] + fn test_add_proof_in_sequence() { + let mut 
sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + sequencer.next_sequence = 2; + + let ready = sequencer.add_proof(0, proof1); + assert_eq!(ready.len(), 1); + assert!(!sequencer.has_pending()); + + let ready = sequencer.add_proof(1, proof2); + assert_eq!(ready.len(), 1); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_out_of_order() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + let proof3 = MultiProof::default(); + sequencer.next_sequence = 3; + + let ready = sequencer.add_proof(2, proof3); + assert_eq!(ready.len(), 0); + assert!(sequencer.has_pending()); + + let ready = sequencer.add_proof(0, proof1); + assert_eq!(ready.len(), 1); + assert!(sequencer.has_pending()); + + let ready = sequencer.add_proof(1, proof2); + assert_eq!(ready.len(), 2); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_with_gaps() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof3 = MultiProof::default(); + sequencer.next_sequence = 3; + + let ready = sequencer.add_proof(0, proof1); + assert_eq!(ready.len(), 1); + + let ready = sequencer.add_proof(2, proof3); + assert_eq!(ready.len(), 0); + assert!(sequencer.has_pending()); + } + + #[test] + fn test_add_proof_duplicate_sequence() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + + let ready = sequencer.add_proof(0, proof1); + assert_eq!(ready.len(), 1); + + let ready = sequencer.add_proof(0, proof2); + assert_eq!(ready.len(), 0); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_batch_processing() { + let mut sequencer = ProofSequencer::new(); + let proofs: Vec<_> = (0..5).map(|_| MultiProof::default()).collect(); + sequencer.next_sequence = 5; + + sequencer.add_proof(4, proofs[4].clone()); + 
sequencer.add_proof(2, proofs[2].clone()); + sequencer.add_proof(1, proofs[1].clone()); + sequencer.add_proof(3, proofs[3].clone()); + + let ready = sequencer.add_proof(0, proofs[0].clone()); + assert_eq!(ready.len(), 5); + assert!(!sequencer.has_pending()); } } diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index dcb1a0231dd..f285079f252 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -33,7 +33,7 @@ pub struct ParallelProof { /// Consistent view of the database. view: ConsistentDbView, /// Trie input. - input: TrieInput, + input: Arc, /// Parallel state root metrics. #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics, @@ -41,7 +41,7 @@ pub struct ParallelProof { impl ParallelProof { /// Create new state proof generator. - pub fn new(view: ConsistentDbView, input: TrieInput) -> Self { + pub fn new(view: ConsistentDbView, input: Arc) -> Self { Self { view, input, @@ -62,8 +62,8 @@ where ) -> Result { let mut tracker = ParallelTrieTracker::default(); - let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted()); - let hashed_state_sorted = Arc::new(self.input.state.into_sorted()); + let trie_nodes_sorted = self.input.nodes.clone().into_sorted(); + let hashed_state_sorted = self.input.state.clone().into_sorted(); // Extend prefix sets with targets let mut prefix_sets = self.input.prefix_sets.clone(); diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index b4e300c7290..8d2b18f5e11 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -4,6 +4,7 @@ use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets use alloy_primitives::B256; use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; +use reth_db::DatabaseError; use reth_execution_errors::StorageRootError; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, @@ -225,6 
+226,9 @@ pub enum ParallelStateRootError { /// Provider error. #[error(transparent)] Provider(#[from] ProviderError), + /// Other unspecified error. + #[error("{_0}")] + Other(String), } impl From for ProviderError { @@ -234,6 +238,7 @@ impl From for ProviderError { ParallelStateRootError::StorageRoot(StorageRootError::Database(error)) => { Self::Database(error) } + ParallelStateRootError::Other(other) => Self::Database(DatabaseError::Other(other)), } } } diff --git a/crates/trie/trie/src/input.rs b/crates/trie/trie/src/input.rs index 18f9ada2f4a..ea71558c2c1 100644 --- a/crates/trie/trie/src/input.rs +++ b/crates/trie/trie/src/input.rs @@ -1,7 +1,7 @@ use crate::{prefix_set::TriePrefixSetsMut, updates::TrieUpdates, HashedPostState}; /// Inputs for trie-related computations. -#[derive(Default, Debug)] +#[derive(Default, Debug, Clone)] pub struct TrieInput { /// The collection of cached in-memory intermediate trie nodes that /// can be reused for computation. From 7a6a725d914cbe35b44b03d890341b2835ebe879 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 27 Nov 2024 19:31:13 +0400 Subject: [PATCH 726/970] feat: add `Receipt` AT to writer traits (#12892) --- .../commands/debug_cmd/in_memory_merkle.rs | 7 +- bin/reth/src/commands/debug_cmd/merkle.rs | 7 +- .../src/commands/debug_cmd/replay_engine.rs | 5 +- .../cli/commands/src/stage/dump/execution.rs | 19 +- crates/cli/commands/src/stage/dump/merkle.rs | 12 +- crates/exex/exex/src/backfill/test_utils.rs | 5 +- crates/node/builder/src/launch/common.rs | 2 + crates/node/builder/src/setup.rs | 14 +- .../cli/src/commands/import_receipts.rs | 11 +- crates/primitives/src/receipt.rs | 7 +- crates/stages/stages/src/stages/execution.rs | 7 +- crates/storage/db-common/src/init.rs | 16 +- crates/storage/db/src/tables/mod.rs | 4 +- .../src/providers/blockchain_provider.rs | 2 +- .../provider/src/providers/consistent.rs | 5 +- .../src/providers/database/provider.rs | 171 +++++++++--------- 
crates/storage/provider/src/providers/mod.rs | 6 +- .../src/providers/static_file/manager.rs | 6 +- .../src/providers/static_file/writer.rs | 5 +- crates/storage/provider/src/traits/block.rs | 4 +- crates/storage/provider/src/traits/mod.rs | 2 +- crates/storage/provider/src/traits/state.rs | 16 +- crates/storage/provider/src/writer/mod.rs | 36 ++-- crates/trie/parallel/benches/root.rs | 3 +- 24 files changed, 211 insertions(+), 161 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index d592e956c20..870dc1ddf23 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -60,7 +60,10 @@ impl> Command { async fn build_network< N: ProviderNodeTypes< ChainSpec = C::ChainSpec, - Primitives: NodePrimitives, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, >, >( &self, @@ -178,7 +181,7 @@ impl> Command { .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError)?, )?; - provider_rw.write_to_storage( + provider_rw.write_state( execution_outcome, OriginalValuesKnown::No, StorageLocation::Database, diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index ba6fd12f895..78e32df5266 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -59,7 +59,10 @@ impl> Command { async fn build_network< N: ProviderNodeTypes< ChainSpec = C::ChainSpec, - Primitives: NodePrimitives, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, >, >( &self, @@ -163,7 +166,7 @@ impl> Command { executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; let execution_outcome = executor.finalize(); - provider_rw.write_to_storage( + provider_rw.write_state( execution_outcome, OriginalValuesKnown::Yes, 
StorageLocation::Database, diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 40987167391..04d3b5763ae 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -58,7 +58,10 @@ impl> Command { async fn build_network< N: ProviderNodeTypes< ChainSpec = C::ChainSpec, - Primitives: NodePrimitives, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, >, >( &self, diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 4afcdf4461e..000c1b542db 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -28,7 +28,10 @@ pub(crate) async fn dump_execution_stage( where N: ProviderNodeTypes< DB = Arc, - Primitives: NodePrimitives, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, >, E: BlockExecutorProvider, { @@ -136,7 +139,12 @@ fn import_tables_with_range( /// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. fn unwind_and_copy< - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, >( db_tool: &DbTool, from: u64, @@ -174,7 +182,12 @@ fn dry_run( executor: E, ) -> eyre::Result<()> where - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, E: BlockExecutorProvider, { info!(target: "reth::cli", "Executing stage. 
[dry-run]"); diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index 3fa0c4f0728..ce187437218 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -28,7 +28,10 @@ use tracing::info; pub(crate) async fn dump_merkle_stage< N: ProviderNodeTypes< DB = Arc, - Primitives: NodePrimitives, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, >, >( db_tool: &DbTool, @@ -74,7 +77,12 @@ pub(crate) async fn dump_merkle_stage< /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. fn unwind_and_copy< - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, >( db_tool: &DbTool, range: (u64, u64), diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 861d42f042b..6d93314e22b 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -62,6 +62,7 @@ where Primitives: FullNodePrimitives< Block = reth_primitives::Block, BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, >, >, { @@ -171,6 +172,7 @@ where Primitives: FullNodePrimitives< Block = reth_primitives::Block, BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, >, >, { @@ -194,7 +196,8 @@ pub(crate) fn blocks_and_execution_outcome( ) -> eyre::Result<(Vec, ExecutionOutcome)> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives, + N::Primitives: + FullNodePrimitives, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 987839912f5..830909c8cc4 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ 
-385,6 +385,7 @@ where N::Primitives: FullNodePrimitives< Block = reth_primitives::Block, BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, >, { let factory = ProviderFactory::new( @@ -455,6 +456,7 @@ where N::Primitives: FullNodePrimitives< Block = reth_primitives::Block, BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, >, { let factory = self.create_provider_factory().await?; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index ea0e6b9fe79..092c1fdf651 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -41,8 +41,11 @@ where N: ProviderNodeTypes, Client: EthBlockClient + 'static, Executor: BlockExecutorProvider, - N::Primitives: - FullNodePrimitives, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -90,8 +93,11 @@ where H: HeaderDownloader
+ 'static, B: BodyDownloader> + 'static, Executor: BlockExecutorProvider, - N::Primitives: - FullNodePrimitives, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, { let mut builder = Pipeline::::builder(); diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index 59d596685de..a5c12a48cfb 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -15,7 +15,7 @@ use reth_execution_types::ExecutionOutcome; use reth_node_core::version::SHORT_VERSION; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::bedrock::is_dup_tx; -use reth_primitives::Receipts; +use reth_primitives::{NodePrimitives, Receipts}; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, DatabaseProviderFactory, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, @@ -85,7 +85,10 @@ pub async fn import_receipts_from_file( filter: F, ) -> eyre::Result<()> where - N: ProviderNodeTypes, + N: ProviderNodeTypes< + ChainSpec = OpChainSpec, + Primitives: NodePrimitives, + >, P: AsRef, F: FnMut(u64, &mut Receipts) -> usize, { @@ -123,7 +126,7 @@ pub async fn import_receipts_from_reader( mut filter: F, ) -> eyre::Result where - N: ProviderNodeTypes, + N: ProviderNodeTypes>, F: FnMut(u64, &mut Receipts) -> usize, { let static_file_provider = provider_factory.static_file_provider(); @@ -219,7 +222,7 @@ where ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default()); // finally, write the receipts - provider.write_to_storage( + provider.write_state( execution_outcome, OriginalValuesKnown::Yes, StorageLocation::StaticFiles, diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 77a44dc39e5..95d707d1b2d 100644 --- 
a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -131,7 +131,6 @@ impl InMemorySize for Receipt { Debug, PartialEq, Eq, - Default, Serialize, Deserialize, From, @@ -187,6 +186,12 @@ impl From for ReceiptWithBloom { } } +impl Default for Receipts { + fn default() -> Self { + Self { receipt_vec: Vec::new() } + } +} + /// [`Receipt`] with calculated bloom filter. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 297130c34cb..ce969f2577d 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -16,7 +16,7 @@ use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimit use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, StateChangeWriter, StateCommitmentProvider, StateWriter, + OriginalValuesKnown, ProviderError, StateCommitmentProvider, StateWriter, StaticFileProviderFactory, StatsReader, StorageLocation, TransactionVariant, }; use reth_prune_types::PruneModes; @@ -262,9 +262,8 @@ where + BlockReader + StaticFileProviderFactory + StatsReader - + StateChangeWriter + BlockHashReader - + StateWriter + + StateWriter + StateCommitmentProvider, { /// Return the id of the stage @@ -432,7 +431,7 @@ where let time = Instant::now(); // write output - provider.write_to_storage(state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?; + provider.write_state(state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?; let db_write_duration = time.elapsed(); debug!( diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 367190b587e..ec31edd0682 100644 --- 
a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -13,8 +13,8 @@ use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, writer::UnifiedStorageWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, - OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointWriter, StateChangeWriter, - StateWriter, StaticFileProviderFactory, StorageLocation, TrieWriter, + OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointWriter, StateWriter, + StaticFileProviderFactory, StorageLocation, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; @@ -75,7 +75,7 @@ where + HistoryWriter + HeaderProvider + HashingWriter - + StateChangeWriter + + StateWriter + StateWriter + AsRef, { @@ -146,7 +146,6 @@ pub fn insert_genesis_state<'a, 'b, Provider>( where Provider: StaticFileProviderFactory + DBProvider - + StateChangeWriter + HeaderProvider + StateWriter + AsRef, @@ -163,7 +162,6 @@ pub fn insert_state<'a, 'b, Provider>( where Provider: StaticFileProviderFactory + DBProvider - + StateChangeWriter + HeaderProvider + StateWriter + AsRef, @@ -233,11 +231,7 @@ where Vec::new(), ); - provider.write_to_storage( - execution_outcome, - OriginalValuesKnown::Yes, - StorageLocation::Database, - )?; + provider.write_state(execution_outcome, OriginalValuesKnown::Yes, StorageLocation::Database)?; trace!(target: "reth::cli", "Inserted state"); @@ -355,7 +349,6 @@ where + HistoryWriter + HeaderProvider + HashingWriter - + StateChangeWriter + TrieWriter + StateWriter + AsRef, @@ -478,7 +471,6 @@ where + HashingWriter + HistoryWriter + StateWriter - + StateChangeWriter + AsRef, { let accounts_len = collector.len(); diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs 
index aafdf606bb3..a1fea62f0d8 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -346,9 +346,9 @@ tables! { } /// Canonical only Stores transaction receipts. - table Receipts { + table Receipts { type Key = TxNumber; - type Value = Receipt; + type Value = R; } /// Stores all smart contract bytecodes. diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 37d984e6774..08f5e4680a2 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -145,7 +145,7 @@ impl BlockchainProvider2 { pub fn get_state( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>>> { self.consistent_provider()?.get_state(range) } } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index cf473a1fbff..e70f4b4e5e1 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1512,7 +1512,10 @@ impl StateReader for ConsistentProvider { /// inconsistent. Currently this can safely be called within the blockchain tree thread, /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the /// first place. 
- fn get_state(&self, block: BlockNumber) -> ProviderResult> { + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>> { if let Some(state) = self.head_block.as_ref().and_then(|b| b.block_on_chain(block.into())) { let state = state.block_ref().execution_outcome().clone(); Ok(Some(state)) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 56949201752..05f2501c102 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -15,10 +15,9 @@ use crate::{ HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, - StateChangeWriter, StateCommitmentProvider, StateProviderBox, StateWriter, - StaticFileProviderFactory, StatsReader, StorageLocation, StorageReader, StorageTrieWriter, - TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, - WithdrawalsProvider, + StateCommitmentProvider, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, + StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, + TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; use alloy_consensus::Header; use alloy_eips::{ @@ -351,7 +350,7 @@ impl DatabaseProvider ProviderResult<()> { if remove_from.database() { // iterate over block body and remove receipts - self.remove::(from_tx..)?; + self.remove::>>(from_tx..)?; } if remove_from.static_files() && !self.prune_modes.has_receipts_pruning() { @@ -1536,7 +1535,7 @@ impl ReceiptProvider for DatabasePr StaticFileSegment::Receipts, id, |static_file| static_file.receipt(id), - || Ok(self.tx.get::(id)?), + || Ok(self.tx.get::>(id)?), ) } @@ -1573,7 +1572,10 @@ impl ReceiptProvider for DatabasePr 
StaticFileSegment::Receipts, to_range(range), |static_file, range, _| static_file.receipts_by_tx_range(range), - |range, _| self.cursor_read_collect::(range).map_err(Into::into), + |range, _| { + self.cursor_read_collect::>(range) + .map_err(Into::into) + }, |_| true, ) } @@ -1798,9 +1800,77 @@ impl StorageReader for DatabaseProvider } } -impl StateChangeWriter +impl StateWriter for DatabaseProvider { + type Receipt = ReceiptTy; + + fn write_state( + &self, + execution_outcome: ExecutionOutcome, + is_value_known: OriginalValuesKnown, + write_receipts_to: StorageLocation, + ) -> ProviderResult<()> { + let (plain_state, reverts) = + execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); + + self.write_state_reverts(reverts, execution_outcome.first_block)?; + self.write_state_changes(plain_state)?; + + let mut bodies_cursor = self.tx.cursor_read::()?; + + let has_receipts_pruning = self.prune_modes.has_receipts_pruning() || + execution_outcome.receipts.iter().flatten().any(|receipt| receipt.is_none()); + + // Prepare receipts cursor if we are going to write receipts to the database + // + // We are writing to database if requested or if there's any kind of receipt pruning + // configured + let mut receipts_cursor = (write_receipts_to.database() || has_receipts_pruning) + .then(|| self.tx.cursor_write::>()) + .transpose()?; + + // Prepare receipts static writer if we are going to write receipts to static files + // + // We are writing to static files if requested and if there's no receipt pruning configured + let mut receipts_static_writer = (write_receipts_to.static_files() && + !has_receipts_pruning) + .then(|| { + self.static_file_provider + .get_writer(execution_outcome.first_block, StaticFileSegment::Receipts) + }) + .transpose()?; + + for (idx, receipts) in execution_outcome.receipts.into_iter().enumerate() { + let block_number = execution_outcome.first_block + idx as u64; + + // Increment block number for receipts static file writer + if let 
Some(writer) = receipts_static_writer.as_mut() { + writer.increment_block(block_number)?; + } + + let first_tx_index = bodies_cursor + .seek_exact(block_number)? + .map(|(_, indices)| indices.first_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; + + for (idx, receipt) in receipts.into_iter().enumerate() { + let receipt_idx = first_tx_index + idx as u64; + if let Some(receipt) = receipt { + if let Some(writer) = &mut receipts_static_writer { + writer.append_receipt(receipt_idx, &receipt)?; + } + + if let Some(cursor) = &mut receipts_cursor { + cursor.append(receipt_idx, receipt)?; + } + } + } + } + + Ok(()) + } + fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2089,7 +2159,7 @@ impl StateChangeWriter &self, block: BlockNumber, remove_receipts_from: StorageLocation, - ) -> ProviderResult { + ) -> ProviderResult> { let range = block + 1..=self.last_block_number()?; if range.is_empty() { @@ -2172,7 +2242,7 @@ impl StateChangeWriter }, |range, _| { self.tx - .cursor_read::()? + .cursor_read::>()? .walk_range(range)? 
.map(|r| r.map_err(Into::into)) .collect() @@ -2709,6 +2779,7 @@ impl BlockWrite for DatabaseProvider { type Block = BlockTy; + type Receipt = ReceiptTy; /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) @@ -2976,7 +3047,7 @@ impl BlockWrite fn append_blocks_with_state( &self, blocks: Vec>, - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()> { @@ -2998,11 +3069,7 @@ impl BlockWrite durations_recorder.record_relative(metrics::Action::InsertBlock); } - self.write_to_storage( - execution_outcome, - OriginalValuesKnown::No, - StorageLocation::Database, - )?; + self.write_state(execution_outcome, OriginalValuesKnown::No, StorageLocation::Database)?; durations_recorder.record_relative(metrics::Action::InsertState); // insert hashes and intermediate merkle nodes @@ -3050,7 +3117,7 @@ impl PruneCheckpointWriter for DatabaseProvider StatsReader for DatabaseProvider { +impl StatsReader for DatabaseProvider { fn count_entries(&self) -> ProviderResult { let db_entries = self.tx.entries::()?; let static_file_entries = match self.static_file_provider.count_entries::() { @@ -3122,73 +3189,3 @@ impl DBProvider for DatabaseProvider self.prune_modes_ref() } } - -impl StateWriter - for DatabaseProvider -{ - fn write_to_storage( - &self, - execution_outcome: ExecutionOutcome, - is_value_known: OriginalValuesKnown, - write_receipts_to: StorageLocation, - ) -> ProviderResult<()> { - let (plain_state, reverts) = - execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); - - self.write_state_reverts(reverts, execution_outcome.first_block)?; - self.write_state_changes(plain_state)?; - - let mut bodies_cursor = self.tx.cursor_read::()?; - - let has_receipts_pruning = self.prune_modes.has_receipts_pruning() || - execution_outcome.receipts.iter().flatten().any(|receipt| 
receipt.is_none()); - - // Prepare receipts cursor if we are going to write receipts to the database - // - // We are writing to database if requested or if there's any kind of receipt pruning - // configured - let mut receipts_cursor = (write_receipts_to.database() || has_receipts_pruning) - .then(|| self.tx.cursor_write::()) - .transpose()?; - - // Prepare receipts static writer if we are going to write receipts to static files - // - // We are writing to static files if requested and if there's no receipt pruning configured - let mut receipts_static_writer = (write_receipts_to.static_files() && - !has_receipts_pruning) - .then(|| { - self.static_file_provider - .get_writer(execution_outcome.first_block, StaticFileSegment::Receipts) - }) - .transpose()?; - - for (idx, receipts) in execution_outcome.receipts.into_iter().enumerate() { - let block_number = execution_outcome.first_block + idx as u64; - - // Increment block number for receipts static file writer - if let Some(writer) = receipts_static_writer.as_mut() { - writer.increment_block(block_number)?; - } - - let first_tx_index = bodies_cursor - .seek_exact(block_number)? 
- .map(|(_, indices)| indices.first_tx_num()) - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - - for (idx, receipt) in receipts.into_iter().enumerate() { - let receipt_idx = first_tx_index + idx as u64; - if let Some(receipt) = receipt { - if let Some(writer) = &mut receipts_static_writer { - writer.append_receipt(receipt_idx, &receipt)?; - } - - if let Some(cursor) = &mut receipts_cursor { - cursor.append(receipt_idx, receipt)?; - } - } - } - } - - Ok(()) - } -} diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 92c94952a34..6631b5b1b31 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -79,8 +79,8 @@ where Storage: ChainStorage, Primitives: FullNodePrimitives< SignedTx: Value, + Receipt: Value, BlockHeader = alloy_consensus::Header, - Receipt = reth_primitives::Receipt, >, >, { @@ -92,8 +92,8 @@ impl NodeTypesForProvider for T where Storage: ChainStorage, Primitives: FullNodePrimitives< SignedTx: Value, + Receipt: Value, BlockHeader = alloy_consensus::Header, - Receipt = reth_primitives::Receipt, >, > { @@ -541,7 +541,7 @@ impl ReceiptProvider for BlockchainProvider { } } -impl ReceiptProviderIdExt for BlockchainProvider { +impl ReceiptProviderIdExt for BlockchainProvider { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { BlockId::Hash(rpc_block_hash) => { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 2f32edcc294..e1916696193 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -38,8 +38,8 @@ use reth_primitives::{ DEFAULT_BLOCKS_PER_STATIC_FILE, }, transaction::recover_signers, - BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, - TransactionMeta, 
TransactionSignedNoHash, + BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, + StaticFileSegment, TransactionMeta, TransactionSignedNoHash, }; use reth_primitives_traits::SignedTransaction; use reth_stages_types::{PipelineTarget, StageId}; @@ -1692,7 +1692,7 @@ impl StatsReader for StaticFileProvider { .map(|block| block + 1) .unwrap_or_default() as usize), - tables::Receipts::NAME => Ok(self + tables::Receipts::::NAME => Ok(self .get_highest_static_file_tx(StaticFileSegment::Receipts) .map(|receipts| receipts + 1) .unwrap_or_default() as usize), diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 83954bde352..6f5335ec665 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -585,7 +585,10 @@ impl StaticFileProviderRW { /// empty blocks and this function wouldn't be called. /// /// Returns the current [`TxNumber`] as seen in the static file. - pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &Receipt) -> ProviderResult<()> { + pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &N::Receipt) -> ProviderResult<()> + where + N::Receipt: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index e7669b0eade..d12f240e616 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -95,6 +95,8 @@ pub trait StateReader: Send + Sync { pub trait BlockWriter: Send + Sync { /// The body this writer can write. type Block: reth_primitives_traits::Block; + /// The receipt type for [`ExecutionOutcome`]. + type Receipt: Send + Sync; /// Insert full block and make it canonical. Parent tx num and transition id is taken from /// parent block in database. 
@@ -154,7 +156,7 @@ pub trait BlockWriter: Send + Sync { fn append_blocks_with_state( &self, blocks: Vec>, - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()>; diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index a772204d0c1..d82e97d1db7 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -13,7 +13,7 @@ mod header_sync_gap; pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider}; mod state; -pub use state::{StateChangeWriter, StateWriter}; +pub use state::StateWriter; pub use reth_chainspec::ChainSpecProvider; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 2e46e285070..2c4ee2cfa8d 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -9,20 +9,20 @@ use revm::db::{ use super::StorageLocation; -/// A helper trait for [`ExecutionOutcome`] to write state and receipts to storage. +/// A trait specifically for writing state changes or reverts pub trait StateWriter { - /// Write the data and receipts to the database or static files if `static_file_producer` is + /// Receipt type included into [`ExecutionOutcome`]. + type Receipt; + + /// Write the state and receipts to the database or static files if `static_file_producer` is /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. - fn write_to_storage( + fn write_state( &self, - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, is_value_known: OriginalValuesKnown, write_receipts_to: StorageLocation, ) -> ProviderResult<()>; -} -/// A trait specifically for writing state changes or reverts -pub trait StateChangeWriter { /// Write state reverts to the database. 
/// /// NOTE: Reverts will delete all wiped storage from plain state. @@ -52,5 +52,5 @@ pub trait StateChangeWriter { &self, block: BlockNumber, remove_receipts_from: StorageLocation, - ) -> ProviderResult; + ) -> ProviderResult>; } diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index c0eeb64b8a2..02e912050d5 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -1,12 +1,14 @@ use crate::{ providers::{StaticFileProvider, StaticFileWriter as SfWriter}, - BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, - StaticFileProviderFactory, StorageLocation, TrieWriter, + BlockExecutionWriter, BlockWriter, HistoryWriter, StateWriter, StaticFileProviderFactory, + StorageLocation, TrieWriter, }; +use alloy_consensus::BlockHeader; use reth_chain_state::ExecutedBlock; use reth_db::transaction::{DbTx, DbTxMut}; use reth_errors::ProviderResult; -use reth_primitives::StaticFileSegment; +use reth_primitives::{NodePrimitives, StaticFileSegment}; +use reth_primitives_traits::SignedTransaction; use reth_storage_api::{DBProvider, StageCheckpointWriter, TransactionsProviderExt}; use reth_storage_errors::writer::UnifiedStorageWriterError; use revm::db::OriginalValuesKnown; @@ -119,9 +121,8 @@ impl UnifiedStorageWriter<'_, (), ()> { impl UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider> where ProviderDB: DBProvider - + BlockWriter + + BlockWriter + TransactionsProviderExt - + StateChangeWriter + TrieWriter + StateWriter + HistoryWriter @@ -131,7 +132,11 @@ where + StaticFileProviderFactory, { /// Writes executed blocks and receipts to storage. 
- pub fn save_blocks(&self, blocks: Vec) -> ProviderResult<()> { + pub fn save_blocks(&self, blocks: Vec>) -> ProviderResult<()> + where + N: NodePrimitives, + ProviderDB: BlockWriter + StateWriter, + { if blocks.is_empty() { debug!(target: "provider::storage_writer", "Attempted to write empty block range"); return Ok(()) @@ -139,9 +144,10 @@ where // NOTE: checked non-empty above let first_block = blocks.first().unwrap().block(); + let last_block = blocks.last().unwrap().block(); - let first_number = first_block.number; - let last_block_number = last_block.number; + let first_number = first_block.number(); + let last_block_number = last_block.number(); debug!(target: "provider::storage_writer", block_count = %blocks.len(), "Writing blocks and execution data to storage"); @@ -162,7 +168,7 @@ where // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. - self.database().write_to_storage( + self.database().write_state( Arc::unwrap_or_clone(execution_output), OriginalValuesKnown::No, StorageLocation::StaticFiles, @@ -490,7 +496,7 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); provider - .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); // Check plain storage state @@ -590,7 +596,7 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new()); provider - .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); assert_eq!( @@ -657,7 +663,7 @@ mod tests { let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); provider - .write_to_storage(outcome, OriginalValuesKnown::Yes, 
StorageLocation::Database) + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -805,7 +811,7 @@ mod tests { let outcome: ExecutionOutcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); provider - .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider @@ -970,7 +976,7 @@ mod tests { let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); provider - .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -1017,7 +1023,7 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); provider - .write_to_storage(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index eb5b6575b9f..a9300efa9b0 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -5,8 +5,7 @@ use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; use reth_primitives::Account; use reth_provider::{ - providers::ConsistentDbView, test_utils::create_test_provider_factory, StateChangeWriter, - TrieWriter, + providers::ConsistentDbView, test_utils::create_test_provider_factory, StateWriter, TrieWriter, }; 
use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot, From 7a6053078c5fcff1c30d7142c5ef1e03ab720f0c Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 27 Nov 2024 19:54:58 +0400 Subject: [PATCH 727/970] fix: small db provider fixes (#12908) --- .../src/providers/database/provider.rs | 21 +++++++++++-------- .../src/providers/static_file/manager.rs | 13 +++++++----- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 05f2501c102..da584744530 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -21,6 +21,7 @@ use crate::{ }; use alloy_consensus::Header; use alloy_eips::{ + eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, }; @@ -49,7 +50,6 @@ use reth_node_types::{BlockTy, BodyTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives::{ Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, - TransactionSignedNoHash, }; use reth_primitives_traits::{Block as _, BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; @@ -1327,7 +1327,7 @@ impl TransactionsProviderExt tx_range, |static_file, range, _| static_file.transaction_hashes_by_range(range), |tx_range, _| { - let mut tx_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; let tx_range_size = tx_range.clone().count(); let tx_walker = tx_cursor.walk_range(tx_range)?; @@ -1336,12 +1336,15 @@ impl TransactionsProviderExt let mut transaction_count = 0; #[inline] - fn calculate_hash( - entry: Result<(TxNumber, TransactionSignedNoHash), DatabaseError>, + fn calculate_hash( + entry: Result<(TxNumber, T), DatabaseError>, 
rlp_buf: &mut Vec, - ) -> Result<(B256, TxNumber), Box> { + ) -> Result<(B256, TxNumber), Box> + where + T: Encodable2718, + { let (tx_id, tx) = entry.map_err(|e| Box::new(e.into()))?; - tx.transaction.eip2718_encode(&tx.signature, rlp_buf); + tx.encode_2718(rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } @@ -2904,7 +2907,7 @@ impl BlockWrite .then(|| self.tx.cursor_write::>>()) .transpose()?; - // Get id for the next tx_num of zero if there are no transactions. + // Get id for the next tx_num or zero if there are no transactions. let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); for (block_number, body) in &bodies { @@ -2992,7 +2995,7 @@ impl BlockWrite .1 .last_tx_num(); - if unwind_tx_from < unwind_tx_to { + if unwind_tx_from <= unwind_tx_to { for (hash, _) in self.transaction_hashes_by_range(unwind_tx_from..(unwind_tx_to + 1))? { self.tx.delete::(hash, None)?; } @@ -3023,7 +3026,7 @@ impl BlockWrite self.remove::(unwind_tx_from..)?; if remove_transactions_from.database() { - self.remove::(unwind_tx_from..)?; + self.remove::>>(unwind_tx_from..)?; } if remove_transactions_from.static_files() { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e1916696193..3b49f8d401f 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1422,7 +1422,7 @@ impl> TransactionsProvide chunk_range, |cursor, number| { Ok(cursor - .get_one::>(number.into())? + .get_one::>(number.into())? .map(|transaction| { rlp_buf.clear(); let _ = channel_tx @@ -1708,11 +1708,14 @@ impl StatsReader for StaticFileProvider { /// Calculates the tx hash for the given transaction and its id. 
#[inline] -fn calculate_hash( - entry: (TxNumber, TransactionSignedNoHash), +fn calculate_hash( + entry: (TxNumber, T), rlp_buf: &mut Vec, -) -> Result<(B256, TxNumber), Box> { +) -> Result<(B256, TxNumber), Box> +where + T: Encodable2718, +{ let (tx_id, tx) = entry; - tx.transaction.eip2718_encode(&tx.signature, rlp_buf); + tx.encode_2718(rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } From 26bfe7c0375cd88db061bb3847e1bb1236058565 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 27 Nov 2024 17:18:54 +0100 Subject: [PATCH 728/970] feat: `SparseStateTrie::reveal_multiproof` (#12909) --- crates/trie/sparse/src/state.rs | 60 +++++++++++++++++++++++++++++---- 1 file changed, 54 insertions(+), 6 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index ec88abfd19e..f17a944df1a 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -8,7 +8,7 @@ use alloy_primitives::{ use alloy_rlp::Decodable; use reth_trie_common::{ updates::{StorageTrieUpdates, TrieUpdates}, - Nibbles, TrieNode, + MultiProof, Nibbles, TrieNode, }; /// Sparse state trie representing lazy-loaded Ethereum state trie. @@ -60,7 +60,7 @@ impl SparseStateTrie { let mut proof = proof.into_iter().peekable(); - let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; + let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. let trie = self.state.reveal_root(root_node, self.retain_updates)?; @@ -91,7 +91,7 @@ impl SparseStateTrie { let mut proof = proof.into_iter().peekable(); - let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; + let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. let trie = self @@ -112,8 +112,56 @@ impl SparseStateTrie { Ok(()) } + /// Reveal unknown trie paths from multiproof and the list of included accounts and slots. 
+ /// NOTE: This method does not extensively validate the proof. + pub fn reveal_multiproof( + &mut self, + targets: HashMap>, + multiproof: MultiProof, + ) -> SparseStateTrieResult<()> { + let account_subtree = multiproof.account_subtree.into_nodes_sorted(); + let mut account_nodes = account_subtree.into_iter().peekable(); + + if let Some(root_node) = self.validate_root_node(&mut account_nodes)? { + // Reveal root node if it wasn't already. + let trie = self.state.reveal_root(root_node, self.retain_updates)?; + + // Reveal the remaining proof nodes. + for (path, bytes) in account_nodes { + let node = TrieNode::decode(&mut &bytes[..])?; + trie.reveal_node(path, node)?; + } + } + + for (account, storage_subtree) in multiproof.storages { + let storage_subtree = storage_subtree.subtree.into_nodes_sorted(); + let mut storage_nodes = storage_subtree.into_iter().peekable(); + + if let Some(root_node) = self.validate_root_node(&mut storage_nodes)? { + // Reveal root node if it wasn't already. + let trie = self + .storages + .entry(account) + .or_default() + .reveal_root(root_node, self.retain_updates)?; + + // Reveal the remaining proof nodes. + for (path, bytes) in storage_nodes { + let node = TrieNode::decode(&mut &bytes[..])?; + trie.reveal_node(path, node)?; + } + } + } + + for (account, slots) in targets { + self.revealed.entry(account).or_default().extend(slots); + } + + Ok(()) + } + /// Validates the root node of the proof and returns it if it exists and is valid. - fn validate_proof>( + fn validate_root_node>( &self, proof: &mut Peekable, ) -> SparseStateTrieResult> { @@ -232,7 +280,7 @@ mod tests { let sparse = SparseStateTrie::default(); let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; assert_matches!( - sparse.validate_proof(&mut proof.into_iter().peekable()), + sparse.validate_root_node(&mut proof.into_iter().peekable()), Err(SparseStateTrieError::InvalidRootNode { .. 
}) ); } @@ -245,7 +293,7 @@ mod tests { (Nibbles::from_nibbles([0x1]), Bytes::new()), ]; assert_matches!( - sparse.validate_proof(&mut proof.into_iter().peekable()), + sparse.validate_root_node(&mut proof.into_iter().peekable()), Err(SparseStateTrieError::InvalidRootNode { .. }) ); } From f9ad3f8cca2c50b3e56525d9c18264cbfa0ef7a9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 27 Nov 2024 17:22:23 +0100 Subject: [PATCH 729/970] chore: add fs-util::open (#12911) --- Cargo.lock | 1 + crates/cli/commands/src/init_state/mod.rs | 5 ++--- crates/fs-util/src/lib.rs | 6 ++++++ crates/optimism/cli/Cargo.toml | 1 + crates/optimism/cli/src/commands/init_state.rs | 4 ++-- 5 files changed, 12 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 01e53bb4632..33d60eac3f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8237,6 +8237,7 @@ dependencies = [ "reth-downloaders", "reth-errors", "reth-execution-types", + "reth-fs-util", "reth-network-p2p", "reth-node-builder", "reth-node-core", diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 2aa2483fdda..bdade252a66 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -11,7 +11,7 @@ use reth_provider::{ BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; -use std::{fs::File, io::BufReader, path::PathBuf, str::FromStr}; +use std::{io::BufReader, path::PathBuf, str::FromStr}; use tracing::info; pub mod without_evm; @@ -115,8 +115,7 @@ impl> InitStateC info!(target: "reth::cli", "Initiating state dump"); - let file = File::open(self.state)?; - let reader = BufReader::new(file); + let reader = BufReader::new(reth_fs_util::open(self.state)?); let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index d242ecc98e2..c1aa4900e03 100644 --- a/crates/fs-util/src/lib.rs +++ 
b/crates/fs-util/src/lib.rs @@ -210,6 +210,12 @@ impl FsPathError { } } +/// Wrapper for [`File::open`]. +pub fn open(path: impl AsRef) -> Result { + let path = path.as_ref(); + File::open(path).map_err(|err| FsPathError::open(err, path)) +} + /// Wrapper for `std::fs::read_to_string` pub fn read_to_string(path: impl AsRef) -> Result { let path = path.as_ref(); diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index d090075927a..b61a4628f4d 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -26,6 +26,7 @@ reth-execution-types.workspace = true reth-node-core.workspace = true reth-optimism-node.workspace = true reth-primitives.workspace = true +reth-fs-util.workspace = true # so jemalloc metrics can be included reth-node-metrics.workspace = true diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 6a36f492c50..7bbfc3bb820 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -11,7 +11,7 @@ use reth_provider::{ BlockNumReader, ChainSpecProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; -use std::{fs::File, io::BufReader}; +use std::io::BufReader; use tracing::info; /// Initializes the database with the genesis block. 
@@ -70,7 +70,7 @@ impl> InitStateCommandOp { info!(target: "reth::cli", "Initiating state dump"); - let reader = BufReader::new(File::open(self.init_state.state)?); + let reader = BufReader::new(reth_fs_util::open(self.init_state.state)?); let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; provider_rw.commit()?; From 2705e3a7dd50747f2bb2cc042df0e034fa7a98c6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 27 Nov 2024 17:34:16 +0100 Subject: [PATCH 730/970] feat: add helpers for opcode tracing (#12899) --- crates/rpc/rpc-testing-util/src/trace.rs | 78 ++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index b963fa69d8b..ee3fce68d3b 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -5,6 +5,7 @@ use alloy_primitives::{map::HashSet, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, Index}; use alloy_rpc_types_trace::{ filter::TraceFilter, + opcode::BlockOpcodeGas, parity::{LocalizedTransactionTrace, TraceResults, TraceType}, tracerequest::TraceCallRequest, }; @@ -23,6 +24,9 @@ type RawTransactionTraceResult<'a> = /// A result type for the `trace_block` method that also captures the requested block. pub type TraceBlockResult = Result<(Vec, BlockId), (RpcError, BlockId)>; +/// A result type for the `trace_blockOpcodeGas` method that also captures the requested block. +pub type TraceBlockOpCodeGasResult = Result<(BlockOpcodeGas, BlockId), (RpcError, BlockId)>; + /// Type alias representing the result of replaying a transaction. pub type ReplayTransactionResult = Result<(TraceResults, TxHash), (RpcError, TxHash)>; @@ -65,6 +69,18 @@ pub trait TraceApiExt { I: IntoIterator, B: Into; + /// Returns a new stream that yields the traces the opcodes for the given blocks. + /// + /// See also [`StreamExt::buffered`]. 
+ fn trace_block_opcode_gas_unordered( + &self, + params: I, + n: usize, + ) -> TraceBlockOpcodeGasStream<'_> + where + I: IntoIterator, + B: Into; + /// Returns a new stream that replays the transactions for the given transaction hashes. /// /// This returns all results in order. @@ -269,6 +285,26 @@ impl TraceApiExt for T { TraceBlockStream { stream: Box::pin(stream) } } + fn trace_block_opcode_gas_unordered( + &self, + params: I, + n: usize, + ) -> TraceBlockOpcodeGasStream<'_> + where + I: IntoIterator, + B: Into, + { + let blocks = params.into_iter().map(|b| b.into()).collect::>(); + let stream = futures::stream::iter(blocks.into_iter().map(move |block| async move { + match self.trace_block_opcode_gas(block).await { + Ok(result) => Ok((result.unwrap(), block)), + Err(err) => Err((err, block)), + } + })) + .buffered(n); + TraceBlockOpcodeGasStream { stream: Box::pin(stream) } + } + fn replay_transactions( &self, tx_hashes: I, @@ -406,6 +442,38 @@ impl std::fmt::Debug for TraceBlockStream<'_> { } } +/// A stream that yields the opcodes for the requested blocks. +#[must_use = "streams do nothing unless polled"] +pub struct TraceBlockOpcodeGasStream<'a> { + stream: Pin + 'a>>, +} + +impl TraceBlockOpcodeGasStream<'_> { + /// Returns the next error result of the stream. + pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { + loop { + match self.next().await? { + Ok(_) => continue, + Err(err) => return Some(err), + } + } + } +} + +impl Stream for TraceBlockOpcodeGasStream<'_> { + type Item = TraceBlockOpCodeGasResult; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.stream.as_mut().poll_next(cx) + } +} + +impl std::fmt::Debug for TraceBlockOpcodeGasStream<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TraceBlockOpcodeGasStream").finish_non_exhaustive() + } +} + /// A utility to compare RPC responses from two different clients. 
/// /// The `RpcComparer` is designed to perform comparisons between two RPC clients. @@ -670,4 +738,14 @@ mod tests { println!("Total successes: {successes}"); println!("Total failures: {failures}"); } + + #[tokio::test] + #[ignore] + async fn block_opcode_gas_stream() { + let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap(); + let block = vec![BlockNumberOrTag::Latest]; + let mut stream = client.trace_block_opcode_gas_unordered(block, 2); + assert_is_stream(&stream); + let _opcodes = stream.next().await.unwrap(); + } } From 2700db1258070919d859b6c29b7afb9c229772c9 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 27 Nov 2024 18:59:58 +0100 Subject: [PATCH 731/970] feat(trie): `SparseStateTrie::remove_storage_leaf` (#12912) --- crates/trie/sparse/src/state.rs | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index f17a944df1a..cd677ad5cd6 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -198,16 +198,6 @@ impl SparseStateTrie { Ok(()) } - /// Returns sparse trie root if the trie has been revealed. - pub fn root(&mut self) -> Option { - self.state.root() - } - - /// Calculates the hashes of the nodes below the provided level. - pub fn calculate_below_level(&mut self, level: usize) { - self.state.calculate_below_level(level); - } - /// Update the leaf node of a storage trie at the provided address. pub fn update_storage_leaf( &mut self, @@ -219,6 +209,16 @@ impl SparseStateTrie { Ok(()) } + /// Update the leaf node of a storage trie at the provided address. + pub fn remove_storage_leaf( + &mut self, + address: B256, + slot: &Nibbles, + ) -> SparseStateTrieResult<()> { + self.storages.entry(address).or_default().remove_leaf(slot)?; + Ok(()) + } + /// Wipe the storage trie at the provided address. 
pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> { let Some(trie) = self.storages.get_mut(&address) else { return Ok(()) }; @@ -226,11 +226,21 @@ impl SparseStateTrie { trie.wipe().map_err(Into::into) } + /// Calculates the hashes of the nodes below the provided level. + pub fn calculate_below_level(&mut self, level: usize) { + self.state.calculate_below_level(level); + } + /// Returns storage sparse trie root if the trie has been revealed. pub fn storage_root(&mut self, account: B256) -> Option { self.storages.get_mut(&account).and_then(|trie| trie.root()) } + /// Returns sparse trie root if the trie has been revealed. + pub fn root(&mut self) -> Option { + self.state.root() + } + /// Returns [`TrieUpdates`] by taking the updates from the revealed sparse tries. /// /// Returns `None` if the accounts trie is not revealed. From 00c5b69af46f4a781b1b15afd7587592443e91a0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 27 Nov 2024 19:23:57 +0100 Subject: [PATCH 732/970] feat: integrate node primitives in engine handler (#12914) --- crates/engine/local/src/service.rs | 2 +- crates/engine/service/src/service.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 28 ++++++++++++++++++++-------- crates/primitives/src/lib.rs | 1 + 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 3575bc133c6..e2b5e056d02 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -91,7 +91,7 @@ where let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::spawn_new( + let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( blockchain_db.clone(), executor_factory, consensus, diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 49233439e0a..44d145c9c0b 100644 --- a/crates/engine/service/src/service.rs +++ 
b/crates/engine/service/src/service.rs @@ -92,7 +92,7 @@ where let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::spawn_new( + let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( blockchain_db, executor_factory, consensus, diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index a2680563925..e9e86d3b09b 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -36,7 +36,9 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Block, GotExpected, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{ + Block, GotExpected, NodePrimitives, SealedBlock, SealedBlockWithSenders, SealedHeader, +}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, ProviderError, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, @@ -51,6 +53,7 @@ use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, fmt::Debug, + marker::PhantomData, ops::Bound, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, @@ -469,7 +472,7 @@ pub enum TreeAction { /// /// This type is responsible for processing engine API requests, maintaining the canonical state and /// emitting events. 
-pub struct EngineApiTreeHandler { +pub struct EngineApiTreeHandler { provider: P, executor_provider: E, consensus: Arc, @@ -509,10 +512,12 @@ pub struct EngineApiTreeHandler { invalid_block_hook: Box, /// The engine API variant of this handler engine_kind: EngineApiKind, + /// Captures the types the engine operates on + _primtives: PhantomData, } -impl std::fmt::Debug - for EngineApiTreeHandler +impl std::fmt::Debug + for EngineApiTreeHandler { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EngineApiTreeHandler") @@ -535,8 +540,9 @@ impl std::fmt::Debug } } -impl EngineApiTreeHandler +impl EngineApiTreeHandler where + N: NodePrimitives, P: DatabaseProviderFactory + BlockReader + StateProviderFactory @@ -584,6 +590,7 @@ where incoming_tx, invalid_block_hook: Box::new(NoopInvalidBlockHook), engine_kind, + _primtives: Default::default(), } } @@ -2624,7 +2631,7 @@ mod tests { use reth_engine_primitives::ForkchoiceStatus; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::test_utils::MockExecutorProvider; - use reth_primitives::BlockExt; + use reth_primitives::{BlockExt, EthPrimitives}; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::{block_to_payload_v1, payload::block_to_payload_v3}; use reth_trie::updates::TrieUpdates; @@ -2689,8 +2696,13 @@ mod tests { } struct TestHarness { - tree: - EngineApiTreeHandler, + tree: EngineApiTreeHandler< + EthPrimitives, + MockEthProvider, + MockExecutorProvider, + EthEngineTypes, + ChainSpec, + >, to_tree_tx: Sender>>, from_tree_rx: UnboundedReceiver, blocks: Vec, diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 52d573f2b3b..224e025f39d 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -82,6 +82,7 @@ pub mod serde_bincode_compat { /// Temp helper struct for integrating [`NodePrimitives`]. 
#[derive(Debug, Clone, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[non_exhaustive] pub struct EthPrimitives; impl reth_primitives_traits::NodePrimitives for EthPrimitives { From 47245642ca481c58da60e698381981c1edcbb264 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 27 Nov 2024 20:18:31 +0100 Subject: [PATCH 733/970] feat(trie): `SparseStateTrie::storage_trie_mut` (#12913) --- crates/trie/sparse/src/state.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index cd677ad5cd6..551a47ce2bb 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,6 +1,4 @@ -use std::iter::Peekable; - -use crate::{SparseStateTrieError, SparseStateTrieResult, SparseTrie}; +use crate::{RevealedSparseTrie, SparseStateTrieError, SparseStateTrieResult, SparseTrie}; use alloy_primitives::{ map::{HashMap, HashSet}, Bytes, B256, @@ -10,6 +8,7 @@ use reth_trie_common::{ updates::{StorageTrieUpdates, TrieUpdates}, MultiProof, Nibbles, TrieNode, }; +use std::iter::Peekable; /// Sparse state trie representing lazy-loaded Ethereum state trie. #[derive(Default, Debug)] @@ -47,6 +46,11 @@ impl SparseStateTrie { self.revealed.get(account).is_some_and(|slots| slots.contains(slot)) } + /// Returned mutable reference to storage sparse trie if it was revealed. + pub fn storage_trie_mut(&mut self, account: &B256) -> Option<&mut RevealedSparseTrie> { + self.storages.get_mut(account).and_then(|e| e.as_revealed_mut()) + } + /// Reveal unknown trie paths from provided leaf path and its proof for the account. /// NOTE: This method does not extensively validate the proof. 
pub fn reveal_account( From 8d70e8921b2f932e544f3cfe1cf662df9ef197ec Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Thu, 28 Nov 2024 02:59:50 +0700 Subject: [PATCH 734/970] chore: pass generic header to validate_header_base_fee (#12921) --- crates/consensus/common/src/validation.rs | 10 +++++----- crates/ethereum/consensus/src/lib.rs | 2 +- crates/optimism/consensus/src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 1709e3a14f4..21062115b55 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,6 +1,6 @@ //! Collection of methods for block validation. -use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader, Header}; use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; @@ -23,12 +23,12 @@ pub const fn validate_header_gas(header: &Header) -> Result<(), ConsensusError> /// Ensure the EIP-1559 base fee is set if the London hardfork is active. 
#[inline] -pub fn validate_header_base_fee( - header: &Header, +pub fn validate_header_base_fee( + header: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number) && - header.base_fee_per_gas.is_none() + if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number()) && + header.base_fee_per_gas().is_none() { return Err(ConsensusError::BaseFeeMissing) } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index ffabe5b1952..1bb7b1422c3 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -119,7 +119,7 @@ impl HeaderVa { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validate_header_gas(header)?; - validate_header_base_fee(header, &self.chain_spec)?; + validate_header_base_fee(header.header(), &self.chain_spec)?; // EIP-4895: Beacon chain push withdrawals as operations if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) && diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index e8b7959dd27..58b93b53dfb 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -93,7 +93,7 @@ impl Consensus for OpBeaconConsensus { impl HeaderValidator for OpBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validate_header_gas(header)?; - validate_header_base_fee(header, &self.chain_spec) + validate_header_base_fee(header.header(), &self.chain_spec) } fn validate_header_against_parent( From 2179301590b3dec0598910f553042ac078cfe1ac Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 27 Nov 2024 23:35:23 +0100 Subject: [PATCH 735/970] feat: add functions for env creation (#12928) --- .../engine/invalid-block-hooks/src/witness.rs | 10 +++----- crates/engine/util/src/reorg.rs | 8 ++----- crates/ethereum/evm/src/execute.rs | 7 
++---- crates/ethereum/evm/src/lib.rs | 16 +++---------- crates/evm/src/lib.rs | 24 ++++++++++++++++--- crates/optimism/evm/src/execute.rs | 9 ++----- crates/optimism/evm/src/lib.rs | 15 ++++-------- .../custom-beacon-withdrawals/src/main.rs | 10 ++------ 8 files changed, 39 insertions(+), 60 deletions(-) diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index a9cafbdb12e..98ee8dd2d13 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -12,10 +12,8 @@ use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; use reth_primitives_traits::SignedTransaction; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ - database::StateProviderDatabase, - db::states::bundle_state::BundleRetention, - primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg}, - DatabaseCommit, StateBuilder, + database::StateProviderDatabase, db::states::bundle_state::BundleRetention, + primitives::EnvWithHandlerCfg, DatabaseCommit, StateBuilder, }; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; @@ -76,9 +74,7 @@ where .build(); // Setup environment for the execution. 
- let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, block.header(), U256::MAX); + let (cfg, block_env) = self.evm_config.cfg_and_block_env(block.header(), U256::MAX); // Setup EVM let mut evm = self.evm_config.evm_with_env( diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 46a5e08a738..20e2b21446a 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -27,9 +27,7 @@ use reth_revm::{ }; use reth_rpc_types_compat::engine::payload::block_to_payload; use reth_trie::HashedPostState; -use revm_primitives::{ - calc_excess_blob_gas, BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, -}; +use revm_primitives::{calc_excess_blob_gas, EVMError, EnvWithHandlerCfg}; use std::{ collections::VecDeque, future::Future, @@ -298,9 +296,7 @@ where .build(); // Configure environments - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, &reorg_target.header, U256::MAX); + let (cfg, block_env) = evm_config.cfg_and_block_env(&reorg_target.header, U256::MAX); let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); let mut evm = evm_config.evm_with_env(&mut state, env); diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index f04ff46d9e5..8642df89698 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -24,7 +24,7 @@ use reth_primitives::{BlockWithSenders, Receipt}; use reth_revm::db::State; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, + EnvWithHandlerCfg, ResultAndState, U256, }; /// Factory for [`EthExecutionStrategy`]. 
@@ -117,10 +117,7 @@ where header: &alloy_consensus::Header, total_difficulty: U256, ) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } } diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 206230cd00e..8042562357f 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -207,17 +207,11 @@ mod tests { primitives::{BlockEnv, CfgEnv, SpecId}, JournaledState, }; - use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; + use revm_primitives::{EnvWithHandlerCfg, HandlerCfg}; use std::collections::HashSet; #[test] fn test_fill_cfg_and_block_env() { - // Create a new configuration environment - let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - - // Create a default block environment - let mut block_env = BlockEnv::default(); - // Create a default header let header = Header::default(); @@ -236,12 +230,8 @@ mod tests { // Use the `EthEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, // Header, and total difficulty - EthEvmConfig::new(Arc::new(chain_spec.clone())).fill_cfg_and_block_env( - &mut cfg_env, - &mut block_env, - &header, - total_difficulty, - ); + let (cfg_env, _) = EthEvmConfig::new(Arc::new(chain_spec.clone())) + .cfg_and_block_env(&header, total_difficulty); // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the // ChainSpec diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index f01701d5989..ae884bdd5f8 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -17,6 +17,7 @@ extern crate alloc; +use 
crate::builder::RethEvmBuilder; use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; use reth_primitives::TransactionSigned; @@ -24,8 +25,6 @@ use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv}; -use crate::builder::RethEvmBuilder; - pub mod builder; pub mod either; pub mod execute; @@ -139,9 +138,16 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { data: Bytes, ); + /// Returns a [`CfgEnvWithHandlerCfg`] for the given header. + fn cfg_env(&self, header: &Self::Header, total_difficulty: U256) -> CfgEnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + self.fill_cfg_env(&mut cfg, header, total_difficulty); + cfg + } + /// Fill [`CfgEnvWithHandlerCfg`] fields according to the chain spec and given header. /// - /// This must set the corresponding spec id in the handler cfg, based on timestamp or total + /// This __must__ set the corresponding spec id in the handler cfg, based on timestamp or total /// difficulty fn fill_cfg_env( &self, @@ -171,6 +177,18 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { } } + /// Creates a new [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the given header. + fn cfg_and_block_env( + &self, + header: &Self::Header, + total_difficulty: U256, + ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + (cfg, block_env) + } + /// Convenience function to call both [`fill_cfg_env`](ConfigureEvmEnv::fill_cfg_env) and /// [`ConfigureEvmEnv::fill_block_env`]. 
/// diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 042b8e29193..1c93d2b71d0 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -22,9 +22,7 @@ use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OpHardfork; use reth_primitives::{BlockWithSenders, Receipt, TxType}; use reth_revm::{Database, State}; -use revm_primitives::{ - db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, -}; +use revm_primitives::{db::DatabaseCommit, EnvWithHandlerCfg, ResultAndState, U256}; use tracing::trace; /// Factory for [`OpExecutionStrategy`]. @@ -106,10 +104,7 @@ where /// /// Caution: this does not initialize the tx environment. fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 31074627510..176864de6dc 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -213,14 +213,13 @@ mod tests { use reth_optimism_chainspec::BASE_MAINNET; use reth_optimism_primitives::OpPrimitives; use reth_primitives::{Account, Log, Receipt, Receipts, SealedBlockWithSenders, TxType}; - use reth_revm::{ db::{BundleState, CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, JournaledState, }; - use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; + use revm_primitives::{EnvWithHandlerCfg, HandlerCfg}; use std::{ collections::{HashMap, HashSet}, sync::Arc, @@ 
-232,12 +231,6 @@ mod tests { #[test] fn test_fill_cfg_and_block_env() { - // Create a new configuration environment - let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - - // Create a default block environment - let mut block_env = BlockEnv::default(); - // Create a default header let header = Header::default(); @@ -254,10 +247,10 @@ mod tests { // Define the total difficulty as zero (default) let total_difficulty = U256::ZERO; - // Use the `OpEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, + // Use the `OpEvmConfig` to create the `cfg_env` and `block_env` based on the ChainSpec, // Header, and total difficulty - OpEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) - .fill_cfg_and_block_env(&mut cfg_env, &mut block_env, &header, total_difficulty); + let (cfg_env, _) = OpEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) + .cfg_and_block_env(&header, total_difficulty); // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the // ChainSpec diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 47adc64c004..ccba73afbc1 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -15,10 +15,7 @@ use reth::{ providers::ProviderError, revm::{ interpreter::Host, - primitives::{ - address, Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, - TransactTo, TxEnv, U256, - }, + primitives::{address, Address, Bytes, Env, EnvWithHandlerCfg, TransactTo, TxEnv, U256}, Database, DatabaseCommit, Evm, State, }, }; @@ -133,10 +130,7 @@ where header: &alloy_consensus::Header, total_difficulty: U256, ) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, 
total_difficulty); - + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } } From 0df02ca2b9bd6412f7a12413d342424a4b05e3bf Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Thu, 28 Nov 2024 14:49:27 +0700 Subject: [PATCH 736/970] chore: pass generic header and body to validate_shanghai_withdrawals (#12923) --- Cargo.lock | 1 + crates/consensus/common/Cargo.toml | 1 + crates/consensus/common/src/validation.rs | 11 ++++++----- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 33d60eac3f0..b17d4b44d51 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6827,6 +6827,7 @@ dependencies = [ "reth-chainspec", "reth-consensus", "reth-primitives", + "reth-primitives-traits", "reth-storage-api", "revm-primitives", ] diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index c83312577e9..272adbb9297 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -19,6 +19,7 @@ reth-consensus.workspace = true # ethereum alloy-primitives.workspace = true revm-primitives.workspace = true +reth-primitives-traits.workspace = true alloy-consensus.workspace = true alloy-eips.workspace = true diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 21062115b55..cbc4f860708 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -41,15 +41,16 @@ pub fn validate_header_base_fee( /// /// [EIP-4895]: https://eips.ethereum.org/EIPS/eip-4895 #[inline] -pub fn validate_shanghai_withdrawals(block: &SealedBlock) -> Result<(), ConsensusError> { - let withdrawals = - block.body.withdrawals.as_ref().ok_or(ConsensusError::BodyWithdrawalsMissing)?; +pub fn validate_shanghai_withdrawals( + block: &SealedBlock, +) -> Result<(), ConsensusError> { + let withdrawals 
= block.body.withdrawals().ok_or(ConsensusError::BodyWithdrawalsMissing)?; let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); let header_withdrawals_root = - block.withdrawals_root.as_ref().ok_or(ConsensusError::WithdrawalsRootMissing)?; + block.withdrawals_root().ok_or(ConsensusError::WithdrawalsRootMissing)?; if withdrawals_root != *header_withdrawals_root { return Err(ConsensusError::BodyWithdrawalsRootDiff( - GotExpected { got: withdrawals_root, expected: *header_withdrawals_root }.into(), + GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), )); } Ok(()) From bb0bd779161b92d6284bbcbe8e67655cd139a89c Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Thu, 28 Nov 2024 14:53:26 +0700 Subject: [PATCH 737/970] chore: make generic data primitives TreeState (#12924) --- crates/engine/tree/src/tree/mod.rs | 41 +++++++++++++++--------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index e9e86d3b09b..a717ea295b0 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -37,7 +37,8 @@ use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ - Block, GotExpected, NodePrimitives, SealedBlock, SealedBlockWithSenders, SealedHeader, + Block, EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockWithSenders, + SealedHeader, }; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, @@ -90,17 +91,17 @@ mod root; /// - This only stores blocks that are connected to the canonical chain. /// - All executed blocks are valid and have been executed. 
#[derive(Debug, Default)] -pub struct TreeState { +pub struct TreeState { /// __All__ unique executed blocks by block hash that are connected to the canonical chain. /// /// This includes blocks of all forks. - blocks_by_hash: HashMap, + blocks_by_hash: HashMap>, /// Executed blocks grouped by their respective block number. /// /// This maps unique block number to all known blocks for that height. /// /// Note: there can be multiple blocks at the same height due to forks. - blocks_by_number: BTreeMap>, + blocks_by_number: BTreeMap>>, /// Map of any parent block hash to its children. parent_to_child: HashMap>, /// Map of hash to trie updates for canonical blocks that are persisted but not finalized. @@ -111,7 +112,7 @@ pub struct TreeState { current_canonical_head: BlockNumHash, } -impl TreeState { +impl TreeState { /// Returns a new, empty tree state that points to the given canonical head. fn new(current_canonical_head: BlockNumHash) -> Self { Self { @@ -129,12 +130,12 @@ impl TreeState { } /// Returns the [`ExecutedBlock`] by hash. - fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { + fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { self.blocks_by_hash.get(&hash) } /// Returns the block by hash. - fn block_by_hash(&self, hash: B256) -> Option> { + fn block_by_hash(&self, hash: B256) -> Option>> { self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) } @@ -142,12 +143,12 @@ impl TreeState { /// newest to oldest. And the parent hash of the oldest block that is missing from the buffer. /// /// Returns `None` if the block for the given hash is not found. 
- fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec)> { + fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec>)> { let block = self.blocks_by_hash.get(&hash).cloned()?; - let mut parent_hash = block.block().parent_hash; + let mut parent_hash = block.block().parent_hash(); let mut blocks = vec![block]; while let Some(executed) = self.blocks_by_hash.get(&parent_hash) { - parent_hash = executed.block.parent_hash; + parent_hash = executed.block.parent_hash(); blocks.push(executed.clone()); } @@ -155,10 +156,10 @@ impl TreeState { } /// Insert executed block into the state. - fn insert_executed(&mut self, executed: ExecutedBlock) { + fn insert_executed(&mut self, executed: ExecutedBlock) { let hash = executed.block.hash(); - let parent_hash = executed.block.parent_hash; - let block_number = executed.block.number; + let parent_hash = executed.block.parent_hash(); + let block_number = executed.block.number(); if self.blocks_by_hash.contains_key(&hash) { return; @@ -186,11 +187,11 @@ impl TreeState { /// ## Returns /// /// The removed block and the block hashes of its children. - fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { + fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { let executed = self.blocks_by_hash.remove(&hash)?; // Remove this block from collection of children of its parent block. - let parent_entry = self.parent_to_child.entry(executed.block.parent_hash); + let parent_entry = self.parent_to_child.entry(executed.block.parent_hash()); if let hash_map::Entry::Occupied(mut entry) = parent_entry { entry.get_mut().remove(&hash); @@ -203,7 +204,7 @@ impl TreeState { let children = self.parent_to_child.remove(&hash).unwrap_or_default(); // Remove this block from `blocks_by_number`. 
- let block_number_entry = self.blocks_by_number.entry(executed.block.number); + let block_number_entry = self.blocks_by_number.entry(executed.block.number()); if let btree_map::Entry::Occupied(mut entry) = block_number_entry { // We have to find the index of the block since it exists in a vec if let Some(index) = entry.get().iter().position(|b| b.block.hash() == hash) { @@ -227,7 +228,7 @@ impl TreeState { } while let Some(executed) = self.blocks_by_hash.get(¤t_block) { - current_block = executed.block.parent_hash; + current_block = executed.block.parent_hash(); if current_block == hash { return true } @@ -255,14 +256,14 @@ impl TreeState { // upper bound let mut current_block = self.current_canonical_head.hash; while let Some(executed) = self.blocks_by_hash.get(¤t_block) { - current_block = executed.block.parent_hash; - if executed.block.number <= upper_bound { + current_block = executed.block.parent_hash(); + if executed.block.number() <= upper_bound { debug!(target: "engine::tree", num_hash=?executed.block.num_hash(), "Attempting to remove block walking back from the head"); if let Some((removed, _)) = self.remove_by_hash(executed.block.hash()) { debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed block walking back from the head"); // finally, move the trie updates self.persisted_trie_updates - .insert(removed.block.hash(), (removed.block.number, removed.trie)); + .insert(removed.block.hash(), (removed.block.number(), removed.trie)); } } } From a3eb302f7275fbde603eaeb4c1d5a97075793509 Mon Sep 17 00:00:00 2001 From: maze <38567289+0xMaze@users.noreply.github.com> Date: Thu, 28 Nov 2024 00:03:44 -0800 Subject: [PATCH 738/970] chore: pass generic header to validate_header_gas (#12931) --- crates/consensus/common/src/validation.rs | 8 ++++---- crates/ethereum/consensus/src/lib.rs | 2 +- crates/optimism/consensus/src/lib.rs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/consensus/common/src/validation.rs 
b/crates/consensus/common/src/validation.rs index cbc4f860708..b5314cdd1ec 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -11,11 +11,11 @@ use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. #[inline] -pub const fn validate_header_gas(header: &Header) -> Result<(), ConsensusError> { - if header.gas_used > header.gas_limit { +pub fn validate_header_gas(header: &H) -> Result<(), ConsensusError> { + if header.gas_used() > header.gas_limit() { return Err(ConsensusError::HeaderGasUsedExceedsGasLimit { - gas_used: header.gas_used, - gas_limit: header.gas_limit, + gas_used: header.gas_used(), + gas_limit: header.gas_limit(), }) } Ok(()) diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 1bb7b1422c3..2c260c4a7d1 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -118,7 +118,7 @@ impl HeaderVa for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validate_header_gas(header)?; + validate_header_gas(header.header())?; validate_header_base_fee(header.header(), &self.chain_spec)?; // EIP-4895: Beacon chain push withdrawals as operations diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 58b93b53dfb..69d94378582 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -92,7 +92,7 @@ impl Consensus for OpBeaconConsensus { impl HeaderValidator for OpBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validate_header_gas(header)?; + validate_header_gas(header.header())?; validate_header_base_fee(header.header(), &self.chain_spec) } From 39e057375a2f9a8c1bdfdd224970aa7e28ee3425 Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: 
Thu, 28 Nov 2024 16:20:17 +0700 Subject: [PATCH 739/970] chore: make generic header InvalidHeaderCache (#12940) --- .../beacon/src/engine/invalid_headers.rs | 24 ++++++++----------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index b8d80b0ceea..0a72129a627 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -6,7 +6,7 @@ use reth_metrics::{ }; use reth_primitives::SealedHeader; use schnellru::{ByLength, LruMap}; -use std::sync::Arc; +use std::{fmt::Debug, sync::Arc}; use tracing::warn; /// The max hit counter for invalid headers in the cache before it is forcefully evicted. @@ -17,20 +17,20 @@ const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; /// Keeps track of invalid headers. #[derive(Debug)] -pub struct InvalidHeaderCache { +pub struct InvalidHeaderCache { /// This maps a header hash to a reference to its invalid ancestor. - headers: LruMap, + headers: LruMap>, /// Metrics for the cache. metrics: InvalidHeaderCacheMetrics, } -impl InvalidHeaderCache { +impl InvalidHeaderCache { /// Invalid header cache constructor. pub fn new(max_length: u32) -> Self { Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } } - fn insert_entry(&mut self, hash: B256, header: Arc
) { + fn insert_entry(&mut self, hash: B256, header: Arc) { self.headers.insert(hash, HeaderEntry { header, hit_count: 0 }); } @@ -38,7 +38,7 @@ impl InvalidHeaderCache { /// /// If this is called, the hit count for the entry is incremented. /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub fn get(&mut self, hash: &B256) -> Option> { + pub fn get(&mut self, hash: &B256) -> Option> { { let entry = self.headers.get(hash)?; entry.hit_count += 1; @@ -53,11 +53,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid block into the cache, with a given invalid ancestor. - pub fn insert_with_invalid_ancestor( - &mut self, - header_hash: B256, - invalid_ancestor: Arc
, - ) { + pub fn insert_with_invalid_ancestor(&mut self, header_hash: B256, invalid_ancestor: Arc) { if self.get(&header_hash).is_none() { warn!(target: "consensus::engine", hash=?header_hash, ?invalid_ancestor, "Bad block with existing invalid ancestor"); self.insert_entry(header_hash, invalid_ancestor); @@ -69,7 +65,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid ancestor into the map. - pub fn insert(&mut self, invalid_ancestor: SealedHeader) { + pub fn insert(&mut self, invalid_ancestor: SealedHeader) { if self.get(&invalid_ancestor.hash()).is_none() { let hash = invalid_ancestor.hash(); let header = invalid_ancestor.unseal(); @@ -83,11 +79,11 @@ impl InvalidHeaderCache { } } -struct HeaderEntry { +struct HeaderEntry { /// Keeps track how many times this header has been hit. hit_count: u8, /// The actually header entry - header: Arc
, + header: Arc, } /// Metrics for the invalid headers cache. From 28ef5749e759a2b2c17260369fac2c6adb116bf9 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 28 Nov 2024 09:46:08 +0000 Subject: [PATCH 740/970] feat(engine): integrate sparse trie into the state root task (#12907) --- Cargo.lock | 1 - crates/engine/tree/Cargo.toml | 5 - .../tree/benches/state_root_from_proofs.rs | 81 ---- crates/engine/tree/src/tree/mod.rs | 1 - crates/engine/tree/src/tree/root.rs | 358 ++++++------------ 5 files changed, 116 insertions(+), 330 deletions(-) delete mode 100644 crates/engine/tree/benches/state_root_from_proofs.rs diff --git a/Cargo.lock b/Cargo.lock index b17d4b44d51..1f08a6ffe15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7267,7 +7267,6 @@ dependencies = [ "reth-errors", "reth-ethereum-engine-primitives", "reth-evm", - "reth-execution-errors", "reth-exex-types", "reth-metrics", "reth-network-p2p", diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 01d7e7e2024..47e5c2b04fe 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -21,7 +21,6 @@ reth-consensus.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true reth-evm.workspace = true -reth-execution-errors.workspace = true reth-network-p2p.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-builder.workspace = true @@ -95,10 +94,6 @@ rand.workspace = true name = "channel_perf" harness = false -[[bench]] -name = "state_root_from_proofs" -harness = false - [features] test-utils = [ "reth-blockchain-tree/test-utils", diff --git a/crates/engine/tree/benches/state_root_from_proofs.rs b/crates/engine/tree/benches/state_root_from_proofs.rs deleted file mode 100644 index 4c8e85696ea..00000000000 --- a/crates/engine/tree/benches/state_root_from_proofs.rs +++ /dev/null @@ -1,81 +0,0 @@ -#![allow(missing_docs)] - -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use 
reth_engine_tree::tree::calculate_state_root_from_proofs; -use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory}; -use reth_trie::{ - updates::TrieUpdatesSorted, HashedPostState, HashedPostStateSorted, HashedStorage, MultiProof, -}; -use revm_primitives::{ - keccak256, Account, AccountInfo, AccountStatus, Address, EvmStorage, EvmStorageSlot, HashMap, - HashSet, B256, U256, -}; - -fn create_test_state(size: usize) -> (HashMap>, HashedPostState) { - let mut state = HashedPostState::default(); - let mut targets = HashMap::default(); - - for i in 0..size { - let address = Address::random(); - let hashed_address = keccak256(address); - - // Create account - let info = AccountInfo { - balance: U256::from(100 + i), - nonce: i as u64, - code_hash: B256::random(), - code: Default::default(), - }; - - // Create storage with multiple slots - let mut storage = EvmStorage::default(); - let mut slots = HashSet::default(); - for j in 0..100 { - let slot = U256::from(j); - let value = U256::from(100 + j); - storage.insert(slot, EvmStorageSlot::new(value)); - slots.insert(keccak256(B256::from(slot))); - } - - let account = Account { info, storage: storage.clone(), status: AccountStatus::Loaded }; - - state.accounts.insert(hashed_address, Some(account.info.into())); - state.storages.insert( - hashed_address, - HashedStorage::from_iter( - false, - storage.into_iter().map(|(k, v)| (keccak256(B256::from(k)), v.present_value)), - ), - ); - targets.insert(hashed_address, slots); - } - - (targets, state) -} - -fn bench_state_root_collection(c: &mut Criterion) { - let factory = create_test_provider_factory(); - let view = ConsistentDbView::new(factory, None); - - let mut group = c.benchmark_group("state_root_collection"); - for size in &[10, 100, 1000] { - let (_targets, state) = create_test_state(*size); - let multiproof = MultiProof::default(); - - group.bench_with_input(format!("size_{}", size), size, |b, _| { - b.iter(|| { - 
black_box(calculate_state_root_from_proofs( - view.clone(), - &TrieUpdatesSorted::default(), - &HashedPostStateSorted::default(), - multiproof.clone(), - state.clone(), - )) - }); - }); - } - group.finish(); -} - -criterion_group!(benches, bench_state_root_collection); -criterion_main!(benches); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index a717ea295b0..44270cbfdf4 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -80,7 +80,6 @@ pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::InvalidBlockHook; -pub use root::calculate_state_root_from_proofs; mod root; diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 32bfbf68604..a2fb44ea3bd 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,23 +1,15 @@ //! State root task related functionality. 
-use alloy_primitives::map::{DefaultHashBuilder, FbHashMap, FbHashSet, HashMap, HashSet}; +use alloy_primitives::map::{FbHashMap, HashMap, HashSet}; use alloy_rlp::{BufMut, Encodable}; -use rayon::iter::{IntoParallelIterator, ParallelIterator}; -use reth_errors::ProviderResult; -use reth_execution_errors::TrieWitnessError; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, }; use reth_trie::{ - hashed_cursor::HashedPostStateCursorFactory, - proof::Proof, - trie_cursor::InMemoryTrieCursorFactory, - updates::{TrieUpdates, TrieUpdatesSorted}, - witness::{next_root_from_proofs, target_nodes}, - HashedPostState, HashedPostStateSorted, HashedStorage, MultiProof, Nibbles, TrieAccount, - TrieInput, EMPTY_ROOT_HASH, + proof::Proof, updates::TrieUpdates, HashedPostState, HashedStorage, MultiProof, Nibbles, + TrieAccount, TrieInput, EMPTY_ROOT_HASH, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseProof, DatabaseTrieCursorFactory}; +use reth_trie_db::DatabaseProof; use reth_trie_parallel::root::ParallelStateRootError; use reth_trie_sparse::{SparseStateTrie, SparseStateTrieResult}; use revm_primitives::{keccak256, EvmState, B256}; @@ -94,15 +86,15 @@ pub(crate) enum StateRootMessage { ProofCalculated { /// The calculated proof proof: MultiProof, + /// The state update that was used to calculate the proof + state_update: HashedPostState, /// The index of this proof in the sequence of state updates sequence_number: u64, }, /// State root calculation completed RootCalculated { - /// The calculated state root - root: B256, - /// The trie updates produced during calculation - updates: TrieUpdates, + /// The updated sparse trie + trie: Box, /// Time taken to calculate the root elapsed: Duration, }, @@ -115,8 +107,8 @@ pub(crate) struct ProofSequencer { next_sequence: u64, /// The next sequence number expected to be delivered. 
next_to_deliver: u64, - /// Buffer for out-of-order proofs - pending_proofs: BTreeMap, + /// Buffer for out-of-order proofs and corresponding state updates + pending_proofs: BTreeMap, } impl ProofSequencer { @@ -132,10 +124,16 @@ impl ProofSequencer { seq } - /// Adds a proof and returns all sequential proofs if we have a continuous sequence - pub(crate) fn add_proof(&mut self, sequence: u64, proof: MultiProof) -> Vec { + /// Adds a proof with the corresponding state update and returns all sequential proofs and state + /// updates if we have a continuous sequence + pub(crate) fn add_proof( + &mut self, + sequence: u64, + proof: MultiProof, + state_update: HashedPostState, + ) -> Vec<(MultiProof, HashedPostState)> { if sequence >= self.next_to_deliver { - self.pending_proofs.insert(sequence, proof); + self.pending_proofs.insert(sequence, (proof, state_update)); } // return early if we don't have the next expected proof @@ -146,9 +144,9 @@ impl ProofSequencer { let mut consecutive_proofs = Vec::with_capacity(self.pending_proofs.len()); let mut current_sequence = self.next_to_deliver; - // keep collecting proofs as long as we have consecutive sequence numbers - while let Some(proof) = self.pending_proofs.remove(¤t_sequence) { - consecutive_proofs.push(proof); + // keep collecting proofs and state updates as long as we have consecutive sequence numbers + while let Some((proof, state_update)) = self.pending_proofs.remove(¤t_sequence) { + consecutive_proofs.push((proof, state_update)); current_sequence += 1; // if we don't have the next number, stop collecting @@ -180,18 +178,19 @@ impl ProofSequencer { /// Then it updates relevant leaves according to the result of the transaction. 
#[derive(Debug)] pub(crate) struct StateRootTask { + /// Task configuration + config: StateRootConfig, /// Receiver for state root related messages rx: Receiver, /// Sender for state root related messages tx: Sender, - /// Task configuration - config: StateRootConfig, - /// Current state - state: HashedPostState, + /// Proof targets that have been already fetched + fetched_proof_targets: HashSet, /// Proof sequencing handler proof_sequencer: ProofSequencer, - /// Whether we're currently calculating a root - calculating_root: bool, + /// The sparse trie used for the state root calculation. If [`None`], then update is in + /// progress. + sparse_trie: Option>, } #[allow(dead_code)] @@ -209,9 +208,9 @@ where config, rx, tx, - state: Default::default(), + fetched_proof_targets: Default::default(), proof_sequencer: ProofSequencer::new(), - calculating_root: false, + sparse_trie: Some(Box::new(SparseStateTrie::default().with_updates(true))), } } @@ -231,14 +230,16 @@ where } /// Handles state updates. + /// + /// Returns proof targets derived from the state update. 
fn on_state_update( view: ConsistentDbView, input: Arc, update: EvmState, - state: &mut HashedPostState, + fetched_proof_targets: &HashSet, proof_sequence_number: u64, state_root_message_sender: Sender, - ) { + ) -> HashMap> { let mut hashed_state_update = HashedPostState::default(); for (address, account) in update { if account.is_touched() { @@ -263,20 +264,10 @@ where } } - // Dispatch proof gathering for this state update - let targets = hashed_state_update - .accounts - .keys() - .filter(|hashed_address| { - !state.accounts.contains_key(*hashed_address) && - !state.storages.contains_key(*hashed_address) - }) - .map(|hashed_address| (*hashed_address, HashSet::default())) - .chain(hashed_state_update.storages.iter().map(|(hashed_address, storage)| { - (*hashed_address, storage.storage.keys().copied().collect()) - })) - .collect::>(); + let proof_targets = get_proof_targets(&hashed_state_update, fetched_proof_targets); + // Dispatch proof gathering for this state update + let targets = proof_targets.clone(); rayon::spawn(move || { let provider = match view.provider_ro() { Ok(provider) => provider, @@ -287,12 +278,17 @@ where }; // TODO: replace with parallel proof - let result = - Proof::overlay_multiproof(provider.tx_ref(), input.as_ref().clone(), targets); + let result = Proof::overlay_multiproof( + provider.tx_ref(), + // TODO(alexey): this clone can be expensive, we should avoid it + input.as_ref().clone(), + targets, + ); match result { Ok(proof) => { let _ = state_root_message_sender.send(StateRootMessage::ProofCalculated { proof, + state_update: hashed_state_update, sequence_number: proof_sequence_number, }); } @@ -302,30 +298,33 @@ where } }); - state.extend(hashed_state_update); + proof_targets } /// Handler for new proof calculated, aggregates all the existing sequential proofs. 
- fn on_proof(&mut self, proof: MultiProof, sequence_number: u64) -> Option { - let ready_proofs = self.proof_sequencer.add_proof(sequence_number, proof); + fn on_proof( + &mut self, + sequence_number: u64, + proof: MultiProof, + state_update: HashedPostState, + ) -> Option<(MultiProof, HashedPostState)> { + let ready_proofs = self.proof_sequencer.add_proof(sequence_number, proof, state_update); if ready_proofs.is_empty() { None } else { - // combine all ready proofs into one - ready_proofs.into_iter().reduce(|mut acc, proof| { - acc.extend(proof); + // Merge all ready proofs and state updates + ready_proofs.into_iter().reduce(|mut acc, (proof, state_update)| { + acc.0.extend(proof); + acc.1.extend(state_update); acc }) } } - /// Spawns root calculation with the current state and proofs - fn spawn_root_calculation(&mut self, multiproof: MultiProof) { - if self.calculating_root { - return; - } - self.calculating_root = true; + /// Spawns root calculation with the current state and proofs. 
+ fn spawn_root_calculation(&mut self, state: HashedPostState, multiproof: MultiProof) { + let Some(trie) = self.sparse_trie.take() else { return }; trace!( target: "engine::root", @@ -334,28 +333,20 @@ where "Spawning root calculation" ); - let tx = self.tx.clone(); - let view = self.config.consistent_view.clone(); - let input = self.config.input.clone(); - let state = self.state.clone(); + // TODO(alexey): store proof targets in `ProofSequecner` to avoid recomputing them + let targets = get_proof_targets(&state, &HashSet::default()); + let tx = self.tx.clone(); rayon::spawn(move || { - let result = calculate_state_root_from_proofs( - view, - &input.nodes.clone().into_sorted(), - &input.state.clone().into_sorted(), - multiproof, - state, - ); + let result = update_sparse_trie(trie, multiproof, targets, state); match result { - Ok((root, updates, elapsed)) => { + Ok((trie, elapsed)) => { trace!( target: "engine::root", - %root, ?elapsed, "Root calculation completed, sending result" ); - let _ = tx.send(StateRootMessage::RootCalculated { root, updates, elapsed }); + let _ = tx.send(StateRootMessage::RootCalculated { trie, elapsed }); } Err(e) => { error!(target: "engine::root", error = ?e, "Could not calculate state root"); @@ -365,9 +356,8 @@ where } fn run(mut self) -> StateRootResult { + let mut current_state_update = HashedPostState::default(); let mut current_multiproof = MultiProof::default(); - let mut trie_updates = TrieUpdates::default(); - let mut current_root: B256; let mut updates_received = 0; let mut proofs_processed = 0; let mut roots_calculated = 0; @@ -383,16 +373,18 @@ where total_updates = updates_received, "Received new state update" ); - Self::on_state_update( + let targets = Self::on_state_update( self.config.consistent_view.clone(), self.config.input.clone(), update, - &mut self.state, + &self.fetched_proof_targets, self.proof_sequencer.next_sequence(), self.tx.clone(), ); + self.fetched_proof_targets.extend(targets.keys()); + 
self.fetched_proof_targets.extend(targets.values().flatten()); } - StateRootMessage::ProofCalculated { proof, sequence_number } => { + StateRootMessage::ProofCalculated { proof, state_update, sequence_number } => { proofs_processed += 1; trace!( target: "engine::root", @@ -401,28 +393,28 @@ where "Processing calculated proof" ); - if let Some(combined_proof) = self.on_proof(proof, sequence_number) { - if self.calculating_root { + if let Some((combined_proof, combined_state_update)) = + self.on_proof(sequence_number, proof, state_update) + { + if self.sparse_trie.is_none() { current_multiproof.extend(combined_proof); + current_state_update.extend(combined_state_update); } else { - self.spawn_root_calculation(combined_proof); + self.spawn_root_calculation(combined_state_update, combined_proof); } } } - StateRootMessage::RootCalculated { root, updates, elapsed } => { + StateRootMessage::RootCalculated { trie, elapsed } => { roots_calculated += 1; trace!( target: "engine::root", - %root, ?elapsed, roots_calculated, proofs = proofs_processed, updates = updates_received, "Computed intermediate root" ); - current_root = root; - trie_updates.extend(updates); - self.calculating_root = false; + self.sparse_trie = Some(trie); let has_new_proofs = !current_multiproof.account_subtree.is_empty() || !current_multiproof.storages.is_empty(); @@ -445,7 +437,10 @@ where storage_proofs = current_multiproof.storages.len(), "Spawning subsequent root calculation" ); - self.spawn_root_calculation(std::mem::take(&mut current_multiproof)); + self.spawn_root_calculation( + std::mem::take(&mut current_state_update), + std::mem::take(&mut current_multiproof), + ); } else if all_proofs_received && no_pending { debug!( target: "engine::root", @@ -454,7 +449,15 @@ where roots_calculated, "All proofs processed, ending calculation" ); - return Ok((current_root, trie_updates)); + let mut trie = self + .sparse_trie + .take() + .expect("sparse trie update should not be in progress"); + let root = 
trie.root().expect("sparse trie should be revealed"); + let trie_updates = trie + .take_trie_updates() + .expect("sparse trie should have updates retention enabled"); + return Ok((root, trie_updates)); } } }, @@ -474,156 +477,27 @@ where } } -/// Calculate state root from proofs. -pub fn calculate_state_root_from_proofs( - view: ConsistentDbView, - input_nodes_sorted: &TrieUpdatesSorted, - input_state_sorted: &HashedPostStateSorted, - multiproof: MultiProof, - state: HashedPostState, -) -> ProviderResult<(B256, TrieUpdates, Duration)> -where - Factory: DatabaseProviderFactory + Clone, -{ - let started_at = Instant::now(); - - let proof_targets: HashMap> = state +fn get_proof_targets( + state_update: &HashedPostState, + fetched_proof_targets: &HashSet, +) -> HashMap> { + state_update .accounts .keys() + .filter(|hashed_address| !fetched_proof_targets.contains(*hashed_address)) .map(|hashed_address| (*hashed_address, HashSet::default())) - .chain(state.storages.iter().map(|(hashed_address, storage)| { + .chain(state_update.storages.iter().map(|(hashed_address, storage)| { (*hashed_address, storage.storage.keys().copied().collect()) })) - .collect(); - - let account_trie_nodes = proof_targets - .into_par_iter() - .map_init( - || view.provider_ro().unwrap(), - |provider_ro, (hashed_address, hashed_slots)| { - // Gather and record storage trie nodes for this account. 
- let mut storage_trie_nodes = BTreeMap::default(); - let storage = state.storages.get(&hashed_address); - for hashed_slot in hashed_slots { - let slot_key = Nibbles::unpack(hashed_slot); - let slot_value = storage - .and_then(|s| s.storage.get(&hashed_slot)) - .filter(|v| !v.is_zero()) - .map(|v| alloy_rlp::encode_fixed_size(v).to_vec()); - let proof = multiproof - .storages - .get(&hashed_address) - .map(|proof| { - proof - .subtree - .iter() - .filter(|e| slot_key.starts_with(e.0)) - .collect::>() - }) - .unwrap_or_default(); - storage_trie_nodes.extend(target_nodes( - slot_key.clone(), - slot_value, - None, - proof, - )?); - } - - let storage_root = next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { - // Right pad the target with 0s. - let mut padded_key = key.pack(); - padded_key.resize(32, 0); - let mut targets = HashMap::with_hasher(DefaultHashBuilder::default()); - let mut slots = HashSet::with_hasher(DefaultHashBuilder::default()); - slots.insert(B256::from_slice(&padded_key)); - targets.insert(hashed_address, slots); - let proof = Proof::new( - InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - input_nodes_sorted, - ), - HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - input_state_sorted, - ), - ) - .multiproof(targets) - .unwrap(); - - // The subtree only contains the proof for a single target. - let node = proof - .storages - .get(&hashed_address) - .and_then(|storage_multiproof| storage_multiproof.subtree.get(&key)) - .cloned() - .ok_or(TrieWitnessError::MissingTargetNode(key))?; - Ok(node) - })?; - - // Gather and record account trie nodes. 
- let account = state - .accounts - .get(&hashed_address) - .ok_or(TrieWitnessError::MissingAccount(hashed_address))?; - let value = (account.is_some() || storage_root != EMPTY_ROOT_HASH).then(|| { - let mut encoded = Vec::with_capacity(128); - TrieAccount::from((account.unwrap_or_default(), storage_root)) - .encode(&mut encoded as &mut dyn BufMut); - encoded - }); - let key = Nibbles::unpack(hashed_address); - let proof = multiproof.account_subtree.iter().filter(|e| key.starts_with(e.0)); - target_nodes(key.clone(), value, None, proof) - }, - ) - .try_reduce(BTreeMap::new, |mut acc, map| { - acc.extend(map.into_iter()); - Ok(acc) - })?; - - let provider_ro = view.provider_ro()?; - - let state_root = next_root_from_proofs(account_trie_nodes, |key: Nibbles| { - // Right pad the target with 0s. - let mut padded_key = key.pack(); - padded_key.resize(32, 0); - let mut targets = HashMap::with_hasher(DefaultHashBuilder::default()); - targets.insert( - B256::from_slice(&padded_key), - HashSet::with_hasher(DefaultHashBuilder::default()), - ); - let proof = Proof::new( - InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - input_nodes_sorted, - ), - HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - input_state_sorted, - ), - ) - .multiproof(targets) - .unwrap(); - - // The subtree only contains the proof for a single target. - let node = proof - .account_subtree - .get(&key) - .cloned() - .ok_or(TrieWitnessError::MissingTargetNode(key))?; - Ok(node) - })?; - - Ok((state_root, Default::default(), started_at.elapsed())) + .collect() } /// Updates the sparse trie with the given proofs and state, and returns the updated trie and the /// time it took. 
-#[allow(dead_code)] fn update_sparse_trie( mut trie: Box, multiproof: MultiProof, - targets: FbHashMap<32, FbHashSet<32>>, + targets: HashMap>, state: HashedPostState, ) -> SparseStateTrieResult<(Box, Duration)> { let started_at = Instant::now(); @@ -860,11 +734,11 @@ mod tests { let proof2 = MultiProof::default(); sequencer.next_sequence = 2; - let ready = sequencer.add_proof(0, proof1); + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); assert_eq!(ready.len(), 1); assert!(!sequencer.has_pending()); - let ready = sequencer.add_proof(1, proof2); + let ready = sequencer.add_proof(1, proof2, HashedPostState::default()); assert_eq!(ready.len(), 1); assert!(!sequencer.has_pending()); } @@ -877,15 +751,15 @@ mod tests { let proof3 = MultiProof::default(); sequencer.next_sequence = 3; - let ready = sequencer.add_proof(2, proof3); + let ready = sequencer.add_proof(2, proof3, HashedPostState::default()); assert_eq!(ready.len(), 0); assert!(sequencer.has_pending()); - let ready = sequencer.add_proof(0, proof1); + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); assert_eq!(ready.len(), 1); assert!(sequencer.has_pending()); - let ready = sequencer.add_proof(1, proof2); + let ready = sequencer.add_proof(1, proof2, HashedPostState::default()); assert_eq!(ready.len(), 2); assert!(!sequencer.has_pending()); } @@ -897,10 +771,10 @@ mod tests { let proof3 = MultiProof::default(); sequencer.next_sequence = 3; - let ready = sequencer.add_proof(0, proof1); + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); assert_eq!(ready.len(), 1); - let ready = sequencer.add_proof(2, proof3); + let ready = sequencer.add_proof(2, proof3, HashedPostState::default()); assert_eq!(ready.len(), 0); assert!(sequencer.has_pending()); } @@ -911,10 +785,10 @@ mod tests { let proof1 = MultiProof::default(); let proof2 = MultiProof::default(); - let ready = sequencer.add_proof(0, proof1); + let ready = sequencer.add_proof(0, proof1, 
HashedPostState::default()); assert_eq!(ready.len(), 1); - let ready = sequencer.add_proof(0, proof2); + let ready = sequencer.add_proof(0, proof2, HashedPostState::default()); assert_eq!(ready.len(), 0); assert!(!sequencer.has_pending()); } @@ -925,12 +799,12 @@ mod tests { let proofs: Vec<_> = (0..5).map(|_| MultiProof::default()).collect(); sequencer.next_sequence = 5; - sequencer.add_proof(4, proofs[4].clone()); - sequencer.add_proof(2, proofs[2].clone()); - sequencer.add_proof(1, proofs[1].clone()); - sequencer.add_proof(3, proofs[3].clone()); + sequencer.add_proof(4, proofs[4].clone(), HashedPostState::default()); + sequencer.add_proof(2, proofs[2].clone(), HashedPostState::default()); + sequencer.add_proof(1, proofs[1].clone(), HashedPostState::default()); + sequencer.add_proof(3, proofs[3].clone(), HashedPostState::default()); - let ready = sequencer.add_proof(0, proofs[0].clone()); + let ready = sequencer.add_proof(0, proofs[0].clone(), HashedPostState::default()); assert_eq!(ready.len(), 5); assert!(!sequencer.has_pending()); } From 064f9393fff0d61fd7e5df77f80bda29757144c5 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 28 Nov 2024 11:26:58 +0100 Subject: [PATCH 741/970] perf(tree): reveal multiproof in tree task (#12949) --- crates/engine/tree/src/tree/root.rs | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index a2fb44ea3bd..222ff8e6248 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -503,16 +503,7 @@ fn update_sparse_trie( let started_at = Instant::now(); // Reveal new accounts and storage slots. 
- for (address, slots) in targets { - let path = Nibbles::unpack(address); - trie.reveal_account(address, multiproof.account_proof_nodes(&path))?; - - let storage_proofs = multiproof.storage_proof_nodes(address, slots); - - for (slot, proof) in storage_proofs { - trie.reveal_storage_slot(address, slot, proof)?; - } - } + trie.reveal_multiproof(targets, multiproof)?; // Update storage slots with new values and calculate storage roots. let mut storage_roots = FbHashMap::default(); From b2e1da3ebc8b04bc09cb14c61e820788d33511de Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 28 Nov 2024 11:28:07 +0100 Subject: [PATCH 742/970] fix(tree): root task storage leaf removal (#12950) --- crates/engine/tree/src/tree/root.rs | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 222ff8e6248..5d987648cff 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -514,12 +514,17 @@ fn update_sparse_trie( } for (slot, value) in storage.storage { - let slot_path = Nibbles::unpack(slot); - trie.update_storage_leaf( - address, - slot_path, - alloy_rlp::encode_fixed_size(&value).to_vec(), - )?; + let slot_nibbles = Nibbles::unpack(slot); + if value.is_zero() { + // TODO: handle blinded node error + trie.remove_storage_leaf(address, &slot_nibbles)?; + } else { + trie.update_storage_leaf( + address, + slot_nibbles, + alloy_rlp::encode_fixed_size(&value).to_vec(), + )?; + } } storage_roots.insert(address, trie.storage_root(address).unwrap()); @@ -527,7 +532,7 @@ fn update_sparse_trie( // Update accounts with new values and include updated storage roots for (address, account) in state.accounts { - let path = Nibbles::unpack(address); + let account_nibbles = Nibbles::unpack(address); if let Some(account) = account { let storage_root = storage_roots @@ -538,9 +543,10 @@ fn update_sparse_trie( let mut encoded = Vec::with_capacity(128); 
TrieAccount::from((account, storage_root)).encode(&mut encoded as &mut dyn BufMut); - trie.update_account_leaf(path, encoded)?; + trie.update_account_leaf(account_nibbles, encoded)?; } else { - trie.remove_account_leaf(&path)?; + // TODO: handle blinded node error + trie.remove_account_leaf(&account_nibbles)?; } } From cbcf79a18e002bef5927f236176458112b38bc4a Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 28 Nov 2024 12:14:18 +0100 Subject: [PATCH 743/970] chore(engine, state root task): remove unused StdReceiverStream struct (#12953) --- crates/engine/tree/src/tree/root.rs | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 5d987648cff..17419bc6a4a 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -16,7 +16,7 @@ use revm_primitives::{keccak256, EvmState, B256}; use std::{ collections::BTreeMap, sync::{ - mpsc::{self, Receiver, RecvError, Sender}, + mpsc::{self, Receiver, Sender}, Arc, }, time::{Duration, Instant}, @@ -59,23 +59,6 @@ pub(crate) struct StateRootConfig { pub input: Arc, } -/// Wrapper for std channel receiver to maintain compatibility with `UnboundedReceiverStream` -#[derive(Debug)] -pub(crate) struct StdReceiverStream { - rx: Receiver, -} - -#[allow(dead_code)] -impl StdReceiverStream { - pub(crate) const fn new(rx: Receiver) -> Self { - Self { rx } - } - - pub(crate) fn recv(&self) -> Result { - self.rx.recv() - } -} - /// Messages used internally by the state root task #[derive(Debug)] #[allow(dead_code)] From e9a6e4525ebf00b113706b822ca734b364312e90 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 28 Nov 2024 13:00:18 +0100 Subject: [PATCH 744/970] feat(trie): `SparseStateTrie::update_account` (#12954) --- crates/trie/{trie => common}/src/constants.rs | 2 +- crates/trie/common/src/lib.rs | 4 ++ crates/trie/sparse/Cargo.toml | 3 +- crates/trie/sparse/src/state.rs | 60 
+++++++++++++++++-- crates/trie/sparse/src/trie.rs | 5 ++ crates/trie/trie/src/lib.rs | 4 -- 6 files changed, 67 insertions(+), 11 deletions(-) rename crates/trie/{trie => common}/src/constants.rs (94%) diff --git a/crates/trie/trie/src/constants.rs b/crates/trie/common/src/constants.rs similarity index 94% rename from crates/trie/trie/src/constants.rs rename to crates/trie/common/src/constants.rs index 7354290d959..471b8bd9dcc 100644 --- a/crates/trie/trie/src/constants.rs +++ b/crates/trie/common/src/constants.rs @@ -5,9 +5,9 @@ pub const TRIE_ACCOUNT_RLP_MAX_SIZE: usize = 110; #[cfg(test)] mod tests { use super::*; + use crate::TrieAccount; use alloy_primitives::{B256, U256}; use alloy_rlp::Encodable; - use reth_trie_common::TrieAccount; #[test] fn account_rlp_max_size() { diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index 04b817aab8f..6647de67811 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -11,6 +11,10 @@ /// The implementation of hash builder. pub mod hash_builder; +/// Constants related to the trie computation. 
+mod constants; +pub use constants::*; + mod account; pub use account::TrieAccount; diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index dce232fcd57..efd68020ccd 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -14,8 +14,9 @@ workspace = true [dependencies] # reth -reth-tracing.workspace = true +reth-primitives-traits.workspace = true reth-trie-common.workspace = true +reth-tracing.workspace = true # alloy alloy-primitives.workspace = true diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 551a47ce2bb..aad74ac0550 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,19 +1,21 @@ -use crate::{RevealedSparseTrie, SparseStateTrieError, SparseStateTrieResult, SparseTrie}; +use crate::{ + RevealedSparseTrie, SparseStateTrieError, SparseStateTrieResult, SparseTrie, SparseTrieError, +}; use alloy_primitives::{ map::{HashMap, HashSet}, Bytes, B256, }; -use alloy_rlp::Decodable; +use alloy_rlp::{Decodable, Encodable}; +use reth_primitives_traits::Account; use reth_trie_common::{ updates::{StorageTrieUpdates, TrieUpdates}, - MultiProof, Nibbles, TrieNode, + MultiProof, Nibbles, TrieAccount, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use std::iter::Peekable; /// Sparse state trie representing lazy-loaded Ethereum state trie. -#[derive(Default, Debug)] +#[derive(Debug)] pub struct SparseStateTrie { - retain_updates: bool, /// Sparse account trie. state: SparseTrie, /// Sparse storage tries. @@ -22,6 +24,23 @@ pub struct SparseStateTrie { revealed: HashMap>, /// Collection of addresses that had their storage tries wiped. wiped_storages: HashSet, + /// Flag indicating whether trie updates should be retained. + retain_updates: bool, + /// Reusable buffer for RLP encoding of trie accounts. 
+ account_rlp_buf: Vec, +} + +impl Default for SparseStateTrie { + fn default() -> Self { + Self { + state: Default::default(), + storages: Default::default(), + revealed: Default::default(), + wiped_storages: Default::default(), + retain_updates: false, + account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), + } + } } impl SparseStateTrie { @@ -186,6 +205,37 @@ impl SparseStateTrie { Ok(Some(root_node)) } + /// Update or remove trie account based on new account info. This method will either recompute + /// the storage root based on update storage trie or look it up from existing leaf value. + /// + /// If the new account info and storage trie are empty, the account leaf will be removed. + pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> { + let nibbles = Nibbles::unpack(address); + let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { + storage_trie.root().ok_or(SparseTrieError::Blind)? + } else if self.revealed.contains_key(&address) { + let state = self.state.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + // The account was revealed, either... + if let Some(value) = state.get_leaf_value(&nibbles) { + // ..it exists and we should take it's current storage root or... + TrieAccount::decode(&mut &value[..])?.storage_root + } else { + // ...the account is newly created and the storage trie is empty. + EMPTY_ROOT_HASH + } + } else { + return Err(SparseTrieError::Blind.into()) + }; + + if account.is_empty() && storage_root == EMPTY_ROOT_HASH { + self.remove_account_leaf(&nibbles) + } else { + self.account_rlp_buf.clear(); + TrieAccount::from((account, storage_root)).encode(&mut self.account_rlp_buf); + self.update_account_leaf(nibbles, self.account_rlp_buf.clone()) + } + } + /// Update the account leaf node. 
pub fn update_account_leaf( &mut self, diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 21f1cf410aa..2ecc3984445 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -166,6 +166,11 @@ impl RevealedSparseTrie { self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) } + /// Returns a reference to the leaf value if present. + pub fn get_leaf_value(&self, path: &Nibbles) -> Option<&Vec> { + self.values.get(path) + } + /// Takes and returns the retained sparse node updates pub fn take_updates(&mut self) -> SparseTrieUpdates { self.updates.take().unwrap_or_default() diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index 335711b8d88..1e7eeb9b52b 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -13,10 +13,6 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -/// Constants related to the trie computation. -mod constants; -pub use constants::*; - /// The implementation of forward-only in-memory cursor. 
pub mod forward_cursor; From c274462059425b835b0a74d25079e0469fd7429f Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 28 Nov 2024 13:12:47 +0100 Subject: [PATCH 745/970] perf: use alloy hash map in trie related code (#12956) --- .../src/providers/database/provider.rs | 10 ++++--- crates/storage/storage-api/src/hashing.rs | 4 +-- crates/storage/storage-api/src/trie.rs | 2 +- crates/trie/common/src/prefix_set.rs | 8 +++--- crates/trie/common/src/updates.rs | 27 +++++++++++-------- crates/trie/db/src/proof.rs | 4 +-- crates/trie/db/tests/trie.rs | 25 ++++++----------- crates/trie/db/tests/witness.rs | 26 ++++++++++-------- .../trie/trie/src/hashed_cursor/post_state.rs | 3 +-- crates/trie/trie/src/state.rs | 27 +++++++++++-------- crates/trie/trie/src/trie_cursor/in_memory.rs | 3 +-- crates/trie/trie/src/walker.rs | 3 +-- 12 files changed, 74 insertions(+), 68 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index da584744530..cfbe20cf4b4 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -25,7 +25,11 @@ use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, }; -use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap, HashSet}, + Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, +}; use itertools::Itertools; use rayon::slice::ParallelSliceMut; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, EthereumHardforks}; @@ -71,7 +75,7 @@ use revm::{ }; use std::{ cmp::Ordering, - collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{BTreeMap, BTreeSet}, fmt::Debug, ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::{mpsc, Arc}, @@ -2442,7 +2446,7 @@ impl HashingWriter for DatabaseProvi // Apply 
values to HashedState, and remove the account if it's None. let mut hashed_storage_keys: HashMap> = - HashMap::with_capacity(hashed_storages.len()); + HashMap::with_capacity_and_hasher(hashed_storages.len(), Default::default()); let mut hashed_storage = self.tx.cursor_dup_write::()?; for (hashed_address, key, value) in hashed_storages.into_iter().rev() { hashed_storage_keys.entry(hashed_address).or_default().insert(key); diff --git a/crates/storage/storage-api/src/hashing.rs b/crates/storage/storage-api/src/hashing.rs index c6958aa4d64..7cd30a82510 100644 --- a/crates/storage/storage-api/src/hashing.rs +++ b/crates/storage/storage-api/src/hashing.rs @@ -1,10 +1,10 @@ -use alloy_primitives::{Address, BlockNumber, B256}; +use alloy_primitives::{map::HashMap, Address, BlockNumber, B256}; use auto_impl::auto_impl; use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; use reth_primitives::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderResult; use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, + collections::{BTreeMap, BTreeSet}, ops::{RangeBounds, RangeInclusive}, }; diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index d63f6037439..ee1ca1de180 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -114,7 +114,7 @@ pub trait StorageTrieWriter: Send + Sync { /// Returns the number of entries modified. fn write_storage_trie_updates( &self, - storage_tries: &std::collections::HashMap, + storage_tries: &HashMap, ) -> ProviderResult; /// Writes storage trie updates for the given hashed address. 
diff --git a/crates/trie/common/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs index 1e3567f57d0..2536a41ff0c 100644 --- a/crates/trie/common/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -1,9 +1,9 @@ use crate::Nibbles; -use alloy_primitives::B256; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, }; +use std::sync::Arc; /// Collection of mutable prefix sets. #[derive(Clone, Default, Debug)] diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 4e780a853ba..6f80eb16553 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -1,6 +1,8 @@ use crate::{BranchNodeCompact, HashBuilder, Nibbles}; -use alloy_primitives::B256; -use std::collections::{HashMap, HashSet}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, +}; /// The aggregation of trie updates. #[derive(PartialEq, Eq, Clone, Default, Debug)] @@ -228,8 +230,8 @@ impl StorageTrieUpdates { #[cfg(any(test, feature = "serde"))] mod serde_nibbles_set { use crate::Nibbles; + use alloy_primitives::map::HashSet; use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; - use std::collections::HashSet; pub(super) fn serialize(map: &HashSet, serializer: S) -> Result where @@ -264,13 +266,13 @@ mod serde_nibbles_set { #[cfg(any(test, feature = "serde"))] mod serde_nibbles_map { use crate::Nibbles; - use alloy_primitives::hex; + use alloy_primitives::{hex, map::HashMap}; use serde::{ de::{Error, MapAccess, Visitor}, ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer, }; - use std::{collections::HashMap, marker::PhantomData}; + use std::marker::PhantomData; pub(super) fn serialize( map: &HashMap, @@ -314,7 +316,10 @@ mod serde_nibbles_map { where A: MapAccess<'de>, { - let mut result = HashMap::with_capacity(map.size_hint().unwrap_or(0)); + let mut result = HashMap::with_capacity_and_hasher( + 
map.size_hint().unwrap_or(0), + Default::default(), + ); while let Some((key, value)) = map.next_entry::()? { let decoded_key = @@ -406,13 +411,13 @@ fn exclude_empty_from_pair( #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { use crate::{BranchNodeCompact, Nibbles}; - use alloy_primitives::B256; + use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, + }; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; - use std::{ - borrow::Cow, - collections::{HashMap, HashSet}, - }; + use std::borrow::Cow; /// Bincode-compatible [`super::TrieUpdates`] serde implementation. /// diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index dd00f6eda9c..99c87bf05eb 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -123,7 +123,7 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> let prefix_set = storage.construct_prefix_set(); let state_sorted = HashedPostStateSorted::new( Default::default(), - HashMap::from([(hashed_address, storage.into_sorted())]), + HashMap::from_iter([(hashed_address, storage.into_sorted())]), ); Self::from_tx(tx, address) .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( @@ -145,7 +145,7 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> let prefix_set = storage.construct_prefix_set(); let state_sorted = HashedPostStateSorted::new( Default::default(), - HashMap::from([(hashed_address, storage.into_sorted())]), + HashMap::from_iter([(hashed_address, storage.into_sorted())]), ); Self::from_tx(tx, address) .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index 1e5d1a9f26b..4c614d83be6 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -1,13 +1,14 @@ #![allow(missing_docs)] use alloy_consensus::EMPTY_ROOT_HASH; -use alloy_primitives::{hex_literal::hex, keccak256, Address, B256, U256}; +use 
alloy_primitives::{hex_literal::hex, keccak256, map::HashMap, Address, B256, U256}; +use alloy_rlp::Encodable; use proptest::{prelude::ProptestConfig, proptest}; use proptest_arbitrary_interop::arb; use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, - transaction::DbTxMut, + transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, StorageEntry}; use reth_provider::{ @@ -15,25 +16,15 @@ use reth_provider::{ StorageTrieWriter, TrieWriter, }; use reth_trie::{ - prefix_set::PrefixSetMut, + prefix_set::{PrefixSetMut, TriePrefixSets}, test_utils::{state_root, state_root_prehashed, storage_root, storage_root_prehashed}, triehash::KeccakHasher, - BranchNodeCompact, StateRoot, StorageRoot, TrieMask, + updates::StorageTrieUpdates, + BranchNodeCompact, HashBuilder, IntermediateStateRootState, Nibbles, StateRoot, + StateRootProgress, StorageRoot, TrieAccount, TrieMask, }; use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot}; -use std::{ - collections::{BTreeMap, HashMap}, - ops::Mul, - str::FromStr, - sync::Arc, -}; - -use alloy_rlp::Encodable; -use reth_db_api::transaction::DbTx; -use reth_trie::{ - prefix_set::TriePrefixSets, updates::StorageTrieUpdates, HashBuilder, - IntermediateStateRootState, Nibbles, StateRootProgress, TrieAccount, -}; +use std::{collections::BTreeMap, ops::Mul, str::FromStr, sync::Arc}; fn insert_account( tx: &impl DbTxMut, diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs index 8e00472b473..385f6269f39 100644 --- a/crates/trie/db/tests/witness.rs +++ b/crates/trie/db/tests/witness.rs @@ -27,7 +27,7 @@ fn includes_empty_node_preimage() { assert_eq!( TrieWitness::from_tx(provider.tx_ref()) .compute(HashedPostState { - accounts: HashMap::from([(hashed_address, Some(Account::default()))]), + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), storages: HashMap::default(), }) .unwrap(), @@ -44,8 +44,8 @@ fn 
includes_empty_node_preimage() { let witness = TrieWitness::from_tx(provider.tx_ref()) .compute(HashedPostState { - accounts: HashMap::from([(hashed_address, Some(Account::default()))]), - storages: HashMap::from([( + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), + storages: HashMap::from_iter([( hashed_address, HashedStorage::from_iter(false, [(hashed_slot, U256::from(1))]), )]), @@ -80,12 +80,16 @@ fn includes_nodes_for_destroyed_storage_nodes() { .multiproof(HashMap::from_iter([(hashed_address, HashSet::from_iter([hashed_slot]))])) .unwrap(); - let witness = TrieWitness::from_tx(provider.tx_ref()) - .compute(HashedPostState { - accounts: HashMap::from([(hashed_address, Some(Account::default()))]), - storages: HashMap::from([(hashed_address, HashedStorage::from_iter(true, []))]), // destroyed - }) - .unwrap(); + let witness = + TrieWitness::from_tx(provider.tx_ref()) + .compute(HashedPostState { + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), + storages: HashMap::from_iter([( + hashed_address, + HashedStorage::from_iter(true, []), + )]), // destroyed + }) + .unwrap(); assert!(witness.contains_key(&state_root)); for node in multiproof.account_subtree.values() { assert_eq!(witness.get(&keccak256(node)), Some(node)); @@ -126,8 +130,8 @@ fn correctly_decodes_branch_node_values() { let witness = TrieWitness::from_tx(provider.tx_ref()) .compute(HashedPostState { - accounts: HashMap::from([(hashed_address, Some(Account::default()))]), - storages: HashMap::from([( + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), + storages: HashMap::from_iter([( hashed_address, HashedStorage::from_iter( false, diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index 7521bb1b2bc..e0689d45087 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -3,10 +3,9 @@ use crate::{ 
forward_cursor::ForwardInMemoryCursor, HashedAccountsSorted, HashedPostStateSorted, HashedStorageSorted, }; -use alloy_primitives::{B256, U256}; +use alloy_primitives::{map::HashSet, B256, U256}; use reth_primitives::Account; use reth_storage_errors::db::DatabaseError; -use std::collections::HashSet; /// The hashed cursor factory for the post state. #[derive(Clone, Debug)] diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index eca126744e9..fdfb86a53dd 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -2,15 +2,16 @@ use crate::{ prefix_set::{PrefixSetMut, TriePrefixSetsMut}, Nibbles, }; -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap, HashSet}, + Address, B256, U256, +}; use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_primitives::Account; use revm::db::{states::CacheAccount, AccountStatus, BundleAccount}; -use std::{ - borrow::Cow, - collections::{hash_map, HashMap, HashSet}, -}; +use std::borrow::Cow; /// Representation of in-memory hashed state. 
#[derive(PartialEq, Eq, Clone, Default, Debug)] @@ -41,8 +42,8 @@ impl HashedPostState { }) .collect::, HashedStorage))>>(); - let mut accounts = HashMap::with_capacity(hashed.len()); - let mut storages = HashMap::with_capacity(hashed.len()); + let mut accounts = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); + let mut storages = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); for (address, (account, storage)) in hashed { accounts.insert(address, account); storages.insert(address, storage); @@ -68,8 +69,8 @@ impl HashedPostState { }) .collect::, HashedStorage))>>(); - let mut accounts = HashMap::with_capacity(hashed.len()); - let mut storages = HashMap::with_capacity(hashed.len()); + let mut accounts = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); + let mut storages = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); for (address, (account, storage)) in hashed { accounts.insert(address, account); storages.insert(address, storage); @@ -79,7 +80,10 @@ impl HashedPostState { /// Construct [`HashedPostState`] from a single [`HashedStorage`]. pub fn from_hashed_storage(hashed_address: B256, storage: HashedStorage) -> Self { - Self { accounts: HashMap::default(), storages: HashMap::from([(hashed_address, storage)]) } + Self { + accounts: HashMap::default(), + storages: HashMap::from_iter([(hashed_address, storage)]), + } } /// Set account entries on hashed state. @@ -121,7 +125,8 @@ impl HashedPostState { } // Populate storage prefix sets. 
- let mut storage_prefix_sets = HashMap::with_capacity(self.storages.len()); + let mut storage_prefix_sets = + HashMap::with_capacity_and_hasher(self.storages.len(), Default::default()); for (hashed_address, hashed_storage) in &self.storages { account_prefix_set.insert(Nibbles::unpack(hashed_address)); storage_prefix_sets.insert(*hashed_address, hashed_storage.construct_prefix_set()); diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 4a34fd31ad1..fa59b70d1fd 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -3,10 +3,9 @@ use crate::{ forward_cursor::ForwardInMemoryCursor, updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, }; -use alloy_primitives::B256; +use alloy_primitives::{map::HashSet, B256}; use reth_storage_errors::db::DatabaseError; use reth_trie_common::{BranchNodeCompact, Nibbles}; -use std::collections::HashSet; /// The trie cursor factory for the trie updates. 
#[derive(Debug, Clone)] diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index 146dbc213e5..d1c5247966d 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -3,9 +3,8 @@ use crate::{ trie_cursor::{CursorSubNode, TrieCursor}, BranchNodeCompact, Nibbles, }; -use alloy_primitives::B256; +use alloy_primitives::{map::HashSet, B256}; use reth_storage_errors::db::DatabaseError; -use std::collections::HashSet; #[cfg(feature = "metrics")] use crate::metrics::WalkerMetrics; From 1d5bd46594745cedc81ec5f03caea643aacb3761 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 28 Nov 2024 13:48:36 +0100 Subject: [PATCH 746/970] chore: make op-node-testing- leaner (#12958) --- Cargo.lock | 11 ++++-- crates/e2e-test-utils/Cargo.toml | 8 ++++- crates/e2e-test-utils/src/engine_api.rs | 15 ++++---- crates/e2e-test-utils/src/lib.rs | 26 +++++++------- crates/e2e-test-utils/src/network.rs | 4 ++- crates/e2e-test-utils/src/node.rs | 33 ++++++++---------- crates/e2e-test-utils/src/payload.rs | 3 +- crates/e2e-test-utils/src/rpc.rs | 19 ++++------ crates/e2e-test-utils/src/traits.rs | 5 +-- crates/optimism/node/Cargo.toml | 42 +++++++++++++---------- crates/optimism/node/src/utils.rs | 3 +- crates/optimism/node/tests/e2e/p2p.rs | 9 +++-- crates/optimism/node/tests/it/priority.rs | 3 +- 13 files changed, 93 insertions(+), 88 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1f08a6ffe15..d751ff05aad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7106,21 +7106,27 @@ dependencies = [ "futures-util", "jsonrpsee", "op-alloy-rpc-types-engine", - "reth", "reth-chainspec", "reth-db", "reth-engine-local", + "reth-network", + "reth-network-api", "reth-network-peers", "reth-node-api", "reth-node-builder", + "reth-node-core", "reth-optimism-primitives", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-provider", + "reth-rpc-api", + "reth-rpc-eth-api", "reth-rpc-layer", + 
"reth-rpc-server-types", "reth-stages-types", + "reth-tasks", "reth-tokio-util", "reth-tracing", "serde_json", @@ -8335,7 +8341,6 @@ dependencies = [ "op-alloy-consensus", "op-alloy-rpc-types-engine", "parking_lot", - "reth", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", @@ -8347,6 +8352,7 @@ dependencies = [ "reth-network", "reth-node-api", "reth-node-builder", + "reth-node-core", "reth-optimism-chainspec", "reth-optimism-consensus", "reth-optimism-evm", @@ -8362,6 +8368,7 @@ dependencies = [ "reth-provider", "reth-revm", "reth-rpc-server-types", + "reth-tasks", "reth-tracing", "reth-transaction-pool", "reth-trie-db", diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 77b19085d40..bedacbecd75 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -11,22 +11,28 @@ repository.workspace = true workspace = true [dependencies] -reth.workspace = true reth-chainspec.workspace = true reth-tracing.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-rpc-layer.workspace = true +reth-rpc-server-types.workspace = true +reth-rpc-eth-api.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-primitives.workspace = true reth-provider.workspace = true +reth-network-api.workspace = true +reth-network.workspace = true reth-node-api.workspace = true +reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-stages-types.workspace = true reth-network-peers.workspace = true reth-engine-local.workspace = true +reth-tasks.workspace = true # currently need to enable this for workspace level reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } diff --git 
a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index cfa245e1de0..8c0f03bafd3 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,20 +1,17 @@ use crate::traits::PayloadEnvelopeExt; use alloy_primitives::B256; +use alloy_rpc_types_engine::{ForkchoiceState, PayloadStatusEnum}; use jsonrpsee::{ core::client::ClientT, http_client::{transport::HttpBackend, HttpClient}, }; -use reth::{ - api::{EngineTypes, PayloadBuilderAttributes}, - providers::CanonStateNotificationStream, - rpc::{ - api::EngineApiClient, - types::engine::{ForkchoiceState, PayloadStatusEnum}, - }, -}; use reth_chainspec::EthereumHardforks; +use reth_node_api::EngineTypes; use reth_node_builder::BuiltPayload; use reth_payload_builder::PayloadId; +use reth_payload_primitives::PayloadBuilderAttributes; +use reth_provider::CanonStateNotificationStream; +use reth_rpc_api::EngineApiClient; use reth_rpc_layer::AuthClientService; use std::{marker::PhantomData, sync::Arc}; @@ -83,7 +80,7 @@ impl EngineApiTestContext diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 7828f61c2af..45889a171c1 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -1,8 +1,7 @@ use futures_util::StreamExt; -use reth::api::BuiltPayload; use reth_payload_builder::{PayloadBuilderHandle, PayloadId}; use reth_payload_builder_primitives::{Events, PayloadBuilder}; -use reth_payload_primitives::{PayloadBuilderAttributes, PayloadTypes}; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadTypes}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 0006989d316..8399a482dfd 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -2,19 +2,14 @@ use alloy_consensus::TxEnvelope; use 
alloy_network::eip2718::Decodable2718; use alloy_primitives::{Bytes, B256}; use alloy_rlp::Encodable; -use reth::{ - builder::{rpc::RpcRegistry, FullNodeComponents}, - rpc::api::{ - eth::{ - helpers::{EthApiSpec, EthTransactions, TraceExt}, - EthApiTypes, - }, - DebugApiServer, - }, -}; use reth_chainspec::EthereumHardforks; -use reth_node_api::NodePrimitives; -use reth_node_builder::NodeTypes; +use reth_node_api::{FullNodeComponents, NodePrimitives}; +use reth_node_builder::{rpc::RpcRegistry, NodeTypes}; +use reth_rpc_api::DebugApiServer; +use reth_rpc_eth_api::{ + helpers::{EthApiSpec, EthTransactions, TraceExt}, + EthApiTypes, +}; #[allow(missing_debug_implementations)] pub struct RpcTestContext { diff --git a/crates/e2e-test-utils/src/traits.rs b/crates/e2e-test-utils/src/traits.rs index d14445370d4..6d9bf14dbc1 100644 --- a/crates/e2e-test-utils/src/traits.rs +++ b/crates/e2e-test-utils/src/traits.rs @@ -1,6 +1,7 @@ -use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV4; +use alloy_rpc_types_engine::{ + ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV3, +}; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; -use reth::rpc::types::engine::{ExecutionPayloadEnvelopeV3, ExecutionPayloadV3}; /// The execution payload envelope type. 
pub trait PayloadEnvelopeExt: Send + Sync + std::fmt::Debug { diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index f11eb357832..5f100f0a28d 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -32,6 +32,7 @@ reth-revm = { workspace = true, features = ["std"] } reth-beacon-consensus.workspace = true reth-trie-db.workspace = true reth-rpc-server-types.workspace = true +reth-tasks = { workspace = true, optional = true } # op-reth reth-optimism-payload-builder.workspace = true @@ -62,7 +63,6 @@ parking_lot.workspace = true serde_json.workspace = true # test-utils dependencies -reth = { workspace = true, optional = true } reth-e2e-test-utils = { workspace = true, optional = true } alloy-genesis = { workspace = true, optional = true } tokio = { workspace = true, optional = true } @@ -70,9 +70,12 @@ tokio = { workspace = true, optional = true } [dev-dependencies] reth-optimism-node = { workspace = true, features = ["test-utils"] } reth-db.workspace = true +reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } +reth-tasks.workspace = true + alloy-primitives.workspace = true op-alloy-consensus.workspace = true alloy-signer-local.workspace = true @@ -82,27 +85,28 @@ futures.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-optimism-evm/optimism", - "reth-optimism-payload-builder/optimism", - "reth-beacon-consensus/optimism", - "revm/optimism", - "reth-optimism-rpc/optimism", - "reth-engine-local/optimism", - "reth-optimism-consensus/optimism", - "reth-db/optimism", - "reth-optimism-node/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-optimism-evm/optimism", + "reth-optimism-payload-builder/optimism", + "reth-beacon-consensus/optimism", + "revm/optimism", + 
"reth-optimism-rpc/optimism", + "reth-engine-local/optimism", + "reth-optimism-consensus/optimism", + "reth-db/optimism", + "reth-optimism-node/optimism", + "reth-node-core/optimism" ] asm-keccak = [ - "reth-primitives/asm-keccak", - "reth/asm-keccak", - "alloy-primitives/asm-keccak", - "revm/asm-keccak", - "reth-optimism-node/asm-keccak", + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak", + "reth-optimism-node/asm-keccak", + "reth-node-core/asm-keccak" ] test-utils = [ - "reth", + "reth-tasks", "reth-e2e-test-utils", "alloy-genesis", "tokio", @@ -125,4 +129,4 @@ test-utils = [ reth-codec = [ "reth-primitives/reth-codec", "reth-optimism-primitives/reth-codec", -] \ No newline at end of file +] diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs index b54015fef0c..e70e3503198 100644 --- a/crates/optimism/node/src/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -1,12 +1,13 @@ use crate::{node::OpAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes}; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; -use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use alloy_rpc_types_engine::PayloadAttributes; use reth_e2e_test_utils::{ transaction::TransactionTestContext, wallet::Wallet, Adapter, NodeHelperType, }; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_tasks::TaskManager; use std::sync::Arc; use tokio::sync::Mutex; diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 3db4cfab869..90623d9e65d 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,6 +1,5 @@ use alloy_rpc_types_engine::PayloadStatusEnum; use futures::StreamExt; -use reth::blockchain_tree::error::BlockchainTreeError; use reth_optimism_node::utils::{advance_chain, setup}; use std::sync::Arc; use tokio::sync::Mutex; @@ -90,10 
+89,10 @@ async fn can_sync() -> eyre::Result<()> { canonical_payload_chain[tip_index - reorg_depth + 1].0.clone(), canonical_payload_chain[tip_index - reorg_depth + 1].1.clone(), PayloadStatusEnum::Invalid { - validation_error: BlockchainTreeError::PendingBlockIsFinalized { - last_finalized: (tip - reorg_depth) as u64 + 1, - } - .to_string(), + validation_error: format!( + "block number is lower than the last finalized block number {}", + (tip - reorg_depth) as u64 + 1 + ), }, ) .await; diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index 66aeaa295cb..35be3dfd3ee 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -4,7 +4,6 @@ use alloy_consensus::TxEip1559; use alloy_genesis::Genesis; use alloy_network::TxSignerSync; use alloy_primitives::{Address, ChainId, TxKind}; -use reth::{args::DatadirArgs, tasks::TaskManager}; use reth_chainspec::EthChainSpec; use reth_db::test_utils::create_test_rw_db_with_path; use reth_e2e_test_utils::{ @@ -14,6 +13,7 @@ use reth_node_api::{FullNodeTypes, NodeTypesWithEngine}; use reth_node_builder::{ components::ComponentsBuilder, EngineNodeLauncher, NodeBuilder, NodeConfig, }; +use reth_node_core::args::DatadirArgs; use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder}; use reth_optimism_node::{ args::RollupArgs, @@ -29,6 +29,7 @@ use reth_optimism_primitives::OpPrimitives; use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; use reth_primitives::{SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered}; use reth_provider::providers::BlockchainProvider2; +use reth_tasks::TaskManager; use reth_transaction_pool::pool::BestPayloadTransactions; use std::sync::Arc; use tokio::sync::Mutex; From eac02d94588fe4ee3cb4acfd259a8b51f9f89b79 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 28 Nov 2024 14:01:57 +0100 Subject: [PATCH 747/970] fix(trie): always create 
empty storage multiproofs (#12915) --- crates/trie/common/src/proofs.rs | 7 +++++-- crates/trie/trie/src/proof.rs | 7 ++++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 1bc1a1a082f..78659116c3e 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -2,7 +2,11 @@ use crate::{Nibbles, TrieAccount}; use alloy_consensus::constants::KECCAK_EMPTY; -use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap}, + Address, Bytes, B256, U256, +}; use alloy_rlp::{encode_fixed_size, Decodable, EMPTY_STRING_CODE}; use alloy_trie::{ nodes::TrieNode, @@ -11,7 +15,6 @@ use alloy_trie::{ }; use itertools::Itertools; use reth_primitives_traits::Account; -use std::collections::{hash_map, HashMap}; /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index 895a3de153d..34315416cb8 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -103,7 +103,10 @@ where let retainer = targets.keys().map(Nibbles::unpack).collect(); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - let mut storages = HashMap::default(); + // Initialize all storage multiproofs as empty. + // Storage multiproofs for non empty tries will be overwritten if necessary. + let mut storages: HashMap<_, _> = + targets.keys().map(|key| (*key, StorageMultiProof::empty())).collect(); let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::new(walker, hashed_account_cursor); while let Some(account_node) = account_node_iter.try_next()? 
{ @@ -132,6 +135,8 @@ where account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + + // Overwrite storage multiproof. storages.insert(hashed_address, storage_multiproof); } } From 8589503a3b4fd13c62d6df846f92e06f4c4806d6 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 28 Nov 2024 13:48:05 +0000 Subject: [PATCH 748/970] feat(engine): use `SparseStateTrie::update_account` in state root task (#12960) --- crates/engine/tree/src/tree/root.rs | 28 +++++----------------------- 1 file changed, 5 insertions(+), 23 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 17419bc6a4a..7d25461c50a 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,13 +1,12 @@ //! State root task related functionality. -use alloy_primitives::map::{FbHashMap, HashMap, HashSet}; -use alloy_rlp::{BufMut, Encodable}; +use alloy_primitives::map::{HashMap, HashSet}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, }; use reth_trie::{ proof::Proof, updates::TrieUpdates, HashedPostState, HashedStorage, MultiProof, Nibbles, - TrieAccount, TrieInput, EMPTY_ROOT_HASH, + TrieInput, }; use reth_trie_db::DatabaseProof; use reth_trie_parallel::root::ParallelStateRootError; @@ -489,11 +488,9 @@ fn update_sparse_trie( trie.reveal_multiproof(targets, multiproof)?; // Update storage slots with new values and calculate storage roots. 
- let mut storage_roots = FbHashMap::default(); for (address, storage) in state.storages { if storage.wiped { trie.wipe_storage(address)?; - storage_roots.insert(address, EMPTY_ROOT_HASH); } for (slot, value) in storage.storage { @@ -510,27 +507,12 @@ fn update_sparse_trie( } } - storage_roots.insert(address, trie.storage_root(address).unwrap()); + trie.storage_root(address).unwrap(); } - // Update accounts with new values and include updated storage roots + // Update accounts with new values for (address, account) in state.accounts { - let account_nibbles = Nibbles::unpack(address); - - if let Some(account) = account { - let storage_root = storage_roots - .remove(&address) - .map(Some) - .unwrap_or_else(|| trie.storage_root(address)) - .unwrap_or(EMPTY_ROOT_HASH); - - let mut encoded = Vec::with_capacity(128); - TrieAccount::from((account, storage_root)).encode(&mut encoded as &mut dyn BufMut); - trie.update_account_leaf(account_nibbles, encoded)?; - } else { - // TODO: handle blinded node error - trie.remove_account_leaf(&account_nibbles)?; - } + trie.update_account(address, account.unwrap_or_default())?; } trie.calculate_below_level(SPARSE_TRIE_INCREMENTAL_LEVEL); From 1210fd938d05df193858d63ae1ac2771d396bbea Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 28 Nov 2024 15:06:32 +0100 Subject: [PATCH 749/970] chore: make ethereum-node deps leaner (#12963) --- Cargo.lock | 6 +++++- crates/ethereum/node/Cargo.toml | 18 ++++++++++++------ crates/ethereum/node/tests/e2e/blobs.rs | 16 ++++++---------- crates/ethereum/node/tests/e2e/dev.rs | 6 +++--- crates/ethereum/node/tests/e2e/eth.rs | 8 +++----- crates/ethereum/node/tests/e2e/p2p.rs | 2 +- crates/ethereum/node/tests/e2e/rpc.rs | 11 ++++------- crates/ethereum/node/tests/e2e/utils.rs | 2 +- 8 files changed, 35 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d751ff05aad..b892592ded3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8106,12 +8106,13 @@ dependencies = [ "alloy-primitives", 
"alloy-provider", "alloy-rpc-types-beacon", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "alloy-signer", "alloy-sol-types", "eyre", "futures", "rand 0.8.5", - "reth", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", @@ -8126,11 +8127,14 @@ dependencies = [ "reth-network", "reth-node-api", "reth-node-builder", + "reth-node-core", "reth-payload-builder", + "reth-payload-primitives", "reth-primitives", "reth-provider", "reth-revm", "reth-rpc", + "reth-rpc-eth-api", "reth-tasks", "reth-tracing", "reth-transaction-pool", diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 6ecd5437bfb..e6f47483b58 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -41,26 +41,32 @@ revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } eyre.workspace = true [dev-dependencies] -reth.workspace = true reth-chainspec.workspace = true reth-db.workspace = true reth-exex.workspace = true reth-node-api.workspace = true +reth-node-core.workspace = true +reth-payload-primitives.workspace = true reth-e2e-test-utils.workspace = true +reth-rpc-eth-api.workspace = true reth-tasks.workspace = true -futures.workspace = true + alloy-primitives.workspace = true -alloy-genesis.workspace = true -tokio.workspace = true -serde_json.workspace = true alloy-consensus.workspace = true alloy-provider.workspace = true -rand.workspace = true +alloy-genesis.workspace = true alloy-signer.workspace = true alloy-eips.workspace = true alloy-sol-types.workspace = true alloy-contract.workspace = true alloy-rpc-types-beacon.workspace = true +alloy-rpc-types-engine.workspace = true +alloy-rpc-types-eth.workspace = true + +futures.workspace = true +tokio.workspace = true +serde_json.workspace = true +rand.workspace = true [features] default = [] diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index 976727bc815..11181051450 100644 --- 
a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -1,21 +1,17 @@ -use std::sync::Arc; - +use crate::utils::eth_payload_attributes; use alloy_consensus::constants::MAINNET_GENESIS_HASH; use alloy_genesis::Genesis; -use reth::{ - args::RpcServerArgs, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - rpc::types::engine::PayloadStatusEnum, - tasks::TaskManager, -}; +use alloy_rpc_types_engine::PayloadStatusEnum; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; +use reth_tasks::TaskManager; use reth_transaction_pool::TransactionPool; - -use crate::utils::eth_payload_attributes; +use std::sync::Arc; #[tokio::test] async fn can_handle_blobs() -> eyre::Result<()> { diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index 6d09612c4e1..325575998c2 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,18 +1,18 @@ -use std::sync::Arc; - use alloy_eips::eip2718::Encodable2718; use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; -use reth::{args::DevArgs, rpc::api::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; use reth_node_api::{FullNodeComponents, FullNodePrimitives, NodeTypes}; use reth_node_builder::{ rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, }; +use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; use reth_provider::{providers::BlockchainProvider2, CanonStateSubscriptions}; +use reth_rpc_eth_api::helpers::EthTransactions; use reth_tasks::TaskManager; +use std::sync::Arc; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { diff --git 
a/crates/ethereum/node/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs index cb7517c0c93..a91ccf6e391 100644 --- a/crates/ethereum/node/tests/e2e/eth.rs +++ b/crates/ethereum/node/tests/e2e/eth.rs @@ -1,15 +1,13 @@ use crate::utils::eth_payload_attributes; use alloy_genesis::Genesis; -use reth::{ - args::RpcServerArgs, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - tasks::TaskManager, -}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, setup, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; +use reth_tasks::TaskManager; use std::sync::Arc; #[tokio::test] diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index 5b2a6654fbb..f8680f47ae3 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -7,9 +7,9 @@ use alloy_provider::{ }, Provider, ProviderBuilder, SendableTx, }; +use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; -use reth::rpc::types::TransactionRequest; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{setup, setup_engine, transaction::TransactionTestContext}; use reth_node_ethereum::EthereumNode; diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index b1a11b1b5eb..54bfbc8205e 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -6,17 +6,14 @@ use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, SignedBidSubmissionV3, SignedBidSubmissionV4, }; +use alloy_rpc_types_engine::BlobsBundleV1; +use alloy_rpc_types_eth::TransactionRequest; use rand::{rngs::StdRng, Rng, SeedableRng}; -use reth::{ - 
payload::BuiltPayload, - rpc::{ - compat::engine::payload::block_to_payload_v3, - types::{engine::BlobsBundleV1, TransactionRequest}, - }, -}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::setup_engine; +use reth_node_core::rpc::compat::engine::payload::block_to_payload_v3; use reth_node_ethereum::EthereumNode; +use reth_payload_primitives::BuiltPayload; use std::sync::Arc; alloy_sol_types::sol! { diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index 6e534f5dc0e..c3743de185f 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -1,5 +1,5 @@ use alloy_primitives::{Address, B256}; -use reth::rpc::types::engine::PayloadAttributes; +use alloy_rpc_types_engine::PayloadAttributes; use reth_payload_builder::EthPayloadBuilderAttributes; /// Helper function to create a new eth payload attributes From 8c86d63a428ad3a1602f7b342aac8bd57d39168a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 28 Nov 2024 15:18:38 +0100 Subject: [PATCH 750/970] perf(trie): avoid update reallocation & track wiped (#12929) --- crates/trie/sparse/src/state.rs | 20 +++++++++----------- crates/trie/sparse/src/trie.rs | 14 ++++++++++++-- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index aad74ac0550..1cd13273e64 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -22,8 +22,6 @@ pub struct SparseStateTrie { storages: HashMap, /// Collection of revealed account and storage keys. revealed: HashMap>, - /// Collection of addresses that had their storage tries wiped. - wiped_storages: HashSet, /// Flag indicating whether trie updates should be retained. retain_updates: bool, /// Reusable buffer for RLP encoding of trie accounts. 
@@ -36,7 +34,6 @@ impl Default for SparseStateTrie { state: Default::default(), storages: Default::default(), revealed: Default::default(), - wiped_storages: Default::default(), retain_updates: false, account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), } @@ -275,9 +272,10 @@ impl SparseStateTrie { /// Wipe the storage trie at the provided address. pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> { - let Some(trie) = self.storages.get_mut(&address) else { return Ok(()) }; - self.wiped_storages.insert(address); - trie.wipe().map_err(Into::into) + if let Some(trie) = self.storages.get_mut(&address) { + trie.wipe()?; + } + Ok(()) } /// Calculates the hashes of the nodes below the provided level. @@ -302,8 +300,8 @@ impl SparseStateTrie { self.state.as_revealed_mut().map(|state| { let updates = state.take_updates(); TrieUpdates { - account_nodes: HashMap::from_iter(updates.updated_nodes), - removed_nodes: HashSet::from_iter(updates.removed_nodes), + account_nodes: updates.updated_nodes, + removed_nodes: updates.removed_nodes, storage_tries: self .storages .iter_mut() @@ -311,9 +309,9 @@ impl SparseStateTrie { let trie = trie.as_revealed_mut().unwrap(); let updates = trie.take_updates(); let updates = StorageTrieUpdates { - is_deleted: self.wiped_storages.contains(address), - storage_nodes: HashMap::from_iter(updates.updated_nodes), - removed_nodes: HashSet::from_iter(updates.removed_nodes), + is_deleted: updates.wiped, + storage_nodes: updates.updated_nodes, + removed_nodes: updates.removed_nodes, }; (*address, updates) }) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 2ecc3984445..97446680df4 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -111,6 +111,7 @@ pub struct RevealedSparseTrie { prefix_set: PrefixSetMut, /// Reusable buffer for RLP encoding of nodes. rlp_buf: Vec, + /// Retained trie updates. 
updates: Option, } @@ -607,8 +608,10 @@ impl RevealedSparseTrie { /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. pub fn wipe(&mut self) { + let updates_retained = self.updates.is_some(); *self = Self::default(); self.prefix_set = PrefixSetMut::all(); + self.updates = updates_retained.then(SparseTrieUpdates::wiped); } /// Return the root of the sparse trie. @@ -1030,12 +1033,18 @@ impl RlpNodeBuffers { pub struct SparseTrieUpdates { pub(crate) updated_nodes: HashMap, pub(crate) removed_nodes: HashSet, + pub(crate) wiped: bool, +} + +impl SparseTrieUpdates { + /// Create new wiped sparse trie updates. + pub fn wiped() -> Self { + Self { wiped: true, ..Default::default() } + } } #[cfg(test)] mod tests { - use std::collections::BTreeMap; - use super::*; use alloy_primitives::{map::HashSet, U256}; use alloy_rlp::Encodable; @@ -1057,6 +1066,7 @@ mod tests { proof::{ProofNodes, ProofRetainer}, HashBuilder, }; + use std::collections::BTreeMap; /// Pad nibbles to the length of a B256 hash with zeros on the left. 
fn pad_nibbles_left(nibbles: Nibbles) -> Nibbles { From e1b25c80c2536eca2043706e5628d0bc7166f710 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 28 Nov 2024 16:52:25 +0100 Subject: [PATCH 751/970] perf(tree): keep storage trie around for updates (#12971) --- crates/engine/tree/src/tree/root.rs | 17 ++++++++--------- crates/trie/sparse/src/state.rs | 2 +- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 7d25461c50a..09e48c4e6b0 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -10,7 +10,7 @@ use reth_trie::{ }; use reth_trie_db::DatabaseProof; use reth_trie_parallel::root::ParallelStateRootError; -use reth_trie_sparse::{SparseStateTrie, SparseStateTrieResult}; +use reth_trie_sparse::{SparseStateTrie, SparseStateTrieResult, SparseTrieError}; use revm_primitives::{keccak256, EvmState, B256}; use std::{ collections::BTreeMap, @@ -489,25 +489,24 @@ fn update_sparse_trie( // Update storage slots with new values and calculate storage roots. 
for (address, storage) in state.storages { + let storage_trie = trie.storage_trie_mut(&address).ok_or(SparseTrieError::Blind)?; + if storage.wiped { - trie.wipe_storage(address)?; + storage_trie.wipe(); } for (slot, value) in storage.storage { let slot_nibbles = Nibbles::unpack(slot); if value.is_zero() { // TODO: handle blinded node error - trie.remove_storage_leaf(address, &slot_nibbles)?; + storage_trie.remove_leaf(&slot_nibbles)?; } else { - trie.update_storage_leaf( - address, - slot_nibbles, - alloy_rlp::encode_fixed_size(&value).to_vec(), - )?; + storage_trie + .update_leaf(slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?; } } - trie.storage_root(address).unwrap(); + storage_trie.root(); } // Update accounts with new values diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 1cd13273e64..549a86733f8 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -62,7 +62,7 @@ impl SparseStateTrie { self.revealed.get(account).is_some_and(|slots| slots.contains(slot)) } - /// Returned mutable reference to storage sparse trie if it was revealed. + /// Returns mutable reference to storage sparse trie if it was revealed. 
pub fn storage_trie_mut(&mut self, account: &B256) -> Option<&mut RevealedSparseTrie> { self.storages.get_mut(account).and_then(|e| e.as_revealed_mut()) } From 55b758ac108a693f5cb237081331735649ac3100 Mon Sep 17 00:00:00 2001 From: Hoa Nguyen Date: Thu, 28 Nov 2024 22:58:38 +0700 Subject: [PATCH 752/970] feat: generic data primitives EngineApiEvent (#12964) --- crates/engine/tree/src/engine.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 005d4e54399..947d025e9ab 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -10,7 +10,7 @@ use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chain_state::ExecutedBlock; use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; -use reth_primitives::SealedBlockWithSenders; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; use std::{ collections::HashSet, fmt::Display, @@ -270,25 +270,25 @@ impl From> for FromEngine { /// Event from the consensus engine. // TODO(mattsse): find a more appropriate name for this variant, consider phasing it out. - BeaconConsensus(BeaconConsensusEngineEvent), + BeaconConsensus(BeaconConsensusEngineEvent), /// Backfill action is needed. BackfillAction(BackfillAction), /// Block download is needed. Download(DownloadRequest), } -impl EngineApiEvent { +impl EngineApiEvent { /// Returns `true` if the event is a backfill action. 
pub const fn is_backfill_action(&self) -> bool { matches!(self, Self::BackfillAction(_)) } } -impl From for EngineApiEvent { - fn from(event: BeaconConsensusEngineEvent) -> Self { +impl From> for EngineApiEvent { + fn from(event: BeaconConsensusEngineEvent) -> Self { Self::BeaconConsensus(event) } } From cca6372e8741405af6d7b6f0476411e265a0e7bf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 28 Nov 2024 17:09:54 +0100 Subject: [PATCH 753/970] feat: integrate type specific engine validator in tree service (#12952) --- Cargo.lock | 4 +-- crates/e2e-test-utils/src/lib.rs | 6 +++- crates/engine/local/Cargo.toml | 2 +- crates/engine/local/src/service.rs | 10 +++--- crates/engine/service/Cargo.toml | 1 - crates/engine/service/src/service.rs | 19 +++++----- crates/engine/tree/Cargo.toml | 3 +- crates/engine/tree/src/tree/mod.rs | 36 +++++++++---------- crates/node/builder/src/launch/engine.rs | 46 ++++++++++++++---------- crates/node/builder/src/launch/mod.rs | 4 +-- crates/node/builder/src/rpc.rs | 4 +-- 11 files changed, 72 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b892592ded3..0b4111f0aef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7183,10 +7183,10 @@ dependencies = [ "reth-engine-tree", "reth-ethereum-engine-primitives", "reth-evm", + "reth-node-types", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", - "reth-payload-validator", "reth-provider", "reth-prune", "reth-rpc-types-compat", @@ -7235,7 +7235,6 @@ dependencies = [ "reth-network-p2p", "reth-node-types", "reth-payload-builder", - "reth-payload-validator", "reth-primitives", "reth-provider", "reth-prune", @@ -7279,7 +7278,6 @@ dependencies = [ "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", - "reth-payload-validator", "reth-primitives", "reth-provider", "reth-prune", diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 25683389864..15065377fab 100644 --- 
a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -5,6 +5,7 @@ use reth_chainspec::EthChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_network_api::test_utils::PeersHandleProvider; +use reth_node_api::EngineValidator; use reth_node_builder::{ components::NodeComponentsBuilder, rpc::{EngineValidatorAddOn, RethRpcAddOns}, @@ -131,7 +132,10 @@ where >, >, N::AddOns: RethRpcAddOns>>> - + EngineValidatorAddOn>>>, + + EngineValidatorAddOn< + Adapter>>, + Validator: EngineValidator, + >, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index a1b74d13fee..d8a66e65e04 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -16,12 +16,12 @@ reth-consensus.workspace = true reth-engine-primitives.workspace = true reth-engine-service.workspace = true reth-engine-tree.workspace = true +reth-node-types.workspace = true reth-evm.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-rpc-types-compat.workspace = true diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index e2b5e056d02..5838cb89116 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -19,7 +19,7 @@ use futures_util::{Stream, StreamExt}; use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; use reth_consensus::Consensus; -use reth_engine_primitives::BeaconEngineMessage; +use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use 
reth_engine_service::service::EngineMessageStream; use reth_engine_tree::{ chain::{ChainEvent, HandlerEvent}, @@ -31,9 +31,9 @@ use reth_engine_tree::{ tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; use reth_evm::execute::BlockExecutorProvider; +use reth_node_types::BlockTy; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes}; -use reth_payload_validator::ExecutionPayloadValidator; use reth_provider::{providers::BlockchainProvider2, ChainSpecProvider, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::MetricEventsSender; @@ -63,13 +63,14 @@ where { /// Constructor for [`LocalEngineService`]. #[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new( consensus: Arc, executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, + payload_validator: V, tree_config: TreeConfig, invalid_block_hook: Box, sync_metrics_tx: MetricEventsSender, @@ -80,6 +81,7 @@ where ) -> Self where B: PayloadAttributesBuilder<::PayloadAttributes>, + V: EngineValidator>, { let chain_spec = provider.chain_spec(); let engine_kind = @@ -87,8 +89,6 @@ where let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - let payload_validator = ExecutionPayloadValidator::new(chain_spec); - let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index 8359c453dcc..8854fd18879 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -18,7 +18,6 @@ reth-engine-tree.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true reth-payload-builder.workspace = true -reth-payload-validator.workspace = true reth-provider.workspace = true 
reth-prune.workspace = true reth-stages-api.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 44d145c9c0b..a54a2ef9e1a 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -3,7 +3,7 @@ use pin_project::pin_project; use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; use reth_consensus::Consensus; -use reth_engine_primitives::BeaconEngineMessage; +use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, @@ -17,9 +17,8 @@ pub use reth_engine_tree::{ }; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::EthBlockClient; -use reth_node_types::NodeTypesWithEngine; +use reth_node_types::{BlockTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_validator::ExecutionPayloadValidator; use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::{MetricEventsSender, Pipeline}; @@ -65,7 +64,7 @@ where { /// Constructor for `EngineService`. 
#[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new( consensus: Arc, executor_factory: E, chain_spec: Arc, @@ -77,10 +76,14 @@ where blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, + payload_validator: V, tree_config: TreeConfig, invalid_block_hook: Box, sync_metrics_tx: MetricEventsSender, - ) -> Self { + ) -> Self + where + V: EngineValidator>, + { let engine_kind = if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; @@ -88,7 +91,6 @@ where let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - let payload_validator = ExecutionPayloadValidator::new(chain_spec); let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); @@ -148,7 +150,7 @@ mod tests { use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_engine_primitives::BeaconEngineMessage; use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; - use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; @@ -186,7 +188,7 @@ mod tests { let blockchain_db = BlockchainProvider2::with_latest(provider_factory.clone(), SealedHeader::default()) .unwrap(); - + let engine_payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs); let pruner = Pruner::new_with_factory(provider_factory.clone(), vec![], 0, 0, None, rx); @@ -204,6 +206,7 @@ mod tests { blockchain_db, pruner, PayloadBuilderHandle::new(tx), + engine_payload_validator, TreeConfig::default(), Box::new(NoopInvalidBlockHook::default()), sync_metrics_tx, diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 47e5c2b04fe..680b6933ebe 100644 --- 
a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -16,7 +16,7 @@ reth-beacon-consensus.workspace = true reth-blockchain-tree-api.workspace = true reth-blockchain-tree.workspace = true reth-chain-state.workspace = true -reth-chainspec.workspace = true +reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true @@ -25,7 +25,6 @@ reth-network-p2p.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 44270cbfdf4..5dc8039afe6 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1,8 +1,9 @@ use crate::{ backfill::{BackfillAction, BackfillSyncState}, chain::FromOrchestrator, - engine::{DownloadRequest, EngineApiEvent, FromEngine}, + engine::{DownloadRequest, EngineApiEvent, EngineApiKind, EngineApiRequest, FromEngine}, persistence::PersistenceHandle, + tree::metrics::EngineApiMetrics, }; use alloy_consensus::{BlockHeader, Header}; use alloy_eips::BlockNumHash; @@ -24,18 +25,16 @@ use reth_blockchain_tree::{ use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; -use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, PostExecutionInput}; use reth_engine_primitives::{ BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, - ForkchoiceStateTracker, OnForkChoiceUpdated, + EngineValidator, ForkchoiceStateTracker, OnForkChoiceUpdated, }; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; use 
reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; -use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ Block, EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockWithSenders, SealedHeader, @@ -72,10 +71,6 @@ pub mod config; mod invalid_block_hook; mod metrics; mod persistence_state; -use crate::{ - engine::{EngineApiKind, EngineApiRequest}, - tree::metrics::EngineApiMetrics, -}; pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; @@ -472,11 +467,14 @@ pub enum TreeAction { /// /// This type is responsible for processing engine API requests, maintaining the canonical state and /// emitting events. -pub struct EngineApiTreeHandler { +pub struct EngineApiTreeHandler +where + T: EngineTypes, +{ provider: P, executor_provider: E, consensus: Arc, - payload_validator: ExecutionPayloadValidator, + payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. state: EngineApiTreeState, /// The half for sending messages to the engine. @@ -516,8 +514,8 @@ pub struct EngineApiTreeHandler { _primtives: PhantomData, } -impl std::fmt::Debug - for EngineApiTreeHandler +impl std::fmt::Debug + for EngineApiTreeHandler { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EngineApiTreeHandler") @@ -540,7 +538,7 @@ impl std::fmt::Debug } } -impl EngineApiTreeHandler +impl EngineApiTreeHandler where N: NodePrimitives, P: DatabaseProviderFactory @@ -552,7 +550,7 @@ where

::Provider: BlockReader, E: BlockExecutorProvider, T: EngineTypes, - Spec: Send + Sync + EthereumHardforks + 'static, + V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. #[allow(clippy::too_many_arguments)] @@ -560,7 +558,7 @@ where provider: P, executor_provider: E, consensus: Arc, - payload_validator: ExecutionPayloadValidator, + payload_validator: V, outgoing: UnboundedSender, state: EngineApiTreeState, canonical_in_memory_state: CanonicalInMemoryState, @@ -609,7 +607,7 @@ where provider: P, executor_provider: E, consensus: Arc, - payload_validator: ExecutionPayloadValidator, + payload_validator: V, persistence: PersistenceHandle, payload_builder: PayloadBuilderHandle, canonical_in_memory_state: CanonicalInMemoryState, @@ -2629,7 +2627,7 @@ mod tests { use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; use reth_engine_primitives::ForkchoiceStatus; - use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::test_utils::MockExecutorProvider; use reth_primitives::{BlockExt, EthPrimitives}; use reth_provider::test_utils::MockEthProvider; @@ -2701,7 +2699,7 @@ mod tests { MockEthProvider, MockExecutorProvider, EthEngineTypes, - ChainSpec, + EthereumEngineValidator, >, to_tree_tx: Sender>>, from_tree_rx: UnboundedReceiver, @@ -2736,7 +2734,7 @@ mod tests { let provider = MockEthProvider::default(); let executor_provider = MockExecutorProvider::default(); - let payload_validator = ExecutionPayloadValidator::new(chain_spec.clone()); + let payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (from_tree_tx, from_tree_rx) = unbounded_channel(); diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 430ca31a5b1..b1141314d10 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -19,8 
+19,8 @@ use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ - BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadBuilder, - PayloadTypes, + BlockTy, BuiltPayload, EngineValidator, FullNodeTypes, NodeTypesWithEngine, + PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -75,7 +75,14 @@ where T: FullNodeTypes>, CB: NodeComponentsBuilder, AO: RethRpcAddOns> - + EngineValidatorAddOn>, + + EngineValidatorAddOn< + NodeAdapter, + Validator: EngineValidator< + ::Engine, + Block = BlockTy, + >, + >, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, @@ -196,10 +203,24 @@ where pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); } let pruner = pruner_builder.build_with_provider_factory(ctx.provider_factory().clone()); - let pruner_events = pruner.events(); info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); + let event_sender = EventSender::default(); + let beacon_engine_handle = + BeaconConsensusEngineHandle::new(consensus_engine_tx.clone(), event_sender.clone()); + + // extract the jwt secret from the args if possible + let jwt_secret = ctx.auth_jwt_secret()?; + + let add_ons_ctx = AddOnsContext { + node: ctx.node_adapter().clone(), + config: ctx.node_config(), + beacon_engine_handle: beacon_engine_handle.clone(), + jwt_secret, + }; + let engine_payload_validator = add_ons.engine_validator(&add_ons_ctx).await?; + let mut engine_service = if ctx.is_dev() { let eth_service = LocalEngineService::new( ctx.consensus(), @@ -208,6 +229,7 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), + engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), @@ -231,6 +253,7 @@ where 
ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), + engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), @@ -239,11 +262,6 @@ where Either::Right(eth_service) }; - let event_sender = EventSender::default(); - - let beacon_engine_handle = - BeaconConsensusEngineHandle::new(consensus_engine_tx, event_sender.clone()); - info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( @@ -269,16 +287,6 @@ where ), ); - // extract the jwt secret from the args if possible - let jwt_secret = ctx.auth_jwt_secret()?; - - let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter().clone(), - config: ctx.node_config(), - beacon_engine_handle, - jwt_secret, - }; - let RpcHandle { rpc_server_handles, rpc_registry } = add_ons.launch_add_ons(add_ons_ctx).await?; diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 627145d2df7..9f2c027f76b 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -70,7 +70,7 @@ pub trait LaunchNode { type Node; /// Create and return a new node asynchronously. - fn launch_node(self, target: Target) -> impl Future> + Send; + fn launch_node(self, target: Target) -> impl Future>; } impl LaunchNode for F @@ -80,7 +80,7 @@ where { type Node = Node; - fn launch_node(self, target: Target) -> impl Future> + Send { + fn launch_node(self, target: Target) -> impl Future> { self(target) } } diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 2eae77f8d83..55313f3e989 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -601,7 +601,7 @@ where } /// A type that knows how to build the engine validator. -pub trait EngineValidatorBuilder: Send + Clone { +pub trait EngineValidatorBuilder: Send + Sync + Clone { /// The consensus implementation to build. 
type Validator: EngineValidator<::Engine>; @@ -617,7 +617,7 @@ where Node: FullNodeComponents, Validator: EngineValidator<::Engine> + Clone + Unpin + 'static, - F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Clone, + F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Sync + Clone, Fut: Future> + Send, { type Validator = Validator; From 9f20ebc29ac6bf6134a03634aeeaf869d77fec7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Thu, 28 Nov 2024 17:23:27 +0100 Subject: [PATCH 754/970] refactor(tx-pool): add enum `InvalidKind` to `mark_invalid` (#12845) --- crates/ethereum/payload/src/lib.rs | 26 ++++++--- .../rpc-eth-api/src/helpers/pending_block.rs | 38 ++++++++++--- crates/transaction-pool/Cargo.toml | 1 - crates/transaction-pool/src/pool/best.rs | 54 +++++++++++++------ crates/transaction-pool/src/traits.rs | 8 +-- 5 files changed, 94 insertions(+), 33 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 49065ec0d8a..43bb0450488 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -27,13 +27,13 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ proofs::{self}, - Block, BlockBody, BlockExt, EthereumHardforks, Receipt, + Block, BlockBody, BlockExt, EthereumHardforks, InvalidTransactionError, Receipt, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactions, BestTransactionsAttributes, TransactionPool, - ValidPoolTransaction, + error::InvalidPoolTransactionError, noop::NoopTransactionPool, BestTransactions, + BestTransactionsAttributes, TransactionPool, ValidPoolTransaction, }; use reth_trie::HashedPostState; use revm::{ @@ -228,7 +228,10 @@ where // we can't fit this transaction into the block, 
so we need to mark it as invalid // which also removes all dependent transaction from the iterator before we can // continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit(pool_tx.gas_limit(), block_gas_limit), + ); continue } @@ -250,7 +253,13 @@ where // the iterator. This is similar to the gas limit condition // for regular transactions above. trace!(target: "payload_builder", tx=?tx.hash, ?sum_blob_gas_used, ?tx_blob_gas, "skipping blob transaction because it would exceed the max data gas per block"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + tx_blob_gas, + MAX_DATA_GAS_PER_BLOCK, + ), + ); continue } } @@ -270,7 +279,12 @@ where // if the transaction is invalid, we can skip it and all of its // descendants trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); } continue diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 36ba2c1e84e..e1cd8f5c3c2 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -17,8 +17,8 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - proofs::calculate_transaction_root, Block, BlockBody, BlockExt, Receipt, - SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, + proofs::calculate_transaction_root, Block, BlockBody, BlockExt, InvalidTransactionError, + Receipt, SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, @@ -32,7 +32,9 @@ use reth_revm::{ }, }; use 
reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; +use reth_transaction_pool::{ + error::InvalidPoolTransactionError, BestTransactionsAttributes, TransactionPool, +}; use reth_trie::HashedPostState; use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; use std::time::{Duration, Instant}; @@ -292,7 +294,13 @@ pub trait LoadPendingBlock: // we can't fit this transaction into the block, so we need to mark it as invalid // which also removes all dependent transaction from the iterator before we can // continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + pool_tx.gas_limit(), + block_gas_limit, + ), + ); continue } @@ -300,7 +308,12 @@ pub trait LoadPendingBlock: // we don't want to leak any state changes made by private transactions, so we mark // them as invalid here which removes all dependent transactions from the iterator // before we can continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); continue } @@ -316,7 +329,13 @@ pub trait LoadPendingBlock: // invalid, which removes its dependent transactions from // the iterator. This is similar to the gas limit condition // for regular transactions above. 
- best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + tx_blob_gas, + MAX_DATA_GAS_PER_BLOCK, + ), + ); continue } } @@ -340,7 +359,12 @@ pub trait LoadPendingBlock: } else { // if the transaction is invalid, we can skip it and all of its // descendants - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); } continue } diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 7c0f3476559..21463318816 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -52,7 +52,6 @@ bitflags.workspace = true auto_impl.workspace = true smallvec.workspace = true - # testing rand = { workspace = true, optional = true } paste = { workspace = true, optional = true } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 171faccf7c2..a4c91aae726 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,4 +1,5 @@ use crate::{ + error::{Eip4844PoolTransactionError, InvalidPoolTransactionError}, identifier::{SenderId, TransactionId}, pool::pending::PendingTransaction, PoolTransaction, TransactionOrdering, ValidPoolTransaction, @@ -6,7 +7,7 @@ use crate::{ use alloy_primitives::Address; use core::fmt; use reth_payload_util::PayloadTransactions; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{InvalidTransactionError, TransactionSignedEcRecovered}; use std::{ collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, sync::Arc, @@ -27,8 +28,8 @@ pub(crate) struct BestTransactionsWithFees { } impl crate::traits::BestTransactions for BestTransactionsWithFees { - fn mark_invalid(&mut self, tx: &Self::Item) { - BestTransactions::mark_invalid(&mut self.best, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: 
InvalidPoolTransactionError) { + BestTransactions::mark_invalid(&mut self.best, tx, kind) } fn no_updates(&mut self) { @@ -60,7 +61,11 @@ impl Iterator for BestTransactionsWithFees { { return Some(best); } - crate::traits::BestTransactions::mark_invalid(self, &best); + crate::traits::BestTransactions::mark_invalid( + self, + &best, + InvalidPoolTransactionError::Underpriced, + ); } } } @@ -95,7 +100,11 @@ pub(crate) struct BestTransactions { impl BestTransactions { /// Mark the transaction and it's descendants as invalid. - pub(crate) fn mark_invalid(&mut self, tx: &Arc>) { + pub(crate) fn mark_invalid( + &mut self, + tx: &Arc>, + _kind: InvalidPoolTransactionError, + ) { self.invalid.insert(tx.sender_id()); } @@ -154,8 +163,8 @@ impl BestTransactions { } impl crate::traits::BestTransactions for BestTransactions { - fn mark_invalid(&mut self, tx: &Self::Item) { - Self::mark_invalid(self, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + Self::mark_invalid(self, tx, kind) } fn no_updates(&mut self) { @@ -199,7 +208,12 @@ impl Iterator for BestTransactions { if self.skip_blobs && best.transaction.transaction.is_eip4844() { // blobs should be skipped, marking them as invalid will ensure that no dependent // transactions are returned - self.mark_invalid(&best.transaction) + self.mark_invalid( + &best.transaction, + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::NoEip4844Blobs, + ), + ) } else { return Some(best.transaction) } @@ -280,7 +294,10 @@ where if (self.predicate)(&best) { return Some(best) } - self.best.mark_invalid(&best); + self.best.mark_invalid( + &best, + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); } } } @@ -290,8 +307,8 @@ where I: crate::traits::BestTransactions, P: FnMut(&::Item) -> bool + Send, { - fn mark_invalid(&mut self, tx: &Self::Item) { - crate::traits::BestTransactions::mark_invalid(&mut self.best, tx) + fn mark_invalid(&mut self, tx: 
&Self::Item, kind: InvalidPoolTransactionError) { + crate::traits::BestTransactions::mark_invalid(&mut self.best, tx, kind) } fn no_updates(&mut self) { @@ -379,8 +396,8 @@ where I: crate::traits::BestTransactions>>, T: PoolTransaction, { - fn mark_invalid(&mut self, tx: &Self::Item) { - self.inner.mark_invalid(tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + self.inner.mark_invalid(tx, kind) } fn no_updates(&mut self) { @@ -450,7 +467,10 @@ mod tests { // mark the first tx as invalid let invalid = best.independent.iter().next().unwrap(); - best.mark_invalid(&invalid.transaction.clone()); + best.mark_invalid( + &invalid.transaction.clone(), + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); // iterator is empty assert!(best.next().is_none()); @@ -475,7 +495,11 @@ mod tests { > = Box::new(pool.best()); let tx = Iterator::next(&mut best).unwrap(); - crate::traits::BestTransactions::mark_invalid(&mut *best, &tx); + crate::traits::BestTransactions::mark_invalid( + &mut *best, + &tx, + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); assert!(Iterator::next(&mut best).is_none()); } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 8945d713976..b5fc0db5204 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -806,7 +806,7 @@ pub trait BestTransactions: Iterator + Send { /// Implementers must ensure all subsequent transaction _don't_ depend on this transaction. /// In other words, this must remove the given transaction _and_ drain all transaction that /// depend on it. - fn mark_invalid(&mut self, transaction: &Self::Item); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError); /// An iterator may be able to receive additional pending transactions that weren't present it /// the pool when it was created. 
@@ -868,8 +868,8 @@ impl BestTransactions for Box where T: BestTransactions + ?Sized, { - fn mark_invalid(&mut self, transaction: &Self::Item) { - (**self).mark_invalid(transaction); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError) { + (**self).mark_invalid(transaction, kind) } fn no_updates(&mut self) { @@ -887,7 +887,7 @@ where /// A no-op implementation that yields no transactions. impl BestTransactions for std::iter::Empty { - fn mark_invalid(&mut self, _tx: &T) {} + fn mark_invalid(&mut self, _tx: &T, _kind: InvalidPoolTransactionError) {} fn no_updates(&mut self) {} From ca72d456624e807f82e96f9ceb22ec44cca5a132 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 28 Nov 2024 17:25:24 +0100 Subject: [PATCH 755/970] chore(engine): remove uneeded if (#12974) --- crates/engine/tree/src/tree/root.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 09e48c4e6b0..2638f9deadc 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -137,9 +137,7 @@ impl ProofSequencer { } } - if !consecutive_proofs.is_empty() { - self.next_to_deliver += consecutive_proofs.len() as u64; - } + self.next_to_deliver += consecutive_proofs.len() as u64; consecutive_proofs } From 8e1945592b607de19ec79b3e7d1fc1bfab4e614b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 28 Nov 2024 17:34:41 +0100 Subject: [PATCH 756/970] fix: cap request gas limit in eth estimate (#12970) --- crates/rpc/rpc-eth-api/src/helpers/estimate.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index 465c33ada38..f9d62855be1 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -57,7 +57,7 @@ pub trait EstimateCall: Call { request.nonce = None; // 
Keep a copy of gas related request values - let tx_request_gas_limit = request.gas; + let tx_request_gas_limit = request.gas.map(U256::from); let tx_request_gas_price = request.gas_price; // the gas limit of the corresponding block let block_env_gas_limit = block.gas_limit; @@ -65,7 +65,13 @@ pub trait EstimateCall: Call { // Determine the highest possible gas limit, considering both the request's specified limit // and the block's limit. let mut highest_gas_limit = tx_request_gas_limit - .map(|tx_gas_limit| U256::from(tx_gas_limit).max(block_env_gas_limit)) + .map(|mut tx_gas_limit| { + if block_env_gas_limit < tx_gas_limit { + // requested gas limit is higher than the allowed gas limit, capping + tx_gas_limit = block_env_gas_limit; + } + tx_gas_limit + }) .unwrap_or(block_env_gas_limit); // Configure the evm env From 856dc7d9c5ca0713f232f0a738b49eebe47d4dba Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 28 Nov 2024 17:44:38 +0100 Subject: [PATCH 757/970] ci: ignore ethereum crates for testing (#12976) --- .github/workflows/unit.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 11ef24b5f1b..4c927df8be0 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -35,11 +35,11 @@ jobs: partition: 2 total_partitions: 2 - type: optimism - args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" + args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum" partition: 1 total_partitions: 2 - type: optimism - args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" + args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum" partition: 2 total_partitions: 2 - type: book 
From 29d84e4cef13df85d70ca4a253d9b362f5999267 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 28 Nov 2024 17:53:16 +0100 Subject: [PATCH 758/970] fix(tree): account info on state update in root task (#12978) --- crates/engine/tree/src/tree/root.rs | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 2638f9deadc..254cec6c7e9 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -226,20 +226,19 @@ where let hashed_address = keccak256(address); let destroyed = account.is_selfdestructed(); - hashed_state_update.accounts.insert( - hashed_address, - if destroyed || account.is_empty() { None } else { Some(account.info.into()) }, - ); + let info = if account.is_empty() { None } else { Some(account.info.into()) }; + hashed_state_update.accounts.insert(hashed_address, info); if destroyed || !account.storage.is_empty() { - let storage = account.storage.into_iter().filter_map(|(slot, value)| { - value - .is_changed() - .then(|| (keccak256(B256::from(slot)), value.present_value)) - }); - hashed_state_update - .storages - .insert(hashed_address, HashedStorage::from_iter(destroyed, storage)); + let storage = HashedStorage::from_iter( + destroyed, + account.storage.into_iter().filter_map(|(slot, value)| { + value + .is_changed() + .then(|| (keccak256(B256::from(slot)), value.present_value)) + }), + ); + hashed_state_update.storages.insert(hashed_address, storage); } } } From aea56135d577b0e32b306577f4ed4a89f99e376d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 28 Nov 2024 18:01:13 +0100 Subject: [PATCH 759/970] chore: lower tmp ban duration for trusted or static peers (#12961) --- crates/net/network/src/peers.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index d4b762e3e12..f8d18e15994 100644 --- 
a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -375,7 +375,7 @@ impl PeersManager { if peer.is_trusted() || peer.is_static() { // For misbehaving trusted or static peers, we provide a bit more leeway when // penalizing them. - ban_duration = self.backoff_durations.medium; + ban_duration = self.backoff_durations.low / 2; } } From ae395e871c0411701c073ce8bdcba6a16af90bc2 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 28 Nov 2024 18:27:42 +0000 Subject: [PATCH 760/970] fix(engine): fetched proof targets in state root task (#12983) --- crates/engine/tree/src/tree/root.rs | 39 +++++++++++++++++++---------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 254cec6c7e9..e04aed7b112 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -158,15 +158,15 @@ impl ProofSequencer { /// Then it updates relevant leaves according to the result of the transaction. #[derive(Debug)] pub(crate) struct StateRootTask { - /// Task configuration + /// Task configuration. config: StateRootConfig, - /// Receiver for state root related messages + /// Receiver for state root related messages. rx: Receiver, - /// Sender for state root related messages + /// Sender for state root related messages. tx: Sender, - /// Proof targets that have been already fetched - fetched_proof_targets: HashSet, - /// Proof sequencing handler + /// Proof targets that have been already fetched. + fetched_proof_targets: HashMap>, + /// Proof sequencing handler. proof_sequencer: ProofSequencer, /// The sparse trie used for the state root calculation. If [`None`], then update is in /// progress. 
@@ -216,7 +216,7 @@ where view: ConsistentDbView, input: Arc, update: EvmState, - fetched_proof_targets: &HashSet, + fetched_proof_targets: &HashMap>, proof_sequence_number: u64, state_root_message_sender: Sender, ) -> HashMap> { @@ -313,7 +313,7 @@ where ); // TODO(alexey): store proof targets in `ProofSequecner` to avoid recomputing them - let targets = get_proof_targets(&state, &HashSet::default()); + let targets = get_proof_targets(&state, &HashMap::default()); let tx = self.tx.clone(); rayon::spawn(move || { @@ -360,8 +360,9 @@ where self.proof_sequencer.next_sequence(), self.tx.clone(), ); - self.fetched_proof_targets.extend(targets.keys()); - self.fetched_proof_targets.extend(targets.values().flatten()); + for (address, slots) in targets { + self.fetched_proof_targets.entry(address).or_default().extend(slots) + } } StateRootMessage::ProofCalculated { proof, state_update, sequence_number } => { proofs_processed += 1; @@ -458,15 +459,27 @@ where fn get_proof_targets( state_update: &HashedPostState, - fetched_proof_targets: &HashSet, + fetched_proof_targets: &HashMap>, ) -> HashMap> { state_update .accounts .keys() - .filter(|hashed_address| !fetched_proof_targets.contains(*hashed_address)) + .filter(|hashed_address| !fetched_proof_targets.contains_key(*hashed_address)) .map(|hashed_address| (*hashed_address, HashSet::default())) .chain(state_update.storages.iter().map(|(hashed_address, storage)| { - (*hashed_address, storage.storage.keys().copied().collect()) + let fetched_storage_proof_targets = fetched_proof_targets.get(hashed_address); + ( + *hashed_address, + storage + .storage + .keys() + .filter(|slot| { + !fetched_storage_proof_targets + .is_some_and(|targets| targets.contains(*slot)) + }) + .copied() + .collect(), + ) })) .collect() } From da53d7698fbb5002f6bde37f5da6871b0fd8c24b Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 28 Nov 2024 19:38:31 +0100 Subject: [PATCH 761/970] perf(trie): init hashed storage only on existing **changed** slots 
(#12980) --- crates/engine/tree/src/tree/root.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index e04aed7b112..602e87a63db 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -229,16 +229,20 @@ where let info = if account.is_empty() { None } else { Some(account.info.into()) }; hashed_state_update.accounts.insert(hashed_address, info); - if destroyed || !account.storage.is_empty() { - let storage = HashedStorage::from_iter( - destroyed, - account.storage.into_iter().filter_map(|(slot, value)| { - value - .is_changed() - .then(|| (keccak256(B256::from(slot)), value.present_value)) - }), + let mut changed_storage_iter = account + .storage + .into_iter() + .filter_map(|(slot, value)| { + value + .is_changed() + .then(|| (keccak256(B256::from(slot)), value.present_value)) + }) + .peekable(); + if destroyed || changed_storage_iter.peek().is_some() { + hashed_state_update.storages.insert( + hashed_address, + HashedStorage::from_iter(destroyed, changed_storage_iter), ); - hashed_state_update.storages.insert(hashed_address, storage); } } } From 793fc23e0529dca75371d2a7d4a78fe202b84748 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 28 Nov 2024 23:35:32 +0400 Subject: [PATCH 762/970] fix: correctly poll `NetworkState` (#12973) --- crates/net/network/src/state.rs | 58 ++++++++++++++++----------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 473c76c260f..5d7c0a9f654 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -385,10 +385,7 @@ impl NetworkState { } /// Handle the outcome of processed response, for example directly queue another request. 
- fn on_block_response_outcome( - &mut self, - outcome: BlockResponseOutcome, - ) -> Option> { + fn on_block_response_outcome(&mut self, outcome: BlockResponseOutcome) { match outcome { BlockResponseOutcome::Request(peer, request) => { self.handle_block_request(peer, request); @@ -397,7 +394,6 @@ impl NetworkState { self.peers_manager.apply_reputation_change(&peer, reputation_change); } } - None } /// Invoked when received a response from a connected peer. @@ -405,21 +401,19 @@ impl NetworkState { /// Delegates the response result to the fetcher which may return an outcome specific /// instruction that needs to be handled in [`Self::on_block_response_outcome`]. This could be /// a follow-up request or an instruction to slash the peer's reputation. - fn on_eth_response( - &mut self, - peer: PeerId, - resp: PeerResponseResult, - ) -> Option> { - match resp { + fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) { + let outcome = match resp { PeerResponseResult::BlockHeaders(res) => { - let outcome = self.state_fetcher.on_block_headers_response(peer, res)?; - self.on_block_response_outcome(outcome) + self.state_fetcher.on_block_headers_response(peer, res) } PeerResponseResult::BlockBodies(res) => { - let outcome = self.state_fetcher.on_block_bodies_response(peer, res)?; - self.on_block_response_outcome(outcome) + self.state_fetcher.on_block_bodies_response(peer, res) } _ => None, + }; + + if let Some(outcome) = outcome { + self.on_block_response_outcome(outcome); } } @@ -443,13 +437,14 @@ impl NetworkState { } } - // need to buffer results here to make borrow checker happy - let mut closed_sessions = Vec::new(); - let mut received_responses = Vec::new(); + loop { + // need to buffer results here to make borrow checker happy + let mut closed_sessions = Vec::new(); + let mut received_responses = Vec::new(); - // poll all connected peers for responses - for (id, peer) in &mut self.active_peers { - if let Some(mut response) = peer.pending_response.take() 
{ + // poll all connected peers for responses + for (id, peer) in &mut self.active_peers { + let Some(mut response) = peer.pending_response.take() else { continue }; match response.poll(cx) { Poll::Ready(res) => { // check if the error is due to a closed channel to the session @@ -460,7 +455,8 @@ impl NetworkState { "Request canceled, response channel from session closed." ); // if the channel is closed, this means the peer session is also - // closed, in which case we can invoke the [Self::on_closed_session] + // closed, in which case we can invoke the + // [Self::on_closed_session] // immediately, preventing followup requests and propagate the // connection dropped error closed_sessions.push(*id); @@ -474,15 +470,17 @@ impl NetworkState { } }; } - } - for peer in closed_sessions { - self.on_session_closed(peer) - } + for peer in closed_sessions { + self.on_session_closed(peer) + } + + if received_responses.is_empty() { + break; + } - for (peer_id, resp) in received_responses { - if let Some(action) = self.on_eth_response(peer_id, resp) { - self.queued_messages.push_back(action); + for (peer_id, resp) in received_responses { + self.on_eth_response(peer_id, resp); } } @@ -491,6 +489,8 @@ impl NetworkState { self.on_peer_action(action); } + // We need to poll again tn case we have received any responses because they may have + // triggered follow-up requests. 
if self.queued_messages.is_empty() { return Poll::Pending } From 9fe459e2ef2fef5bde07ff136577cdd8490bcf59 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 29 Nov 2024 08:55:38 +0400 Subject: [PATCH 763/970] fix: disable state root calculation for eth_simulateV1 (#12993) --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 1 - crates/rpc/rpc-eth-types/src/simulate.rs | 49 ++++++++++------------ 2 files changed, 23 insertions(+), 27 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index c7f346e951e..f9441f0630a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -201,7 +201,6 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { parent_hash, total_difficulty, return_full_transactions, - &db, this.tx_resp_builder(), )?; diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 5a0daa1b42f..a10b4afff9d 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -12,16 +12,12 @@ use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, BlockBody, BlockWithSenders, Receipt, TransactionSigned, }; -use reth_revm::database::StateProviderDatabase; use reth_rpc_server_types::result::rpc_err; use reth_rpc_types_compat::{block::from_block, TransactionCompat}; -use reth_storage_api::StateRootProvider; -use reth_trie::{HashedPostState, HashedStorage}; -use revm::{db::CacheDB, Database}; -use revm_primitives::{keccak256, Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; +use revm::Database; +use revm_primitives::{Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; use crate::{ - cache::db::StateProviderTraitObjWrapper, error::{api::FromEthApiError, ToRpcError}, EthApiError, RevertError, RpcInvalidTransactionError, }; @@ -143,7 +139,6 @@ where } /// Handles outputs of the calls execution and builds a 
[`SimulatedBlock`]. -#[expect(clippy::complexity)] pub fn build_block>( results: Vec<(Address, ExecutionResult)>, transactions: Vec, @@ -151,7 +146,6 @@ pub fn build_block>( parent_hash: B256, total_difficulty: U256, full_transactions: bool, - db: &CacheDB>>, tx_resp_builder: &T, ) -> Result>, T::Error> { let mut calls: Vec = Vec::with_capacity(results.len()); @@ -229,24 +223,27 @@ pub fn build_block>( calls.push(call); } - let mut hashed_state = HashedPostState::default(); - for (address, account) in &db.accounts { - let hashed_address = keccak256(address); - hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into())); - - let storage = hashed_state - .storages - .entry(hashed_address) - .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); - - for (slot, value) in &account.storage { - let slot = B256::from(*slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, *value); - } - } - - let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; + // TODO: uncomment once performance cost is acceptable + // + // let mut hashed_state = HashedPostState::default(); + // for (address, account) in &db.accounts { + // let hashed_address = keccak256(address); + // hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into())); + + // let storage = hashed_state + // .storages + // .entry(hashed_address) + // .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); + + // for (slot, value) in &account.storage { + // let slot = B256::from(*slot); + // let hashed_slot = keccak256(slot); + // storage.storage.insert(hashed_slot, *value); + // } + // } + + // let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; + let state_root = B256::ZERO; let header = alloy_consensus::Header { beneficiary: block_env.coinbase, From 3f9816e1c4d5302bd5a3f743f4f10ab079c2fd15 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 
29 Nov 2024 08:56:48 +0400 Subject: [PATCH 764/970] feat: integrate generic primitives into pruner (#12995) --- Cargo.lock | 1 + crates/primitives-traits/src/node.rs | 14 ++----------- crates/prune/prune/Cargo.toml | 2 ++ crates/prune/prune/src/builder.rs | 9 ++++++--- crates/prune/prune/src/segments/receipts.rs | 16 ++++++++++----- crates/prune/prune/src/segments/set.rs | 5 +++-- .../src/segments/static_file/receipts.rs | 5 +++-- .../src/segments/static_file/transactions.rs | 13 ++++++++---- .../prune/prune/src/segments/user/receipts.rs | 13 ++++++++---- .../src/segments/user/receipts_by_logs.rs | 20 ++++++++++++++----- crates/stages/stages/src/stages/prune.rs | 7 ++++--- 11 files changed, 65 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b4111f0aef..04f3b854e9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8693,6 +8693,7 @@ dependencies = [ name = "reth-prune" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "assert_matches", diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 4adf258177e..e610c094ba2 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -2,7 +2,7 @@ use core::fmt; use crate::{ Block, BlockBody, BlockHeader, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, - FullSignedTx, FullTxType, MaybeArbitrary, MaybeSerde, + FullSignedTx, FullTxType, MaybeArbitrary, MaybeSerde, Receipt, }; /// Configures all the primitive types of the node. @@ -38,17 +38,7 @@ pub trait NodePrimitives: + MaybeArbitrary + 'static; /// A receipt. - type Receipt: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + MaybeSerde - + MaybeArbitrary - + 'static; + type Receipt: Receipt; } /// Helper trait that sets trait bounds on [`NodePrimitives`]. 
pub trait FullNodePrimitives diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 41156d3e56b..f772ff54669 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -22,9 +22,11 @@ reth-provider.workspace = true reth-tokio-util.workspace = true reth-config.workspace = true reth-prune-types.workspace = true +reth-primitives-traits.workspace = true reth-static-file-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true # metrics diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index 8088bd7e12b..4fd56617121 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -2,8 +2,9 @@ use crate::{segments::SegmentSet, Pruner}; use alloy_eips::eip2718::Encodable2718; use reth_chainspec::MAINNET; use reth_config::PruneConfig; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; use reth_exex_types::FinishedExExHeight; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory, NodePrimitivesProvider, PruneCheckpointWriter, StaticFileProviderFactory, @@ -80,7 +81,9 @@ impl PrunerBuilder { PF: DatabaseProviderFactory< ProviderRW: PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory< + Primitives: NodePrimitives, + >, > + StaticFileProviderFactory< Primitives = ::Primitives, >, @@ -104,7 +107,7 @@ impl PrunerBuilder { static_file_provider: StaticFileProvider, ) -> Pruner where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + DBProvider + BlockReader + PruneCheckpointWriter, diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index c081bf88c7d..a365738a777 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -6,10 +6,11 @@ //! 
node after static file producer has finished use crate::{db_ext::DbTxPruneExt, segments::PruneInput, PrunerError}; -use reth_db::{tables, transaction::DbTxMut}; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, + PruneCheckpointWriter, TransactionsProvider, }; use reth_prune_types::{ PruneCheckpoint, PruneProgress, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, @@ -21,7 +22,10 @@ pub(crate) fn prune( input: PruneInput, ) -> Result where - Provider: DBProvider + TransactionsProvider + BlockReader, + Provider: DBProvider + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { let tx_range = match input.get_next_tx_num_range(provider)? { Some(range) => range, @@ -35,7 +39,9 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = tx_range_end; - let (pruned, done) = provider.tx_ref().prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::::Receipt, + >>( tx_range, &mut limiter, |_| false, diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 198d01ce44d..d7bbee1042b 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -3,7 +3,8 @@ use crate::segments::{ UserReceipts, }; use alloy_eips::eip2718::Encodable2718; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, StaticFileProviderFactory, @@ -46,7 +47,7 @@ impl SegmentSet { impl SegmentSet where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + DBProvider + PruneCheckpointWriter + 
BlockReader, diff --git a/crates/prune/prune/src/segments/static_file/receipts.rs b/crates/prune/prune/src/segments/static_file/receipts.rs index 5221418674a..6cdc5375990 100644 --- a/crates/prune/prune/src/segments/static_file/receipts.rs +++ b/crates/prune/prune/src/segments/static_file/receipts.rs @@ -2,7 +2,8 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, @@ -23,7 +24,7 @@ impl Receipts { impl Segment for Receipts where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + DBProvider + PruneCheckpointWriter + TransactionsProvider diff --git a/crates/prune/prune/src/segments/static_file/transactions.rs b/crates/prune/prune/src/segments/static_file/transactions.rs index 7dc7a23191a..20274e5dc70 100644 --- a/crates/prune/prune/src/segments/static_file/transactions.rs +++ b/crates/prune/prune/src/segments/static_file/transactions.rs @@ -3,7 +3,8 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::{tables, transaction::DbTxMut}; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, StaticFileProviderFactory, TransactionsProvider, @@ -27,8 +28,10 @@ impl Transactions { impl Segment for Transactions where - Provider: - DBProvider + TransactionsProvider + BlockReader + StaticFileProviderFactory, + Provider: DBProvider + + TransactionsProvider + + BlockReader + + StaticFileProviderFactory>, { fn segment(&self) -> PruneSegment { PruneSegment::Transactions @@ -56,7 +59,9 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = *tx_range.end(); - let 
(pruned, done) = provider.tx_ref().prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::::SignedTx, + >>( tx_range, &mut limiter, |_| false, diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index 5bc9feaf023..97708ad6de1 100644 --- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -2,10 +2,11 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, + PruneCheckpointWriter, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use tracing::instrument; @@ -23,7 +24,11 @@ impl Receipts { impl Segment for Receipts where - Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { fn segment(&self) -> PruneSegment { PruneSegment::Receipts diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index ee404b074c3..778aac1e7b9 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -3,8 +3,12 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, PruneCheckpointWriter, TransactionsProvider}; +use alloy_consensus::TxReceipt; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use 
reth_primitives_traits::NodePrimitives; +use reth_provider::{ + BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, +}; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneProgress, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput, MINIMUM_PRUNING_DISTANCE, @@ -23,7 +27,11 @@ impl ReceiptsByLogs { impl Segment for ReceiptsByLogs where - Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { fn segment(&self) -> PruneSegment { PruneSegment::ContractLogs @@ -141,12 +149,14 @@ where // Delete receipts, except the ones in the inclusion list let mut last_skipped_transaction = 0; let deleted; - (deleted, done) = provider.tx_ref().prune_table_with_range::( + (deleted, done) = provider.tx_ref().prune_table_with_range::::Receipt, + >>( tx_range, &mut limiter, |(tx_num, receipt)| { let skip = num_addresses > 0 && - receipt.logs.iter().any(|log| { + receipt.logs().iter().any(|log| { filtered_addresses[..num_addresses].contains(&&log.address) }); diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 527f5376697..7e5d7af46ee 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -1,4 +1,5 @@ -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives::NodePrimitives; use reth_provider::{ BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, @@ -41,7 +42,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory>, { fn id(&self) -> StageId { StageId::Prune @@ -130,7 +131,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory>, { fn 
id(&self) -> StageId { StageId::PruneSenderRecovery From 599b808fb692a9860a3fe25498f3864e6321e250 Mon Sep 17 00:00:00 2001 From: Zach Obront Date: Fri, 29 Nov 2024 00:20:50 -0600 Subject: [PATCH 765/970] feat(ci): add rv32im compilation checks (#12925) --- .github/assets/check_rv32imac.sh | 61 ++++++++++++++++++++++++++++++++ .github/workflows/lint.yml | 16 +++++++++ 2 files changed, 77 insertions(+) create mode 100755 .github/assets/check_rv32imac.sh diff --git a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh new file mode 100755 index 00000000000..0112e5cec17 --- /dev/null +++ b/.github/assets/check_rv32imac.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +set +e # Disable immediate exit on error + +# Array of crates to check +crates_to_check=( + reth-codecs-derive + # reth-evm + # reth-primitives + # reth-primitives-traits + # reth-optimism-forks + # reth-optimism-chainspec +) + +# Array to hold the results +results=() +# Flag to track if any command fails +any_failed=0 + +for crate in "${crates_to_check[@]}"; do + cmd="cargo +stable build -p $crate --target riscv32imac-unknown-none-elf --no-default-features" + + if [ -n "$CI" ]; then + echo "::group::$cmd" + else + printf "\n%s:\n %s\n" "$crate" "$cmd" + fi + + set +e # Disable immediate exit on error + # Run the command and capture the return code + $cmd + ret_code=$? 
+ set -e # Re-enable immediate exit on error + + # Store the result in the dictionary + if [ $ret_code -eq 0 ]; then + results+=("1:✅:$crate") + else + results+=("2:❌:$crate") + any_failed=1 + fi + + if [ -n "$CI" ]; then + echo "::endgroup::" + fi +done + +# Sort the results by status and then by crate name +IFS=$'\n' sorted_results=($(sort <<<"${results[*]}")) +unset IFS + +# Print summary +echo -e "\nSummary of build results:" +for result in "${sorted_results[@]}"; do + status="${result#*:}" + status="${status%%:*}" + crate="${result##*:}" + echo "$status $crate" +done + +# Exit with a non-zero status if any command fails +exit $any_failed diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 7e6b8747fff..61ba54e9556 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -75,6 +75,22 @@ jobs: - name: Run Wasm checks run: .github/assets/check_wasm.sh + riscv: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + target: riscv32imac-unknown-none-elf + - uses: taiki-e/install-action@cargo-hack + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - uses: dcarbone/install-jq-action@v3 + - name: Run RISC-V checks + run: .github/assets/check_rv32imac.sh + crate-checks: runs-on: ubuntu-latest timeout-minutes: 30 From 3b4edb0a69fc8a8ab068a71c1bd4a1bf2dd836ea Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 29 Nov 2024 10:24:11 +0400 Subject: [PATCH 766/970] feat: use generic `SignedTx` in `SenderRecoveryStage` (#12996) --- crates/cli/commands/src/db/get.rs | 15 ++++++------ .../src/transaction/signed.rs | 10 ++++++-- crates/primitives/src/transaction/mod.rs | 5 ++-- crates/primitives/src/transaction/pooled.rs | 21 +++++++++++++--- .../stages/src/stages/sender_recovery.rs | 24 +++++++++++-------- 5 files changed, 50 insertions(+), 25 deletions(-) diff --git a/crates/cli/commands/src/db/get.rs 
b/crates/cli/commands/src/db/get.rs index 8f9a5f1d322..13b7b70347e 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -9,6 +9,7 @@ use reth_db::{ }; use reth_db_api::table::{Decompress, DupSort, Table}; use reth_db_common::DbTool; +use reth_node_api::{ReceiptTy, TxTy}; use reth_node_builder::NodeTypesWithDB; use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; @@ -65,14 +66,12 @@ impl Command { StaticFileSegment::Headers => { (table_key::(&key)?, >::MASK) } - StaticFileSegment::Transactions => ( - table_key::(&key)?, - ::Value>>::MASK, - ), - StaticFileSegment::Receipts => ( - table_key::(&key)?, - ::Value>>::MASK, - ), + StaticFileSegment::Transactions => { + (table_key::(&key)?, >>::MASK) + } + StaticFileSegment::Receipts => { + (table_key::(&key)?, >>::MASK) + } }; let content = tool.provider_factory.static_file_provider().find_static_file( diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index ae9a8f0d2ac..5e0a91b4da2 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -1,7 +1,7 @@ //! API of a signed transaction. use crate::{FillTxEnv, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, TxType}; -use alloc::fmt; +use alloc::{fmt, vec::Vec}; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; use core::hash::Hash; @@ -61,7 +61,13 @@ pub trait SignedTransaction: /// /// Returns `None` if the transaction's signature is invalid, see also /// `reth_primitives::transaction::recover_signer_unchecked`. - fn recover_signer_unchecked(&self) -> Option

; + fn recover_signer_unchecked(&self) -> Option
{ + self.recover_signer_unchecked_with_buf(&mut Vec::new()) + } + + /// Same as [`Self::recover_signer_unchecked`] but receives a buffer to operate on. This is used + /// during batch recovery to avoid allocating a new buffer for each transaction. + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec) -> Option
; /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with /// tx type. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index db789d1f6de..e1524aa1dc8 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1245,14 +1245,15 @@ impl SignedTransaction for TransactionSigned { recover_signer(&self.signature, signature_hash) } - fn recover_signer_unchecked(&self) -> Option
{ + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec) -> Option
{ // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. #[cfg(feature = "optimism")] if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { return Some(from) } - let signature_hash = self.signature_hash(); + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); recover_signer_unchecked(&self.signature, signature_hash) } } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 145660f44c7..979a55f2739 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -8,10 +8,11 @@ use super::{ use crate::{ BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, }; +use alloc::vec::Vec; use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - Signed, TxEip4844WithSidecar, + SignableTransaction, Signed, TxEip4844WithSidecar, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Result, Encodable2718}, @@ -27,6 +28,7 @@ use bytes::Buf; use core::hash::{Hash, Hasher}; use derive_more::{AsRef, Deref}; use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use revm_primitives::keccak256; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions`. This can include either a blob transaction, or a @@ -153,6 +155,18 @@ impl PooledTransactionsElement { } } + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. 
+ pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + match self { + Self::Legacy(tx) => tx.tx().encode_for_signing(out), + Self::Eip2930(tx) => tx.tx().encode_for_signing(out), + Self::Eip1559(tx) => tx.tx().encode_for_signing(out), + Self::BlobTransaction(tx) => tx.tx().encode_for_signing(out), + Self::Eip7702(tx) => tx.tx().encode_for_signing(out), + } + } + /// Create [`TransactionSignedEcRecovered`] by converting this transaction into /// [`TransactionSigned`] and [`Address`] of the signer. pub fn into_ecrecovered_transaction(self, signer: Address) -> TransactionSignedEcRecovered { @@ -600,8 +614,9 @@ impl SignedTransaction for PooledTransactionsElement { recover_signer(self.signature(), signature_hash) } - fn recover_signer_unchecked(&self) -> Option
{ - let signature_hash = self.signature_hash(); + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec) -> Option
{ + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); recover_signer_unchecked(self.signature(), signature_hash) } } diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index a6c2537c185..674d035021d 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -1,13 +1,14 @@ use alloy_primitives::{Address, TxNumber}; use reth_config::config::SenderRecoveryConfig; use reth_consensus::ConsensusError; -use reth_db::{static_file::TransactionMask, tables, RawValue}; +use reth_db::{static_file::TransactionMask, table::Value, tables, RawValue}; use reth_db_api::{ cursor::DbCursorRW, transaction::{DbTx, DbTxMut}, DbTxUnwindExt, }; -use reth_primitives::{GotExpected, StaticFileSegment, TransactionSignedNoHash}; +use reth_primitives::{GotExpected, NodePrimitives, StaticFileSegment}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, StaticFileProviderFactory, StatsReader, @@ -59,7 +60,7 @@ impl Stage for SenderRecoveryStage where Provider: DBProvider + BlockReader - + StaticFileProviderFactory + + StaticFileProviderFactory> + StatsReader + PruneCheckpointReader, { @@ -233,7 +234,9 @@ fn setup_range_recovery( provider: &Provider, ) -> mpsc::Sender, RecoveryResultSender)>> where - Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, + Provider: DBProvider + + HeaderProvider + + StaticFileProviderFactory>, { let (tx_sender, tx_receiver) = mpsc::channel::, RecoveryResultSender)>>(); let static_file_provider = provider.static_file_provider(); @@ -254,9 +257,9 @@ where chunk_range.clone(), |cursor, number| { Ok(cursor - .get_one::>>( - number.into(), - )? + .get_one::::SignedTx>, + >>(number.into())? 
.map(|tx| (number, tx))) }, |_| true, @@ -300,17 +303,18 @@ where } #[inline] -fn recover_sender( - (tx_id, tx): (TxNumber, TransactionSignedNoHash), +fn recover_sender( + (tx_id, tx): (TxNumber, T), rlp_buf: &mut Vec, ) -> Result<(u64, Address), Box> { + rlp_buf.clear(); // We call [Signature::encode_and_recover_unchecked] because transactions run in the pipeline // are known to be valid - this means that we do not need to check whether or not the `s` // value is greater than `secp256k1n / 2` if past EIP-2. There are transactions // pre-homestead which have large `s` values, so using [Signature::recover_signer] here // would not be backwards-compatible. let sender = tx - .encode_and_recover_unchecked(rlp_buf) + .recover_signer_unchecked_with_buf(rlp_buf) .ok_or(SenderRecoveryStageError::FailedRecovery(FailedSenderRecoveryError { tx: tx_id }))?; Ok((tx_id, sender)) From 39d4c1a4275b2fb392512e2dd2ef7da37b1685a8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Nov 2024 07:26:18 +0100 Subject: [PATCH 767/970] chore: use new payload validator in tree (#12987) --- crates/engine/tree/src/tree/mod.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 5dc8039afe6..fd0e5aeec83 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -34,7 +34,7 @@ use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; +use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ Block, EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockWithSenders, SealedHeader, @@ -2532,12 +2532,10 @@ where state: ForkchoiceState, version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { - // 7. 
Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp - // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held - // client software MUST respond with -38003: `Invalid payload attributes` and MUST NOT - // begin a payload build process. In such an event, the forkchoiceState update MUST NOT - // be rolled back. - if attrs.timestamp() <= head.timestamp { + if let Err(err) = + self.payload_validator.validate_payload_attributes_against_header(&attrs, head) + { + warn!(target: "engine::tree", %err, ?head, "Invalid payload attributes"); return OnForkChoiceUpdated::invalid_payload_attributes() } From 8590f7a5d5b2a2d300d8f41a4ccf10168066a70f Mon Sep 17 00:00:00 2001 From: John <153272819+hishope@users.noreply.github.com> Date: Fri, 29 Nov 2024 14:33:26 +0800 Subject: [PATCH 768/970] chore: remove redundant words in comment (#12997) Signed-off-by: hishope --- crates/net/eth-wire-types/src/broadcast.rs | 2 +- crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h | 6 +++--- crates/storage/libmdbx-rs/src/flags.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 25ce7f3b350..b54fd0df2db 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -309,7 +309,7 @@ impl From> for NewPooledTransactionHashes66 { } } -/// Same as [`NewPooledTransactionHashes66`] but extends that that beside the transaction hashes, +/// Same as [`NewPooledTransactionHashes66`] but extends that beside the transaction hashes, /// the node sends the transaction types and their sizes (as defined in EIP-2718) as well. 
#[derive(Clone, Debug, PartialEq, Eq, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h index dfcba66063a..2665931de52 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h @@ -1413,7 +1413,7 @@ enum MDBX_env_flags_t { * \ref mdbx_env_set_syncbytes() and \ref mdbx_env_set_syncperiod() functions * could be very useful with `MDBX_SAFE_NOSYNC` flag. * - * The number and volume of of disk IOPs with MDBX_SAFE_NOSYNC flag will + * The number and volume of disk IOPs with MDBX_SAFE_NOSYNC flag will * exactly the as without any no-sync flags. However, you should expect a * larger process's [work set](https://bit.ly/2kA2tFX) and significantly worse * a [locality of reference](https://bit.ly/2mbYq2J), due to the more @@ -2079,7 +2079,7 @@ enum MDBX_option_t { * for all processes interacting with the database. * * \details This defines the number of slots in the lock table that is used to - * track readers in the the environment. The default is about 100 for 4K + * track readers in the environment. The default is about 100 for 4K * system page size. Starting a read-only transaction normally ties a lock * table slot to the current thread until the environment closes or the thread * exits. If \ref MDBX_NOTLS is in use, \ref mdbx_txn_begin() instead ties the @@ -3343,7 +3343,7 @@ mdbx_limits_txnsize_max(intptr_t pagesize); * \ingroup c_settings * * \details This defines the number of slots in the lock table that is used to - * track readers in the the environment. The default is about 100 for 4K system + * track readers in the environment. The default is about 100 for 4K system * page size. Starting a read-only transaction normally ties a lock table slot * to the current thread until the environment closes or the thread exits. 
If * \ref MDBX_NOTLS is in use, \ref mdbx_txn_begin() instead ties the slot to the diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index d733327cefa..1457195be78 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -56,7 +56,7 @@ pub enum SyncMode { /// flag could be used with [`Environment::sync()`](crate::Environment::sync) as alternatively /// for batch committing or nested transaction (in some cases). /// - /// The number and volume of of disk IOPs with [`SyncMode::SafeNoSync`] flag will exactly the + /// The number and volume of disk IOPs with [`SyncMode::SafeNoSync`] flag will exactly the /// as without any no-sync flags. However, you should expect a larger process's work set /// and significantly worse a locality of reference, due to the more intensive allocation /// of previously unused pages and increase the size of the database. From fa9cabd9753c9f3858e33d8ecd28eea580d6b283 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Nov 2024 08:18:45 +0100 Subject: [PATCH 769/970] chore: acquire trace guard for eth_simulate_v1 (#12935) --- crates/rpc/rpc-eth-api/src/core.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 9cd9ba2921a..6500c304978 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -625,6 +625,7 @@ where block_number: Option, ) -> RpcResult>>> { trace!(target: "rpc::eth", ?block_number, "Serving eth_simulateV1"); + let _permit = self.tracing_task_guard().clone().acquire_owned().await; Ok(EthCall::simulate_v1(self, payload, block_number).await?) 
} From b10f5769333226d2993b598deca6b6438c00da45 Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Fri, 29 Nov 2024 14:09:37 +0700 Subject: [PATCH 770/970] chore: make generic header consensus validation (#12965) --- crates/consensus/common/src/validation.rs | 84 +++++++++++++---------- crates/ethereum/consensus/src/lib.rs | 14 ++-- crates/optimism/consensus/src/lib.rs | 12 ++-- 3 files changed, 64 insertions(+), 46 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index b5314cdd1ec..9e7f8d451ff 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,7 +1,10 @@ //! Collection of methods for block validation. -use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader, Header}; -use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader}; +use alloy_eips::{ + calc_next_block_base_fee, + eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, +}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ @@ -162,11 +165,11 @@ pub fn validate_block_pre_execution( /// * `blob_gas_used` is less than or equal to `MAX_DATA_GAS_PER_BLOCK` /// * `blob_gas_used` is a multiple of `DATA_GAS_PER_BLOB` /// * `excess_blob_gas` is a multiple of `DATA_GAS_PER_BLOB` -pub fn validate_4844_header_standalone(header: &Header) -> Result<(), ConsensusError> { - let blob_gas_used = header.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?; - let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?; +pub fn validate_4844_header_standalone(header: &H) -> Result<(), ConsensusError> { + let blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + let excess_blob_gas = 
header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; - if header.parent_beacon_block_root.is_none() { + if header.parent_beacon_block_root().is_none() { return Err(ConsensusError::ParentBeaconBlockRootMissing) } @@ -201,8 +204,8 @@ pub fn validate_4844_header_standalone(header: &Header) -> Result<(), ConsensusE /// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. /// This must be 32 bytes or fewer; formally Hx. #[inline] -pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { - let extradata_len = header.extra_data.len(); +pub fn validate_header_extradata(header: &H) -> Result<(), ConsensusError> { + let extradata_len = header.extra_data().len(); if extradata_len > MAXIMUM_EXTRA_DATA_SIZE { Err(ConsensusError::ExtraDataExceedsMax { len: extradata_len }) } else { @@ -215,21 +218,21 @@ pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> /// This function ensures that the header block number is sequential and that the hash of the parent /// header matches the parent hash in the header. #[inline] -pub fn validate_against_parent_hash_number( - header: &Header, +pub fn validate_against_parent_hash_number( + header: &H, parent: &SealedHeader, ) -> Result<(), ConsensusError> { // Parent number is consistent. 
- if parent.number + 1 != header.number { + if parent.number + 1 != header.number() { return Err(ConsensusError::ParentBlockNumberMismatch { parent_block_number: parent.number, - block_number: header.number, + block_number: header.number(), }) } - if parent.hash() != header.parent_hash { + if parent.hash() != header.parent_hash() { return Err(ConsensusError::ParentHashMismatch( - GotExpected { got: header.parent_hash, expected: parent.hash() }.into(), + GotExpected { got: header.parent_hash(), expected: parent.hash() }.into(), )) } @@ -238,23 +241,30 @@ pub fn validate_against_parent_hash_number( /// Validates the base fee against the parent and EIP-1559 rules. #[inline] -pub fn validate_against_parent_eip1559_base_fee( - header: &Header, - parent: &Header, +pub fn validate_against_parent_eip1559_base_fee< + H: BlockHeader, + ChainSpec: EthChainSpec + EthereumHardforks, +>( + header: &H, + parent: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number) { - let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)?; + if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number()) { + let base_fee = header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; let expected_base_fee = - if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { + if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number()) { alloy_eips::eip1559::INITIAL_BASE_FEE } else { // This BaseFeeMissing will not happen as previous blocks are checked to have // them. - parent - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(header.timestamp)) - .ok_or(ConsensusError::BaseFeeMissing)? 
+ let base_fee = parent.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; + calc_next_block_base_fee( + parent.gas_used(), + parent.gas_limit(), + base_fee, + chain_spec.base_fee_params_at_timestamp(header.timestamp()), + ) }; if expected_base_fee != base_fee { return Err(ConsensusError::BaseFeeDiff(GotExpected { @@ -269,14 +279,14 @@ pub fn validate_against_parent_eip1559_base_fee( + header: &H, + parent: &H, ) -> Result<(), ConsensusError> { - if header.timestamp <= parent.timestamp { + if header.timestamp() <= parent.timestamp() { return Err(ConsensusError::TimestampIsInPast { - parent_timestamp: parent.timestamp, - timestamp: header.timestamp, + parent_timestamp: parent.timestamp(), + timestamp: header.timestamp(), }) } Ok(()) @@ -286,9 +296,9 @@ pub const fn validate_against_parent_timestamp( /// ensures that the `blob_gas_used` and `excess_blob_gas` fields exist in the child header, and /// that the `excess_blob_gas` field matches the expected `excess_blob_gas` calculated from the /// parent header fields. -pub fn validate_against_parent_4844( - header: &Header, - parent: &Header, +pub fn validate_against_parent_4844( + header: &H, + parent: &H, ) -> Result<(), ConsensusError> { // From [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension): // @@ -296,13 +306,13 @@ pub fn validate_against_parent_4844( // > are evaluated as 0. // // This means in the first post-fork block, calc_excess_blob_gas will return 0. 
- let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0); - let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0); + let parent_blob_gas_used = parent.blob_gas_used().unwrap_or(0); + let parent_excess_blob_gas = parent.excess_blob_gas().unwrap_or(0); - if header.blob_gas_used.is_none() { + if header.blob_gas_used().is_none() { return Err(ConsensusError::BlobGasUsedMissing) } - let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?; + let excess_blob_gas = header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; let expected_excess_blob_gas = calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used); @@ -320,7 +330,7 @@ pub fn validate_against_parent_4844( #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; + use alloy_consensus::{Header, TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 2c260c4a7d1..96dfbae3f16 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -134,7 +134,7 @@ impl HeaderVa // Ensures that EIP-4844 fields are valid once cancun is active. 
if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_4844_header_standalone(header)?; + validate_4844_header_standalone(header.header())?; } else if header.blob_gas_used.is_some() { return Err(ConsensusError::BlobGasUsedUnexpected) } else if header.excess_blob_gas.is_some() { @@ -159,19 +159,23 @@ impl HeaderVa header: &SealedHeader, parent: &SealedHeader, ) -> Result<(), ConsensusError> { - validate_against_parent_hash_number(header, parent)?; + validate_against_parent_hash_number(header.header(), parent)?; - validate_against_parent_timestamp(header, parent)?; + validate_against_parent_timestamp(header.header(), parent.header())?; // TODO Check difficulty increment between parent and self // Ace age did increment it by some formula that we need to follow. self.validate_against_parent_gas_limit(header, parent)?; - validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?; + validate_against_parent_eip1559_base_fee( + header.header(), + parent.header(), + &self.chain_spec, + )?; // ensure that the blob gas fields for this block if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_against_parent_4844(header, parent)?; + validate_against_parent_4844(header.header(), parent.header())?; } Ok(()) diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 69d94378582..cb357db924a 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -101,17 +101,21 @@ impl HeaderValidator for OpBeaconConsensus { header: &SealedHeader, parent: &SealedHeader, ) -> Result<(), ConsensusError> { - validate_against_parent_hash_number(header, parent)?; + validate_against_parent_hash_number(header.header(), parent)?; if self.chain_spec.is_bedrock_active_at_block(header.number) { - validate_against_parent_timestamp(header, parent)?; + validate_against_parent_timestamp(header.header(), parent.header())?; } - 
validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?; + validate_against_parent_eip1559_base_fee( + header.header(), + parent.header(), + &self.chain_spec, + )?; // ensure that the blob gas fields for this block if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_against_parent_4844(header, parent)?; + validate_against_parent_4844(header.header(), parent.header())?; } Ok(()) From 88bde87f7045a44ec384ed65cc1d97e57ea2c87f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Nov 2024 08:35:12 +0100 Subject: [PATCH 771/970] chore: reduce reth-primitives usage in node-core (#13000) --- Cargo.lock | 1 + crates/node/core/Cargo.toml | 1 + crates/node/core/src/lib.rs | 6 ++++-- crates/node/core/src/node_config.rs | 18 +++++++++++------- crates/node/core/src/utils.rs | 3 ++- 5 files changed, 19 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 04f3b854e9e..cf91db21486 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8067,6 +8067,7 @@ dependencies = [ "reth-db", "reth-discv4", "reth-discv5", + "reth-ethereum-forks", "reth-net-nat", "reth-network", "reth-network-p2p", diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index c667a56293c..0ede9fe80c4 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -34,6 +34,7 @@ reth-net-nat.workspace = true reth-network-peers.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true +reth-ethereum-forks.workspace = true # ethereum alloy-primitives.workspace = true diff --git a/crates/node/core/src/lib.rs b/crates/node/core/src/lib.rs index a69a255a3c6..aa4f72bd6a4 100644 --- a/crates/node/core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -15,9 +15,11 @@ pub mod exit; pub mod node_config; pub mod utils; pub mod version; -/// Re-exported from `reth_primitives`. 
+ +/// Re-exported primitive types pub mod primitives { - pub use reth_primitives::*; + pub use reth_ethereum_forks::*; + pub use reth_primitives_traits::*; } /// Re-export of `reth_rpc_*` crates. diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 24d5588b688..2fd39bde82f 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -9,22 +9,26 @@ use crate::{ utils::get_single_header, }; use alloy_consensus::BlockHeader; +use alloy_eips::BlockHashOrNumber; +use alloy_primitives::{BlockNumber, B256}; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; +use reth_ethereum_forks::Head; use reth_network_p2p::headers::client::HeadersClient; -use serde::{de::DeserializeOwned, Serialize}; -use std::{fs, path::Path}; - -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; -use reth_primitives::{Head, SealedHeader}; +use reth_primitives_traits::SealedHeader; use reth_stages_types::StageId; use reth_storage_api::{ BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader, }; use reth_storage_errors::provider::ProviderResult; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{ + fs, + net::SocketAddr, + path::{Path, PathBuf}, + sync::Arc, +}; use tracing::*; /// This includes all necessary configuration to launch the node. 
diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index e52af4b46fe..65f90f27eb7 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -9,7 +9,8 @@ use reth_consensus::Consensus; use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::HeadersClient, priority::Priority, }; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives::SealedBlock; +use reth_primitives_traits::SealedHeader; use std::{ env::VarError, path::{Path, PathBuf}, From 37dca234023fbfdc55ceb65f06b514465193243f Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Fri, 29 Nov 2024 03:41:39 -0600 Subject: [PATCH 772/970] replace thiserror-no-std with thiserror (#12432) Co-authored-by: Matthias Seitz --- Cargo.lock | 112 +++++++++++--------------- Cargo.toml | 3 +- crates/ethereum-forks/Cargo.toml | 4 +- crates/ethereum-forks/src/forkid.rs | 2 +- crates/storage/nippy-jar/Cargo.toml | 1 - crates/storage/nippy-jar/src/error.rs | 2 +- 6 files changed, 51 insertions(+), 73 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf91db21486..c0ad6f5b230 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2624,7 +2624,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", "walkdir", ] @@ -2780,7 +2780,7 @@ dependencies = [ "reth-node-ethereum", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] @@ -2868,7 +2868,7 @@ dependencies = [ "reth-tracing", "reth-trie-db", "serde", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", ] @@ -6502,7 +6502,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "schnellru", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -6538,7 +6538,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "serde", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tower 0.4.13", "tracing", @@ -6592,7 +6592,7 @@ dependencies = [ "reth-execution-errors", 
"reth-primitives", "reth-storage-errors", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] @@ -6748,7 +6748,7 @@ dependencies = [ "reth-fs-util", "secp256k1", "serde", - "thiserror 1.0.69", + "thiserror 2.0.3", "tikv-jemallocator", "tracy-client", ] @@ -6893,7 +6893,7 @@ dependencies = [ "sysinfo", "tempfile", "test-fuzz", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] @@ -6949,7 +6949,7 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", "tracing", ] @@ -6991,7 +6991,7 @@ dependencies = [ "schnellru", "secp256k1", "serde", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -7016,7 +7016,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -7042,7 +7042,7 @@ dependencies = [ "secp256k1", "serde", "serde_with", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -7081,7 +7081,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7158,7 +7158,7 @@ dependencies = [ "secp256k1", "sha2 0.10.8", "sha3", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7213,7 +7213,7 @@ dependencies = [ "reth-primitives-traits", "reth-trie", "serde", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", ] @@ -7240,7 +7240,7 @@ dependencies = [ "reth-prune", "reth-stages-api", "reth-tasks", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", ] @@ -7295,7 +7295,7 @@ dependencies = [ "reth-trie-parallel", "reth-trie-sparse", "revm-primitives", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -7340,7 +7340,7 @@ dependencies = [ "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] @@ -7373,7 +7373,7 @@ dependencies = [ "serde", "snap", 
"test-fuzz", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7401,7 +7401,7 @@ dependencies = [ "reth-primitives", "reth-primitives-traits", "serde", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] @@ -7467,7 +7467,7 @@ dependencies = [ "proptest-derive", "rustc-hash 2.0.0", "serde", - "thiserror-no-std", + "thiserror 2.0.3", ] [[package]] @@ -7664,7 +7664,7 @@ dependencies = [ "reth-transaction-pool", "reth-trie-db", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", ] @@ -7691,7 +7691,7 @@ version = "1.1.2" dependencies = [ "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] @@ -7734,7 +7734,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7759,7 +7759,7 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.3", "tracing", ] @@ -7798,7 +7798,7 @@ dependencies = [ "reqwest", "reth-tracing", "serde_with", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -7858,7 +7858,7 @@ dependencies = [ "serial_test", "smallvec", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7883,7 +7883,7 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", ] @@ -7921,7 +7921,7 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "url", ] @@ -7952,7 +7952,7 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.3", "tracing", "zstd", ] @@ -8087,7 +8087,7 @@ dependencies = [ "serde", "shellexpand", "strum", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "toml", "tracing", @@ -8414,7 +8414,7 @@ dependencies = [ "reth-trie", "revm", "sha2 0.10.8", - "thiserror 1.0.69", + "thiserror 2.0.3", "tracing", ] @@ 
-8477,7 +8477,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -8542,7 +8542,7 @@ dependencies = [ "reth-primitives", "revm-primitives", "serde", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", ] @@ -8717,7 +8717,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.0.0", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -8738,7 +8738,7 @@ dependencies = [ "serde", "serde_json", "test-fuzz", - "thiserror 1.0.69", + "thiserror 2.0.3", "toml", ] @@ -8825,7 +8825,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tower 0.4.13", @@ -8919,7 +8919,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-util", "tower 0.4.13", @@ -8960,7 +8960,7 @@ dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -9044,7 +9044,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -9146,7 +9146,7 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -9173,7 +9173,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -9277,7 +9277,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tracing", "tracing-futures", @@ -9361,7 +9361,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -9473,7 +9473,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "thiserror 1.0.69", + "thiserror 2.0.3", 
"tokio", "tracing", ] @@ -9498,7 +9498,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] @@ -10762,26 +10762,6 @@ dependencies = [ "syn 2.0.89", ] -[[package]] -name = "thiserror-impl-no-std" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e6318948b519ba6dc2b442a6d0b904ebfb8d411a3ad3e07843615a72249758" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "thiserror-no-std" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ad459d94dd517257cc96add8a43190ee620011bb6e6cdc82dafd97dfafafea" -dependencies = [ - "thiserror-impl-no-std", -] - [[package]] name = "thread_local" version = "1.1.8" diff --git a/Cargo.toml b/Cargo.toml index 113d0661f3f..da012d258da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -523,8 +523,7 @@ shellexpand = "3.0.0" smallvec = "1" strum = { version = "0.26", default-features = false } syn = "2.0" -thiserror = "1.0" -thiserror-no-std = { version = "2.0.2", default-features = false } +thiserror = { version = "2.0.0", default-features = false } tracing = "0.1.0" tracing-appender = "0.2" url = "2.3" diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index 9f7ce7ee8f3..60572b45979 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -23,7 +23,7 @@ crc = "3" # misc serde = { workspace = true, features = ["derive"], optional = true } -thiserror-no-std = { workspace = true, default-features = false } +thiserror.workspace = true dyn-clone.workspace = true rustc-hash = { workspace = true, optional = true } @@ -56,7 +56,7 @@ serde = [ std = [ "alloy-chains/std", "alloy-primitives/std", - "thiserror-no-std/std", + "thiserror/std", "rustc-hash/std", "alloy-consensus/std", "once_cell/std", diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index 
b612f3b0b1a..ebc9fb10637 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -176,7 +176,7 @@ impl From for ForkId { } /// Reason for rejecting provided `ForkId`. -#[derive(Clone, Copy, Debug, thiserror_no_std::Error, PartialEq, Eq, Hash)] +#[derive(Clone, Copy, Debug, thiserror::Error, PartialEq, Eq, Hash)] pub enum ValidationError { /// Remote node is outdated and needs a software update. #[error( diff --git a/crates/storage/nippy-jar/Cargo.toml b/crates/storage/nippy-jar/Cargo.toml index 9f212bf44e8..56f140afbda 100644 --- a/crates/storage/nippy-jar/Cargo.toml +++ b/crates/storage/nippy-jar/Cargo.toml @@ -34,7 +34,6 @@ derive_more.workspace = true rand = { workspace = true, features = ["small_rng"] } tempfile.workspace = true - [features] default = [] test-utils = [] diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index f69bb44a068..385e39357a0 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -90,6 +90,6 @@ pub enum NippyJarError { InconsistentState, /// A specified file is missing. - #[error("Missing file: {0}.")] + #[error("Missing file: {}", .0.display())] MissingFile(PathBuf), } From 4d65b2f5db29456eb90d43aac88e6edd645e1c3c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Nov 2024 11:50:24 +0100 Subject: [PATCH 773/970] chore: misc raw message (#13006) --- crates/net/eth-wire/src/capability.rs | 18 +++++++++++++++++- crates/net/network/src/message.rs | 2 +- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 625971e0e7b..3d8c61800eb 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -27,6 +27,22 @@ pub struct RawCapabilityMessage { pub payload: Bytes, } +impl RawCapabilityMessage { + /// Creates a new capability message with the given id and payload. 
+ pub const fn new(id: usize, payload: Bytes) -> Self { + Self { id, payload } + } + + /// Creates a raw message for the eth sub-protocol. + /// + /// Caller must ensure that the rlp encoded `payload` matches the given `id`. + /// + /// See also [`EthMessage`] + pub const fn eth(id: EthMessageID, payload: Bytes) -> Self { + Self::new(id as usize, payload) + } +} + /// Various protocol related event types bubbled up from a session that need to be handled by the /// network. #[derive(Debug)] @@ -38,7 +54,7 @@ pub enum CapabilityMessage { serde(bound = "EthMessage: Serialize + serde::de::DeserializeOwned") )] Eth(EthMessage), - /// Any other capability message. + /// Any other or manually crafted eth message. Other(RawCapabilityMessage), } diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 199498b0b4c..e88ccb54c36 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -55,7 +55,7 @@ pub enum PeerMessage { PooledTransactions(NewPooledTransactionHashes), /// All `eth` request variants. EthRequest(PeerRequest), - /// Other than eth namespace message + /// Any other or manually crafted eth message. 
Other(RawCapabilityMessage), } From b6ba822cc38a014590130f601892c7f54e950c16 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Nov 2024 11:51:44 +0100 Subject: [PATCH 774/970] chore: disable serde-with default features (#13002) --- Cargo.toml | 2 +- crates/evm/execution-types/Cargo.toml | 3 ++- crates/primitives-traits/Cargo.toml | 3 ++- crates/primitives/Cargo.toml | 3 ++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index da012d258da..2543684166f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -517,7 +517,7 @@ rustc-hash = { version = "2.0", default-features = false } schnellru = "0.2" serde = { version = "1.0", default-features = false } serde_json = "1.0.94" -serde_with = "3.3.0" +serde_with = { version = "3", default-features = false, features = ["macros"] } sha2 = { version = "0.10", default-features = false } shellexpand = "3.0.0" smallvec = "1" diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index c7fbad673db..c0ef2c5a694 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -63,5 +63,6 @@ std = [ "revm/std", "serde?/std", "reth-primitives-traits/std", - "alloy-consensus/std", + "alloy-consensus/std", + "serde_with?/std" ] diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index df4491b2d12..b625dfcd017 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -61,7 +61,8 @@ std = [ "alloy-genesis/std", "alloy-primitives/std", "revm-primitives/std", - "serde?/std" + "serde?/std", + "serde_with?/std" ] test-utils = [ "arbitrary", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 9787c9f3a6a..50f89dcf698 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -103,7 +103,8 @@ std = [ "revm-primitives/std", "secp256k1?/std", "serde/std", - "alloy-trie/std" + "alloy-trie/std", + "serde_with?/std" ] 
reth-codec = [ "dep:reth-codecs", From a01e0319e4b6b566453cfcbb6c5fea2fd0b01859 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Nov 2024 11:53:05 +0100 Subject: [PATCH 775/970] feat: add clone into consensus (#12999) --- crates/net/network/src/transactions/mod.rs | 2 +- crates/optimism/node/src/txpool.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/transaction.rs | 2 +- crates/rpc/rpc/src/txpool.rs | 4 ++-- crates/transaction-pool/src/maintain.rs | 2 +- crates/transaction-pool/src/traits.rs | 11 +++++++++++ crates/transaction-pool/src/validate/mod.rs | 9 ++++++++- 7 files changed, 25 insertions(+), 7 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index d533aee102b..e87731a0fd5 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1489,7 +1489,7 @@ impl PropagateTransaction { P: PoolTransaction>, { let size = tx.encoded_length(); - let transaction = tx.transaction.clone().into_consensus().into(); + let transaction = tx.transaction.clone_into_consensus().into(); let transaction = Arc::new(transaction); Self { size, transaction } } diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 6db5d69568b..6d269d361d8 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -142,7 +142,7 @@ where let l1_block_info = self.block_info.l1_block_info.read().clone(); let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); - let tx: TransactionSigned = valid_tx.transaction().clone().into_consensus().into(); + let tx: TransactionSigned = valid_tx.transaction().clone_into_consensus().into(); tx.encode_2718(&mut encoded); let cost_addition = match l1_block_info.l1_tx_data_fee( diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 6ad8f8fd6ec..5bf6b0c02ba 100644 --- 
a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -239,7 +239,7 @@ pub trait EthTransactions: LoadTransaction { if let Some(tx) = RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { - let transaction = tx.transaction.clone().into_consensus(); + let transaction = tx.transaction.clone_into_consensus(); return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder())?)); } } diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 3e46183b466..442e28ffc4c 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -49,7 +49,7 @@ where { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), - from_recovered(tx.clone().into_consensus().into(), resp_builder)?, + from_recovered(tx.clone_into_consensus().into(), resp_builder)?, ); Ok(()) @@ -101,7 +101,7 @@ where inspect: &mut BTreeMap>, ) { let entry = inspect.entry(tx.sender()).or_default(); - let tx: TransactionSignedEcRecovered = tx.clone().into_consensus().into(); + let tx: TransactionSignedEcRecovered = tx.clone_into_consensus().into(); entry.insert( tx.nonce().to_string(), TxpoolInspectSummary { diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 02f218d4b09..6cf2faad9d6 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -602,7 +602,7 @@ where .into_iter() .map(|tx| { let recovered: TransactionSignedEcRecovered = - tx.transaction.clone().into_consensus().into(); + tx.transaction.clone_into_consensus().into(); recovered.into_signed() }) .collect::>(); diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index b5fc0db5204..6adb81729e1 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -979,6 +979,13 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { tx.try_into() } + /// 
Clone the transaction into a consensus variant. + /// + /// This method is preferred when the [`PoolTransaction`] already wraps the consensus variant. + fn clone_into_consensus(&self) -> Self::Consensus { + self.clone().into_consensus() + } + /// Define a method to convert from the `Self` type to `Consensus` fn into_consensus(self) -> Self::Consensus { self.into() @@ -1237,6 +1244,10 @@ impl PoolTransaction for EthPooledTransaction { type Pooled = PooledTransactionsElementEcRecovered; + fn clone_into_consensus(&self) -> Self::Consensus { + self.transaction().clone() + } + fn try_consensus_into_pooled( tx: Self::Consensus, ) -> Result { diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 35e3a85537e..a93825212f8 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -375,6 +375,13 @@ impl ValidPoolTransaction { self.is_eip4844() != other.is_eip4844() } + /// Converts to this type into the consensus transaction of the pooled transaction. + /// + /// Note: this takes `&self` since indented usage is via `Arc`. + pub fn to_consensus(&self) -> T::Consensus { + self.transaction.clone_into_consensus() + } + /// Determines whether a candidate transaction (`maybe_replacement`) is underpriced compared to /// an existing transaction in the pool. /// @@ -433,7 +440,7 @@ impl>> ValidPoo /// /// Note: this takes `&self` since indented usage is via `Arc`. 
pub fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { - self.transaction.clone().into_consensus().into() + self.to_consensus().into() } } From a8e2b77df5c2334f38c4c12a824fc8329ac5c813 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 29 Nov 2024 11:10:38 +0000 Subject: [PATCH 776/970] chore(trie): sparse trie trace logs and assertion messages (#12969) --- crates/engine/tree/src/tree/root.rs | 10 ++++++++++ crates/trie/sparse/src/state.rs | 7 +++++++ crates/trie/sparse/src/trie.rs | 25 +++++++++++++++++++------ 3 files changed, 36 insertions(+), 6 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 602e87a63db..31e79ca04b5 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -224,6 +224,7 @@ where for (address, account) in update { if account.is_touched() { let hashed_address = keccak256(address); + trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); let destroyed = account.is_selfdestructed(); let info = if account.is_empty() { None } else { Some(account.info.into()) }; @@ -377,6 +378,8 @@ where "Processing calculated proof" ); + trace!(target: "engine::root", ?proof, "Proof calculated"); + if let Some((combined_proof, combined_state_update)) = self.on_proof(sequence_number, proof, state_update) { @@ -496,6 +499,7 @@ fn update_sparse_trie( targets: HashMap>, state: HashedPostState, ) -> SparseStateTrieResult<(Box, Duration)> { + trace!(target: "engine::root::sparse", "Updating sparse trie"); let started_at = Instant::now(); // Reveal new accounts and storage slots. @@ -503,18 +507,23 @@ fn update_sparse_trie( // Update storage slots with new values and calculate storage roots. 
for (address, storage) in state.storages { + trace!(target: "engine::root::sparse", ?address, "Updating storage"); let storage_trie = trie.storage_trie_mut(&address).ok_or(SparseTrieError::Blind)?; if storage.wiped { + trace!(target: "engine::root::sparse", ?address, "Wiping storage"); storage_trie.wipe(); } for (slot, value) in storage.storage { let slot_nibbles = Nibbles::unpack(slot); if value.is_zero() { + trace!(target: "engine::root::sparse", ?address, ?slot, "Removing storage slot"); + // TODO: handle blinded node error storage_trie.remove_leaf(&slot_nibbles)?; } else { + trace!(target: "engine::root::sparse", ?address, ?slot, "Updating storage slot"); storage_trie .update_leaf(slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?; } @@ -525,6 +534,7 @@ fn update_sparse_trie( // Update accounts with new values for (address, account) in state.accounts { + trace!(target: "engine::root::sparse", ?address, "Updating account"); trie.update_account(address, account.unwrap_or_default())?; } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 549a86733f8..6444c7cd2c4 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -7,6 +7,7 @@ use alloy_primitives::{ }; use alloy_rlp::{Decodable, Encodable}; use reth_primitives_traits::Account; +use reth_tracing::tracing::trace; use reth_trie_common::{ updates::{StorageTrieUpdates, TrieUpdates}, MultiProof, Nibbles, TrieAccount, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, @@ -149,6 +150,7 @@ impl SparseStateTrie { // Reveal the remaining proof nodes. for (path, bytes) in account_nodes { let node = TrieNode::decode(&mut &bytes[..])?; + trace!(target: "trie::sparse", ?path, ?node, "Revealing account node"); trie.reveal_node(path, node)?; } } @@ -168,6 +170,7 @@ impl SparseStateTrie { // Reveal the remaining proof nodes. 
for (path, bytes) in storage_nodes { let node = TrieNode::decode(&mut &bytes[..])?; + trace!(target: "trie::sparse", ?account, ?path, ?node, "Revealing storage node"); trie.reveal_node(path, node)?; } } @@ -209,8 +212,10 @@ impl SparseStateTrie { pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> { let nibbles = Nibbles::unpack(address); let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { + trace!(target: "trie::sparse", ?address, "Calculating storage root to update account"); storage_trie.root().ok_or(SparseTrieError::Blind)? } else if self.revealed.contains_key(&address) { + trace!(target: "trie::sparse", ?address, "Retrieving storage root from account leaf to update account"); let state = self.state.as_revealed_mut().ok_or(SparseTrieError::Blind)?; // The account was revealed, either... if let Some(value) = state.get_leaf_value(&nibbles) { @@ -225,8 +230,10 @@ impl SparseStateTrie { }; if account.is_empty() && storage_root == EMPTY_ROOT_HASH { + trace!(target: "trie::sparse", ?address, "Removing account"); self.remove_account_leaf(&nibbles) } else { + trace!(target: "trie::sparse", ?address, "Updating account"); self.account_rlp_buf.clear(); TrieAccount::from((account, storage_root)).encode(&mut self.account_rlp_buf); self.update_account_leaf(nibbles, self.account_rlp_buf.clone()) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 97446680df4..12a0f87e129 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -5,7 +5,7 @@ use alloy_primitives::{ B256, }; use alloy_rlp::Decodable; -use reth_tracing::tracing::debug; +use reth_tracing::tracing::trace; use reth_trie_common::{ prefix_set::{PrefixSet, PrefixSetMut}, BranchNodeCompact, BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieMask, @@ -371,7 +371,7 @@ impl RevealedSparseTrie { // in `nodes`, but not in the `values`. 
let mut removed_nodes = self.take_nodes_for_path(path)?; - debug!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); + trace!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); // Pop the first node from the stack which is the leaf node we want to remove. let mut child = removed_nodes.pop().expect("leaf exists"); #[cfg(debug_assertions)] @@ -459,7 +459,7 @@ impl RevealedSparseTrie { // Remove the only child node. let child = self.nodes.get(&child_path).unwrap(); - debug!(target: "trie::sparse", ?removed_path, ?child_path, ?child, "Branch node has only one child"); + trace!(target: "trie::sparse", ?removed_path, ?child_path, ?child, "Branch node has only one child"); let mut delete_child = false; let new_node = match child { @@ -520,7 +520,7 @@ impl RevealedSparseTrie { node: new_node.clone(), unset_branch_nibble: None, }; - debug!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); + trace!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); self.nodes.insert(removed_path, new_node); } @@ -561,7 +561,13 @@ impl RevealedSparseTrie { { let mut current = current.clone(); current.extend_from_slice_unchecked(key); - assert!(path.starts_with(¤t)); + assert!( + path.starts_with(¤t), + "path: {:?}, current: {:?}, key: {:?}", + path, + current, + key + ); } let path = current.clone(); @@ -570,7 +576,14 @@ impl RevealedSparseTrie { } SparseNode::Branch { state_mask, .. } => { let nibble = path[current.len()]; - debug_assert!(state_mask.is_bit_set(nibble)); + debug_assert!( + state_mask.is_bit_set(nibble), + "current: {:?}, path: {:?}, nibble: {:?}, state_mask: {:?}", + current, + path, + nibble, + state_mask + ); // If the branch node has a child that is a leaf node that we're removing, // we need to unset this nibble. 
From 1f1671ad8cfc857126dc2d61ef9e35800358f06a Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 29 Nov 2024 16:23:16 +0400 Subject: [PATCH 777/970] feat: `SerdeBincodeCompat` trait (#12991) --- crates/evm/execution-types/src/chain.rs | 43 +++++++++----- crates/exex/types/Cargo.toml | 1 + crates/exex/types/src/notification.rs | 27 ++++++--- crates/primitives-traits/src/block/body.rs | 6 +- crates/primitives-traits/src/block/header.rs | 4 +- crates/primitives-traits/src/header/sealed.rs | 23 +++++--- crates/primitives-traits/src/lib.rs | 17 +++++- .../src/serde_bincode_compat.rs | 14 +++++ crates/primitives/src/block.rs | 58 ++++++++++++++----- 9 files changed, 143 insertions(+), 50 deletions(-) create mode 100644 crates/primitives-traits/src/serde_bincode_compat.rs diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 1767a7f43f6..20bf5c6d24d 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -525,7 +525,9 @@ pub(super) mod serde_bincode_compat { use crate::ExecutionOutcome; use alloc::borrow::Cow; use alloy_primitives::BlockNumber; - use reth_primitives::serde_bincode_compat::SealedBlockWithSenders; + use reth_primitives::{ + serde_bincode_compat::SealedBlockWithSenders, EthPrimitives, NodePrimitives, + }; use reth_trie_common::serde_bincode_compat::updates::TrieUpdates; use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -547,18 +549,24 @@ pub(super) mod serde_bincode_compat { /// } /// ``` #[derive(Debug, Serialize, Deserialize)] - pub struct Chain<'a> { - blocks: SealedBlocksWithSenders<'a>, - execution_outcome: Cow<'a, ExecutionOutcome>, + pub struct Chain<'a, N = EthPrimitives> + where + N: NodePrimitives, + { + blocks: SealedBlocksWithSenders<'a, N::Block>, + execution_outcome: Cow<'a, ExecutionOutcome>, trie_updates: Option>, } #[derive(Debug)] - struct 
SealedBlocksWithSenders<'a>( - Cow<'a, BTreeMap>, + struct SealedBlocksWithSenders<'a, B: reth_primitives_traits::Block>( + Cow<'a, BTreeMap>>, ); - impl Serialize for SealedBlocksWithSenders<'_> { + impl Serialize for SealedBlocksWithSenders<'_, B> + where + B: reth_primitives_traits::Block, + { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -573,20 +581,26 @@ pub(super) mod serde_bincode_compat { } } - impl<'de> Deserialize<'de> for SealedBlocksWithSenders<'_> { + impl<'de, B> Deserialize<'de> for SealedBlocksWithSenders<'_, B> + where + B: reth_primitives_traits::Block, + { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { Ok(Self(Cow::Owned( - BTreeMap::>::deserialize(deserializer) + BTreeMap::>::deserialize(deserializer) .map(|blocks| blocks.into_iter().map(|(n, b)| (n, b.into())).collect())?, ))) } } - impl<'a> From<&'a super::Chain> for Chain<'a> { - fn from(value: &'a super::Chain) -> Self { + impl<'a, N> From<&'a super::Chain> for Chain<'a, N> + where + N: NodePrimitives, + { + fn from(value: &'a super::Chain) -> Self { Self { blocks: SealedBlocksWithSenders(Cow::Borrowed(&value.blocks)), execution_outcome: Cow::Borrowed(&value.execution_outcome), @@ -595,8 +609,11 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> From> for super::Chain { - fn from(value: Chain<'a>) -> Self { + impl<'a, N> From> for super::Chain + where + N: NodePrimitives, + { + fn from(value: Chain<'a, N>) -> Self { Self { blocks: value.blocks.0.into_owned(), execution_outcome: value.execution_outcome.into_owned(), diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index 3b67fd5aa50..4d99bd7e657 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-chain-state.workspace = true reth-execution-types.workspace = true +reth-primitives.workspace = true reth-primitives-traits.workspace = true # reth diff --git a/crates/exex/types/src/notification.rs 
b/crates/exex/types/src/notification.rs index fb0762f04b3..5ded40d061b 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -76,6 +76,7 @@ pub(super) mod serde_bincode_compat { use std::sync::Arc; use reth_execution_types::serde_bincode_compat::Chain; + use reth_primitives::{EthPrimitives, NodePrimitives}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -96,14 +97,21 @@ pub(super) mod serde_bincode_compat { /// ``` #[derive(Debug, Serialize, Deserialize)] #[allow(missing_docs)] - pub enum ExExNotification<'a> { - ChainCommitted { new: Chain<'a> }, - ChainReorged { old: Chain<'a>, new: Chain<'a> }, - ChainReverted { old: Chain<'a> }, + #[serde(bound = "")] + pub enum ExExNotification<'a, N = EthPrimitives> + where + N: NodePrimitives, + { + ChainCommitted { new: Chain<'a, N> }, + ChainReorged { old: Chain<'a, N>, new: Chain<'a, N> }, + ChainReverted { old: Chain<'a, N> }, } - impl<'a> From<&'a super::ExExNotification> for ExExNotification<'a> { - fn from(value: &'a super::ExExNotification) -> Self { + impl<'a, N> From<&'a super::ExExNotification> for ExExNotification<'a, N> + where + N: NodePrimitives, + { + fn from(value: &'a super::ExExNotification) -> Self { match value { super::ExExNotification::ChainCommitted { new } => { ExExNotification::ChainCommitted { new: Chain::from(new.as_ref()) } @@ -121,8 +129,11 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> From> for super::ExExNotification { - fn from(value: ExExNotification<'a>) -> Self { + impl<'a, N> From> for super::ExExNotification + where + N: NodePrimitives, + { + fn from(value: ExExNotification<'a, N>) -> Self { match value { ExExNotification::ChainCommitted { new } => { Self::ChainCommitted { new: Arc::new(new.into()) } diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 76bf916add9..7491c75faf9 100644 --- 
a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -4,7 +4,10 @@ use alloc::{fmt, vec::Vec}; use alloy_eips::eip4895::Withdrawals; -use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, SignedTransaction}; +use crate::{ + FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, MaybeSerdeBincodeCompat, + SignedTransaction, +}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullBlockBody: BlockBody {} @@ -26,6 +29,7 @@ pub trait BlockBody: + InMemorySize + MaybeSerde + MaybeArbitrary + + MaybeSerdeBincodeCompat { /// Ordered list of signed transactions as committed in block. type Transaction: SignedTransaction; diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 26806808532..e03535dd308 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -4,7 +4,7 @@ use core::fmt; use alloy_primitives::Sealable; -use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, MaybeSerdeBincodeCompat}; /// Helper trait that unifies all behaviour required by block header to support full node /// operations. @@ -29,6 +29,7 @@ pub trait BlockHeader: + InMemorySize + MaybeSerde + MaybeArbitrary + + MaybeSerdeBincodeCompat { } @@ -48,5 +49,6 @@ impl BlockHeader for T where + InMemorySize + MaybeSerde + MaybeArbitrary + + MaybeSerdeBincodeCompat { } diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 08add0ac3c1..f167ffbf284 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -173,11 +173,12 @@ where /// Bincode-compatible [`SealedHeader`] serde implementation. 
#[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { - use alloy_consensus::serde_bincode_compat::Header; use alloy_primitives::BlockHash; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; + use crate::serde_bincode_compat::SerdeBincodeCompat; + /// Bincode-compatible [`super::SealedHeader`] serde implementation. /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: @@ -193,20 +194,21 @@ pub(super) mod serde_bincode_compat { /// header: SealedHeader, /// } /// ``` - #[derive(Debug, Serialize, Deserialize)] - pub struct SealedHeader<'a> { + #[derive(derive_more::Debug, Serialize, Deserialize)] + #[debug(bound(H::BincodeRepr<'a>: core::fmt::Debug))] + pub struct SealedHeader<'a, H: SerdeBincodeCompat = super::Header> { hash: BlockHash, - header: Header<'a>, + header: H::BincodeRepr<'a>, } - impl<'a> From<&'a super::SealedHeader> for SealedHeader<'a> { - fn from(value: &'a super::SealedHeader) -> Self { - Self { hash: value.hash, header: Header::from(&value.header) } + impl<'a, H: SerdeBincodeCompat> From<&'a super::SealedHeader> for SealedHeader<'a, H> { + fn from(value: &'a super::SealedHeader) -> Self { + Self { hash: value.hash, header: (&value.header).into() } } } - impl<'a> From> for super::SealedHeader { - fn from(value: SealedHeader<'a>) -> Self { + impl<'a, H: SerdeBincodeCompat> From> for super::SealedHeader { + fn from(value: SealedHeader<'a, H>) -> Self { Self { hash: value.hash, header: value.header.into() } } } @@ -229,6 +231,9 @@ pub(super) mod serde_bincode_compat { } } + impl SerdeBincodeCompat for super::SealedHeader { + type BincodeRepr<'a> = SealedHeader<'a, H>; + } #[cfg(test)] mod tests { use super::super::{serde_bincode_compat, SealedHeader}; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 338f8f621e1..5bdfd01eb9c 100644 --- a/crates/primitives-traits/src/lib.rs +++ 
b/crates/primitives-traits/src/lib.rs @@ -68,9 +68,7 @@ pub use header::{BlockWithParent, Header, HeaderError, SealedHeader}; /// /// Read more: #[cfg(feature = "serde-bincode-compat")] -pub mod serde_bincode_compat { - pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; -} +pub mod serde_bincode_compat; /// Heuristic size trait pub mod size; @@ -118,3 +116,16 @@ pub trait MaybeCompact {} impl MaybeCompact for T where T: reth_codecs::Compact {} #[cfg(not(feature = "reth-codec"))] impl MaybeCompact for T {} + +/// Helper trait that requires serde bincode compatibility implementation. +#[cfg(feature = "serde-bincode-compat")] +pub trait MaybeSerdeBincodeCompat: crate::serde_bincode_compat::SerdeBincodeCompat {} +/// Noop. Helper trait that would require serde bincode compatibility implementation if +/// `serde-bincode-compat` feature were enabled. +#[cfg(not(feature = "serde-bincode-compat"))] +pub trait MaybeSerdeBincodeCompat {} + +#[cfg(feature = "serde-bincode-compat")] +impl MaybeSerdeBincodeCompat for T where T: crate::serde_bincode_compat::SerdeBincodeCompat {} +#[cfg(not(feature = "serde-bincode-compat"))] +impl MaybeSerdeBincodeCompat for T {} diff --git a/crates/primitives-traits/src/serde_bincode_compat.rs b/crates/primitives-traits/src/serde_bincode_compat.rs new file mode 100644 index 00000000000..a1f7d42569e --- /dev/null +++ b/crates/primitives-traits/src/serde_bincode_compat.rs @@ -0,0 +1,14 @@ +use core::fmt::Debug; + +pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; +use serde::{de::DeserializeOwned, Serialize}; + +/// Trait for types that can be serialized and deserialized using bincode. +pub trait SerdeBincodeCompat: Sized + 'static { + /// Serde representation of the type for bincode serialization. 
+ type BincodeRepr<'a>: Debug + Serialize + DeserializeOwned + From<&'a Self> + Into; +} + +impl SerdeBincodeCompat for alloy_consensus::Header { + type BincodeRepr<'a> = alloy_consensus::serde_bincode_compat::Header<'a>; +} diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 5618d81bd8f..c4905458c75 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -744,7 +744,7 @@ pub(super) mod serde_bincode_compat { use alloy_consensus::serde_bincode_compat::Header; use alloy_eips::eip4895::Withdrawals; use alloy_primitives::Address; - use reth_primitives_traits::serde_bincode_compat::SealedHeader; + use reth_primitives_traits::serde_bincode_compat::{SealedHeader, SerdeBincodeCompat}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -810,6 +810,10 @@ pub(super) mod serde_bincode_compat { } } + impl SerdeBincodeCompat for super::BlockBody { + type BincodeRepr<'a> = BlockBody<'a>; + } + /// Bincode-compatible [`super::SealedBlock`] serde implementation. 
/// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: @@ -826,19 +830,34 @@ pub(super) mod serde_bincode_compat { /// } /// ``` #[derive(Debug, Serialize, Deserialize)] - pub struct SealedBlock<'a> { - header: SealedHeader<'a>, - body: BlockBody<'a>, + pub struct SealedBlock<'a, H = super::Header, B = super::BlockBody> + where + H: SerdeBincodeCompat, + B: SerdeBincodeCompat, + { + header: SealedHeader<'a, H>, + body: B::BincodeRepr<'a>, } - impl<'a> From<&'a super::SealedBlock> for SealedBlock<'a> { - fn from(value: &'a super::SealedBlock) -> Self { - Self { header: SealedHeader::from(&value.header), body: BlockBody::from(&value.body) } + impl<'a, H, B> From<&'a super::SealedBlock> for SealedBlock<'a, H, B> + where + H: SerdeBincodeCompat, + B: SerdeBincodeCompat, + { + fn from(value: &'a super::SealedBlock) -> Self { + Self { + header: SealedHeader::from(&value.header), + body: B::BincodeRepr::from(&value.body), + } } } - impl<'a> From> for super::SealedBlock { - fn from(value: SealedBlock<'a>) -> Self { + impl<'a, H, B> From> for super::SealedBlock + where + H: SerdeBincodeCompat, + B: SerdeBincodeCompat, + { + fn from(value: SealedBlock<'a, H, B>) -> Self { Self { header: value.header.into(), body: value.body.into() } } } @@ -877,19 +896,28 @@ pub(super) mod serde_bincode_compat { /// } /// ``` #[derive(Debug, Serialize, Deserialize)] - pub struct SealedBlockWithSenders<'a> { - block: SealedBlock<'a>, + pub struct SealedBlockWithSenders<'a, B = super::Block> + where + B: reth_primitives_traits::Block, + { + block: SealedBlock<'a, B::Header, B::Body>, senders: Cow<'a, Vec
>, } - impl<'a> From<&'a super::SealedBlockWithSenders> for SealedBlockWithSenders<'a> { - fn from(value: &'a super::SealedBlockWithSenders) -> Self { + impl<'a, B> From<&'a super::SealedBlockWithSenders> for SealedBlockWithSenders<'a, B> + where + B: reth_primitives_traits::Block, + { + fn from(value: &'a super::SealedBlockWithSenders) -> Self { Self { block: SealedBlock::from(&value.block), senders: Cow::Borrowed(&value.senders) } } } - impl<'a> From> for super::SealedBlockWithSenders { - fn from(value: SealedBlockWithSenders<'a>) -> Self { + impl<'a, B> From> for super::SealedBlockWithSenders + where + B: reth_primitives_traits::Block, + { + fn from(value: SealedBlockWithSenders<'a, B>) -> Self { Self { block: value.block.into(), senders: value.senders.into_owned() } } } From b6b8c474abd339d301f33c1d20051656b11a3a34 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 29 Nov 2024 16:41:46 +0400 Subject: [PATCH 778/970] feat: on-disk reorg E2E test (#12977) --- crates/e2e-test-utils/src/lib.rs | 14 ++- crates/e2e-test-utils/src/node.rs | 60 +++++++++- crates/ethereum/node/tests/e2e/p2p.rs | 143 +++++++++--------------- crates/ethereum/node/tests/e2e/utils.rs | 131 +++++++++++++++++++++- crates/optimism/node/src/utils.rs | 8 +- 5 files changed, 251 insertions(+), 105 deletions(-) diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 15065377fab..72d912d6b54 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -53,7 +53,7 @@ pub async fn setup( chain_spec: Arc, is_dev: bool, attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, -) -> eyre::Result<(Vec>, TaskManager, Wallet)> +) -> eyre::Result<(Vec>, TaskManager, Wallet)> where N: Default + Node> + NodeTypesForTree + NodeTypesWithEngine, N::ComponentsBuilder: NodeComponentsBuilder< @@ -115,7 +115,7 @@ pub async fn setup_engine( is_dev: bool, attributes_generator: impl Fn(u64) -> <::Engine 
as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<( - Vec>>>, + Vec>>>, TaskManager, Wallet, )> @@ -183,6 +183,9 @@ where let mut node = NodeTestContext::new(node, attributes_generator).await?; + let genesis = node.block_hash(0); + node.engine_api.update_forkchoice(genesis, genesis).await?; + // Connect each node in a chain. if let Some(previous_node) = nodes.last_mut() { previous_node.connect(&mut node).await; @@ -203,7 +206,8 @@ where // Type aliases -type TmpDB = Arc>; +/// Testing database +pub type TmpDB = Arc>; type TmpNodeAdapter>> = FullNodeTypesAdapter, Provider>; @@ -216,5 +220,5 @@ pub type Adapter; /// Type alias for a type of `NodeHelper` -pub type NodeHelperType>> = - NodeTestContext, AO>; +pub type NodeHelperType>> = + NodeTestContext, >>::AddOns>; diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index b3eb641c137..dcd24df5c7a 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -3,6 +3,7 @@ use crate::{ rpc::RpcTestContext, traits::PayloadEnvelopeExt, }; use alloy_consensus::BlockHeader; +use alloy_eips::BlockId; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use alloy_rpc_types_engine::PayloadStatusEnum; use alloy_rpc_types_eth::BlockNumberOrTag; @@ -134,8 +135,8 @@ where Ok((self.payload.expect_built_payload().await?, eth_attr)) } - /// Advances the node forward one block - pub async fn advance_block( + /// Triggers payload building job and submits it to the engine. 
+ pub async fn build_and_submit_payload( &mut self, ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where @@ -146,13 +147,27 @@ where { let (payload, eth_attr) = self.new_payload().await?; - let block_hash = self - .engine_api + self.engine_api .submit_payload(payload.clone(), eth_attr.clone(), PayloadStatusEnum::Valid) .await?; + Ok((payload, eth_attr)) + } + + /// Advances the node forward one block + pub async fn advance_block( + &mut self, + ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> + where + ::ExecutionPayloadEnvelopeV3: + From + PayloadEnvelopeExt, + ::ExecutionPayloadEnvelopeV4: + From + PayloadEnvelopeExt, + { + let (payload, eth_attr) = self.build_and_submit_payload().await?; + // trigger forkchoice update via engine api to commit the block to the blockchain - self.engine_api.update_forkchoice(block_hash, block_hash).await?; + self.engine_api.update_forkchoice(payload.block().hash(), payload.block().hash()).await?; Ok((payload, eth_attr)) } @@ -238,6 +253,41 @@ where Ok(()) } + /// Gets block hash by number. + pub fn block_hash(&self, number: u64) -> BlockHash { + self.inner + .provider + .sealed_header_by_number_or_tag(BlockNumberOrTag::Number(number)) + .unwrap() + .unwrap() + .hash() + } + + /// Sends FCU and waits for the node to sync to the given block. + pub async fn sync_to(&self, block: BlockHash) -> eyre::Result<()> { + self.engine_api.update_forkchoice(block, block).await?; + + let start = std::time::Instant::now(); + + while self + .inner + .provider + .sealed_header_by_id(BlockId::Number(BlockNumberOrTag::Latest))? + .is_none_or(|h| h.hash() != block) + { + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + assert!(start.elapsed() <= std::time::Duration::from_secs(10), "timed out"); + } + + // Hack to make sure that all components have time to process canonical state update. 
+ // Otherwise, this might result in e.g "nonce too low" errors when advancing chain further, + // making tests flaky. + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + + Ok(()) + } + /// Returns the RPC URL. pub fn rpc_url(&self) -> Url { let addr = self.inner.rpc_server_handle().http_local_addr().unwrap(); diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index f8680f47ae3..343521ef8eb 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -1,19 +1,9 @@ -use crate::utils::eth_payload_attributes; -use alloy_consensus::TxType; -use alloy_primitives::bytes; -use alloy_provider::{ - network::{ - Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder, TransactionBuilder7702, - }, - Provider, ProviderBuilder, SendableTx, -}; -use alloy_rpc_types_eth::TransactionRequest; -use alloy_signer::SignerSync; -use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use crate::utils::{advance_with_random_transactions, eth_payload_attributes}; +use alloy_provider::{Provider, ProviderBuilder}; +use rand::{rngs::StdRng, Rng, SeedableRng}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{setup, setup_engine, transaction::TransactionTestContext}; use reth_node_ethereum::EthereumNode; -use revm::primitives::{AccessListItem, Authorization}; use std::sync::Arc; #[tokio::test] @@ -76,80 +66,12 @@ async fn e2e_test_send_transactions() -> eyre::Result<()> { .build(), ); - let (mut nodes, _tasks, wallet) = + let (mut nodes, _tasks, _) = setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; let mut node = nodes.pop().unwrap(); - let signers = wallet.gen(); let provider = ProviderBuilder::new().with_recommended_fillers().on_http(node.rpc_url()); - // simple contract which writes to storage on any call - let dummy_bytecode = 
bytes!("6080604052348015600f57600080fd5b50602880601d6000396000f3fe4360a09081523360c0526040608081905260e08152902080805500fea164736f6c6343000810000a"); - let mut call_destinations = signers.iter().map(|s| s.address()).collect::>(); - - // Produce 100 random blocks with random transactions - for _ in 0..100 { - let tx_count = rng.gen_range(1..20); - - let mut pending = vec![]; - for _ in 0..tx_count { - let signer = signers.choose(&mut rng).unwrap(); - let tx_type = TxType::try_from(rng.gen_range(0..=4)).unwrap(); - - let mut tx = TransactionRequest::default().with_from(signer.address()); - - let should_create = - rng.gen::() && tx_type != TxType::Eip4844 && tx_type != TxType::Eip7702; - if should_create { - tx = tx.into_create().with_input(dummy_bytecode.clone()); - } else { - tx = tx.with_to(*call_destinations.choose(&mut rng).unwrap()).with_input( - (0..rng.gen_range(0..10000)).map(|_| rng.gen()).collect::>(), - ); - } - - if matches!(tx_type, TxType::Legacy | TxType::Eip2930) { - tx = tx.with_gas_price(provider.get_gas_price().await?); - } - - if rng.gen::() || tx_type == TxType::Eip2930 { - tx = tx.with_access_list( - vec![AccessListItem { - address: *call_destinations.choose(&mut rng).unwrap(), - storage_keys: (0..rng.gen_range(0..100)).map(|_| rng.gen()).collect(), - }] - .into(), - ); - } - - if tx_type == TxType::Eip7702 { - let signer = signers.choose(&mut rng).unwrap(); - let auth = Authorization { - chain_id: provider.get_chain_id().await?, - address: *call_destinations.choose(&mut rng).unwrap(), - nonce: provider.get_transaction_count(signer.address()).await?, - }; - let sig = signer.sign_hash_sync(&auth.signature_hash())?; - tx = tx.with_authorization_list(vec![auth.into_signed(sig)]) - } - - let SendableTx::Builder(tx) = provider.fill(tx).await? 
else { unreachable!() }; - let tx = - NetworkWallet::::sign_request(&EthereumWallet::new(signer.clone()), tx) - .await?; - - pending.push(provider.send_tx_envelope(tx).await?); - } - - let (payload, _) = node.advance_block().await?; - assert!(payload.block().raw_transactions().len() == tx_count); - - for pending in pending { - let receipt = pending.get_receipt().await?; - if let Some(address) = receipt.contract_address { - call_destinations.push(address); - } - } - } + advance_with_random_transactions(&mut node, 100, &mut rng, true).await?; let second_node = nodes.pop().unwrap(); let second_provider = @@ -159,15 +81,58 @@ async fn e2e_test_send_transactions() -> eyre::Result<()> { let head = provider.get_block_by_number(Default::default(), false.into()).await?.unwrap().header.hash; - second_node.engine_api.update_forkchoice(head, head).await?; - let start = std::time::Instant::now(); + second_node.sync_to(head).await?; - while provider.get_block_number().await? != second_provider.get_block_number().await? { - tokio::time::sleep(std::time::Duration::from_millis(100)).await; + Ok(()) +} + +#[tokio::test] +async fn test_long_reorg() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::thread_rng().gen(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {:?}", seed); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, _) = + setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; + + let mut first_node = nodes.pop().unwrap(); + let mut second_node = nodes.pop().unwrap(); + + let first_provider = ProviderBuilder::new().on_http(first_node.rpc_url()); + + // Advance first node 100 blocks. 
+ advance_with_random_transactions(&mut first_node, 100, &mut rng, false).await?; + + // Sync second node to 20th block. + let head = first_provider.get_block_by_number(20.into(), false.into()).await?.unwrap(); + second_node.sync_to(head.header.hash).await?; + + // Produce a fork chain with blocks 21.60 + second_node.payload.timestamp = head.header.timestamp; + advance_with_random_transactions(&mut second_node, 40, &mut rng, true).await?; + + // Reorg first node from 100th block to new 60th block. + first_node.sync_to(second_node.block_hash(60)).await?; + + // Advance second node 20 blocks and ensure that first node is able to follow it. + advance_with_random_transactions(&mut second_node, 20, &mut rng, true).await?; + first_node.sync_to(second_node.block_hash(80)).await?; - assert!(start.elapsed() <= std::time::Duration::from_secs(10), "timed out"); - } + // Ensure that it works the other way around too. + advance_with_random_transactions(&mut first_node, 20, &mut rng, true).await?; + second_node.sync_to(first_node.block_hash(100)).await?; Ok(()) } diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index c3743de185f..ee451b8f3c5 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -1,6 +1,22 @@ -use alloy_primitives::{Address, B256}; +use alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_primitives::{bytes, Address, B256}; +use alloy_provider::{ + network::{ + Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder, TransactionBuilder7702, + }, + Provider, ProviderBuilder, SendableTx, +}; use alloy_rpc_types_engine::PayloadAttributes; +use alloy_rpc_types_eth::TransactionRequest; +use alloy_signer::SignerSync; +use rand::{seq::SliceRandom, Rng}; +use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType, TmpDB}; +use reth_node_api::NodeTypesWithDBAdapter; +use reth_node_ethereum::EthereumNode; use reth_payload_builder::EthPayloadBuilderAttributes; +use 
reth_primitives::TxType; +use reth_provider::FullProvider; +use revm::primitives::{AccessListItem, Authorization}; /// Helper function to create a new eth payload attributes pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttributes { @@ -13,3 +29,116 @@ pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttribu }; EthPayloadBuilderAttributes::new(B256::ZERO, attributes) } + +/// Advances node by producing blocks with random transactions. +pub(crate) async fn advance_with_random_transactions( + node: &mut NodeHelperType, + num_blocks: usize, + rng: &mut impl Rng, + finalize: bool, +) -> eyre::Result<()> +where + Provider: FullProvider>, +{ + let provider = ProviderBuilder::new().with_recommended_fillers().on_http(node.rpc_url()); + let signers = Wallet::new(1).with_chain_id(provider.get_chain_id().await?).gen(); + + // simple contract which writes to storage on any call + let dummy_bytecode = bytes!("6080604052348015600f57600080fd5b50602880601d6000396000f3fe4360a09081523360c0526040608081905260e08152902080805500fea164736f6c6343000810000a"); + let mut call_destinations = signers.iter().map(|s| s.address()).collect::>(); + + for _ in 0..num_blocks { + let tx_count = rng.gen_range(1..20); + + let mut pending = vec![]; + for _ in 0..tx_count { + let signer = signers.choose(rng).unwrap(); + let tx_type = TxType::try_from(rng.gen_range(0..=4) as u64).unwrap(); + + let nonce = provider + .get_transaction_count(signer.address()) + .block_id(BlockId::Number(BlockNumberOrTag::Pending)) + .await?; + + let mut tx = + TransactionRequest::default().with_from(signer.address()).with_nonce(nonce); + + let should_create = + rng.gen::() && tx_type != TxType::Eip4844 && tx_type != TxType::Eip7702; + if should_create { + tx = tx.into_create().with_input(dummy_bytecode.clone()); + } else { + tx = tx.with_to(*call_destinations.choose(rng).unwrap()).with_input( + (0..rng.gen_range(0..10000)).map(|_| rng.gen()).collect::>(), + ); + } + + if 
matches!(tx_type, TxType::Legacy | TxType::Eip2930) { + tx = tx.with_gas_price(provider.get_gas_price().await?); + } + + if rng.gen::() || tx_type == TxType::Eip2930 { + tx = tx.with_access_list( + vec![AccessListItem { + address: *call_destinations.choose(rng).unwrap(), + storage_keys: (0..rng.gen_range(0..100)).map(|_| rng.gen()).collect(), + }] + .into(), + ); + } + + if tx_type == TxType::Eip7702 { + let signer = signers.choose(rng).unwrap(); + let auth = Authorization { + chain_id: provider.get_chain_id().await?, + address: *call_destinations.choose(rng).unwrap(), + nonce: provider + .get_transaction_count(signer.address()) + .block_id(BlockId::Number(BlockNumberOrTag::Pending)) + .await?, + }; + let sig = signer.sign_hash_sync(&auth.signature_hash())?; + tx = tx.with_authorization_list(vec![auth.into_signed(sig)]) + } + + let gas = provider + .estimate_gas(&tx) + .block(BlockId::Number(BlockNumberOrTag::Pending)) + .await + .unwrap_or(1_000_000); + + tx.set_gas_limit(gas); + + let SendableTx::Builder(tx) = provider.fill(tx).await? else { unreachable!() }; + let tx = + NetworkWallet::::sign_request(&EthereumWallet::new(signer.clone()), tx) + .await?; + + pending.push(provider.send_tx_envelope(tx).await?); + } + + let (payload, _) = node.build_and_submit_payload().await?; + if finalize { + node.engine_api + .update_forkchoice(payload.block().hash(), payload.block().hash()) + .await?; + } else { + let last_safe = provider + .get_block_by_number(BlockNumberOrTag::Safe, false.into()) + .await? 
+ .unwrap() + .header + .hash; + node.engine_api.update_forkchoice(last_safe, payload.block().hash()).await?; + } + + for pending in pending { + let receipt = pending.get_receipt().await?; + if let Some(address) = receipt.contract_address { + call_destinations.push(address); + } + } + } + + Ok(()) +} diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs index e70e3503198..9cadcdcf7a1 100644 --- a/crates/optimism/node/src/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -1,10 +1,8 @@ -use crate::{node::OpAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes}; +use crate::{OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes}; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; use alloy_rpc_types_engine::PayloadAttributes; -use reth_e2e_test_utils::{ - transaction::TransactionTestContext, wallet::Wallet, Adapter, NodeHelperType, -}; +use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_tasks::TaskManager; @@ -12,7 +10,7 @@ use std::sync::Arc; use tokio::sync::Mutex; /// Optimism Node Helper type -pub(crate) type OpNode = NodeHelperType>>; +pub(crate) type OpNode = NodeHelperType; /// Creates the initial setup with `num_nodes` of the node config, started and connected. 
pub async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { From 29289ccaec2685750c2a1239c594ec3c80516b8e Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 29 Nov 2024 16:53:25 +0400 Subject: [PATCH 779/970] feat: add `Primitives` AT to `BlockExecutorProvider` (#12994) --- book/sources/exex/hello-world/src/bin/3.rs | 4 +- book/sources/exex/remote/src/exex.rs | 4 +- book/sources/exex/remote/src/exex_4.rs | 4 +- book/sources/exex/tracking-state/src/bin/1.rs | 6 +- book/sources/exex/tracking-state/src/bin/2.rs | 6 +- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/chain.rs | 18 +- crates/blockchain-tree/src/shareable.rs | 6 +- crates/cli/commands/src/import.rs | 4 +- .../cli/commands/src/stage/dump/execution.rs | 4 +- crates/cli/commands/src/stage/dump/mod.rs | 2 +- crates/cli/commands/src/stage/mod.rs | 2 +- crates/cli/commands/src/stage/run.rs | 2 +- crates/e2e-test-utils/src/rpc.rs | 6 +- crates/engine/local/src/service.rs | 2 +- crates/engine/service/src/service.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 4 +- crates/ethereum/evm/src/execute.rs | 9 +- crates/ethereum/node/src/node.rs | 2 +- crates/evm/src/either.rs | 47 ++--- crates/evm/src/execute.rs | 133 +++++++------ crates/evm/src/metrics.rs | 24 ++- crates/evm/src/noop.rs | 4 +- crates/evm/src/test_utils.rs | 24 +-- crates/exex/exex/src/backfill/job.rs | 39 ++-- crates/exex/exex/src/backfill/stream.rs | 43 ++--- crates/exex/exex/src/context.rs | 20 +- crates/exex/exex/src/dyn_context.rs | 17 +- crates/exex/exex/src/manager.rs | 175 ++++++++++++------ crates/exex/exex/src/notifications.rs | 112 ++++++----- crates/exex/exex/src/wal/cache.rs | 14 +- crates/exex/exex/src/wal/mod.rs | 41 ++-- crates/exex/exex/src/wal/storage.rs | 26 ++- crates/exex/test-utils/src/lib.rs | 2 +- crates/exex/types/src/notification.rs | 16 +- crates/node/api/src/node.rs | 2 +- crates/node/builder/src/components/builder.rs | 4 +- 
crates/node/builder/src/components/execute.rs | 7 +- crates/node/builder/src/components/mod.rs | 6 +- crates/node/builder/src/setup.rs | 8 +- crates/optimism/evm/Cargo.toml | 1 + crates/optimism/evm/src/execute.rs | 6 +- crates/optimism/node/src/node.rs | 2 +- crates/revm/src/batch.rs | 20 +- crates/rpc/rpc-builder/src/lib.rs | 33 +++- crates/rpc/rpc/src/debug.rs | 16 +- crates/rpc/rpc/src/validation.rs | 22 ++- crates/stages/stages/src/sets.rs | 3 +- crates/stages/stages/src/stages/execution.rs | 29 +-- .../custom-beacon-withdrawals/src/main.rs | 9 +- examples/custom-evm/src/main.rs | 2 +- examples/stateful-precompile/src/main.rs | 4 +- 52 files changed, 591 insertions(+), 409 deletions(-) diff --git a/book/sources/exex/hello-world/src/bin/3.rs b/book/sources/exex/hello-world/src/bin/3.rs index ebeaf6c84f1..9b429d3eb08 100644 --- a/book/sources/exex/hello-world/src/bin/3.rs +++ b/book/sources/exex/hello-world/src/bin/3.rs @@ -1,10 +1,10 @@ use futures_util::TryStreamExt; -use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; +use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; -async fn my_exex>>( +async fn my_exex>>( mut ctx: ExExContext, ) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.try_next().await? 
{ diff --git a/book/sources/exex/remote/src/exex.rs b/book/sources/exex/remote/src/exex.rs index 00392b4dad1..c823d98ded4 100644 --- a/book/sources/exex/remote/src/exex.rs +++ b/book/sources/exex/remote/src/exex.rs @@ -3,7 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; -use reth::{primitives::Block, providers::BlockReader}; +use reth::{builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -45,7 +45,7 @@ impl RemoteExEx for ExExService { } } -async fn remote_exex>>( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/remote/src/exex_4.rs b/book/sources/exex/remote/src/exex_4.rs index c37f26d739d..8286c028934 100644 --- a/book/sources/exex/remote/src/exex_4.rs +++ b/book/sources/exex/remote/src/exex_4.rs @@ -3,7 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; -use reth::{primitives::Block, providers::BlockReader}; +use reth::{builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -47,7 +47,7 @@ impl RemoteExEx for ExExService { // ANCHOR: snippet #[allow(dead_code)] -async fn remote_exex>>( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/tracking-state/src/bin/1.rs b/book/sources/exex/tracking-state/src/bin/1.rs index 2cf43bec3a1..b1a8609b727 100644 --- a/book/sources/exex/tracking-state/src/bin/1.rs +++ b/book/sources/exex/tracking-state/src/bin/1.rs @@ -5,7 +5,7 @@ use std::{ }; use futures_util::{FutureExt, TryStreamExt}; -use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; +use reth::{api::FullNodeComponents, builder::NodeTypes, 
primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -14,7 +14,9 @@ struct MyExEx { ctx: ExExContext, } -impl>> Future for MyExEx { +impl>> Future + for MyExEx +{ type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/book/sources/exex/tracking-state/src/bin/2.rs b/book/sources/exex/tracking-state/src/bin/2.rs index b58d2a39c85..7e9aadf8a04 100644 --- a/book/sources/exex/tracking-state/src/bin/2.rs +++ b/book/sources/exex/tracking-state/src/bin/2.rs @@ -6,7 +6,7 @@ use std::{ use alloy_primitives::BlockNumber; use futures_util::{FutureExt, TryStreamExt}; -use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; +use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -25,7 +25,9 @@ impl MyExEx { } } -impl>> Future for MyExEx { +impl>> Future + for MyExEx +{ type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index bbf1cb09961..ec9beb20a07 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -95,7 +95,7 @@ impl BlockchainTree { impl BlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. 
/// diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 6ac39c31670..ba4f91d9c79 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -17,7 +17,7 @@ use reth_execution_errors::BlockExecutionError; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ - providers::{BundleStateProvider, ConsistentDbView, ProviderNodeTypes}, + providers::{BundleStateProvider, ConsistentDbView, TreeNodeTypes}, DBProvider, FullExecutionDataProvider, ProviderError, StateRootProvider, TryIntoHistoricalStateProvider, }; @@ -76,8 +76,8 @@ impl AppendableChain { block_validation_kind: BlockValidationKind, ) -> Result where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { let execution_outcome = ExecutionOutcome::default(); let empty = BTreeMap::new(); @@ -114,8 +114,8 @@ impl AppendableChain { block_validation_kind: BlockValidationKind, ) -> Result where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { let parent_number = block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; @@ -177,8 +177,8 @@ impl AppendableChain { ) -> Result<(ExecutionOutcome, Option), BlockExecutionError> where EDP: FullExecutionDataProvider, - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. 
externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -284,8 +284,8 @@ impl AppendableChain { block_validation_kind: BlockValidationKind, ) -> Result<(), InsertBlockErrorKind> where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 484b4b51869..e668f4e2dac 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -39,7 +39,7 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -110,7 +110,7 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); @@ -173,7 +173,7 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index c1f6408b49b..dc99ae7f98d 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -60,7 +60,7 @@ impl> ImportComm pub async fn execute(self, executor: F) -> eyre::Result<()> where N: CliNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); @@ -169,7 +169,7 @@ pub fn build_import_pipeline( where N: ProviderNodeTypes + CliNodeTypes, C: Consensus + 'static, - E: BlockExecutorProvider, + E: 
BlockExecutorProvider, { if !file_client.has_canonical_blocks() { eyre::bail!("unable to import non canonical blocks"); diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 000c1b542db..70fd23f9847 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -33,7 +33,7 @@ where Receipt = reth_primitives::Receipt, >, >, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; @@ -188,7 +188,7 @@ where Receipt = reth_primitives::Receipt, >, >, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { info!(target: "reth::cli", "Executing stage. [dry-run]"); diff --git a/crates/cli/commands/src/stage/dump/mod.rs b/crates/cli/commands/src/stage/dump/mod.rs index 36b8fb12258..9cc0f54dd33 100644 --- a/crates/cli/commands/src/stage/dump/mod.rs +++ b/crates/cli/commands/src/stage/dump/mod.rs @@ -93,7 +93,7 @@ impl> Command pub async fn execute(self, executor: F) -> eyre::Result<()> where N: CliNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RO)?; diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index b9e0725428a..91ab458daf6 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -44,7 +44,7 @@ impl> Command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where N: CliNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { match self.command { diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index c852eea05a7..88a5fa6204e 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -107,7 +107,7 @@ impl> Command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where N: CliNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { // Raise the fd limit of the process. diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 8399a482dfd..37ee12987ca 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,7 +1,6 @@ use alloy_consensus::TxEnvelope; use alloy_network::eip2718::Decodable2718; use alloy_primitives::{Bytes, B256}; -use alloy_rlp::Encodable; use reth_chainspec::EthereumHardforks; use reth_node_api::{FullNodeComponents, NodePrimitives}; use reth_node_builder::{rpc::RpcRegistry, NodeTypes}; @@ -21,7 +20,10 @@ where Node: FullNodeComponents< Types: NodeTypes< ChainSpec: EthereumHardforks, - Primitives: NodePrimitives, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, >, >, EthApi: EthApiSpec + EthTransactions + TraceExt, diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 5838cb89116..79d9d844d73 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -65,7 +65,7 @@ where 
#[allow(clippy::too_many_arguments)] pub fn new( consensus: Arc, - executor_factory: impl BlockExecutorProvider, + executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index a54a2ef9e1a..8bb26d69140 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -60,7 +60,7 @@ impl EngineService where N: EngineNodeTypes + PersistenceNodeTypes, Client: EthBlockClient + 'static, - E: BlockExecutorProvider + 'static, + E: BlockExecutorProvider + 'static, { /// Constructor for `EngineService`. #[allow(clippy::too_many_arguments)] diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index fd0e5aeec83..7b8ec883892 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -540,7 +540,7 @@ impl std::fmt::Debug impl EngineApiTreeHandler where - N: NodePrimitives, + N: NodePrimitives, P: DatabaseProviderFactory + BlockReader + StateProviderFactory @@ -548,7 +548,7 @@ where + Clone + 'static,

::Provider: BlockReader, - E: BlockExecutorProvider, + E: BlockExecutorProvider, T: EngineTypes, V: EngineValidator, { diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 8642df89698..f2a3925572b 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -20,7 +20,7 @@ use reth_evm::{ system_calls::{OnStateHook, SystemCaller}, ConfigureEvm, TxEnvOverrides, }; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; use reth_revm::db::State; use revm_primitives::{ db::{Database, DatabaseCommit}, @@ -60,6 +60,8 @@ where EvmConfig: Clone + Unpin + Sync + Send + 'static + ConfigureEvm

, { + type Primitives = EthPrimitives; + type Strategy + Display>> = EthExecutionStrategy; @@ -122,13 +124,16 @@ where } } -impl BlockExecutionStrategy for EthExecutionStrategy +impl BlockExecutionStrategy for EthExecutionStrategy where DB: Database + Display>, EvmConfig: ConfigureEvm
, { + type DB = DB; type Error = BlockExecutionError; + type Primitives = EthPrimitives; + fn init(&mut self, tx_env_overrides: Box) { self.tx_env_overrides = Some(tx_env_overrides); } diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index a536b9dff90..9db0c44c6c6 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -133,7 +133,7 @@ pub struct EthereumExecutorBuilder; impl ExecutorBuilder for EthereumExecutorBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, { type EVM = EthEvmConfig; diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 85bc7e7f9a7..4faeb1a7203 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -8,9 +8,6 @@ use crate::{ }; use alloc::boxed::Box; use alloy_primitives::BlockNumber; -use reth_execution_errors::BlockExecutionError; -use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; @@ -22,8 +19,10 @@ use revm::State; impl BlockExecutorProvider for Either where A: BlockExecutorProvider, - B: BlockExecutorProvider, + B: BlockExecutorProvider, { + type Primitives = A::Primitives; + type Executor + Display>> = Either, B::Executor>; @@ -53,23 +52,13 @@ where impl Executor for Either where - A: for<'a> Executor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = BlockExecutionOutput, - Error = BlockExecutionError, - >, - B: for<'a> Executor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = BlockExecutionOutput, - Error = BlockExecutionError, - >, + A: Executor, + B: for<'a> Executor = A::Input<'a>, Output = A::Output, Error = A::Error>, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = 
BlockExecutionOutput; - type Error = BlockExecutionError; + type Input<'a> = A::Input<'a>; + type Output = A::Output; + type Error = A::Error; fn init(&mut self, tx_env_overrides: Box) { match self { @@ -116,23 +105,13 @@ where impl BatchExecutor for Either where - A: for<'a> BatchExecutor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = ExecutionOutcome, - Error = BlockExecutionError, - >, - B: for<'a> BatchExecutor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = ExecutionOutcome, - Error = BlockExecutionError, - >, + A: BatchExecutor, + B: for<'a> BatchExecutor = A::Input<'a>, Output = A::Output, Error = A::Error>, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; - type Error = BlockExecutionError; + type Input<'a> = A::Input<'a>; + type Output = A::Output; + type Error = A::Error; fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { match self { diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 42c756f4d93..bc6e535b7b7 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,19 +1,21 @@ //! Traits for execution. 
+use alloy_consensus::BlockHeader; // Re-export execution types pub use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, }; pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; +use reth_primitives_traits::Block as _; pub use reth_storage_errors::provider::ProviderError; use crate::{system_calls::OnStateHook, TxEnvOverrides}; use alloc::{boxed::Box, vec::Vec}; use alloy_eips::eip7685::Requests; use alloy_primitives::BlockNumber; -use core::{fmt::Display, marker::PhantomData}; +use core::fmt::Display; use reth_consensus::ConsensusError; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, NodePrimitives, Receipt}; use reth_prune_types::PruneModes; use reth_revm::batch::BlockBatchRecord; use revm::{ @@ -130,6 +132,9 @@ pub trait BatchExecutor { /// A type that can create a new executor for block execution. pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { + /// Receipt type. + type Primitives: NodePrimitives; + /// An executor that can execute a single block given a database. /// /// # Verification @@ -143,16 +148,22 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// the returned state. type Executor + Display>>: for<'a> Executor< DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = BlockExecutionOutput, + Input<'a> = BlockExecutionInput< + 'a, + BlockWithSenders<::Block>, + >, + Output = BlockExecutionOutput<::Receipt>, Error = BlockExecutionError, >; /// An executor that can execute a batch of blocks given a database. 
type BatchExecutor + Display>>: for<'a> BatchExecutor< DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = ExecutionOutcome, + Input<'a> = BlockExecutionInput< + 'a, + BlockWithSenders<::Block>, + >, + Output = ExecutionOutcome<::Receipt>, Error = BlockExecutionError, >; @@ -174,18 +185,21 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// Helper type for the output of executing a block. #[derive(Debug, Clone)] -pub struct ExecuteOutput { +pub struct ExecuteOutput { /// Receipts obtained after executing a block. - pub receipts: Vec, + pub receipts: Vec, /// Cumulative gas used in the block execution. pub gas_used: u64, } /// Defines the strategy for executing a single block. -pub trait BlockExecutionStrategy -where - DB: Database, -{ +pub trait BlockExecutionStrategy { + /// Database this strategy operates on. + type DB: Database; + + /// Primitive types used by the strategy. + type Primitives: NodePrimitives; + /// The error type returned by this strategy's methods. type Error: From + core::error::Error; @@ -195,30 +209,30 @@ where /// Applies any necessary changes before executing the block's transactions. fn apply_pre_execution_changes( &mut self, - block: &BlockWithSenders, + block: &BlockWithSenders<::Block>, total_difficulty: U256, ) -> Result<(), Self::Error>; /// Executes all transactions in the block. fn execute_transactions( &mut self, - block: &BlockWithSenders, + block: &BlockWithSenders<::Block>, total_difficulty: U256, - ) -> Result; + ) -> Result::Receipt>, Self::Error>; /// Applies any necessary changes after executing the block's transactions. fn apply_post_execution_changes( &mut self, - block: &BlockWithSenders, + block: &BlockWithSenders<::Block>, total_difficulty: U256, - receipts: &[Receipt], + receipts: &[::Receipt], ) -> Result; /// Returns a reference to the current state. - fn state_ref(&self) -> &State; + fn state_ref(&self) -> &State; /// Returns a mutable reference to the current state. 
- fn state_mut(&mut self) -> &mut State; + fn state_mut(&mut self) -> &mut State; /// Sets a hook to be called after each state change during execution. fn with_state_hook(&mut self, _hook: Option>) {} @@ -232,8 +246,8 @@ where /// Validate a block with regard to execution results. fn validate_block_post_execution( &self, - _block: &BlockWithSenders, - _receipts: &[Receipt], + _block: &BlockWithSenders<::Block>, + _receipts: &[::Receipt], _requests: &Requests, ) -> Result<(), ConsensusError> { Ok(()) @@ -242,9 +256,13 @@ where /// A strategy factory that can create block execution strategies. pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { + /// Primitive types used by the strategy. + type Primitives: NodePrimitives; + /// Associated strategy type. type Strategy + Display>>: BlockExecutionStrategy< - DB, + DB = DB, + Primitives = Self::Primitives, Error = BlockExecutionError, >; @@ -280,11 +298,13 @@ impl BlockExecutorProvider for BasicBlockExecutorProvider where F: BlockExecutionStrategyFactory, { + type Primitives = F::Primitives; + type Executor + Display>> = - BasicBlockExecutor, DB>; + BasicBlockExecutor>; type BatchExecutor + Display>> = - BasicBatchExecutor, DB>; + BasicBatchExecutor>; fn executor(&self, db: DB) -> Self::Executor where @@ -307,34 +327,26 @@ where /// A generic block executor that uses a [`BlockExecutionStrategy`] to /// execute blocks. #[allow(missing_debug_implementations, dead_code)] -pub struct BasicBlockExecutor -where - S: BlockExecutionStrategy, - DB: Database, -{ +pub struct BasicBlockExecutor { /// Block execution strategy. pub(crate) strategy: S, - _phantom: PhantomData, } -impl BasicBlockExecutor -where - S: BlockExecutionStrategy, - DB: Database, -{ +impl BasicBlockExecutor { /// Creates a new `BasicBlockExecutor` with the given strategy. 
pub const fn new(strategy: S) -> Self { - Self { strategy, _phantom: PhantomData } + Self { strategy } } } -impl Executor for BasicBlockExecutor +impl Executor for BasicBlockExecutor where - S: BlockExecutionStrategy, + S: BlockExecutionStrategy, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; + type Input<'a> = + BlockExecutionInput<'a, BlockWithSenders<::Block>>; + type Output = BlockExecutionOutput<::Receipt>; type Error = S::Error; fn init(&mut self, env_overrides: Box) { @@ -404,43 +416,44 @@ where /// A generic batch executor that uses a [`BlockExecutionStrategy`] to /// execute batches. #[allow(missing_debug_implementations)] -pub struct BasicBatchExecutor +pub struct BasicBatchExecutor where - S: BlockExecutionStrategy, - DB: Database, + S: BlockExecutionStrategy, { /// Batch execution strategy. pub(crate) strategy: S, /// Keeps track of batch execution receipts and requests. - pub(crate) batch_record: BlockBatchRecord, - _phantom: PhantomData, + pub(crate) batch_record: BlockBatchRecord<::Receipt>, } -impl BasicBatchExecutor +impl BasicBatchExecutor where - S: BlockExecutionStrategy, - DB: Database, + S: BlockExecutionStrategy, { /// Creates a new `BasicBatchExecutor` with the given strategy. 
- pub const fn new(strategy: S, batch_record: BlockBatchRecord) -> Self { - Self { strategy, batch_record, _phantom: PhantomData } + pub const fn new( + strategy: S, + batch_record: BlockBatchRecord<::Receipt>, + ) -> Self { + Self { strategy, batch_record } } } -impl BatchExecutor for BasicBatchExecutor +impl BatchExecutor for BasicBatchExecutor where - S: BlockExecutionStrategy, + S: BlockExecutionStrategy, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; + type Input<'a> = + BlockExecutionInput<'a, BlockWithSenders<::Block>>; + type Output = ExecutionOutcome<::Receipt>; type Error = BlockExecutionError; fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { let BlockExecutionInput { block, total_difficulty } = input; if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); + self.batch_record.set_first_block(block.header().number()); } self.strategy.apply_pre_execution_changes(block, total_difficulty)?; @@ -452,7 +465,7 @@ where self.strategy.validate_block_post_execution(block, &receipts, &requests)?; // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); + let retention = self.batch_record.bundle_retention(block.header().number()); self.strategy.state_mut().merge_transitions(retention); // store receipts in the set @@ -490,7 +503,9 @@ where mod tests { use super::*; use alloy_primitives::U256; + use core::marker::PhantomData; use reth_chainspec::{ChainSpec, MAINNET}; + use reth_primitives::EthPrimitives; use revm::db::{CacheDB, EmptyDBTyped}; use revm_primitives::{bytes, TxEnv}; use std::sync::Arc; @@ -499,6 +514,7 @@ mod tests { struct TestExecutorProvider; impl BlockExecutorProvider for TestExecutorProvider { + type Primitives = EthPrimitives; type Executor + Display>> = TestExecutor; type BatchExecutor + Display>> = TestExecutor; @@ -596,6 +612,7 @@ 
mod tests { } impl BlockExecutionStrategyFactory for TestExecutorStrategyFactory { + type Primitives = EthPrimitives; type Strategy + Display>> = TestExecutorStrategy; @@ -622,10 +639,12 @@ mod tests { } } - impl BlockExecutionStrategy for TestExecutorStrategy + impl BlockExecutionStrategy for TestExecutorStrategy where DB: Database, { + type DB = DB; + type Primitives = EthPrimitives; type Error = BlockExecutionError; fn apply_pre_execution_changes( diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index 3464bb96f4c..f42b942afd9 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -3,6 +3,7 @@ //! Block processing related to syncing should take care to update the metrics by using either //! [`ExecutorMetrics::execute_metered`] or [`ExecutorMetrics::metered_one`]. use crate::{execute::Executor, system_calls::OnStateHook}; +use alloy_consensus::BlockHeader; use metrics::{Counter, Gauge, Histogram}; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput}; use reth_metrics::Metrics; @@ -69,9 +70,10 @@ pub struct ExecutorMetrics { } impl ExecutorMetrics { - fn metered(&self, block: &BlockWithSenders, f: F) -> R + fn metered(&self, block: &BlockWithSenders, f: F) -> R where F: FnOnce() -> R, + B: reth_primitives_traits::Block, { // Execute the block and record the elapsed time. let execute_start = Instant::now(); @@ -79,8 +81,8 @@ impl ExecutorMetrics { let execution_duration = execute_start.elapsed().as_secs_f64(); // Update gas metrics. - self.gas_processed_total.increment(block.gas_used); - self.gas_per_second.set(block.gas_used as f64 / execution_duration); + self.gas_processed_total.increment(block.header().gas_used()); + self.gas_per_second.set(block.header().gas_used() as f64 / execution_duration); self.execution_histogram.record(execution_duration); self.execution_duration.set(execution_duration); @@ -94,19 +96,20 @@ impl ExecutorMetrics { /// of accounts, storage slots and bytecodes loaded and updated. 
/// Execute the given block using the provided [`Executor`] and update metrics for the /// execution. - pub fn execute_metered<'a, E, DB, O, Error>( + pub fn execute_metered<'a, E, DB, O, Error, B>( &self, executor: E, - input: BlockExecutionInput<'a, BlockWithSenders>, + input: BlockExecutionInput<'a, BlockWithSenders>, state_hook: Box, ) -> Result, Error> where E: Executor< DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, Output = BlockExecutionOutput, Error = Error, >, + B: reth_primitives_traits::Block, { // clone here is cheap, all the metrics are Option>. additionally // they are gloally registered so that the data recorded in the hook will @@ -133,9 +136,14 @@ impl ExecutorMetrics { } /// Execute the given block and update metrics for the execution. - pub fn metered_one(&self, input: BlockExecutionInput<'_, BlockWithSenders>, f: F) -> R + pub fn metered_one( + &self, + input: BlockExecutionInput<'_, BlockWithSenders>, + f: F, + ) -> R where - F: FnOnce(BlockExecutionInput<'_, BlockWithSenders>) -> R, + F: FnOnce(BlockExecutionInput<'_, BlockWithSenders>) -> R, + B: reth_primitives_traits::Block, { self.metered(input.block, || f(input)) } diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 4fdc6d367a2..7b1063533da 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -4,7 +4,7 @@ use alloy_primitives::BlockNumber; use core::fmt::Display; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -23,6 +23,8 @@ const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; pub struct NoopBlockExecutorProvider; impl BlockExecutorProvider for 
NoopBlockExecutorProvider { + type Primitives = EthPrimitives; + type Executor + Display>> = Self; type BatchExecutor + Display>> = Self; diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index a4dc906494c..22ba4a316e2 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -12,7 +12,7 @@ use alloy_primitives::BlockNumber; use parking_lot::Mutex; use reth_execution_errors::BlockExecutionError; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockWithSenders, Receipt, Receipts}; +use reth_primitives::{BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, Receipts}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -33,6 +33,8 @@ impl MockExecutorProvider { } impl BlockExecutorProvider for MockExecutorProvider { + type Primitives = EthPrimitives; + type Executor + Display>> = Self; type BatchExecutor + Display>> = Self; @@ -116,15 +118,14 @@ impl BatchExecutor for MockExecutorProvider { } } -impl BasicBlockExecutor +impl BasicBlockExecutor where - S: BlockExecutionStrategy, - DB: Database, + S: BlockExecutionStrategy, { /// Provides safe read access to the state pub fn with_state(&self, f: F) -> R where - F: FnOnce(&State) -> R, + F: FnOnce(&State) -> R, { f(self.strategy.state_ref()) } @@ -132,21 +133,20 @@ where /// Provides safe write access to the state pub fn with_state_mut(&mut self, f: F) -> R where - F: FnOnce(&mut State) -> R, + F: FnOnce(&mut State) -> R, { f(self.strategy.state_mut()) } } -impl BasicBatchExecutor +impl BasicBatchExecutor where - S: BlockExecutionStrategy, - DB: Database, + S: BlockExecutionStrategy, { /// Provides safe read access to the state pub fn with_state(&self, f: F) -> R where - F: FnOnce(&State) -> R, + F: FnOnce(&State) -> R, { f(self.strategy.state_ref()) } @@ -154,13 +154,13 @@ where /// Provides safe write access to the state pub fn with_state_mut(&mut self, f: F) -> R where - F: FnOnce(&mut State) -> R, 
+ F: FnOnce(&mut State) -> R, { f(self.strategy.state_mut()) } /// Accessor for batch executor receipts. - pub const fn receipts(&self) -> &Receipts { + pub const fn receipts(&self) -> &Receipts<::Receipt> { self.batch_record.receipts() } } diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 7e670620472..f93c5efa721 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -9,7 +9,7 @@ use alloy_primitives::BlockNumber; use reth_evm::execute::{ BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_node_api::{Block as _, BlockBody as _}; +use reth_node_api::{Block as _, BlockBody as _, NodePrimitives}; use reth_primitives::{BlockExt, BlockWithSenders, Receipt}; use reth_primitives_traits::{format_gas_throughput, SignedTransaction}; use reth_provider::{ @@ -38,12 +38,10 @@ pub struct BackfillJob { impl Iterator for BackfillJob where - E: BlockExecutorProvider, - P: HeaderProvider - + BlockReader - + StateProviderFactory, + E: BlockExecutorProvider>, + P: HeaderProvider + BlockReader + StateProviderFactory, { - type Item = BackfillJobResult; + type Item = BackfillJobResult>; fn next(&mut self) -> Option { if self.range.is_empty() { @@ -56,10 +54,8 @@ where impl BackfillJob where - E: BlockExecutorProvider, - P: BlockReader - + HeaderProvider - + StateProviderFactory, + E: BlockExecutorProvider>, + P: BlockReader + HeaderProvider + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. pub fn into_single_blocks(self) -> SingleBlockBackfillJob { @@ -67,11 +63,11 @@ where } /// Converts the backfill job into a stream. 
- pub fn into_stream(self) -> StreamBackfillJob { + pub fn into_stream(self) -> StreamBackfillJob> { self.into() } - fn execute_range(&mut self) -> BackfillJobResult { + fn execute_range(&mut self) -> BackfillJobResult> { debug!( target: "exex::backfill", range = ?self.range, @@ -169,10 +165,13 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where - E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + E: BlockExecutorProvider>, + P: HeaderProvider + BlockReader + StateProviderFactory, { - type Item = BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)>; + type Item = BackfillJobResult<( + BlockWithSenders, + BlockExecutionOutput<::Receipt>, + )>; fn next(&mut self) -> Option { self.range.next().map(|block_number| self.execute_block(block_number)) @@ -181,8 +180,8 @@ where impl SingleBlockBackfillJob where - E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + E: BlockExecutorProvider>, + P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. pub fn into_stream( @@ -191,10 +190,14 @@ where self.into() } + #[expect(clippy::type_complexity)] pub(crate) fn execute_block( &self, block_number: u64, - ) -> BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)> { + ) -> BackfillJobResult<( + BlockWithSenders, + BlockExecutionOutput<::Receipt>, + )> { let td = self .provider .header_td_by_number(block_number)? 
diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index 46177ceda12..95da076c7c8 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -11,7 +11,8 @@ use futures::{ StreamExt, }; use reth_evm::execute::{BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider}; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_node_api::NodePrimitives; +use reth_primitives::{BlockWithSenders, EthPrimitives}; use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory}; use reth_prune_types::PruneModes; use reth_stages_api::ExecutionStageThresholds; @@ -38,8 +39,11 @@ struct BackfillTaskOutput { /// Ordered queue of [`JoinHandle`]s that yield [`BackfillTaskOutput`]s. type BackfillTasks = FuturesOrdered>>; -type SingleBlockStreamItem = (BlockWithSenders, BlockExecutionOutput); -type BatchBlockStreamItem = Chain; +type SingleBlockStreamItem = ( + BlockWithSenders<::Block>, + BlockExecutionOutput<::Receipt>, +); +type BatchBlockStreamItem = Chain; /// Stream for processing backfill jobs asynchronously. 
/// @@ -100,18 +104,12 @@ where } } -impl Stream for StreamBackfillJob +impl Stream for StreamBackfillJob> where - E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider - + BlockReader - + StateProviderFactory - + Clone - + Send - + Unpin - + 'static, + E: BlockExecutorProvider> + Clone + Send + 'static, + P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, { - type Item = BackfillJobResult; + type Item = BackfillJobResult>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -139,18 +137,12 @@ where } } -impl Stream for StreamBackfillJob +impl Stream for StreamBackfillJob> where - E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider - + BlockReader - + StateProviderFactory - + Clone - + Send - + Unpin - + 'static, + E: BlockExecutorProvider> + Clone + Send + 'static, + P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, { - type Item = BackfillJobResult; + type Item = BackfillJobResult>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -200,7 +192,10 @@ impl From> for StreamBackfillJob From> for StreamBackfillJob { +impl From> for StreamBackfillJob> +where + E: BlockExecutorProvider, +{ fn from(job: BackfillJob) -> Self { let batch_size = job.thresholds.max_blocks.map_or(DEFAULT_BATCH_SIZE, |max| max as usize); Self { diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 3d303c9bbac..f536ed515f9 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -1,6 +1,6 @@ use crate::{ExExContextDyn, ExExEvent, ExExNotifications, ExExNotificationsStream}; use reth_exex_types::ExExHead; -use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_api::{FullNodeComponents, NodePrimitives, NodeTypes}; use reth_node_core::node_config::NodeConfig; use reth_primitives::Head; use reth_provider::BlockReader; @@ 
-57,11 +57,12 @@ where impl ExExContext where Node: FullNodeComponents, - Node::Provider: Debug + BlockReader, + Node::Provider: Debug + BlockReader, Node::Executor: Debug, + Node::Types: NodeTypes, { /// Returns dynamic version of the context - pub fn into_dyn(self) -> ExExContextDyn { + pub fn into_dyn(self) -> ExExContextDyn<::Primitives> { ExExContextDyn::from(self) } } @@ -69,6 +70,7 @@ where impl ExExContext where Node: FullNodeComponents, + Node::Types: NodeTypes, { /// Returns the transaction pool of the node. pub fn pool(&self) -> &Node::Pool { @@ -107,19 +109,13 @@ where /// Sets notifications stream to [`crate::ExExNotificationsWithoutHead`], a stream of /// notifications without a head. - pub fn set_notifications_without_head(&mut self) - where - Node::Provider: BlockReader, - { + pub fn set_notifications_without_head(&mut self) { self.notifications.set_without_head(); } /// Sets notifications stream to [`crate::ExExNotificationsWithHead`], a stream of notifications /// with the provided head. 
- pub fn set_notifications_with_head(&mut self, head: ExExHead) - where - Node::Provider: BlockReader, - { + pub fn set_notifications_with_head(&mut self, head: ExExHead) { self.notifications.set_with_head(head); } } @@ -142,7 +138,7 @@ mod tests { impl ExEx where - Node::Provider: BlockReader, + Node::Provider: BlockReader, { async fn _test_bounds(mut self) -> eyre::Result<()> { self.ctx.pool(); diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs index 3ce0f488f40..12efa5f069b 100644 --- a/crates/exex/exex/src/dyn_context.rs +++ b/crates/exex/exex/src/dyn_context.rs @@ -4,8 +4,9 @@ use std::fmt::Debug; use reth_chainspec::{EthChainSpec, Head}; -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, NodePrimitives, NodeTypes}; use reth_node_core::node_config::NodeConfig; +use reth_primitives::EthPrimitives; use reth_provider::BlockReader; use tokio::sync::mpsc; @@ -13,7 +14,7 @@ use crate::{ExExContext, ExExEvent, ExExNotificationsStream}; // TODO(0xurb) - add `node` after abstractions /// Captures the context that an `ExEx` has access to. -pub struct ExExContextDyn { +pub struct ExExContextDyn { /// The current head of the blockchain at launch. pub head: Head, /// The config of the node @@ -34,10 +35,10 @@ pub struct ExExContextDyn { /// /// Once an [`ExExNotification`](crate::ExExNotification) is sent over the channel, it is /// considered delivered by the node. 
- pub notifications: Box, + pub notifications: Box>, } -impl Debug for ExExContextDyn { +impl Debug for ExExContextDyn { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ExExContext") .field("head", &self.head) @@ -49,16 +50,16 @@ impl Debug for ExExContextDyn { } } -impl From> for ExExContextDyn +impl From> for ExExContextDyn<::Primitives> where - Node: FullNodeComponents, - Node::Provider: Debug + BlockReader, + Node: FullNodeComponents>, + Node::Provider: Debug + BlockReader, Node::Executor: Debug, { fn from(ctx: ExExContext) -> Self { let config = ctx.config.map_chainspec(|chainspec| Box::new(chainspec) as Box); - let notifications = Box::new(ctx.notifications) as Box; + let notifications = Box::new(ctx.notifications) as Box<_>; Self { head: ctx.head, diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index ea5ddf2e8c6..a3b92e9f17a 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1,14 +1,17 @@ use crate::{ wal::Wal, ExExEvent, ExExNotification, ExExNotifications, FinishedExExHeight, WalHandle, }; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use futures::StreamExt; use itertools::Itertools; use metrics::Gauge; use reth_chain_state::ForkChoiceStream; use reth_chainspec::Head; +use reth_evm::execute::BlockExecutorProvider; use reth_metrics::{metrics::Counter, Metrics}; -use reth_primitives::SealedHeader; +use reth_node_api::NodePrimitives; +use reth_primitives::{EthPrimitives, SealedHeader}; use reth_provider::HeaderProvider; use reth_tracing::tracing::{debug, warn}; use std::{ @@ -69,13 +72,13 @@ struct ExExMetrics { /// [`ExExHandle::new`] should be given to the `ExEx`, while the handle itself should be given to /// the manager in [`ExExManager::new`]. #[derive(Debug)] -pub struct ExExHandle { +pub struct ExExHandle { /// The execution extension's ID. id: String, /// Metrics for an `ExEx`. 
metrics: ExExMetrics, /// Channel to send [`ExExNotification`]s to the `ExEx`. - sender: PollSender, + sender: PollSender>, /// Channel to receive [`ExExEvent`]s from the `ExEx`. receiver: UnboundedReceiver, /// The ID of the next notification to send to this `ExEx`. @@ -86,17 +89,17 @@ pub struct ExExHandle { finished_height: Option, } -impl ExExHandle { +impl ExExHandle { /// Create a new handle for the given `ExEx`. /// /// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a /// [`mpsc::Receiver`] for [`ExExNotification`]s that should be given to the `ExEx`. - pub fn new( + pub fn new>( id: String, node_head: Head, provider: P, executor: E, - wal_handle: WalHandle, + wal_handle: WalHandle, ) -> (Self, UnboundedSender, ExExNotifications) { let (notification_tx, notification_rx) = mpsc::channel(1); let (event_tx, event_rx) = mpsc::unbounded_channel(); @@ -124,21 +127,21 @@ impl ExExHandle { fn send( &mut self, cx: &mut Context<'_>, - (notification_id, notification): &(usize, ExExNotification), - ) -> Poll>> { + (notification_id, notification): &(usize, ExExNotification), + ) -> Poll>>> { if let Some(finished_height) = self.finished_height { match notification { ExExNotification::ChainCommitted { new } => { // Skip the chain commit notification if the finished height of the ExEx is // higher than or equal to the tip of the new notification. // I.e., the ExEx has already processed the notification. - if finished_height.number >= new.tip().number { + if finished_height.number >= new.tip().number() { debug!( target: "exex::manager", exex_id = %self.id, %notification_id, ?finished_height, - new_tip = %new.tip().number, + new_tip = %new.tip().number(), "Skipping notification" ); @@ -208,15 +211,15 @@ pub struct ExExManagerMetrics { /// - Error handling /// - Monitoring #[derive(Debug)] -pub struct ExExManager

{ +pub struct ExExManager { /// Provider for querying headers. provider: P, /// Handles to communicate with the `ExEx`'s. - exex_handles: Vec, + exex_handles: Vec>, /// [`ExExNotification`] channel from the [`ExExManagerHandle`]s. - handle_rx: UnboundedReceiver<(ExExNotificationSource, ExExNotification)>, + handle_rx: UnboundedReceiver<(ExExNotificationSource, ExExNotification)>, /// The minimum notification ID currently present in the buffer. min_id: usize, @@ -226,7 +229,7 @@ pub struct ExExManager

{ /// /// The first element of the tuple is a monotonically increasing ID unique to the notification /// (the second element of the tuple). - buffer: VecDeque<(usize, ExExNotification)>, + buffer: VecDeque<(usize, ExExNotification)>, /// Max size of the internal state notifications buffer. max_capacity: usize, /// Current state notifications buffer capacity. @@ -241,17 +244,20 @@ pub struct ExExManager

{ finished_height: watch::Sender, /// Write-Ahead Log for the [`ExExNotification`]s. - wal: Wal, + wal: Wal, /// A stream of finalized headers. finalized_header_stream: ForkChoiceStream, /// A handle to the `ExEx` manager. - handle: ExExManagerHandle, + handle: ExExManagerHandle, /// Metrics for the `ExEx` manager. metrics: ExExManagerMetrics, } -impl

ExExManager

{ +impl ExExManager +where + N: NodePrimitives, +{ /// Create a new [`ExExManager`]. /// /// You must provide an [`ExExHandle`] for each `ExEx` and the maximum capacity of the @@ -261,9 +267,9 @@ impl

ExExManager

{ /// notifications over [`ExExManagerHandle`]s until there is capacity again. pub fn new( provider: P, - handles: Vec, + handles: Vec>, max_capacity: usize, - wal: Wal, + wal: Wal, finalized_header_stream: ForkChoiceStream, ) -> Self { let num_exexs = handles.len(); @@ -314,7 +320,7 @@ impl

ExExManager

{ } /// Returns the handle to the manager. - pub fn handle(&self) -> ExExManagerHandle { + pub fn handle(&self) -> ExExManagerHandle { self.handle.clone() } @@ -333,16 +339,17 @@ impl

ExExManager

{ /// Pushes a new notification into the managers internal buffer, assigning the notification a /// unique ID. - fn push_notification(&mut self, notification: ExExNotification) { + fn push_notification(&mut self, notification: ExExNotification) { let next_id = self.next_id; self.buffer.push_back((next_id, notification)); self.next_id += 1; } } -impl

ExExManager

+impl ExExManager where P: HeaderProvider, + N: NodePrimitives, { /// Finalizes the WAL according to the passed finalized header. /// @@ -413,9 +420,10 @@ where } } -impl

Future for ExExManager

+impl Future for ExExManager where P: HeaderProvider + Unpin + 'static, + N: NodePrimitives, { type Output = eyre::Result<()>; @@ -456,8 +464,9 @@ where // Drain handle notifications while this.buffer.len() < this.max_capacity { if let Poll::Ready(Some((source, notification))) = this.handle_rx.poll_recv(cx) { - let committed_tip = notification.committed_chain().map(|chain| chain.tip().number); - let reverted_tip = notification.reverted_chain().map(|chain| chain.tip().number); + let committed_tip = + notification.committed_chain().map(|chain| chain.tip().number()); + let reverted_tip = notification.reverted_chain().map(|chain| chain.tip().number()); debug!(target: "exex::manager", ?committed_tip, ?reverted_tip, "Received new notification"); // Commit to WAL only notifications from blockchain tree. Pipeline notifications @@ -524,9 +533,9 @@ where /// A handle to communicate with the [`ExExManager`]. #[derive(Debug)] -pub struct ExExManagerHandle { +pub struct ExExManagerHandle { /// Channel to send notifications to the `ExEx` manager. - exex_tx: UnboundedSender<(ExExNotificationSource, ExExNotification)>, + exex_tx: UnboundedSender<(ExExNotificationSource, ExExNotification)>, /// The number of `ExEx`'s running on the node. num_exexs: usize, /// A watch channel denoting whether the manager is ready for new notifications or not. @@ -544,7 +553,7 @@ pub struct ExExManagerHandle { finished_height: watch::Receiver, } -impl ExExManagerHandle { +impl ExExManagerHandle { /// Creates an empty manager handle. /// /// Use this if there is no manager present. 
@@ -571,8 +580,8 @@ impl ExExManagerHandle { pub fn send( &self, source: ExExNotificationSource, - notification: ExExNotification, - ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { + notification: ExExNotification, + ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { self.exex_tx.send((source, notification)) } @@ -583,8 +592,8 @@ impl ExExManagerHandle { pub async fn send_async( &mut self, source: ExExNotificationSource, - notification: ExExNotification, - ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { + notification: ExExNotification, + ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { self.ready().await; self.exex_tx.send((source, notification)) } @@ -633,7 +642,7 @@ async fn make_wait_future(mut rx: watch::Receiver) -> watch::Receiver Clone for ExExManagerHandle { fn clone(&self) -> Self { Self { exex_tx: self.exex_tx.clone(), @@ -653,6 +662,7 @@ mod tests { use futures::{StreamExt, TryStreamExt}; use rand::Rng; use reth_db_common::init::init_genesis; + use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::SealedBlockWithSenders; use reth_provider::{ @@ -673,8 +683,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (mut exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (mut exex_handle, event_tx, mut _notification_rx) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Send an event and check that it's delivered correctly let event = ExExEvent::FinishedHeight(BlockNumHash::new(42, B256::random())); @@ -688,8 +703,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle_1, _, _) = - 
ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle_1, _, _) = ExExHandle::new( + "test_exex_1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); assert!(!ExExManager::new((), vec![], 0, wal.clone(), empty_finalized_header_stream()) .handle @@ -705,8 +725,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle_1, _, _) = ExExHandle::new( + "test_exex_1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); assert!(!ExExManager::new((), vec![], 0, wal.clone(), empty_finalized_header_stream()) .handle @@ -728,8 +753,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle, _, _) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle, _, _) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Create a mock ExExManager and add the exex_handle to it let mut exex_manager = @@ -778,8 +808,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle, _, _) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle, _, _) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Create a mock ExExManager and add the exex_handle to it let max_capacity = 5; @@ -824,8 +859,13 @@ mod tests { let provider_factory = create_test_provider_factory(); - let (exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let 
(exex_handle, event_tx, mut _notification_rx) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Check initial block height assert!(exex_handle.finished_height.is_none()); @@ -874,10 +914,20 @@ mod tests { let provider_factory = create_test_provider_factory(); // Create two `ExExHandle` instances - let (exex_handle1, event_tx1, _) = - ExExHandle::new("test_exex1".to_string(), Head::default(), (), (), wal.handle()); - let (exex_handle2, event_tx2, _) = - ExExHandle::new("test_exex2".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle1, event_tx1, _) = ExExHandle::new( + "test_exex1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); + let (exex_handle2, event_tx2, _) = ExExHandle::new( + "test_exex2".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); let block1 = BlockNumHash::new(42, B256::random()); let block2 = BlockNumHash::new(10, B256::random()); @@ -921,10 +971,20 @@ mod tests { let provider_factory = create_test_provider_factory(); // Create two `ExExHandle` instances - let (exex_handle1, event_tx1, _) = - ExExHandle::new("test_exex1".to_string(), Head::default(), (), (), wal.handle()); - let (exex_handle2, event_tx2, _) = - ExExHandle::new("test_exex2".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle1, event_tx1, _) = ExExHandle::new( + "test_exex1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); + let (exex_handle2, event_tx2, _) = ExExHandle::new( + "test_exex2".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Assert that the initial block height is `None` for the first `ExExHandle`. 
assert!(exex_handle1.finished_height.is_none()); @@ -974,8 +1034,13 @@ mod tests { let provider_factory = create_test_provider_factory(); - let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle_1, _, _) = ExExHandle::new( + "test_exex_1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Create an ExExManager with a small max capacity let max_capacity = 2; diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 954a057fc09..05892e2f90d 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -1,8 +1,11 @@ use crate::{BackfillJobFactory, ExExNotification, StreamBackfillJob, WalHandle}; +use alloy_consensus::BlockHeader; use futures::{Stream, StreamExt}; use reth_chainspec::Head; use reth_evm::execute::BlockExecutorProvider; use reth_exex_types::ExExHead; +use reth_node_api::NodePrimitives; +use reth_primitives::EthPrimitives; use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory}; use reth_tracing::tracing::debug; use std::{ @@ -17,14 +20,19 @@ use tokio::sync::mpsc::Receiver; /// stream is configured with a head via [`ExExNotifications::set_with_head`] or /// [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. #[derive(Debug)] -pub struct ExExNotifications { +pub struct ExExNotifications +where + E: BlockExecutorProvider, +{ inner: ExExNotificationsInner, } /// A trait, that represents a stream of [`ExExNotification`]s. The stream will emit notifications /// for all blocks. If the stream is configured with a head via [`ExExNotifications::set_with_head`] /// or [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. 
-pub trait ExExNotificationsStream: Stream> + Unpin { +pub trait ExExNotificationsStream: + Stream>> + Unpin +{ /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s without a head. /// /// It's a no-op if the stream has already been configured without a head. @@ -56,7 +64,10 @@ pub trait ExExNotificationsStream: Stream> } #[derive(Debug)] -enum ExExNotificationsInner { +enum ExExNotificationsInner +where + E: BlockExecutorProvider, +{ /// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. WithoutHead(ExExNotificationsWithoutHead), /// A stream of [`ExExNotification`]s. The stream will only emit notifications for blocks that @@ -67,14 +78,17 @@ enum ExExNotificationsInner { Invalid, } -impl ExExNotifications { +impl ExExNotifications +where + E: BlockExecutorProvider, +{ /// Creates a new stream of [`ExExNotifications`] without a head. pub const fn new( node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, ) -> Self { Self { inner: ExExNotificationsInner::WithoutHead(ExExNotificationsWithoutHead::new( @@ -88,15 +102,13 @@ impl ExExNotifications { } } -impl ExExNotificationsStream for ExExNotifications +impl ExExNotificationsStream for ExExNotifications where - P: BlockReader - + HeaderProvider - + StateProviderFactory + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider> + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, { fn set_without_head(&mut self) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); @@ -144,15 +156,13 @@ where impl Stream for ExExNotifications where - P: BlockReader - + HeaderProvider - + StateProviderFactory + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider> + Clone + Unpin + 'static, - E: BlockExecutorProvider + 
Clone + Unpin + 'static, { - type Item = eyre::Result; + type Item = eyre::Result>; fn poll_next( self: std::pin::Pin<&mut Self>, @@ -169,15 +179,21 @@ where } /// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. -pub struct ExExNotificationsWithoutHead { +pub struct ExExNotificationsWithoutHead +where + E: BlockExecutorProvider, +{ node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, } -impl Debug for ExExNotificationsWithoutHead { +impl Debug for ExExNotificationsWithoutHead +where + E: Debug + BlockExecutorProvider, +{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ExExNotifications") .field("provider", &self.provider) @@ -187,14 +203,17 @@ impl Debug for ExExNotificationsWithoutHead { } } -impl ExExNotificationsWithoutHead { +impl ExExNotificationsWithoutHead +where + E: BlockExecutorProvider, +{ /// Creates a new instance of [`ExExNotificationsWithoutHead`]. const fn new( node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, ) -> Self { Self { node_head, provider, executor, notifications, wal_handle } } @@ -212,8 +231,11 @@ impl ExExNotificationsWithoutHead { } } -impl Stream for ExExNotificationsWithoutHead { - type Item = ExExNotification; +impl Stream for ExExNotificationsWithoutHead +where + E: Unpin + BlockExecutorProvider, +{ + type Item = ExExNotification; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().notifications.poll_recv(cx) @@ -229,12 +251,15 @@ impl Stream for ExExNotificationsWithoutHead { /// `exex_head.number` of 10 indicates that the ExEx has processed up to block 10, and is ready to /// process block 11. 
#[derive(Debug)] -pub struct ExExNotificationsWithHead { +pub struct ExExNotificationsWithHead +where + E: BlockExecutorProvider, +{ node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, exex_head: ExExHead, /// If true, then we need to check if the ExEx head is on the canonical chain and if not, /// revert its head. @@ -243,17 +268,20 @@ pub struct ExExNotificationsWithHead { /// the missing blocks. pending_check_backfill: bool, /// The backfill job to run before consuming any notifications. - backfill_job: Option>, + backfill_job: Option>>, } -impl ExExNotificationsWithHead { +impl ExExNotificationsWithHead +where + E: BlockExecutorProvider, +{ /// Creates a new [`ExExNotificationsWithHead`]. const fn new( node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, exex_head: ExExHead, ) -> Self { Self { @@ -272,20 +300,18 @@ impl ExExNotificationsWithHead { impl ExExNotificationsWithHead where - P: BlockReader - + HeaderProvider - + StateProviderFactory + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider> + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, { /// Checks if the ExEx head is on the canonical chain. /// /// If the head block is not found in the database or it's ahead of the node head, it means /// we're not on the canonical chain and we need to revert the notification with the ExEx /// head block. - fn check_canonical(&mut self) -> eyre::Result> { + fn check_canonical(&mut self) -> eyre::Result>> { if self.provider.is_known(&self.exex_head.block.hash)? && self.exex_head.block.number <= self.node_head.number { @@ -309,7 +335,7 @@ where // Update the head block hash to the parent hash of the first committed block. 
let committed_chain = notification.committed_chain().unwrap(); let new_exex_head = - (committed_chain.first().parent_hash, committed_chain.first().number - 1).into(); + (committed_chain.first().parent_hash(), committed_chain.first().number() - 1).into(); debug!(target: "exex::notifications", old_exex_head = ?self.exex_head.block, new_exex_head = ?new_exex_head, "ExEx head updated"); self.exex_head.block = new_exex_head; @@ -354,15 +380,13 @@ where impl Stream for ExExNotificationsWithHead where - P: BlockReader - + HeaderProvider - + StateProviderFactory + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider> + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, { - type Item = eyre::Result; + type Item = eyre::Result>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -402,7 +426,7 @@ where this.exex_head.block = committed_chain.tip().num_hash(); } else if let Some(reverted_chain) = notification.reverted_chain() { let first_block = reverted_chain.first(); - this.exex_head.block = (first_block.parent_hash, first_block.number - 1).into(); + this.exex_head.block = (first_block.parent_hash(), first_block.number() - 1).into(); } Poll::Ready(Some(Ok(notification))) diff --git a/crates/exex/exex/src/wal/cache.rs b/crates/exex/exex/src/wal/cache.rs index 86943f33cfa..b5e0f2034e8 100644 --- a/crates/exex/exex/src/wal/cache.rs +++ b/crates/exex/exex/src/wal/cache.rs @@ -3,9 +3,11 @@ use std::{ collections::{BinaryHeap, HashSet}, }; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use alloy_primitives::{map::FbHashMap, BlockNumber, B256}; use reth_exex_types::ExExNotification; +use reth_node_api::NodePrimitives; /// The block cache of the WAL. /// @@ -91,16 +93,16 @@ impl BlockCache { } /// Inserts the blocks from the notification into the cache with the given file ID. 
- pub(super) fn insert_notification_blocks_with_file_id( + pub(super) fn insert_notification_blocks_with_file_id( &mut self, file_id: u32, - notification: &ExExNotification, + notification: &ExExNotification, ) { let reverted_chain = notification.reverted_chain(); let committed_chain = notification.committed_chain(); let max_block = - reverted_chain.iter().chain(&committed_chain).map(|chain| chain.tip().number).max(); + reverted_chain.iter().chain(&committed_chain).map(|chain| chain.tip().number()).max(); if let Some(max_block) = max_block { self.notification_max_blocks.push(Reverse((max_block, file_id))); } @@ -108,13 +110,13 @@ impl BlockCache { if let Some(committed_chain) = &committed_chain { for block in committed_chain.blocks().values() { let cached_block = CachedBlock { - block: (block.number, block.hash()).into(), - parent_hash: block.parent_hash, + block: (block.number(), block.hash()).into(), + parent_hash: block.parent_hash(), }; self.committed_blocks.insert(block.hash(), (file_id, cached_block)); } - self.highest_committed_block_height = Some(committed_chain.tip().number); + self.highest_committed_block_height = Some(committed_chain.tip().number()); } } diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 066fbe1b58c..fb6be6e8c85 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -3,6 +3,8 @@ mod cache; pub use cache::BlockCache; mod storage; +use reth_node_api::NodePrimitives; +use reth_primitives::EthPrimitives; pub use storage::Storage; mod metrics; use metrics::Metrics; @@ -32,23 +34,26 @@ use reth_tracing::tracing::{debug, instrument}; /// 2. When the chain is finalized, call [`Wal::finalize`] to prevent the infinite growth of the /// WAL. #[derive(Debug, Clone)] -pub struct Wal { - inner: Arc, +pub struct Wal { + inner: Arc>, } -impl Wal { +impl Wal +where + N: NodePrimitives, +{ /// Creates a new instance of [`Wal`]. 
pub fn new(directory: impl AsRef) -> eyre::Result { Ok(Self { inner: Arc::new(WalInner::new(directory)?) }) } /// Returns a read-only handle to the WAL. - pub fn handle(&self) -> WalHandle { + pub fn handle(&self) -> WalHandle { WalHandle { wal: self.inner.clone() } } /// Commits the notification to WAL. - pub fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { + pub fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { self.inner.commit(notification) } @@ -63,7 +68,7 @@ impl Wal { /// Returns an iterator over all notifications in the WAL. pub fn iter_notifications( &self, - ) -> eyre::Result> + '_>> { + ) -> eyre::Result>> + '_>> { self.inner.iter_notifications() } @@ -75,16 +80,19 @@ impl Wal { /// Inner type for the WAL. #[derive(Debug)] -struct WalInner { +struct WalInner { next_file_id: AtomicU32, /// The underlying WAL storage backed by a file. - storage: Storage, + storage: Storage, /// WAL block cache. See [`cache::BlockCache`] docs for more details. block_cache: RwLock, metrics: Metrics, } -impl WalInner { +impl WalInner +where + N: NodePrimitives, +{ fn new(directory: impl AsRef) -> eyre::Result { let mut wal = Self { next_file_id: AtomicU32::new(0), @@ -137,7 +145,7 @@ impl WalInner { reverted_block_range = ?notification.reverted_chain().as_ref().map(|chain| chain.range()), committed_block_range = ?notification.committed_chain().as_ref().map(|chain| chain.range()) ))] - fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { + fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { let mut block_cache = self.block_cache.write(); let file_id = self.next_file_id.fetch_add(1, Ordering::Relaxed); @@ -187,7 +195,7 @@ impl WalInner { /// Returns an iterator over all notifications in the WAL. fn iter_notifications( &self, - ) -> eyre::Result> + '_>> { + ) -> eyre::Result>> + '_>> { let Some(range) = self.storage.files_range()? 
else { return Ok(Box::new(std::iter::empty())) }; @@ -198,16 +206,19 @@ impl WalInner { /// A read-only handle to the WAL that can be shared. #[derive(Debug)] -pub struct WalHandle { - wal: Arc, +pub struct WalHandle { + wal: Arc>, } -impl WalHandle { +impl WalHandle +where + N: NodePrimitives, +{ /// Returns the notification for the given committed block hash if it exists. pub fn get_committed_notification_by_block_hash( &self, block_hash: &B256, - ) -> eyre::Result> { + ) -> eyre::Result>> { let Some(file_id) = self.wal.block_cache().get_file_id_by_committed_block_hash(block_hash) else { return Ok(None) diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs index aaa4398fd0b..699d88ba2a7 100644 --- a/crates/exex/exex/src/wal/storage.rs +++ b/crates/exex/exex/src/wal/storage.rs @@ -6,6 +6,8 @@ use std::{ use eyre::OptionExt; use reth_exex_types::ExExNotification; +use reth_node_api::NodePrimitives; +use reth_primitives::EthPrimitives; use reth_tracing::tracing::debug; use tracing::instrument; @@ -16,18 +18,22 @@ static FILE_EXTENSION: &str = "wal"; /// Each notification is represented by a single file that contains a MessagePack-encoded /// notification. #[derive(Debug, Clone)] -pub struct Storage { +pub struct Storage { /// The path to the WAL file. path: PathBuf, + _pd: std::marker::PhantomData, } -impl Storage { +impl Storage +where + N: NodePrimitives, +{ /// Creates a new instance of [`Storage`] backed by the file at the given path and creates /// it doesn't exist. 
pub(super) fn new(path: impl AsRef) -> eyre::Result { reth_fs_util::create_dir_all(&path)?; - Ok(Self { path: path.as_ref().to_path_buf() }) + Ok(Self { path: path.as_ref().to_path_buf(), _pd: std::marker::PhantomData }) } fn file_path(&self, id: u32) -> PathBuf { @@ -110,7 +116,7 @@ impl Storage { pub(super) fn iter_notifications( &self, range: RangeInclusive, - ) -> impl Iterator> + '_ { + ) -> impl Iterator)>> + '_ { range.map(move |id| { let (notification, size) = self.read_notification(id)?.ok_or_eyre("notification {id} not found")?; @@ -124,7 +130,7 @@ impl Storage { pub(super) fn read_notification( &self, file_id: u32, - ) -> eyre::Result> { + ) -> eyre::Result, u64)>> { let file_path = self.file_path(file_id); debug!(target: "exex::wal::storage", ?file_path, "Reading notification from WAL"); @@ -136,7 +142,7 @@ impl Storage { let size = file.metadata()?.len(); // Deserialize using the bincode- and msgpack-compatible serde wrapper - let notification: reth_exex_types::serde_bincode_compat::ExExNotification<'_> = + let notification: reth_exex_types::serde_bincode_compat::ExExNotification<'_, N> = rmp_serde::decode::from_read(&mut file).map_err(|err| { eyre::eyre!("failed to decode notification from {file_path:?}: {err:?}") })?; @@ -153,14 +159,14 @@ impl Storage { pub(super) fn write_notification( &self, file_id: u32, - notification: &ExExNotification, + notification: &ExExNotification, ) -> eyre::Result { let file_path = self.file_path(file_id); debug!(target: "exex::wal::storage", ?file_path, "Writing notification to WAL"); // Serialize using the bincode- and msgpack-compatible serde wrapper let notification = - reth_exex_types::serde_bincode_compat::ExExNotification::from(notification); + reth_exex_types::serde_bincode_compat::ExExNotification::::from(notification); reth_fs_util::atomic_write_file(&file_path, |file| { rmp_serde::encode::write(file, ¬ification) @@ -186,7 +192,7 @@ mod tests { let mut rng = generators::rng(); let temp_dir = 
tempfile::tempdir()?; - let storage = Storage::new(&temp_dir)?; + let storage: Storage = Storage::new(&temp_dir)?; let old_block = random_block(&mut rng, 0, Default::default()) .seal_with_senders() @@ -215,7 +221,7 @@ mod tests { #[test] fn test_files_range() -> eyre::Result<()> { let temp_dir = tempfile::tempdir()?; - let storage = Storage::new(&temp_dir)?; + let storage: Storage = Storage::new(&temp_dir)?; // Create WAL files File::create(storage.file_path(1))?; diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index ca0ea46551c..9acad4d4b65 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -80,7 +80,7 @@ pub struct TestExecutorBuilder; impl ExecutorBuilder for TestExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = EthEvmConfig; type Executor = MockExecutorProvider; diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index 5ded40d061b..44eeb25084a 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -7,30 +7,30 @@ use reth_primitives_traits::NodePrimitives; /// Notifications sent to an `ExEx`. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum ExExNotification { +pub enum ExExNotification { /// Chain got committed without a reorg, and only the new chain is returned. ChainCommitted { /// The new chain after commit. - new: Arc>, + new: Arc>, }, /// Chain got reorged, and both the old and the new chains are returned. ChainReorged { /// The old chain before reorg. - old: Arc>, + old: Arc>, /// The new chain after reorg. - new: Arc>, + new: Arc>, }, /// Chain got reverted, and only the old chain is returned. ChainReverted { /// The old chain before reversion. 
- old: Arc>, + old: Arc>, }, } -impl ExExNotification { +impl ExExNotification { /// Returns the committed chain from the [`Self::ChainCommitted`] and [`Self::ChainReorged`] /// variants, if any. - pub fn committed_chain(&self) -> Option> { + pub fn committed_chain(&self) -> Option>> { match self { Self::ChainCommitted { new } | Self::ChainReorged { old: _, new } => Some(new.clone()), Self::ChainReverted { .. } => None, @@ -39,7 +39,7 @@ impl ExExNotification { /// Returns the reverted chain from the [`Self::ChainReorged`] and [`Self::ChainReverted`] /// variants, if any. - pub fn reverted_chain(&self) -> Option> { + pub fn reverted_chain(&self) -> Option>> { match self { Self::ChainReorged { old, new: _ } | Self::ChainReverted { old } => Some(old.clone()), Self::ChainCommitted { .. } => None, diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 5d25d8d592c..d640c0dbb0e 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -53,7 +53,7 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Evm: ConfigureEvm

; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider; + type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. type Consensus: Consensus + Clone + Unpin + 'static; diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 95c0c764b5c..46b6824dba7 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -10,7 +10,7 @@ use crate::{ use alloy_consensus::Header; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::NodeTypesWithEngine; +use reth_node_api::{NodeTypes, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::TransactionPool; use std::{future::Future, marker::PhantomData}; @@ -377,7 +377,7 @@ where Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm
, - Executor: BlockExecutorProvider, + Executor: BlockExecutorProvider::Primitives>, Cons: Consensus + Clone + Unpin + 'static, { type Components = Components; diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 4e8f63f412b..0c75ef3016f 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -13,7 +13,9 @@ pub trait ExecutorBuilder: Send { type EVM: ConfigureEvm
; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider; + type Executor: BlockExecutorProvider< + Primitives = ::Primitives, + >; /// Creates the EVM config. fn build_evm( @@ -26,7 +28,8 @@ impl ExecutorBuilder for F where Node: FullNodeTypes, EVM: ConfigureEvm
, - Executor: BlockExecutorProvider, + Executor: + BlockExecutorProvider::Primitives>, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future> + Send, { diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 1fe35e554d5..a3f3017463d 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -27,7 +27,7 @@ use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; -use reth_node_api::NodeTypesWithEngine; +use reth_node_api::{NodeTypes, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::TransactionPool; @@ -44,7 +44,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati type Evm: ConfigureEvm
; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider; + type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. type Consensus: Consensus + Clone + Unpin + 'static; @@ -99,7 +99,7 @@ where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm
, - Executor: BlockExecutorProvider, + Executor: BlockExecutorProvider::Primitives>, Cons: Consensus + Clone + Unpin + 'static, { type Pool = Pool; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 092c1fdf651..5c76718a3b1 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -35,12 +35,12 @@ pub fn build_networked_pipeline( max_block: Option, static_file_producer: StaticFileProducer>, executor: Executor, - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where N: ProviderNodeTypes, Client: EthBlockClient + 'static, - Executor: BlockExecutorProvider, + Executor: BlockExecutorProvider, N::Primitives: FullNodePrimitives< Block = reth_primitives::Block, BlockBody = reth_primitives::BlockBody, @@ -86,13 +86,13 @@ pub fn build_pipeline( prune_config: Option, static_file_producer: StaticFileProducer>, executor: Executor, - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where N: ProviderNodeTypes, H: HeaderDownloader
+ 'static, B: BodyDownloader> + 'static, - Executor: BlockExecutorProvider, + Executor: BlockExecutorProvider, N::Primitives: FullNodePrimitives< Block = reth_primitives::Block, BlockBody = reth_primitives::BlockBody, diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 807f224ca4b..b5d6fac5073 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -32,6 +32,7 @@ alloy-consensus.workspace = true reth-optimism-consensus.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-forks.workspace = true +reth-optimism-primitives.workspace = true # revm revm.workspace = true diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 1c93d2b71d0..4b4bccae406 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -20,6 +20,7 @@ use reth_evm::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OpHardfork; +use reth_optimism_primitives::OpPrimitives; use reth_primitives::{BlockWithSenders, Receipt, TxType}; use reth_revm::{Database, State}; use revm_primitives::{db::DatabaseCommit, EnvWithHandlerCfg, ResultAndState, U256}; @@ -53,6 +54,7 @@ where EvmConfig: Clone + Unpin + Sync + Send + 'static + ConfigureEvm
, { + type Primitives = OpPrimitives; type Strategy + Display>> = OpExecutionStrategy; @@ -109,11 +111,13 @@ where } } -impl BlockExecutionStrategy for OpExecutionStrategy +impl BlockExecutionStrategy for OpExecutionStrategy where DB: Database + Display>, EvmConfig: ConfigureEvm
, { + type DB = DB; + type Primitives = OpPrimitives; type Error = BlockExecutionError; fn init(&mut self, tx_env_overrides: Box) { diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index d6cd47cf2af..429cb9ae229 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -277,7 +277,7 @@ pub struct OpExecutorBuilder; impl ExecutorBuilder for OpExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = OpEvmConfig; type Executor = BasicBlockExecutorProvider; diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 15ba049250f..01b0bd421d7 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -14,7 +14,7 @@ use revm::db::states::bundle_state::BundleRetention; /// - recording receipts during execution of multiple blocks. /// - pruning receipts according to the pruning configuration. /// - batch range if known -#[derive(Debug, Default)] +#[derive(Debug)] pub struct BlockBatchRecord { /// Pruning configuration. prune_modes: PruneModes, @@ -43,6 +43,19 @@ pub struct BlockBatchRecord { tip: Option, } +impl Default for BlockBatchRecord { + fn default() -> Self { + Self { + prune_modes: Default::default(), + receipts: Default::default(), + requests: Default::default(), + pruning_address_filter: Default::default(), + first_block: Default::default(), + tip: Default::default(), + } + } +} + impl BlockBatchRecord { /// Create a new receipts recorder with the given pruning configuration. pub fn new(prune_modes: PruneModes) -> Self @@ -83,10 +96,7 @@ impl BlockBatchRecord { } /// Returns all recorded receipts. 
- pub fn take_receipts(&mut self) -> Receipts - where - T: Default, - { + pub fn take_receipts(&mut self) -> Receipts { core::mem::take(&mut self.receipts) } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 8f5c84835aa..46b3888f05b 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -48,7 +48,7 @@ //! Events: //! CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm
, -//! BlockExecutor: BlockExecutorProvider, +//! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::Consensus + Clone + 'static, //! { //! // configure the rpc module per transport @@ -130,7 +130,7 @@ //! EngineApi: EngineApiServer, //! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm
, -//! BlockExecutor: BlockExecutorProvider, +//! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::Consensus + Clone + 'static, //! { //! // configure the rpc module per transport @@ -198,7 +198,7 @@ use reth_consensus::Consensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives::EthPrimitives; +use reth_primitives::{EthPrimitives, NodePrimitives}; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullRpcProvider, ReceiptProvider, StateProviderFactory, @@ -278,7 +278,12 @@ where Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, EthApi: FullEthApiServer, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { let module_config = module_config.into(); server_config @@ -630,7 +635,12 @@ where Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, Consensus: reth_consensus::Consensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can @@ -1104,7 +1114,8 @@ where RpcBlock, RpcReceipt, > + EthApiTypes, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: + BlockExecutorProvider>, { /// Register Eth Namespace /// @@ -1250,7 +1261,8 @@ where pub fn debug_api(&self) -> DebugApi where EthApi: EthApiSpec + EthTransactions + TraceExt, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: + BlockExecutorProvider>, { DebugApi::new( self.provider.clone(), @@ -1306,7 +1318,12 @@ where Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, EthApi: FullEthApiServer, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, Consensus: reth_consensus::Consensus + Clone + 'static, { /// Configures the auth module that includes the diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 9fc1be93a2f..d64cdf3afea 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -18,17 +18,17 @@ use reth_evm::{ execute::{BlockExecutorProvider, Executor}, ConfigureEvmEnv, }; -use reth_primitives::{Block, BlockExt, SealedBlockWithSenders}; +use reth_primitives::{Block, BlockExt, NodePrimitives, SealedBlockWithSenders}; use reth_primitives_traits::SignedTransaction; use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, - TransactionVariant, + BlockReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, + StateProviderFactory, TransactionVariant, }; use 
reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ helpers::{EthApiSpec, EthTransactions, TraceExt}, - EthApiTypes, FromEthApiError, + EthApiTypes, FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; @@ -81,7 +81,9 @@ where + StateProviderFactory + 'static, Eth: EthApiTypes + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives::Provider as BlockReader>::Block>, + >, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -800,7 +802,9 @@ where + StateProviderFactory + 'static, Eth: EthApiSpec + EthTransactions + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives::Provider as BlockReader>::Block>, + >, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index a5e29bb739f..1885c8ad2e0 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -1,4 +1,4 @@ -use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction}; +use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction, TxReceipt}; use alloy_eips::eip4844::kzg_to_versioned_hash; use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, @@ -15,7 +15,7 @@ use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Block, GotExpected, Receipt, SealedBlockWithSenders, SealedHeader}; 
+use reth_primitives::{Block, GotExpected, NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ AccountReader, BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, HeaderProvider, StateProviderFactory, WithdrawalsProvider, @@ -95,7 +95,12 @@ where + AccountReader + WithdrawalsProvider + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( @@ -258,7 +263,7 @@ where fn ensure_payment( &self, block: &Block, - output: &BlockExecutionOutput, + output: &BlockExecutionOutput<::Receipt>, message: &BidTrace, ) -> Result<(), ValidationApiError> { let (mut balance_before, balance_after) = if let Some(acc) = @@ -292,7 +297,7 @@ where .zip(block.body.transactions.last()) .ok_or(ValidationApiError::ProposerPayment)?; - if !receipt.success { + if !receipt.status() { return Err(ValidationApiError::ProposerPayment) } @@ -407,7 +412,12 @@ where + WithdrawalsProvider + Clone + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { async fn validate_builder_submission_v1( &self, diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index d04a96470a0..df5a4c542bf 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -20,8 +20,9 @@ //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::StageConfig; //! # use reth_evm::execute::BlockExecutorProvider; +//! # use reth_primitives::EthPrimitives; //! -//! # fn create(exec: impl BlockExecutorProvider) { +//! # fn create(exec: impl BlockExecutorProvider) { //! //! let provider_factory = create_test_provider_factory(); //! 
let static_file_producer = diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index ce969f2577d..f7832dd788e 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -67,7 +67,10 @@ use super::missing_static_data_error; /// values to [`tables::PlainStorageState`] // false positive, we cannot derive it if !DB: Debug. #[allow(missing_debug_implementations)] -pub struct ExecutionStage { +pub struct ExecutionStage +where + E: BlockExecutorProvider, +{ /// The stage's internal block executor executor_provider: E, /// The commit thresholds of the execution stage. @@ -82,25 +85,28 @@ pub struct ExecutionStage { /// Input for the post execute commit hook. /// Set after every [`ExecutionStage::execute`] and cleared after /// [`ExecutionStage::post_execute_commit`]. - post_execute_commit_input: Option, + post_execute_commit_input: Option>, /// Input for the post unwind commit hook. /// Set after every [`ExecutionStage::unwind`] and cleared after /// [`ExecutionStage::post_unwind_commit`]. - post_unwind_commit_input: Option, + post_unwind_commit_input: Option>, /// Handle to communicate with `ExEx` manager. - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, /// Executor metrics. metrics: ExecutorMetrics, } -impl ExecutionStage { +impl ExecutionStage +where + E: BlockExecutorProvider, +{ /// Create new execution stage with specified config. 
pub fn new( executor_provider: E, thresholds: ExecutionStageThresholds, external_clean_threshold: u64, prune_modes: PruneModes, - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, ) -> Self { Self { external_clean_threshold, @@ -257,13 +263,13 @@ impl ExecutionStage { impl Stage for ExecutionStage where - E: BlockExecutorProvider, + E: BlockExecutorProvider>, Provider: DBProvider - + BlockReader + + BlockReader::Block> + StaticFileProviderFactory + StatsReader + BlockHashReader - + StateWriter + + StateWriter::Receipt> + StateCommitmentProvider, { /// Return the id of the stage @@ -373,7 +379,7 @@ where } stage_progress = block_number; - stage_checkpoint.progress.processed += block.gas_used(); + stage_checkpoint.progress.processed += block.header().gas_used(); // If we have ExExes we need to save the block in memory for later if self.exex_manager_handle.has_exexs() { @@ -512,7 +518,8 @@ where stage_checkpoint.progress.processed -= provider .block_by_number(block_number)? .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))? 
- .gas_used; + .header() + .gas_used(); } } let checkpoint = if let Some(stage_checkpoint) = stage_checkpoint { diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index ccba73afbc1..3d756ae92c0 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -26,7 +26,7 @@ use reth_evm::execute::{ }; use reth_evm_ethereum::EthEvmConfig; use reth_node_ethereum::{node::EthereumAddOns, BasicBlockExecutorProvider, EthereumNode}; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; use std::{fmt::Display, sync::Arc}; pub const SYSTEM_ADDRESS: Address = address!("fffffffffffffffffffffffffffffffffffffffe"); @@ -59,7 +59,7 @@ pub struct CustomExecutorBuilder; impl ExecutorBuilder for CustomExecutorBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, { type EVM = EthEvmConfig; @@ -88,6 +88,7 @@ pub struct CustomExecutorStrategyFactory { } impl BlockExecutionStrategyFactory for CustomExecutorStrategyFactory { + type Primitives = EthPrimitives; type Strategy + Display>> = CustomExecutorStrategy; fn create_strategy(&self, db: DB) -> Self::Strategy @@ -135,10 +136,12 @@ where } } -impl BlockExecutionStrategy for CustomExecutorStrategy +impl BlockExecutionStrategy for CustomExecutorStrategy where DB: Database + Display>, { + type DB = DB; + type Primitives = EthPrimitives; type Error = BlockExecutionError; fn apply_pre_execution_changes( diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index b9a4fc26a95..e7999818ae1 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -153,7 +153,7 @@ pub struct MyExecutorBuilder; impl ExecutorBuilder for MyExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = MyEvmConfig; type Executor = BasicBlockExecutorProvider>; diff --git 
a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index f683af4e430..29d5051434b 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -28,7 +28,7 @@ use reth_node_ethereum::{ node::EthereumAddOns, BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, EthereumNode, }; -use reth_primitives::TransactionSigned; +use reth_primitives::{EthPrimitives, TransactionSigned}; use reth_tracing::{RethTracer, Tracer}; use schnellru::{ByLength, LruMap}; use std::{collections::HashMap, convert::Infallible, sync::Arc}; @@ -226,7 +226,7 @@ pub struct MyExecutorBuilder { impl ExecutorBuilder for MyExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = MyEvmConfig; type Executor = BasicBlockExecutorProvider>; From 95b52b3be09df93449ac621cc467b45d40b95ff4 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 29 Nov 2024 17:57:38 +0400 Subject: [PATCH 780/970] chore: relax pipeline bounds (#13011) --- crates/node/builder/src/setup.rs | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 5c76718a3b1..ec4ee4956dd 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -12,9 +12,9 @@ use reth_downloaders::{ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network_p2p::{ - bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, EthBlockClient, + bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient, }; -use reth_node_api::{BodyTy, FullNodePrimitives}; +use reth_node_api::{BodyTy, HeaderTy}; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -39,13 +39,8 @@ pub fn 
build_networked_pipeline( ) -> eyre::Result> where N: ProviderNodeTypes, - Client: EthBlockClient + 'static, + Client: BlockClient
, Body = BodyTy> + 'static, Executor: BlockExecutorProvider, - N::Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, - >, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -80,7 +75,7 @@ pub fn build_pipeline( stage_config: &StageConfig, header_downloader: H, body_downloader: B, - consensus: Arc, + consensus: Arc>, max_block: Option, metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, @@ -90,14 +85,9 @@ pub fn build_pipeline( ) -> eyre::Result> where N: ProviderNodeTypes, - H: HeaderDownloader
+ 'static, + H: HeaderDownloader
> + 'static, B: BodyDownloader> + 'static, Executor: BlockExecutorProvider, - N::Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, - >, { let mut builder = Pipeline::::builder(); From db6e5b5cd41ccdeff20f59d51f404e6993b796da Mon Sep 17 00:00:00 2001 From: tedison <76473430+edisontim@users.noreply.github.com> Date: Fri, 29 Nov 2024 09:15:21 -0500 Subject: [PATCH 781/970] feat: make js-tracer feature optional for node builder crate (#12178) Co-authored-by: Matthias Seitz --- bin/reth/Cargo.toml | 2 +- crates/ethereum/node/Cargo.toml | 3 +++ crates/ethereum/node/src/lib.rs | 3 +++ crates/node/builder/Cargo.toml | 3 ++- crates/node/builder/src/lib.rs | 3 +++ crates/optimism/bin/Cargo.toml | 2 +- crates/optimism/node/Cargo.toml | 3 +++ crates/optimism/node/src/lib.rs | 3 +++ 8 files changed, 19 insertions(+), 3 deletions(-) diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index a152bea2681..cf9c53261b4 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -59,7 +59,7 @@ reth-trie-db = { workspace = true, features = ["metrics"] } reth-node-api.workspace = true reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true -reth-node-ethereum.workspace = true +reth-node-ethereum = { workspace = true, features = ["js-tracer"] } reth-node-builder.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index e6f47483b58..55421cf8478 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -70,6 +70,9 @@ rand.workspace = true [features] default = [] +js-tracer = [ + "reth-node-builder/js-tracer" +] test-utils = [ "reth-node-builder/test-utils", "reth-chainspec/test-utils", diff --git a/crates/ethereum/node/src/lib.rs b/crates/ethereum/node/src/lib.rs index 421cee37fb0..8dae6031577 100644 --- 
a/crates/ethereum/node/src/lib.rs +++ b/crates/ethereum/node/src/lib.rs @@ -1,4 +1,7 @@ //! Standalone crate for ethereum-specific Reth configuration and builder types. +//! +//! # features +//! - `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 781112d93c8..26d157e1e0c 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -45,7 +45,7 @@ reth-payload-validator.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true -reth-rpc = { workspace = true, features = ["js-tracer"] } +reth-rpc.workspace = true reth-rpc-api.workspace = true reth-rpc-builder.workspace = true reth-rpc-engine-api.workspace = true @@ -96,6 +96,7 @@ tempfile.workspace = true [features] default = [] +js-tracer = ["reth-rpc/js-tracer"] test-utils = [ "reth-db/test-utils", "reth-blockchain-tree/test-utils", diff --git a/crates/node/builder/src/lib.rs b/crates/node/builder/src/lib.rs index 899317f158c..a4f87c47984 100644 --- a/crates/node/builder/src/lib.rs +++ b/crates/node/builder/src/lib.rs @@ -1,4 +1,7 @@ //! Standalone crate for Reth configuration and builder types. +//! +//! # features +//! 
- `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 45f4492e82b..9007d084891 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -9,7 +9,7 @@ repository.workspace = true exclude.workspace = true [dependencies] -reth-node-builder.workspace = true +reth-node-builder = { workspace = true, features = ["js-tracer"] } reth-cli-util.workspace = true reth-optimism-cli.workspace = true reth-provider.workspace = true diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 5f100f0a28d..79e0c451b79 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -105,6 +105,9 @@ asm-keccak = [ "reth-optimism-node/asm-keccak", "reth-node-core/asm-keccak" ] +js-tracer = [ + "reth-node-builder/js-tracer" +] test-utils = [ "reth-tasks", "reth-e2e-test-utils", diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 7af0f3b8a72..81db8b2b7fc 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -1,4 +1,7 @@ //! Standalone crate for Optimism-specific Reth configuration and builder types. +//! +//! # features +//! 
- `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", From b10ffba33d84bf98c6e4f385aaa36e7d8584eca3 Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Fri, 29 Nov 2024 21:21:45 +0700 Subject: [PATCH 782/970] feat: add check empty when broadcasting transactions (#13008) Co-authored-by: Matthias Seitz --- crates/net/network/src/transactions/mod.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index e87731a0fd5..e17069b67c5 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -121,7 +121,11 @@ impl TransactionsHandle { /// /// Note: this only propagates the transactions that are known to the pool. pub fn propagate_hashes_to(&self, hash: impl IntoIterator, peer: PeerId) { - self.send(TransactionsCommand::PropagateHashesTo(hash.into_iter().collect(), peer)) + let hashes = hash.into_iter().collect::>(); + if hashes.is_empty() { + return + } + self.send(TransactionsCommand::PropagateHashesTo(hashes, peer)) } /// Request the active peer IDs from the [`TransactionsManager`]. @@ -132,7 +136,12 @@ impl TransactionsHandle { } /// Manually propagate full transactions to a specific peer. + /// + /// Do nothing if transactions are empty. pub fn propagate_transactions_to(&self, transactions: Vec, peer: PeerId) { + if transactions.is_empty() { + return + } self.send(TransactionsCommand::PropagateTransactionsTo(transactions, peer)) } @@ -140,7 +149,12 @@ impl TransactionsHandle { /// /// It's up to the [`TransactionsManager`] whether the transactions are sent as hashes or in /// full. + /// + /// Do nothing if transactions are empty. 
pub fn propagate_transactions(&self, transactions: Vec) { + if transactions.is_empty() { + return + } self.send(TransactionsCommand::PropagateTransactions(transactions)) } @@ -149,6 +163,9 @@ impl TransactionsHandle { &self, peers: Vec, ) -> Result>, RecvError> { + if peers.is_empty() { + return Ok(Default::default()) + } let (tx, rx) = oneshot::channel(); self.send(TransactionsCommand::GetTransactionHashes { peers, tx }); rx.await From f6895126dd9abcb490410856feb2480889627f76 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Fri, 29 Nov 2024 21:40:17 +0700 Subject: [PATCH 783/970] perf: remove clone in trie walker (#13004) --- crates/trie/db/tests/walker.rs | 7 ++++--- crates/trie/trie/src/walker.rs | 8 ++++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/crates/trie/db/tests/walker.rs b/crates/trie/db/tests/walker.rs index 06355ff6d48..0e0b094920b 100644 --- a/crates/trie/db/tests/walker.rs +++ b/crates/trie/db/tests/walker.rs @@ -63,13 +63,14 @@ where // We're traversing the path in lexicographical order. for expected in expected { - let got = walker.advance().unwrap(); + walker.advance().unwrap(); + let got = walker.key().cloned(); assert_eq!(got.unwrap(), Nibbles::from_nibbles_unchecked(expected.clone())); } // There should be 8 paths traversed in total from 3 branches. - let got = walker.advance().unwrap(); - assert!(got.is_none()); + walker.advance().unwrap(); + assert!(walker.key().is_none()); } #[test] diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index d1c5247966d..647c1486ef0 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -145,11 +145,12 @@ impl TrieWalker { } /// Advances the walker to the next trie node and updates the skip node flag. + /// The new key can then be obtained via `key()`. /// /// # Returns /// - /// * `Result, Error>` - The next key in the trie or an error. 
- pub fn advance(&mut self) -> Result, DatabaseError> { + /// * `Result<(), Error>` - Unit on success or an error. + pub fn advance(&mut self) -> Result<(), DatabaseError> { if let Some(last) = self.stack.last() { if !self.can_skip_current_node && self.children_are_in_trie() { // If we can't skip the current node and the children are in the trie, @@ -167,8 +168,7 @@ impl TrieWalker { self.update_skip_node(); } - // Return the current key. - Ok(self.key().cloned()) + Ok(()) } /// Retrieves the current root node from the DB, seeking either the exact node or the next one. From 7f11fa536879d10d8c9e27b14ff89d1a59bee557 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Nov 2024 15:50:12 +0100 Subject: [PATCH 784/970] chore: relax block impl bounds (#13013) --- crates/primitives-traits/src/block/body.rs | 1 + crates/primitives-traits/src/block/header.rs | 2 ++ crates/primitives-traits/src/block/mod.rs | 4 ++-- crates/primitives/src/block.rs | 5 ++--- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 7491c75faf9..8b1b86a9fe4 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -30,6 +30,7 @@ pub trait BlockBody: + MaybeSerde + MaybeArbitrary + MaybeSerdeBincodeCompat + + 'static { /// Ordered list of signed transactions as committed in block. 
type Transaction: SignedTransaction; diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index e03535dd308..6ac85d82caa 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -30,6 +30,7 @@ pub trait BlockHeader: + MaybeSerde + MaybeArbitrary + MaybeSerdeBincodeCompat + + 'static { } @@ -50,5 +51,6 @@ impl BlockHeader for T where + MaybeSerde + MaybeArbitrary + MaybeSerdeBincodeCompat + + 'static { } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 5b22ff590be..fcbf02a76c6 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -41,10 +41,10 @@ pub trait Block: + MaybeArbitrary { /// Header part of the block. - type Header: BlockHeader + 'static; + type Header: BlockHeader; /// The block's body contains the transactions in the block. - type Body: BlockBody + Send + Sync + Unpin + 'static; + type Body: BlockBody; /// Create new block instance. 
fn new(header: Self::Header, body: Self::Body) -> Self; diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index c4905458c75..267464be09b 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -457,9 +457,8 @@ where impl reth_primitives_traits::Block for SealedBlock where - H: reth_primitives_traits::BlockHeader + 'static, - B: reth_primitives_traits::BlockBody + 'static, - Self: Serialize + for<'a> Deserialize<'a>, + H: reth_primitives_traits::BlockHeader, + B: reth_primitives_traits::BlockBody, { type Header = H; type Body = B; From b09c345257cda4a88e8e347654e946a20f9e5cb7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Nov 2024 15:51:18 +0100 Subject: [PATCH 785/970] feat: add blob_gas_used to block body (#13012) --- crates/consensus/common/src/validation.rs | 9 ++++++--- crates/primitives-traits/src/block/body.rs | 14 ++++++++++++-- crates/primitives/src/block.rs | 5 ----- crates/rpc/rpc-eth-types/src/fee_history.rs | 6 +++--- 4 files changed, 21 insertions(+), 13 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 9e7f8d451ff..8035f8bf61c 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -65,11 +65,14 @@ pub fn validate_shanghai_withdrawals Result<(), ConsensusError> { +pub fn validate_cancun_gas( + block: &SealedBlock, +) -> Result<(), ConsensusError> { // Check that the blob gas used in the header matches the sum of the blob gas used by each // blob tx - let header_blob_gas_used = block.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?; - let total_blob_gas = block.blob_gas_used(); + let header_blob_gas_used = + block.header().blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + let total_blob_gas = block.body.blob_gas_used(); if total_blob_gas != header_blob_gas_used { return Err(ConsensusError::BlobGasUsedDiff(GotExpected { got: header_blob_gas_used, diff 
--git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 8b1b86a9fe4..c14c62c0f1e 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,8 +1,8 @@ //! Block body abstraction. use alloc::{fmt, vec::Vec}; - -use alloy_eips::eip4895::Withdrawals; +use alloy_consensus::Transaction; +use alloy_eips::{eip4844::DATA_GAS_PER_BLOB, eip4895::Withdrawals}; use crate::{ FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, MaybeSerdeBincodeCompat, @@ -49,4 +49,14 @@ pub trait BlockBody: /// Returns block ommers if any. fn ommers(&self) -> Option<&[Self::OmmerHeader]>; + + /// Calculates the total blob gas used by _all_ EIP-4844 transactions in the block. + fn blob_gas_used(&self) -> u64 { + // TODO(mattss): simplify after + self.transactions() + .iter() + .filter_map(|tx| tx.blob_versioned_hashes()) + .map(|hashes| hashes.len() as u64 * DATA_GAS_PER_BLOB) + .sum() + } } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 267464be09b..38cd78ffb44 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -273,11 +273,6 @@ impl SealedBlock { self.body.blob_transactions_iter() } - /// Calculates the total gas used by blob transactions in the sealed block. - pub fn blob_gas_used(&self) -> u64 { - self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() - } - /// Returns whether or not the block contains any blob transactions. 
#[inline] pub fn has_blob_transactions(&self) -> bool { diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 922c3f9d474..35233e6c219 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -17,13 +17,13 @@ use metrics::atomics::AtomicU64; use reth_chain_state::CanonStateNotification; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_primitives::{NodePrimitives, Receipt, SealedBlock, TransactionSigned}; +use reth_primitives_traits::{Block, BlockBody}; +use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; use reth_storage_api::BlockReaderIdExt; use revm_primitives::{calc_blob_gasprice, calc_excess_blob_gas}; use serde::{Deserialize, Serialize}; use tracing::trace; -use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; - use super::{EthApiError, EthStateCache}; /// Contains cached fee history entries for blocks. @@ -366,7 +366,7 @@ impl FeeHistoryEntry { base_fee_per_gas: block.base_fee_per_gas.unwrap_or_default(), gas_used_ratio: block.gas_used as f64 / block.gas_limit as f64, base_fee_per_blob_gas: block.blob_fee(), - blob_gas_used_ratio: block.blob_gas_used() as f64 / + blob_gas_used_ratio: block.body().blob_gas_used() as f64 / alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, excess_blob_gas: block.excess_blob_gas, blob_gas_used: block.blob_gas_used, From 36eaf565d9b7f40b05b10df4b1de0d0a9bfe955f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 29 Nov 2024 17:08:58 +0100 Subject: [PATCH 786/970] chore: add default for 2718 transactions (#13018) --- crates/primitives-traits/src/block/body.rs | 28 ++++++++++++++++--- crates/primitives/src/block.rs | 15 +++++----- .../rpc-types-compat/src/engine/payload.rs | 8 +++--- 3 files changed, 36 insertions(+), 15 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index c14c62c0f1e..b0fe1e3d082 100644 
--- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,13 +1,13 @@ //! Block body abstraction. -use alloc::{fmt, vec::Vec}; -use alloy_consensus::Transaction; -use alloy_eips::{eip4844::DATA_GAS_PER_BLOB, eip4895::Withdrawals}; - use crate::{ FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, MaybeSerdeBincodeCompat, SignedTransaction, }; +use alloc::{fmt, vec::Vec}; +use alloy_consensus::Transaction; +use alloy_eips::{eip2718::Encodable2718, eip4844::DATA_GAS_PER_BLOB, eip4895::Withdrawals}; +use alloy_primitives::Bytes; /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullBlockBody: BlockBody {} @@ -59,4 +59,24 @@ pub trait BlockBody: .map(|hashes| hashes.len() as u64 * DATA_GAS_PER_BLOB) .sum() } + + /// Returns an iterator over the encoded 2718 transactions. + /// + /// This is also known as `raw transactions`. + /// + /// See also [`Encodable2718`]. + #[doc(alias = "raw_transactions_iter")] + fn encoded_2718_transactions_iter(&self) -> impl Iterator> + '_ { + self.transactions().iter().map(|tx| tx.encoded_2718()) + } + + /// Returns a vector of encoded 2718 transactions. + /// + /// This is also known as `raw transactions`. + /// + /// See also [`Encodable2718`]. + #[doc(alias = "raw_transactions")] + fn encoded_2718_transactions(&self) -> Vec { + self.encoded_2718_transactions_iter().map(Into::into).collect() + } } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 38cd78ffb44..ae2af546c28 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -417,13 +417,14 @@ where Ok(()) } - /// Returns a vector of transactions RLP encoded with - /// [`alloy_eips::eip2718::Encodable2718::encoded_2718`]. 
- pub fn raw_transactions(&self) -> Vec - where - B::Transaction: Encodable2718, - { - self.body.transactions().iter().map(|tx| tx.encoded_2718().into()).collect() + /// Returns a vector of encoded 2718 transactions. + /// + /// This is also known as `raw transactions`. + /// + /// See also [`Encodable2718`]. + #[doc(alias = "raw_transactions")] + pub fn encoded_2718_transactions(&self) -> Vec { + self.body.encoded_2718_transactions() } } diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 46bc9502c57..afe50d3af5a 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -124,7 +124,7 @@ pub fn block_to_payload(value: SealedBlock) -> ExecutionPayload { /// Converts [`SealedBlock`] to [`ExecutionPayloadV1`] pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV1 { parent_hash: value.parent_hash, fee_recipient: value.beneficiary, @@ -145,7 +145,7 @@ pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { /// Converts [`SealedBlock`] to [`ExecutionPayloadV2`] pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { @@ -170,7 +170,7 @@ pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { /// Converts [`SealedBlock`] to [`ExecutionPayloadV3`], and returns the parent beacon block root. 
pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV3 { blob_gas_used: value.blob_gas_used.unwrap_or_default(), excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), @@ -334,7 +334,7 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { /// Transforms a [`SealedBlock`] into a [`ExecutionPayloadV1`] pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPayloadV1 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV1 { parent_hash: value.parent_hash, fee_recipient: value.beneficiary, From aa93e7f8a75a22e8ccdc061086b98d7ef1393ae9 Mon Sep 17 00:00:00 2001 From: Panagiotis Ganelis <50522617+PanGan21@users.noreply.github.com> Date: Fri, 29 Nov 2024 21:12:25 +0200 Subject: [PATCH 787/970] refactor: Make `BlockType` generic and relax implementations (#12955) --- crates/primitives/src/block.rs | 50 ++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index ae2af546c28..9e00a2e582c 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -10,7 +10,7 @@ use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; -use reth_primitives_traits::{BlockBody as _, InMemorySize, SignedTransaction}; +use reth_primitives_traits::{BlockBody as _, InMemorySize, SignedTransaction, Transaction}; use serde::{Deserialize, Serialize}; /// Ethereum full block. 
@@ -601,9 +601,9 @@ impl<'a> arbitrary::Arbitrary<'a> for SealedBlockWithSenders { Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable, )] #[rlp(trailing)] -pub struct BlockBody { +pub struct BlockBody { /// Transactions in the block - pub transactions: Vec, + pub transactions: Vec, /// Uncle headers for the given block pub ommers: Vec
, /// Withdrawals in the block. @@ -616,6 +616,22 @@ impl BlockBody { Block { header, body: self } } + /// Returns an iterator over all blob versioned hashes from the block body. + #[inline] + pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { + self.blob_transactions_iter() + .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) + .flatten() + } + + /// Returns all blob versioned hashes from the block body. + #[inline] + pub fn blob_versioned_hashes(&self) -> Vec<&B256> { + self.blob_versioned_hashes_iter().collect() + } +} + +impl BlockBody { /// Calculate the ommers root for the block body. pub fn calculate_ommers_root(&self) -> B256 { crate::proofs::calculate_ommers_root(&self.ommers) @@ -626,7 +642,9 @@ impl BlockBody { pub fn calculate_withdrawals_root(&self) -> Option { self.withdrawals.as_ref().map(|w| crate::proofs::calculate_withdrawals_root(w)) } +} +impl BlockBody { /// Returns whether or not the block body contains any blob transactions. #[inline] pub fn has_blob_transactions(&self) -> bool { @@ -641,37 +659,23 @@ impl BlockBody { /// Returns an iterator over all blob transactions of the block #[inline] - pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { + pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { self.transactions.iter().filter(|tx| tx.is_eip4844()) } /// Returns only the blob transactions, if any, from the block body. #[inline] - pub fn blob_transactions(&self) -> Vec<&TransactionSigned> { + pub fn blob_transactions(&self) -> Vec<&T> { self.blob_transactions_iter().collect() } - - /// Returns an iterator over all blob versioned hashes from the block body. - #[inline] - pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { - self.blob_transactions_iter() - .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) - .flatten() - } - - /// Returns all blob versioned hashes from the block body. 
- #[inline] - pub fn blob_versioned_hashes(&self) -> Vec<&B256> { - self.blob_versioned_hashes_iter().collect() - } } -impl InMemorySize for BlockBody { +impl InMemorySize for BlockBody { /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. #[inline] fn size(&self) -> usize { - self.transactions.iter().map(TransactionSigned::size).sum::() + - self.transactions.capacity() * core::mem::size_of::() + + self.transactions.iter().map(T::size).sum::() + + self.transactions.capacity() * core::mem::size_of::() + self.ommers.iter().map(Header::size).sum::() + self.ommers.capacity() * core::mem::size_of::
() + self.withdrawals @@ -1210,7 +1214,7 @@ mod tests { #[test] fn empty_block_rlp() { - let body = BlockBody::default(); + let body: BlockBody = BlockBody::default(); let mut buf = Vec::new(); body.encode(&mut buf); let decoded = BlockBody::decode(&mut buf.as_slice()).unwrap(); From 55ddaab1e447ba46e8bfe82bde59d5449bd9c38b Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Sat, 30 Nov 2024 04:10:56 +0700 Subject: [PATCH 788/970] feat: make generic data primitives EvmEnvProvider (#12981) --- crates/evm/src/provider.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index 0d4f45c4d9d..5f86eb74dd4 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -3,6 +3,7 @@ use crate::ConfigureEvmEnv; use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; +use reth_primitives::NodePrimitives; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; @@ -12,7 +13,7 @@ use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; /// This type is mainly used to provide required data to configure the EVM environment that is /// usually stored on disk. #[auto_impl::auto_impl(&, Arc)] -pub trait EvmEnvProvider: Send + Sync { +pub trait EvmEnvProvider: Send + Sync { /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given /// [BlockHashOrNumber]. fn fill_env_at( @@ -26,10 +27,10 @@ pub trait EvmEnvProvider: Send + Sync { EvmConfig: ConfigureEvmEnv
; /// Fills the default [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the - /// given [Header]. + /// given block header. fn env_with_header( &self, - header: &Header, + header: &N::BlockHeader, evm_config: EvmConfig, ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where @@ -42,12 +43,12 @@ pub trait EvmEnvProvider: Send + Sync { } /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given - /// [Header]. + /// block header. fn fill_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &N::BlockHeader, evm_config: EvmConfig, ) -> ProviderResult<()> where @@ -64,11 +65,11 @@ pub trait EvmEnvProvider: Send + Sync { where EvmConfig: ConfigureEvmEnv
; - /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given [Header]. + /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given block header. fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &N::BlockHeader, evm_config: EvmConfig, ) -> ProviderResult<()> where From 5d7115035585395d77934f0bc4bac281daf47dff Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 30 Nov 2024 02:26:36 +0400 Subject: [PATCH 789/970] feat: consensus trait generic over NodePrimitives (#13026) --- Cargo.lock | 1 + .../src/commands/debug_cmd/build_block.rs | 4 +- .../src/commands/debug_cmd/replay_engine.rs | 4 +- crates/blockchain-tree/src/chain.rs | 2 +- crates/blockchain-tree/src/externals.rs | 6 +- .../consensus/beacon/src/engine/test_utils.rs | 12 ++-- crates/consensus/consensus/src/lib.rs | 58 +++++++++++++------ crates/consensus/consensus/src/noop.rs | 10 ++-- crates/consensus/consensus/src/test_utils.rs | 28 ++++----- crates/engine/local/src/service.rs | 4 +- crates/engine/service/src/service.rs | 6 +- crates/engine/tree/src/tree/mod.rs | 8 +-- crates/ethereum/consensus/src/lib.rs | 35 +++++++---- crates/ethereum/node/src/node.rs | 4 +- crates/node/api/src/node.rs | 4 +- crates/node/builder/src/components/builder.rs | 4 +- .../node/builder/src/components/consensus.rs | 12 +++- crates/node/builder/src/components/mod.rs | 8 +-- crates/node/builder/src/launch/common.rs | 9 --- crates/node/builder/src/launch/engine.rs | 8 ++- crates/node/builder/src/launch/mod.rs | 2 +- crates/optimism/consensus/Cargo.toml | 1 + crates/optimism/consensus/src/lib.rs | 23 +++++--- crates/optimism/node/src/node.rs | 4 +- crates/rpc/rpc-builder/src/lib.rs | 14 ++--- crates/rpc/rpc/src/validation.rs | 6 +- 26 files changed, 165 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c0ad6f5b230..e7859a6cc30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8283,6 +8283,7 @@ dependencies = [ 
"reth-consensus-common", "reth-optimism-chainspec", "reth-optimism-forks", + "reth-optimism-primitives", "reth-primitives", "reth-trie-common", "tracing", diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index dc00e07d883..e08c32b93a4 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -17,7 +17,7 @@ use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, FullConsensus}; use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; @@ -128,7 +128,7 @@ impl> Command { ) -> eyre::Result<()> { let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; - let consensus: Arc = + let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 04d3b5763ae..f0016a129bd 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -12,7 +12,7 @@ use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, Environ use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_db::DatabaseEnv; use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage}; use reth_fs_util as fs; @@ -92,7 +92,7 @@ impl> Command { let Environment { provider_factory, config, data_dir } = self.env.init::(AccessRights::RW)?; - let consensus: Arc = + let consensus: Arc = 
Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index ba4f91d9c79..a3cbd432de8 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -11,7 +11,7 @@ use reth_blockchain_tree_api::{ error::{BlockchainTreeError, InsertBlockErrorKind}, BlockAttachment, BlockValidationKind, }; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; +use reth_consensus::{ConsensusError, PostExecutionInput}; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{Chain, ExecutionOutcome}; diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 2a825921f89..9e72008e838 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -1,7 +1,7 @@ //! Blockchain tree externals. use alloy_primitives::{BlockHash, BlockNumber}; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_node_types::NodeTypesWithDB; @@ -28,7 +28,7 @@ pub struct TreeExternals { /// The provider factory, used to commit the canonical chain, or unwind it. pub(crate) provider_factory: ProviderFactory, /// The consensus engine. - pub(crate) consensus: Arc, + pub(crate) consensus: Arc, /// The executor factory to execute blocks with. pub(crate) executor_factory: E, } @@ -37,7 +37,7 @@ impl TreeExternals { /// Create new tree externals. 
pub fn new( provider_factory: ProviderFactory, - consensus: Arc, + consensus: Arc, executor_factory: E, ) -> Self { Self { provider_factory, consensus, executor_factory } diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 0ebef1efe6e..ae627cae696 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -13,7 +13,7 @@ use reth_blockchain_tree::{ }; use reth_chainspec::ChainSpec; use reth_config::config::StageConfig; -use reth_consensus::{test_utils::TestConsensus, Consensus}; +use reth_consensus::{test_utils::TestConsensus, FullConsensus}; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, @@ -332,7 +332,7 @@ where let provider_factory = create_test_provider_factory_with_chain_spec(self.base_config.chain_spec.clone()); - let consensus: Arc = match self.base_config.consensus { + let consensus: Arc = match self.base_config.consensus { TestConsensusConfig::Real => { Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) } @@ -374,13 +374,17 @@ where .into_task(); let body_downloader = BodiesDownloaderBuilder::default() - .build(client.clone(), consensus.clone(), provider_factory.clone()) + .build( + client.clone(), + consensus.clone().as_consensus(), + provider_factory.clone(), + ) .into_task(); Pipeline::::builder().add_stages(DefaultStages::new( provider_factory.clone(), tip_rx.clone(), - Arc::clone(&consensus), + consensus.clone().as_consensus(), header_downloader, body_downloader, executor_factory.clone(), diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 3ad53456cbd..ba1b1321e77 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -16,8 +16,8 @@ use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, 
BlockNumber, Bloom, B256, U256}; use reth_primitives::{ - BlockBody, BlockWithSenders, GotExpected, GotExpectedBoxed, InvalidTransactionError, Receipt, - SealedBlock, SealedHeader, + BlockBody, BlockWithSenders, EthPrimitives, GotExpected, GotExpectedBoxed, + InvalidTransactionError, NodePrimitives, Receipt, SealedBlock, SealedHeader, }; use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; @@ -28,7 +28,7 @@ pub mod noop; /// test helpers for mocking consensus pub mod test_utils; -/// Post execution input passed to [`Consensus::validate_block_post_execution`]. +/// Post execution input passed to [`FullConsensus::validate_block_post_execution`]. #[derive(Debug)] pub struct PostExecutionInput<'a, R = Receipt> { /// Receipts of the block. @@ -44,11 +44,28 @@ impl<'a, R> PostExecutionInput<'a, R> { } } -/// Consensus is a protocol that chooses canonical chain. +/// [`Consensus`] implementation which knows full node primitives and is able to validation block's +/// execution outcome. #[auto_impl::auto_impl(&, Arc)] -pub trait Consensus: - AsHeaderValidator + HeaderValidator + Debug + Send + Sync +pub trait FullConsensus: + AsConsensus { + /// Validate a block considering world state, i.e. things that can not be checked before + /// execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity". + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_, N::Receipt>, + ) -> Result<(), ConsensusError>; +} + +/// Consensus is a protocol that chooses canonical chain. +#[auto_impl::auto_impl(&, Arc)] +pub trait Consensus: AsHeaderValidator { /// Ensures that body field values match the header. 
fn validate_body_against_header( &self, @@ -67,18 +84,6 @@ pub trait Consensus: /// Note: validating blocks does not include other validations of the Consensus fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError>; - - /// Validate a block considering world state, i.e. things that can not be checked before - /// execution. - /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity". - /// - /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError>; } /// HeaderValidator is a protocol that validates headers and their relationships. @@ -162,6 +167,23 @@ impl, H> AsHeaderValidator for T { } } +/// Helper trait to cast `Arc` to `Arc` +pub trait AsConsensus: Consensus { + /// Converts the [`Arc`] of self to [`Arc`] of [`HeaderValidator`] + fn as_consensus<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a; +} + +impl, H, B> AsConsensus for T { + fn as_consensus<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a, + { + self + } +} + /// Consensus Errors #[derive(Debug, PartialEq, Eq, Clone, derive_more::Display, derive_more::Error)] pub enum ConsensusError { diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 6d12af08d51..c56e9867a25 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,6 +1,6 @@ -use crate::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; +use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; -use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, NodePrimitives, SealedBlock, SealedHeader}; /// A Consensus implementation that does nothing. 
#[derive(Debug, Copy, Clone, Default)] @@ -44,11 +44,13 @@ impl Consensus for NoopConsensus { ) -> Result<(), ConsensusError> { Ok(()) } +} +impl FullConsensus for NoopConsensus { fn validate_block_post_execution( &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, + _block: &BlockWithSenders, + _input: PostExecutionInput<'_, N::Receipt>, ) -> Result<(), ConsensusError> { Ok(()) } diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index ba683dd255f..082c8ca8bb5 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,7 +1,7 @@ -use crate::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; +use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; use core::sync::atomic::{AtomicBool, Ordering}; -use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, NodePrimitives, SealedBlock, SealedHeader}; /// Consensus engine implementation for testing #[derive(Debug)] @@ -46,34 +46,36 @@ impl TestConsensus { } } -impl Consensus for TestConsensus { - fn validate_body_against_header( +impl FullConsensus for TestConsensus { + fn validate_block_post_execution( &self, - _body: &B, - _header: &SealedHeader, + _block: &BlockWithSenders, + _input: PostExecutionInput<'_, N::Receipt>, ) -> Result<(), ConsensusError> { - if self.fail_body_against_header() { + if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } +} - fn validate_block_pre_execution( +impl Consensus for TestConsensus { + fn validate_body_against_header( &self, - _block: &SealedBlock, + _body: &B, + _header: &SealedHeader, ) -> Result<(), ConsensusError> { - if self.fail_validation() { + if self.fail_body_against_header() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } - fn validate_block_post_execution( + fn 
validate_block_pre_execution( &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, + _block: &SealedBlock, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 79d9d844d73..b06750d66df 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -18,7 +18,7 @@ use crate::miner::{LocalMiner, MiningMode}; use futures_util::{Stream, StreamExt}; use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_service::service::EngineMessageStream; use reth_engine_tree::{ @@ -64,7 +64,7 @@ where /// Constructor for [`LocalEngineService`]. #[allow(clippy::too_many_arguments)] pub fn new( - consensus: Arc, + consensus: Arc, executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, blockchain_db: BlockchainProvider2, diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 8bb26d69140..d3c07c63871 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -2,7 +2,7 @@ use futures::{Stream, StreamExt}; use pin_project::pin_project; use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_tree::{ backfill::PipelineSync, @@ -65,7 +65,7 @@ where /// Constructor for `EngineService`. 
#[allow(clippy::too_many_arguments)] pub fn new( - consensus: Arc, + consensus: Arc, executor_factory: E, chain_spec: Arc, client: Client, @@ -87,7 +87,7 @@ where let engine_kind = if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; - let downloader = BasicBlockDownloader::new(client, consensus.clone()); + let downloader = BasicBlockDownloader::new(client, consensus.clone().as_consensus()); let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 7b8ec883892..96c03208266 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -25,7 +25,7 @@ use reth_blockchain_tree::{ use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; -use reth_consensus::{Consensus, PostExecutionInput}; +use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; use reth_engine_primitives::{ BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, EngineValidator, ForkchoiceStateTracker, OnForkChoiceUpdated, @@ -473,7 +473,7 @@ where { provider: P, executor_provider: E, - consensus: Arc, + consensus: Arc, payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. 
state: EngineApiTreeState, @@ -557,7 +557,7 @@ where pub fn new( provider: P, executor_provider: E, - consensus: Arc, + consensus: Arc, payload_validator: V, outgoing: UnboundedSender, state: EngineApiTreeState, @@ -606,7 +606,7 @@ where pub fn spawn_new( provider: P, executor_provider: E, - consensus: Arc, + consensus: Arc, payload_validator: V, persistence: PersistenceHandle, payload_builder: PayloadBuilderHandle, diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 96dfbae3f16..ba737e56728 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -11,14 +11,18 @@ use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; +use reth_consensus::{ + Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput, +}; use reth_consensus_common::validation::{ validate_4844_header_standalone, validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, validate_against_parent_timestamp, validate_block_pre_execution, validate_body_against_header, validate_header_base_fee, validate_header_extradata, validate_header_gas, }; -use reth_primitives::{BlockBody, BlockWithSenders, SealedBlock, SealedHeader}; +use reth_primitives::{ + Block, BlockBody, BlockWithSenders, NodePrimitives, Receipt, SealedBlock, SealedHeader, +}; use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; use std::{fmt::Debug, sync::Arc, time::SystemTime}; @@ -90,6 +94,25 @@ impl EthBeaconConsensus } } +impl FullConsensus for EthBeaconConsensus +where + ChainSpec: Send + Sync + EthChainSpec + EthereumHardforks + Debug, + N: NodePrimitives< + BlockHeader = Header, + BlockBody = BlockBody, + Block = Block, + Receipt = Receipt, + >, +{ + fn validate_block_post_execution( + &self, + 
block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) + } +} + impl Consensus for EthBeaconConsensus { @@ -104,14 +127,6 @@ impl Consensu fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { validate_block_pre_execution(block, &self.chain_spec) } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) - } } impl HeaderValidator diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 9db0c44c6c6..ad673588bf9 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -327,9 +327,9 @@ pub struct EthereumConsensusBuilder { impl ConsensusBuilder for EthereumConsensusBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { - type Consensus = Arc; + type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index d640c0dbb0e..7778fea7b5e 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -4,7 +4,7 @@ use crate::ConfigureEvm; use alloy_consensus::Header; use alloy_rpc_types_engine::JwtSecret; use reth_beacon_consensus::BeaconConsensusEngineHandle; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; @@ -56,7 +56,7 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. 
- type Consensus: Consensus + Clone + Unpin + 'static; + type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; /// Network API. type Network: FullNetwork; diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 46b6824dba7..b265dc927e7 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -8,7 +8,7 @@ use crate::{ BuilderContext, ConfigureEvm, FullNodeTypes, }; use alloy_consensus::Header; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_node_api::{NodeTypes, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; @@ -378,7 +378,7 @@ where Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm
, Executor: BlockExecutorProvider::Primitives>, - Cons: Consensus + Clone + Unpin + 'static, + Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { type Components = Components; diff --git a/crates/node/builder/src/components/consensus.rs b/crates/node/builder/src/components/consensus.rs index 6c90bda5475..074080d337b 100644 --- a/crates/node/builder/src/components/consensus.rs +++ b/crates/node/builder/src/components/consensus.rs @@ -1,11 +1,16 @@ //! Consensus component for the node builder. +use reth_node_api::NodeTypes; + use crate::{BuilderContext, FullNodeTypes}; use std::future::Future; /// A type that knows how to build the consensus implementation. pub trait ConsensusBuilder: Send { /// The consensus implementation to build. - type Consensus: reth_consensus::Consensus + Clone + Unpin + 'static; + type Consensus: reth_consensus::FullConsensus<::Primitives> + + Clone + + Unpin + + 'static; /// Creates the consensus implementation. fn build_consensus( @@ -17,7 +22,10 @@ pub trait ConsensusBuilder: Send { impl ConsensusBuilder for F where Node: FullNodeTypes, - Consensus: reth_consensus::Consensus + Clone + Unpin + 'static, + Consensus: reth_consensus::FullConsensus<::Primitives> + + Clone + + Unpin + + 'static, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future> + Send, { diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index a3f3017463d..a7d15dd29df 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -23,7 +23,7 @@ pub use pool::*; use crate::{ConfigureEvm, FullNodeTypes}; use alloy_consensus::Header; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; @@ -47,7 +47,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the 
node. - type Consensus: Consensus + Clone + Unpin + 'static; + type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; /// Network API. type Network: FullNetwork; @@ -100,7 +100,7 @@ where Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm
, Executor: BlockExecutorProvider::Primitives>, - Cons: Consensus + Clone + Unpin + 'static, + Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { type Pool = Pool; type Evm = EVM; @@ -140,7 +140,7 @@ where Pool: TransactionPool, EVM: ConfigureEvm
, Executor: BlockExecutorProvider, - Cons: Consensus + Clone, + Cons: Clone, { fn clone(&self) -> Self { Self { diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 830909c8cc4..2d126266a25 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -13,7 +13,6 @@ use rayon::ThreadPoolBuilder; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; -use reth_consensus::Consensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitDatabaseError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; @@ -681,7 +680,6 @@ where let components = components_builder.build_components(&builder_ctx).await?; let blockchain_db = self.blockchain_db().clone(); - let consensus = Arc::new(components.consensus().clone()); let node_adapter = NodeAdapter { components, @@ -699,7 +697,6 @@ where }, node_adapter, head, - consensus, }; let ctx = LaunchContextWith { @@ -855,11 +852,6 @@ where Ok(None) } - /// Returns the configured `Consensus`. - pub fn consensus(&self) -> Arc { - self.right().consensus.clone() - } - /// Returns the metrics sender. 
pub fn sync_metrics_tx(&self) -> UnboundedSender { self.right().db_provider_container.metrics_sender.clone() @@ -1029,7 +1021,6 @@ where db_provider_container: WithMeteredProvider, node_adapter: NodeAdapter, head: Head, - consensus: Arc, } #[cfg(test)] diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index b1141314d10..264de07048a 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -175,13 +175,15 @@ where )); info!(target: "reth::cli", "StaticFileProducer initialized"); + let consensus = Arc::new(ctx.components().consensus().clone()); + // Configure the pipeline let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); let pipeline = build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), - ctx.consensus(), + consensus.clone(), ctx.provider_factory().clone(), ctx.task_executor(), ctx.sync_metrics_tx(), @@ -223,7 +225,7 @@ where let mut engine_service = if ctx.is_dev() { let eth_service = LocalEngineService::new( - ctx.consensus(), + consensus.clone(), ctx.components().block_executor().clone(), ctx.provider_factory().clone(), ctx.blockchain_db().clone(), @@ -242,7 +244,7 @@ where Either::Left(eth_service) } else { let eth_service = EngineService::new( - ctx.consensus(), + consensus.clone(), ctx.components().block_executor().clone(), ctx.chain_spec(), network_client.clone(), diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 9f2c027f76b..e23ce38da75 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -236,7 +236,7 @@ where let pipeline = crate::setup::build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), - ctx.consensus(), + consensus.clone(), ctx.provider_factory().clone(), ctx.task_executor(), ctx.sync_metrics_tx(), diff --git a/crates/optimism/consensus/Cargo.toml 
b/crates/optimism/consensus/Cargo.toml index 0dffceaddca..30f16e4eb22 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -22,6 +22,7 @@ reth-trie-common.workspace = true # op-reth reth-optimism-forks.workspace = true reth-optimism-chainspec.workspace = true +reth-optimism-primitives.workspace = true # ethereum alloy-primitives.workspace = true diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index cb357db924a..b50efd5f6f2 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -12,7 +12,9 @@ use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; -use reth_consensus::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; +use reth_consensus::{ + Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput, +}; use reth_consensus_common::validation::{ validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, validate_against_parent_timestamp, @@ -21,6 +23,7 @@ use reth_consensus_common::validation::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardforks; +use reth_optimism_primitives::OpPrimitives; use reth_primitives::{BlockBody, BlockWithSenders, GotExpected, SealedBlock, SealedHeader}; use std::{sync::Arc, time::SystemTime}; @@ -46,6 +49,16 @@ impl OpBeaconConsensus { } } +impl FullConsensus for OpBeaconConsensus { + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, input.receipts) + } +} + impl Consensus for OpBeaconConsensus { fn validate_body_against_header( &self, @@ -80,14 +93,6 @@ impl Consensus for OpBeaconConsensus { Ok(()) } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: 
PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts) - } } impl HeaderValidator for OpBeaconConsensus { diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 429cb9ae229..bdf8c3f58ee 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -569,9 +569,9 @@ pub struct OpConsensusBuilder; impl ConsensusBuilder for OpConsensusBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { - type Consensus = Arc; + type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 46b3888f05b..3817c4d3b37 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -49,7 +49,7 @@ //! CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, -//! Consensus: reth_consensus::Consensus + Clone + 'static, +//! Consensus: reth_consensus::FullConsensus + Clone + 'static, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -131,7 +131,7 @@ //! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, -//! Consensus: reth_consensus::Consensus + Clone + 'static, +//! Consensus: reth_consensus::FullConsensus + Clone + 'static, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -194,7 +194,7 @@ use jsonrpsee::{ Methods, RpcModule, }; use reth_chainspec::EthereumHardforks; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; @@ -266,7 +266,7 @@ pub async fn launch, block_executor: BlockExecutor, - consensus: Arc, + consensus: Arc, ) -> Result where Provider: FullRpcProvider @@ -641,7 +641,7 @@ where Receipt = reth_primitives::Receipt, >, >, - Consensus: reth_consensus::Consensus + Clone + 'static, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). 
@@ -1293,7 +1293,7 @@ where /// Instantiates `ValidationApi` pub fn validation_api(&self) -> ValidationApi where - Consensus: reth_consensus::Consensus + Clone + 'static, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { ValidationApi::new( self.provider.clone(), @@ -1324,7 +1324,7 @@ where Receipt = reth_primitives::Receipt, >, >, - Consensus: reth_consensus::Consensus + Clone + 'static, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures the auth module that includes the /// * `engine_` namespace diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 1885c8ad2e0..1aa502c1f10 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -10,7 +10,7 @@ use alloy_rpc_types_engine::{ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_consensus::{Consensus, PostExecutionInput}; +use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; use reth_evm::execute::{BlockExecutorProvider, Executor}; @@ -44,7 +44,7 @@ where /// Create a new instance of the [`ValidationApi`] pub fn new( provider: Provider, - consensus: Arc, + consensus: Arc, executor_provider: E, config: ValidationApiConfig, task_spawner: Box, @@ -475,7 +475,7 @@ pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, /// Consensus implementation. - consensus: Arc, + consensus: Arc, /// Execution payload validator. payload_validator: ExecutionPayloadValidator, /// Block executor factory. 
From 0ff2827a799a5a77c40b169bee07f9ca415b78d9 Mon Sep 17 00:00:00 2001 From: Hoa Nguyen Date: Sat, 30 Nov 2024 18:09:49 +0700 Subject: [PATCH 790/970] refactor: use is_none_or instead of map_or (#13035) --- crates/net/eth-wire/src/capability.rs | 2 +- crates/primitives-traits/src/account.rs | 2 +- crates/revm/src/batch.rs | 2 +- crates/static-file/types/src/lib.rs | 2 +- crates/tracing/src/formatter.rs | 2 +- crates/transaction-pool/src/maintain.rs | 2 +- crates/transaction-pool/src/pool/best.rs | 2 +- crates/trie/trie/src/trie_cursor/subnode.rs | 4 ++-- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 3d8c61800eb..1e1bb1b2012 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -335,7 +335,7 @@ pub fn shared_capability_offsets( // highest wins, others are ignored if shared_capabilities .get(&peer_capability.name) - .map_or(true, |v| peer_capability.version > v.version) + .is_none_or(|v| peer_capability.version > v.version) { shared_capabilities.insert( peer_capability.name.clone(), diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index c8504f3b63c..398294b09d8 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -49,7 +49,7 @@ impl Account { pub fn is_empty(&self) -> bool { self.nonce == 0 && self.balance.is_zero() && - self.bytecode_hash.map_or(true, |hash| hash == KECCAK_EMPTY) + self.bytecode_hash.is_none_or(|hash| hash == KECCAK_EMPTY) } /// Returns an account bytecode's hash. diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 01b0bd421d7..36708b5ff32 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -112,7 +112,7 @@ impl BlockBatchRecord { /// Returns the [`BundleRetention`] for the given block based on the configured prune modes. 
pub fn bundle_retention(&self, block_number: BlockNumber) -> BundleRetention { - if self.tip.map_or(true, |tip| { + if self.tip.is_none_or(|tip| { !self .prune_modes .account_history diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 4e9bf90f1c9..4fc9c545e7c 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -92,7 +92,7 @@ impl StaticFileTargets { ] .iter() .all(|(target_block_range, highest_static_fileted_block)| { - target_block_range.map_or(true, |target_block_range| { + target_block_range.is_none_or(|target_block_range| { *target_block_range.start() == highest_static_fileted_block.map_or(0, |highest_static_fileted_block| { highest_static_fileted_block + 1 diff --git a/crates/tracing/src/formatter.rs b/crates/tracing/src/formatter.rs index 1322377f1c9..202a92136d2 100644 --- a/crates/tracing/src/formatter.rs +++ b/crates/tracing/src/formatter.rs @@ -54,7 +54,7 @@ impl LogFormat { .unwrap_or_else(|_| // If `RUST_LOG_TARGET` is not set, show target in logs only if the max enabled // level is higher than INFO (DEBUG, TRACE) - filter.max_level_hint().map_or(true, |max_level| max_level > tracing::Level::INFO)); + filter.max_level_hint().is_none_or(|max_level| max_level > tracing::Level::INFO)); match self { Self::Json => { diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 6cf2faad9d6..cb75af7db17 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -461,7 +461,7 @@ impl FinalizedBlockTracker { let finalized = finalized_block?; self.last_finalized_block .replace(finalized) - .map_or(true, |last| last < finalized) + .is_none_or(|last| last < finalized) .then_some(finalized) } } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index a4c91aae726..ed94bc67623 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ 
b/crates/transaction-pool/src/pool/best.rs @@ -57,7 +57,7 @@ impl Iterator for BestTransactionsWithFees { if best.transaction.max_fee_per_gas() >= self.base_fee as u128 && best.transaction .max_fee_per_blob_gas() - .map_or(true, |fee| fee >= self.base_fee_per_blob_gas as u128) + .is_none_or(|fee| fee >= self.base_fee_per_blob_gas as u128) { return Some(best); } diff --git a/crates/trie/trie/src/trie_cursor/subnode.rs b/crates/trie/trie/src/trie_cursor/subnode.rs index c928028eb15..457c1ba4685 100644 --- a/crates/trie/trie/src/trie_cursor/subnode.rs +++ b/crates/trie/trie/src/trie_cursor/subnode.rs @@ -76,7 +76,7 @@ impl CursorSubNode { pub fn state_flag(&self) -> bool { self.node .as_ref() - .map_or(true, |node| self.nibble < 0 || node.state_mask.is_bit_set(self.nibble as u8)) + .is_none_or(|node| self.nibble < 0 || node.state_mask.is_bit_set(self.nibble as u8)) } /// Returns `true` if the tree flag is set for the current nibble. @@ -84,7 +84,7 @@ impl CursorSubNode { pub fn tree_flag(&self) -> bool { self.node .as_ref() - .map_or(true, |node| self.nibble < 0 || node.tree_mask.is_bit_set(self.nibble as u8)) + .is_none_or(|node| self.nibble < 0 || node.tree_mask.is_bit_set(self.nibble as u8)) } /// Returns `true` if the current nibble has a root hash. 
From 489d4e8595f4eeeee4d7d7861b6646d053afd4ac Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sat, 30 Nov 2024 13:37:05 +0100 Subject: [PATCH 791/970] feat(eecutor): send EvmState in state hook (#13025) --- crates/engine/tree/src/tree/mod.rs | 4 +- crates/ethereum/evm/src/execute.rs | 2 +- crates/evm/src/metrics.rs | 76 ++++++++++++------------------ crates/evm/src/system_calls/mod.rs | 22 ++++----- crates/optimism/evm/src/execute.rs | 2 +- 5 files changed, 46 insertions(+), 60 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 96c03208266..cdd066cdc24 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -48,7 +48,7 @@ use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; -use revm_primitives::ResultAndState; +use revm_primitives::EvmState; use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, @@ -2212,7 +2212,7 @@ where // TODO: create StateRootTask with the receiving end of a channel and // pass the sending end of the channel to the state hook. 
- let noop_state_hook = |_result_and_state: &ResultAndState| {}; + let noop_state_hook = |_state: &EvmState| {}; let output = self.metrics.executor.execute_metered( executor, (&block, U256::MAX).into(), diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index f2a3925572b..35e97bbaad0 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -193,7 +193,7 @@ where error: Box::new(new_err), } })?; - self.system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state.state); let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index f42b942afd9..1f21cb4d3a4 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -8,7 +8,7 @@ use metrics::{Counter, Gauge, Histogram}; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput}; use reth_metrics::Metrics; use reth_primitives::BlockWithSenders; -use revm_primitives::ResultAndState; +use revm_primitives::EvmState; use std::time::Instant; /// Wrapper struct that combines metrics and state hook @@ -18,13 +18,11 @@ struct MeteredStateHook { } impl OnStateHook for MeteredStateHook { - fn on_state(&mut self, result_and_state: &ResultAndState) { + fn on_state(&mut self, state: &EvmState) { // Update the metrics for the number of accounts, storage slots and bytecodes loaded - let accounts = result_and_state.state.keys().len(); - let storage_slots = - result_and_state.state.values().map(|account| account.storage.len()).sum::(); - let bytecodes = result_and_state - .state + let accounts = state.keys().len(); + let storage_slots = state.values().map(|account| account.storage.len()).sum::(); + let bytecodes = state .values() .filter(|account| !account.info.is_empty_code_hash()) .collect::>() @@ -35,7 +33,7 @@ impl OnStateHook for MeteredStateHook { self.metrics.bytecodes_loaded_histogram.record(bytecodes as f64); 
// Call the original state hook - self.inner_hook.on_state(result_and_state); + self.inner_hook.on_state(state); } } @@ -156,14 +154,13 @@ mod tests { use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; use revm::db::BundleState; use revm_primitives::{ - Account, AccountInfo, AccountStatus, Bytes, EvmState, EvmStorage, EvmStorageSlot, - ExecutionResult, Output, SuccessReason, B256, U256, + Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot, B256, U256, }; use std::sync::mpsc; /// A mock executor that simulates state changes struct MockExecutor { - result_and_state: ResultAndState, + state: EvmState, } impl Executor<()> for MockExecutor { @@ -206,7 +203,7 @@ mod tests { F: OnStateHook + 'static, { // Call hook with our mock state - hook.on_state(&self.result_and_state); + hook.on_state(&self.state); Ok(BlockExecutionOutput { state: BundleState::default(), @@ -223,7 +220,7 @@ mod tests { } impl OnStateHook for ChannelStateHook { - fn on_state(&mut self, _result_and_state: &ResultAndState) { + fn on_state(&mut self, _state: &EvmState) { let _ = self.sender.send(self.output); } } @@ -249,35 +246,26 @@ mod tests { let expected_output = 42; let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); - let result_and_state = ResultAndState { - result: ExecutionResult::Success { - reason: SuccessReason::Stop, - gas_used: 100, - output: Output::Call(Bytes::default()), - logs: vec![], - gas_refunded: 0, - }, - state: { - let mut state = EvmState::default(); - let storage = - EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); - state.insert( - Default::default(), - Account { - info: AccountInfo { - balance: U256::from(100), - nonce: 10, - code_hash: B256::random(), - code: Default::default(), - }, - storage, - status: AccountStatus::Loaded, + let state = { + let mut state = EvmState::default(); + let storage = + EvmStorage::from_iter([(U256::from(1), 
EvmStorageSlot::new(U256::from(2)))]); + state.insert( + Default::default(), + Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), }, - ); - state - }, + storage, + status: AccountStatus::Loaded, + }, + ); + state }; - let executor = MockExecutor { result_and_state }; + let executor = MockExecutor { state }; let _result = metrics.execute_metered(executor, input, state_hook).unwrap(); let snapshot = snapshotter.snapshot().into_vec(); @@ -311,11 +299,9 @@ mod tests { let expected_output = 42; let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); - let result_and_state = ResultAndState { - result: ExecutionResult::Revert { gas_used: 0, output: Default::default() }, - state: EvmState::default(), - }; - let executor = MockExecutor { result_and_state }; + let state = EvmState::default(); + + let executor = MockExecutor { state }; let _result = metrics.execute_metered(executor, input, state_hook).unwrap(); let actual_output = rx.try_recv().unwrap(); diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 47fd59d735f..fd9a1bf5f22 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -10,7 +10,7 @@ use reth_chainspec::EthereumHardforks; use reth_execution_errors::BlockExecutionError; use reth_primitives::Block; use revm::{Database, DatabaseCommit, Evm}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, B256}; mod eip2935; mod eip4788; @@ -19,15 +19,15 @@ mod eip7251; /// A hook that is called after each state change. pub trait OnStateHook { - /// Invoked with the result and state after each system call. - fn on_state(&mut self, state: &ResultAndState); + /// Invoked with the state after each system call. 
+ fn on_state(&mut self, state: &EvmState); } impl OnStateHook for F where - F: FnMut(&ResultAndState), + F: FnMut(&EvmState), { - fn on_state(&mut self, state: &ResultAndState) { + fn on_state(&mut self, state: &EvmState) { self(state) } } @@ -38,7 +38,7 @@ where pub struct NoopHook; impl OnStateHook for NoopHook { - fn on_state(&mut self, _state: &ResultAndState) {} + fn on_state(&mut self, _state: &EvmState) {} } /// An ephemeral helper type for executing system calls. @@ -182,7 +182,7 @@ where if let Some(res) = result_and_state { if let Some(ref mut hook) = self.hook { - hook.on_state(&res); + hook.on_state(&res.state); } evm.context.evm.db.commit(res.state); } @@ -237,7 +237,7 @@ where if let Some(res) = result_and_state { if let Some(ref mut hook) = self.hook { - hook.on_state(&res); + hook.on_state(&res.state); } evm.context.evm.db.commit(res.state); } @@ -276,7 +276,7 @@ where eip7002::transact_withdrawal_requests_contract_call(&self.evm_config.clone(), evm)?; if let Some(ref mut hook) = self.hook { - hook.on_state(&result_and_state); + hook.on_state(&result_and_state.state); } evm.context.evm.db.commit(result_and_state.state); @@ -314,7 +314,7 @@ where eip7251::transact_consolidation_requests_contract_call(&self.evm_config.clone(), evm)?; if let Some(ref mut hook) = self.hook { - hook.on_state(&result_and_state); + hook.on_state(&result_and_state.state); } evm.context.evm.db.commit(result_and_state.state); @@ -322,7 +322,7 @@ where } /// Delegate to stored `OnStateHook`, noop if hook is `None`. 
- pub fn on_state(&mut self, state: &ResultAndState) { + pub fn on_state(&mut self, state: &EvmState) { if let Some(ref mut hook) = &mut self.hook { hook.on_state(state); } diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 4b4bccae406..7ab9be728cc 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -221,7 +221,7 @@ where ?transaction, "Executed transaction" ); - self.system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state.state); let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); From e0cad9fdaeec9d92fabe3301e536dc24e79081c3 Mon Sep 17 00:00:00 2001 From: Hoa Nguyen Date: Sat, 30 Nov 2024 22:49:00 +0700 Subject: [PATCH 792/970] feat: generic data primitives PersistenceHandle (#12968) --- crates/engine/tree/src/persistence.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 950310b170f..587fcd82b95 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -2,7 +2,7 @@ use crate::metrics::PersistenceMetrics; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; -use reth_primitives::EthPrimitives; +use reth_primitives::{EthPrimitives, NodePrimitives}; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory, @@ -192,15 +192,16 @@ pub enum PersistenceAction { /// A handle to the persistence service #[derive(Debug, Clone)] -pub struct PersistenceHandle { +pub struct PersistenceHandle { /// The channel used to communicate with the persistence service sender: Sender, + _marker: std::marker::PhantomData, } -impl PersistenceHandle { +impl PersistenceHandle { /// Create a new [`PersistenceHandle`] from a 
[`Sender`]. pub const fn new(sender: Sender) -> Self { - Self { sender } + Self { sender, _marker: std::marker::PhantomData } } /// Create a new [`PersistenceHandle`], and spawn the persistence service. From 7353dc94a87c81995c4bb327a6a48f9222005cf7 Mon Sep 17 00:00:00 2001 From: Hoa Nguyen Date: Sat, 30 Nov 2024 22:49:54 +0700 Subject: [PATCH 793/970] feat: generic receipt ExecuteOutput (#12966) --- crates/ethereum/evm/src/execute.rs | 2 +- crates/evm/src/execute.rs | 12 +++++++----- crates/optimism/evm/src/execute.rs | 2 +- examples/custom-beacon-withdrawals/src/main.rs | 2 +- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 35e97bbaad0..fdbc9eefd3d 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -160,7 +160,7 @@ where &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result { + ) -> Result, Self::Error> { let env = self.evm_env_for_block(&block.header, total_difficulty); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index bc6e535b7b7..7d477d2195a 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -599,14 +599,14 @@ mod tests { _chain_spec: Arc, _evm_config: EvmConfig, state: State, - execute_transactions_result: ExecuteOutput, + execute_transactions_result: ExecuteOutput, apply_post_execution_changes_result: Requests, finish_result: BundleState, } #[derive(Clone)] struct TestExecutorStrategyFactory { - execute_transactions_result: ExecuteOutput, + execute_transactions_result: ExecuteOutput, apply_post_execution_changes_result: Requests, finish_result: BundleState, } @@ -659,7 +659,7 @@ mod tests { &mut self, _block: &BlockWithSenders, _total_difficulty: U256, - ) -> Result { + ) -> Result, Self::Error> { Ok(self.execute_transactions_result.clone()) } @@ -711,8 +711,10 @@ mod tests { fn test_strategy() { 
let expected_gas_used = 10; let expected_receipts = vec![Receipt::default()]; - let expected_execute_transactions_result = - ExecuteOutput { receipts: expected_receipts.clone(), gas_used: expected_gas_used }; + let expected_execute_transactions_result = ExecuteOutput:: { + receipts: expected_receipts.clone(), + gas_used: expected_gas_used, + }; let expected_apply_post_execution_changes_result = Requests::new(vec![bytes!("deadbeef")]); let expected_finish_result = BundleState::default(); diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 7ab9be728cc..a333978f096 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -158,7 +158,7 @@ where &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result { + ) -> Result, Self::Error> { let env = self.evm_env_for_block(&block.header, total_difficulty); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 3d756ae92c0..26109db1e03 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -161,7 +161,7 @@ where &mut self, _block: &BlockWithSenders, _total_difficulty: U256, - ) -> Result { + ) -> Result, Self::Error> { Ok(ExecuteOutput { receipts: vec![], gas_used: 0 }) } From 9b1d676438296e3a00fd6879903315aa1ba100b5 Mon Sep 17 00:00:00 2001 From: Hoa Nguyen Date: Sat, 30 Nov 2024 23:01:19 +0700 Subject: [PATCH 794/970] feat: move eip1186 conversion helpers to reth-trie-common proofs (#12985) Co-authored-by: Matthias Seitz --- Cargo.lock | 5 +-- crates/exex/types/Cargo.toml | 2 +- crates/exex/types/src/notification.rs | 9 ++--- crates/rpc/rpc-engine-api/Cargo.toml | 1 - crates/rpc/rpc-eth-api/Cargo.toml | 3 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 10 ++---- crates/rpc/rpc-eth-api/src/lib.rs | 11 +++--- crates/rpc/rpc-types-compat/Cargo.toml | 2 -- 
crates/rpc/rpc-types-compat/src/lib.rs | 2 -- crates/rpc/rpc-types-compat/src/proof.rs | 37 -------------------- crates/trie/common/Cargo.toml | 29 ++++++++++------ crates/trie/common/src/proofs.rs | 38 +++++++++++++++++++++ 12 files changed, 75 insertions(+), 74 deletions(-) delete mode 100644 crates/rpc/rpc-types-compat/src/proof.rs diff --git a/Cargo.lock b/Cargo.lock index e7859a6cc30..424ff503b0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9001,6 +9001,7 @@ dependencies = [ "reth-tasks", "reth-transaction-pool", "reth-trie", + "reth-trie-common", "revm", "revm-inspectors", "revm-primitives", @@ -9093,10 +9094,8 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types-engine", "alloy-rpc-types-eth", - "alloy-serde", "jsonrpsee-types", "reth-primitives", - "reth-trie-common", "serde", "serde_json", ] @@ -9403,6 +9402,8 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types-eth", + "alloy-serde", "alloy-trie", "arbitrary", "bincode", diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index 4d99bd7e657..b7e659d80a8 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-chain-state.workspace = true reth-execution-types.workspace = true -reth-primitives.workspace = true +reth-primitives = { workspace = true, optional = true } reth-primitives-traits.workspace = true # reth diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index 44eeb25084a..19e47c0a1da 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -73,12 +73,11 @@ impl From> for ExExNotification

/// Bincode-compatible [`ExExNotification`] serde implementation. #[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] pub(super) mod serde_bincode_compat { - use std::sync::Arc; - use reth_execution_types::serde_bincode_compat::Chain; use reth_primitives::{EthPrimitives, NodePrimitives}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; + use std::sync::Arc; /// Bincode-compatible [`super::ExExNotification`] serde implementation. /// @@ -171,16 +170,14 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { - use std::sync::Arc; - + use super::super::{serde_bincode_compat, ExExNotification}; use arbitrary::Arbitrary; use rand::Rng; use reth_execution_types::Chain; use reth_primitives::SealedBlockWithSenders; use serde::{Deserialize, Serialize}; use serde_with::serde_as; - - use super::super::{serde_bincode_compat, ExExNotification}; + use std::sync::Arc; #[test] fn test_exex_notification_bincode_roundtrip() { diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 4854ac44dc5..f9f05da33d3 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -54,7 +54,6 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-testing-utils.workspace = true - alloy-rlp.workspace = true assert_matches.workspace = true \ No newline at end of file diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index e4b1b28074f..859caa821cd 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -29,8 +29,9 @@ reth-execution-types.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-api.workspace = true -reth-trie.workspace = true reth-node-api.workspace = true +reth-trie.workspace = true +reth-trie-common 
= { workspace = true, features = ["eip1186"] } # ethereum alloy-serde.workspace = true diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 7ff9fa4deff..a8ca28feda0 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -1,6 +1,7 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. - +use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; +use crate::{EthApiTypes, FromEthApiError, RpcNodeCore, RpcNodeCoreExt}; use alloy_consensus::{constants::KECCAK_EMPTY, Header}; use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256, U256}; @@ -15,14 +16,9 @@ use reth_provider::{ StateProviderFactory, }; use reth_rpc_eth_types::{EthApiError, PendingBlockEnv, RpcInvalidTransactionError}; -use reth_rpc_types_compat::proof::from_primitive_account_proof; use reth_transaction_pool::TransactionPool; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; -use crate::{EthApiTypes, FromEthApiError, RpcNodeCore, RpcNodeCoreExt}; - -use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; - /// Helper methods for `eth_` methods relating to state (accounts). pub trait EthState: LoadState + SpawnBlocking { /// Returns the maximum number of blocks into the past for generating state proofs. 
@@ -122,7 +118,7 @@ pub trait EthState: LoadState + SpawnBlocking { let proof = state .proof(Default::default(), address, &storage_keys) .map_err(Self::Error::from_eth_err)?; - Ok(from_primitive_account_proof(proof, keys)) + Ok(proof.into_eip1186_response(keys)) }) .await }) diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index cb97a03e8b8..c4a255985cb 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -20,16 +20,15 @@ pub mod node; pub mod pubsub; pub mod types; -pub use reth_rpc_eth_types::error::{ - AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError, -}; -pub use reth_rpc_types_compat::TransactionCompat; - pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; pub use core::{EthApiServer, FullEthApiServer}; pub use filter::EthFilterApiServer; pub use node::{RpcNodeCore, RpcNodeCoreExt}; pub use pubsub::EthPubSubApiServer; +pub use reth_rpc_eth_types::error::{ + AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError, +}; +pub use reth_rpc_types_compat::TransactionCompat; pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; #[cfg(feature = "client")] @@ -38,3 +37,5 @@ pub use bundle::{EthBundleApiClient, EthCallBundleApiClient}; pub use core::EthApiClient; #[cfg(feature = "client")] pub use filter::EthFilterApiClient; + +use reth_trie_common as _; diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index 887986ada12..d3944356117 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -14,10 +14,8 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-trie-common.workspace = true # ethereum -alloy-serde.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-types-compat/src/lib.rs index 
c324eebc872..206d502f87d 100644 --- a/crates/rpc/rpc-types-compat/src/lib.rs +++ b/crates/rpc/rpc-types-compat/src/lib.rs @@ -12,7 +12,5 @@ pub mod block; pub mod engine; -pub mod proof; pub mod transaction; - pub use transaction::TransactionCompat; diff --git a/crates/rpc/rpc-types-compat/src/proof.rs b/crates/rpc/rpc-types-compat/src/proof.rs deleted file mode 100644 index b860bc3491d..00000000000 --- a/crates/rpc/rpc-types-compat/src/proof.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! Compatibility functions for rpc proof related types. - -use alloy_rpc_types_eth::{EIP1186AccountProofResponse, EIP1186StorageProof}; -use alloy_serde::JsonStorageKey; -use reth_trie_common::{AccountProof, StorageProof}; - -/// Creates a new rpc storage proof from a primitive storage proof type. -pub fn from_primitive_storage_proof( - proof: StorageProof, - slot: JsonStorageKey, -) -> EIP1186StorageProof { - EIP1186StorageProof { key: slot, value: proof.value, proof: proof.proof } -} - -/// Creates a new rpc account proof from a primitive account proof type. 
-pub fn from_primitive_account_proof( - proof: AccountProof, - slots: Vec, -) -> EIP1186AccountProofResponse { - let info = proof.info.unwrap_or_default(); - EIP1186AccountProofResponse { - address: proof.address, - balance: info.balance, - code_hash: info.get_bytecode_hash(), - nonce: info.nonce, - storage_hash: proof.storage_root, - account_proof: proof.proof, - storage_proof: proof - .storage_proofs - .into_iter() - .filter_map(|proof| { - let input_slot = slots.iter().find(|s| s.as_b256() == proof.key)?; - Some(from_primitive_storage_proof(proof, *input_slot)) - }) - .collect(), - } -} diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 8b0d930b0c2..9f81d020eb3 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -17,12 +17,14 @@ alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-trie.workspace = true alloy-consensus.workspace = true -alloy-genesis.workspace = true - reth-primitives-traits.workspace = true reth-codecs.workspace = true revm-primitives.workspace = true +alloy-genesis.workspace = true +alloy-rpc-types-eth = { workspace = true, optional = true } +alloy-serde = { workspace = true, optional = true } + bytes.workspace = true derive_more.workspace = true itertools.workspace = true @@ -54,6 +56,10 @@ serde_json.workspace = true serde_with.workspace = true [features] +eip1186 = [ + "dep:alloy-rpc-types-eth", + "dep:alloy-serde", +] serde = [ "dep:serde", "bytes/serde", @@ -61,6 +67,7 @@ serde = [ "alloy-primitives/serde", "alloy-consensus/serde", "alloy-trie/serde", + "alloy-rpc-types-eth/serde", "revm-primitives/serde", "reth-primitives-traits/serde", "reth-codecs/serde" @@ -79,14 +86,16 @@ test-utils = [ "reth-codecs/test-utils", ] arbitrary = [ - "alloy-trie/arbitrary", - "dep:arbitrary", - "reth-primitives-traits/arbitrary", - "alloy-consensus/arbitrary", - "alloy-primitives/arbitrary", - "nybbles/arbitrary", - "revm-primitives/arbitrary", - 
"reth-codecs/arbitrary", + "alloy-trie/arbitrary", + "dep:arbitrary", + "alloy-serde/arbitrary", + "reth-primitives-traits/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "nybbles/arbitrary", + "revm-primitives/arbitrary", + "reth-codecs/arbitrary", + "alloy-rpc-types-eth?/arbitrary" ] [[bench]] diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 78659116c3e..517f9fb7ca8 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -190,6 +190,33 @@ pub struct AccountProof { pub storage_proofs: Vec, } +#[cfg(feature = "eip1186")] +impl AccountProof { + /// Convert into an EIP-1186 account proof response + pub fn into_eip1186_response( + self, + slots: Vec, + ) -> alloy_rpc_types_eth::EIP1186AccountProofResponse { + let info = self.info.unwrap_or_default(); + alloy_rpc_types_eth::EIP1186AccountProofResponse { + address: self.address, + balance: info.balance, + code_hash: info.get_bytecode_hash(), + nonce: info.nonce, + storage_hash: self.storage_root, + account_proof: self.proof, + storage_proof: self + .storage_proofs + .into_iter() + .filter_map(|proof| { + let input_slot = slots.iter().find(|s| s.as_b256() == proof.key)?; + Some(proof.into_eip1186_proof(*input_slot)) + }) + .collect(), + } + } +} + impl Default for AccountProof { fn default() -> Self { Self::new(Address::default()) @@ -244,6 +271,17 @@ pub struct StorageProof { pub proof: Vec, } +impl StorageProof { + /// Convert into an EIP-1186 storage proof + #[cfg(feature = "eip1186")] + pub fn into_eip1186_proof( + self, + slot: alloy_serde::JsonStorageKey, + ) -> alloy_rpc_types_eth::EIP1186StorageProof { + alloy_rpc_types_eth::EIP1186StorageProof { key: slot, value: self.value, proof: self.proof } + } +} + impl StorageProof { /// Create new storage proof from the storage slot. 
pub fn new(key: B256) -> Self { From 00b34479478ef5a569849c9874fac80869cbebc7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 30 Nov 2024 17:22:21 +0100 Subject: [PATCH 795/970] cachore: disable alloy-chains default features (#13039) --- Cargo.toml | 2 +- crates/optimism/hardforks/Cargo.toml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2543684166f..c4b30ba9463 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -429,7 +429,7 @@ revm-primitives = { version = "14.0.0", features = [ ], default-features = false } # eth -alloy-chains = "0.1.32" +alloy-chains = { version = "0.1.32", default-features = false } alloy-dyn-abi = "0.8.11" alloy-primitives = { version = "0.8.11", default-features = false } alloy-rlp = "0.3.4" diff --git a/crates/optimism/hardforks/Cargo.toml b/crates/optimism/hardforks/Cargo.toml index c30566a54eb..67a04a8aa5f 100644 --- a/crates/optimism/hardforks/Cargo.toml +++ b/crates/optimism/hardforks/Cargo.toml @@ -30,7 +30,8 @@ default = ["std"] std = [ "alloy-primitives/std", "once_cell/std", - "serde?/std" + "serde?/std", + "alloy-chains/std" ] serde = [ "dep:serde", From 5a0b523189e258d0ba783425d5a8ebad642a0bee Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 30 Nov 2024 17:38:53 +0100 Subject: [PATCH 796/970] chore: remove rand feature (#13040) --- crates/ethereum-forks/Cargo.toml | 2 +- crates/net/discv4/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index 60572b45979..be062fb96d9 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # ethereum alloy-chains.workspace = true -alloy-primitives = { workspace = true, features = ["serde", "rand", "rlp"] } +alloy-primitives = { workspace = true, features = ["serde", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec", "derive"] } once_cell.workspace 
= true diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index f1c8410eeba..1030825a91d 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -19,7 +19,7 @@ reth-net-nat.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } # ethereum -alloy-primitives.workspace = true +alloy-primitives = { workspace = true, features = ["rand"] } alloy-rlp = { workspace = true, features = ["derive"] } discv5.workspace = true secp256k1 = { workspace = true, features = [ From ebd413fce23547876d8c9ec3c1874c6bac7fc895 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Sat, 30 Nov 2024 17:15:47 +0100 Subject: [PATCH 797/970] feat: make `PersistenceAction` generic over `NodePrimitives` (#13019) --- crates/chain-state/src/in_memory.rs | 14 +++++++------- crates/engine/tree/src/persistence.rs | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index f43aae562e0..349758725aa 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -12,8 +12,8 @@ use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, HeaderExt, NodePrimitives, Receipts, SealedBlock, SealedBlockFor, - SealedBlockWithSenders, SealedHeader, TransactionMeta, + BlockWithSenders, EthPrimitives, HeaderExt, NodePrimitives, Receipts, SealedBlock, + SealedBlockFor, SealedBlockWithSenders, SealedHeader, TransactionMeta, }; use reth_primitives_traits::{Block, BlockBody as _, SignedTransaction}; use reth_storage_api::StateProviderBox; @@ -51,7 +51,7 @@ pub(crate) struct InMemoryStateMetrics { /// This holds, because only lookup by number functions need to acquire the numbers lock first to /// get the block hash. 
#[derive(Debug, Default)] -pub(crate) struct InMemoryState { +pub(crate) struct InMemoryState { /// All canonical blocks that are not on disk yet. blocks: RwLock>>>, /// Mapping of block numbers to block hashes. @@ -166,7 +166,7 @@ type PendingBlockAndReceipts = /// all canonical blocks not on disk yet and keeps track of the block range that /// is in memory. #[derive(Debug, Clone)] -pub struct CanonicalInMemoryState { +pub struct CanonicalInMemoryState { pub(crate) inner: Arc>, } @@ -598,7 +598,7 @@ impl CanonicalInMemoryState { /// State after applying the given block, this block is part of the canonical chain that partially /// stored in memory and can be traced back to a canonical block on disk. #[derive(Debug, PartialEq, Eq, Clone)] -pub struct BlockState { +pub struct BlockState { /// The executed block that determines the state after this block has been executed. block: ExecutedBlock, /// The block's parent block if it exists. @@ -801,7 +801,7 @@ impl BlockState { /// Represents an executed block stored in-memory. #[derive(Clone, Debug, PartialEq, Eq, Default)] -pub struct ExecutedBlock { +pub struct ExecutedBlock { /// Sealed block the rest of fields refer to. pub block: Arc>, /// Block's senders. @@ -861,7 +861,7 @@ impl ExecutedBlock { /// Non-empty chain of blocks. #[derive(Debug)] -pub enum NewCanonicalChain { +pub enum NewCanonicalChain { /// A simple append to the current canonical head Commit { /// all blocks that lead back to the canonical head diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 587fcd82b95..dcdeee67448 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -169,13 +169,13 @@ pub enum PersistenceError { /// A signal to the persistence service that part of the tree state can be persisted. #[derive(Debug)] -pub enum PersistenceAction { +pub enum PersistenceAction { /// The section of tree state that should be persisted. 
These blocks are expected in order of /// increasing block number. /// /// First, header, transaction, and receipt-related data should be written to static files. /// Then the execution history-related data will be written to the database. - SaveBlocks(Vec, oneshot::Sender>), + SaveBlocks(Vec>, oneshot::Sender>), /// Removes block data above the given block number from the database. /// From 46346251818d6bd6cf5984d8d425807e1bc93ed3 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 30 Nov 2024 21:16:06 +0400 Subject: [PATCH 798/970] feat: make ommers table generic over header (#13038) --- .../cli/commands/src/test_vectors/tables.rs | 2 +- crates/optimism/storage/src/lib.rs | 4 +- crates/storage/db-api/src/models/blocks.rs | 38 +++++++++++++++++-- crates/storage/db-api/src/models/mod.rs | 12 +++--- crates/storage/db/src/tables/mod.rs | 5 ++- 5 files changed, 45 insertions(+), 16 deletions(-) diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index fd7d3b3799d..acb811b75df 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -72,7 +72,7 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { (HeaderNumbers, PER_TABLE, TABLE), (Headers

, PER_TABLE, TABLE), (BlockBodyIndices, PER_TABLE, TABLE), - (BlockOmmers, 100, TABLE), + (BlockOmmers
, 100, TABLE), (TransactionHashNumbers, PER_TABLE, TABLE), (Transactions, 100, TABLE), (PlainStorageState, PER_TABLE, DUPSORT), diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index 391f26093ba..0db8f4e20a9 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -13,7 +13,7 @@ mod tests { use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; use reth_db_api::models::{ - CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, + CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockWithdrawals, }; use reth_primitives::{Account, Receipt}; @@ -43,7 +43,6 @@ mod tests { assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); @@ -67,7 +66,6 @@ mod tests { validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); } diff --git a/crates/storage/db-api/src/models/blocks.rs b/crates/storage/db-api/src/models/blocks.rs index 0145ceb52b5..7c4b37b254d 100644 --- a/crates/storage/db-api/src/models/blocks.rs +++ b/crates/storage/db-api/src/models/blocks.rs @@ -8,12 +8,30 @@ use serde::{Deserialize, Serialize}; /// The storage representation of a block's ommers. 
/// /// It is stored as the headers of the block's uncles. -#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize, Compact)] +#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] -pub struct StoredBlockOmmers { +pub struct StoredBlockOmmers { /// The block headers of this block's uncles. - pub ommers: Vec
, + pub ommers: Vec, +} + +impl Compact for StoredBlockOmmers { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let mut buffer = bytes::BytesMut::new(); + self.ommers.to_compact(&mut buffer); + let total_length = buffer.len(); + buf.put(buffer); + total_length + } + + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { + let (ommers, new_buf) = Vec::from_compact(buf, buf.len()); + (Self { ommers }, new_buf) + } } /// Hash of the block header. @@ -31,4 +49,18 @@ mod tests { ommer.ommers.push(Header::default()); assert_eq!(ommer.clone(), StoredBlockOmmers::decompress(&ommer.compress()).unwrap()); } + + #[test] + fn fuzz_stored_block_ommers() { + fuzz_test_stored_block_ommers(StoredBlockOmmers::default()) + } + + #[test_fuzz::test_fuzz] + fn fuzz_test_stored_block_ommers(obj: StoredBlockOmmers) { + use reth_codecs::Compact; + let mut buf = vec![]; + let len = obj.to_compact(&mut buf); + let (same_obj, _) = StoredBlockOmmers::from_compact(buf.as_ref(), len); + assert_eq!(obj, same_obj); + } } diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 5d18711922e..614dc598bdb 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -189,9 +189,9 @@ impl Decode for ClientVersion { /// Implements compression for Compact type. macro_rules! impl_compression_for_compact { - ($($name:tt),+) => { + ($($name:ident$(<$($generic:ident),*>)?),+) => { $( - impl Compress for $name { + impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? Compress for $name$(<$($generic),*>)? { type Compressed = Vec; fn compress_to_buf>(self, buf: &mut B) { @@ -199,8 +199,8 @@ macro_rules! impl_compression_for_compact { } } - impl Decompress for $name { - fn decompress(value: &[u8]) -> Result<$name, $crate::DatabaseError> { + impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? Decompress for $name$(<$($generic),*>)? 
{ + fn decompress(value: &[u8]) -> Result<$name$(<$($generic),*>)?, $crate::DatabaseError> { let (obj, _) = Compact::from_compact(value, value.len()); Ok(obj) } @@ -222,7 +222,7 @@ impl_compression_for_compact!( StoredNibblesSubKey, StorageTrieEntry, StoredBlockBodyIndices, - StoredBlockOmmers, + StoredBlockOmmers, StoredBlockWithdrawals, Bytecode, AccountBeforeTx, @@ -339,7 +339,6 @@ mod tests { assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); @@ -360,7 +359,6 @@ mod tests { validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); } diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index a1fea62f0d8..940bb3aa259 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -139,6 +139,7 @@ macro_rules! tables { impl$(<$($generic),*>)? reth_db_api::table::Table for $name$(<$($generic),*>)? where $value: reth_db_api::table::Value + 'static + $($(,$generic: Send + Sync)*)? { const NAME: &'static str = table_names::$name; const DUPSORT: bool = tables!(@bool $($subkey)?); @@ -314,9 +315,9 @@ tables! { } /// Stores the uncles/ommers of the block. 
- table BlockOmmers { + table BlockOmmers { type Key = BlockNumber; - type Value = StoredBlockOmmers; + type Value = StoredBlockOmmers; } /// Stores the block withdrawals. From 3dc6f506b099056bc844f2ea824be038998cd131 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 30 Nov 2024 18:40:26 +0100 Subject: [PATCH 799/970] chore: disable alloy-rlp default features (#13042) --- Cargo.toml | 2 +- crates/chainspec/Cargo.toml | 3 ++- crates/ethereum-forks/Cargo.toml | 3 ++- crates/evm/execution-errors/Cargo.toml | 3 ++- crates/optimism/primitives/Cargo.toml | 15 ++++++++------- crates/primitives-traits/Cargo.toml | 3 ++- crates/primitives/Cargo.toml | 3 ++- crates/storage/errors/Cargo.toml | 3 ++- 8 files changed, 21 insertions(+), 14 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c4b30ba9463..0f6193812c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -432,7 +432,7 @@ revm-primitives = { version = "14.0.0", features = [ alloy-chains = { version = "0.1.32", default-features = false } alloy-dyn-abi = "0.8.11" alloy-primitives = { version = "0.8.11", default-features = false } -alloy-rlp = "0.3.4" +alloy-rlp = { version = "0.3.4", default-features = false } alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index 5bac582cd8b..58b2f62b1a6 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -47,7 +47,8 @@ std = [ "alloy-trie/std", "reth-primitives-traits/std", "alloy-consensus/std", - "once_cell/std" + "once_cell/std", + "alloy-rlp/std" ] arbitrary = [ "alloy-chains/arbitrary", diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index be062fb96d9..1a08498633c 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -60,6 +60,7 @@ std = [ "rustc-hash/std", "alloy-consensus/std", "once_cell/std", - "serde?/std" + "serde?/std", + "alloy-rlp/std" ] rustc-hash = ["dep:rustc-hash"] 
diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index 721c8055110..3368eb06503 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -30,5 +30,6 @@ std = [ "reth-consensus/std", "alloy-eips/std", "alloy-primitives/std", - "revm-primitives/std" + "revm-primitives/std", + "alloy-rlp/std" ] diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index abd27300fa5..92e02f1d2dd 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -44,13 +44,14 @@ arbitrary.workspace = true [features] default = ["std", "reth-codec"] std = [ - "reth-primitives-traits/std", - "reth-primitives/std", - "reth-codecs/std", - "alloy-consensus/std", - "alloy-eips/std", - "alloy-primitives/std", - "serde/std", + "reth-primitives-traits/std", + "reth-primitives/std", + "reth-codecs/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "serde/std", + "alloy-rlp/std" ] reth-codec = [ "dep:reth-codecs", diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index b625dfcd017..d56fd5bc0f2 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -62,7 +62,8 @@ std = [ "alloy-primitives/std", "revm-primitives/std", "serde?/std", - "serde_with?/std" + "serde_with?/std", + "alloy-rlp/std" ] test-utils = [ "arbitrary", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 50f89dcf698..7a38e79b1c1 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -104,7 +104,8 @@ std = [ "secp256k1?/std", "serde/std", "alloy-trie/std", - "serde_with?/std" + "serde_with?/std", + "alloy-rlp/std" ] reth-codec = [ "dep:reth-codecs", diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index ecefa5f6aca..9a31177662f 100644 --- a/crates/storage/errors/Cargo.toml +++ 
b/crates/storage/errors/Cargo.toml @@ -28,5 +28,6 @@ default = ["std"] std = [ "reth-primitives/std", "alloy-eips/std", - "alloy-primitives/std" + "alloy-primitives/std", + "alloy-rlp/std" ] From 890f082453490828258dbb9d84e131f55f65cb25 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Sat, 30 Nov 2024 12:27:20 -0500 Subject: [PATCH 800/970] feat(rpc): create revm env on demand (#13017) Co-authored-by: dkathiriya Co-authored-by: Matthias Seitz --- Cargo.lock | 1 - book/cli/reth/node.md | 4 +- crates/node/core/src/args/rpc_state_cache.rs | 10 +- crates/rpc/rpc-builder/src/config.rs | 2 +- crates/rpc/rpc-builder/src/eth.rs | 7 +- crates/rpc/rpc-builder/tests/it/http.rs | 14 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 17 ++- crates/rpc/rpc-eth-types/Cargo.toml | 1 - crates/rpc/rpc-eth-types/src/cache/config.rs | 8 +- crates/rpc/rpc-eth-types/src/cache/mod.rs | 136 +++++++----------- crates/rpc/rpc-server-types/src/constants.rs | 4 +- crates/rpc/rpc/src/eth/core.rs | 2 +- crates/rpc/rpc/src/eth/helpers/state.rs | 6 +- crates/rpc/rpc/src/eth/helpers/transaction.rs | 2 +- 14 files changed, 91 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 424ff503b0f..4da864b1bea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9028,7 +9028,6 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-errors", - "reth-evm", "reth-execution-types", "reth-metrics", "reth-primitives", diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index d1bca0a43b2..d8b66588056 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -381,8 +381,8 @@ RPC State Cache: [default: 2000] - --rpc-cache.max-envs - Max number of bytes for cached env data + --rpc-cache.max-envs + Max number of headers in cache [default: 1000] diff --git a/crates/node/core/src/args/rpc_state_cache.rs b/crates/node/core/src/args/rpc_state_cache.rs index 9169d40b317..b140d47b5fe 100644 --- 
a/crates/node/core/src/args/rpc_state_cache.rs +++ b/crates/node/core/src/args/rpc_state_cache.rs @@ -1,6 +1,6 @@ use clap::Args; use reth_rpc_server_types::constants::cache::{ - DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN, + DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_HEADER_CACHE_MAX_LEN, DEFAULT_RECEIPT_CACHE_MAX_LEN, }; @@ -22,12 +22,12 @@ pub struct RpcStateCacheArgs { )] pub max_receipts: u32, - /// Max number of bytes for cached env data. + /// Max number of headers in cache. #[arg( long = "rpc-cache.max-envs", - default_value_t = DEFAULT_ENV_CACHE_MAX_LEN, + default_value_t = DEFAULT_HEADER_CACHE_MAX_LEN, )] - pub max_envs: u32, + pub max_headers: u32, /// Max number of concurrent database requests. #[arg( @@ -42,7 +42,7 @@ impl Default for RpcStateCacheArgs { Self { max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN, max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, - max_envs: DEFAULT_ENV_CACHE_MAX_LEN, + max_headers: DEFAULT_HEADER_CACHE_MAX_LEN, max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS, } } diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index daff81fa2ae..967f5840c01 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -113,7 +113,7 @@ impl RethRpcServerConfig for RpcServerArgs { EthStateCacheConfig { max_blocks: self.rpc_state_cache.max_blocks, max_receipts: self.rpc_state_cache.max_receipts, - max_envs: self.rpc_state_cache.max_envs, + max_headers: self.rpc_state_cache.max_headers, max_concurrent_db_requests: self.rpc_state_cache.max_concurrent_db_requests, } } diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 59b3ef870fe..2a781fc0859 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -65,12 +65,7 @@ where EvmConfig: ConfigureEvm
, Tasks: TaskSpawner + Clone + 'static, { - let cache = EthStateCache::spawn_with( - provider.clone(), - config.cache, - executor.clone(), - evm_config.clone(), - ); + let cache = EthStateCache::spawn_with(provider.clone(), config.cache, executor.clone()); let new_canonical_blocks = events.canonical_state_stream(); let c = cache.clone(); diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 8393d9427a6..a8393b0a92e 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -259,7 +259,7 @@ where Some(block_number.into()), ) .await - .unwrap(); + .unwrap_err(); EthApiClient::::estimate_gas( client, call_request.clone(), @@ -267,7 +267,7 @@ where None, ) .await - .unwrap(); + .unwrap_err(); EthApiClient::::call( client, call_request.clone(), @@ -276,7 +276,7 @@ where None, ) .await - .unwrap(); + .unwrap_err(); EthApiClient::::syncing(client).await.unwrap(); EthApiClient::::send_transaction( client, @@ -368,13 +368,15 @@ where .unwrap_err(); TraceApiClient::trace_call_many(client, vec![], Some(BlockNumberOrTag::Latest.into())) .await - .unwrap(); + .unwrap_err(); TraceApiClient::replay_transaction(client, B256::default(), HashSet::default()) .await .err() .unwrap(); - TraceApiClient::trace_block(client, block_id).await.unwrap(); - TraceApiClient::replay_block_transactions(client, block_id, HashSet::default()).await.unwrap(); + TraceApiClient::trace_block(client, block_id).await.unwrap_err(); + TraceApiClient::replay_block_transactions(client, block_id, HashSet::default()) + .await + .unwrap_err(); TraceApiClient::trace_filter(client, trace_filter).await.unwrap(); } diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index a8ca28feda0..50ff1b557b5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -12,8 +12,8 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use 
reth_errors::RethError; use reth_evm::ConfigureEvmEnv; use reth_provider::{ - BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox, - StateProviderFactory, + BlockIdReader, BlockNumReader, ChainSpecProvider, EvmEnvProvider as _, StateProvider, + StateProviderBox, StateProviderFactory, }; use reth_rpc_eth_types::{EthApiError, PendingBlockEnv, RpcInvalidTransactionError}; use reth_transaction_pool::TransactionPool; @@ -229,12 +229,15 @@ pub trait LoadState: .block_hash_for_id(at) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(at))?; - let (cfg, env) = self - .cache() - .get_evm_env(block_hash) - .await + + let header = + self.cache().get_header(block_hash).await.map_err(Self::Error::from_eth_err)?; + let evm_config = self.evm_config().clone(); + let (cfg, block_env) = self + .provider() + .env_with_header(&header, evm_config) .map_err(Self::Error::from_eth_err)?; - Ok((cfg, env, block_hash.into())) + Ok((cfg, block_env, block_hash.into())) } } } diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 11bf6c6231d..72b153ab084 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -15,7 +15,6 @@ workspace = true reth-chainspec.workspace = true reth-chain-state.workspace = true reth-errors.workspace = true -reth-evm.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } diff --git a/crates/rpc/rpc-eth-types/src/cache/config.rs b/crates/rpc/rpc-eth-types/src/cache/config.rs index 64999bd6bf3..001a5b4d4d5 100644 --- a/crates/rpc/rpc-eth-types/src/cache/config.rs +++ b/crates/rpc/rpc-eth-types/src/cache/config.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use reth_rpc_server_types::constants::cache::{ - DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN, + DEFAULT_BLOCK_CACHE_MAX_LEN, 
DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_HEADER_CACHE_MAX_LEN, DEFAULT_RECEIPT_CACHE_MAX_LEN, }; @@ -19,10 +19,10 @@ pub struct EthStateCacheConfig { /// /// Default is 2000. pub max_receipts: u32, - /// Max number of bytes for cached env data. + /// Max number of headers in cache. /// /// Default is 1000. - pub max_envs: u32, + pub max_headers: u32, /// Max number of concurrent database requests. /// /// Default is 512. @@ -34,7 +34,7 @@ impl Default for EthStateCacheConfig { Self { max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN, max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, - max_envs: DEFAULT_ENV_CACHE_MAX_LEN, + max_headers: DEFAULT_HEADER_CACHE_MAX_LEN, max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS, } } diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 70c8b1a4f54..2dcabc0d184 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,17 +1,16 @@ //! Async caching support for eth RPC +use super::{EthStateCacheConfig, MultiConsumerLruCache}; use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::{future::Either, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; -use reth_evm::{provider::EvmEnvProvider, ConfigureEvm}; use reth_execution_types::Chain; use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSigned}; use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; -use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; use schnellru::{ByLength, Limiter}; use std::{ future::Future, @@ -25,8 +24,6 @@ use tokio::sync::{ }; use tokio_stream::wrappers::UnboundedReceiverStream; -use super::{EthStateCacheConfig, MultiConsumerLruCache}; - pub mod config; pub mod db; pub mod metrics; @@ -43,8 +40,8 @@ type BlockWithSendersResponseSender = /// The 
type that can send the response to the requested receipts of a block. type ReceiptsResponseSender = oneshot::Sender>>>>; -/// The type that can send the response to a requested env -type EnvResponseSender = oneshot::Sender>; +/// The type that can send the response to a requested header +type HeaderResponseSender = oneshot::Sender>; type BlockLruCache = MultiConsumerLruCache< B256, @@ -56,8 +53,7 @@ type BlockLruCache = MultiConsumerLruCache< type ReceiptsLruCache = MultiConsumerLruCache>, L, ReceiptsResponseSender>; -type EnvLruCache = - MultiConsumerLruCache; +type HeaderLruCache = MultiConsumerLruCache; /// Provides async access to cached eth data /// @@ -70,26 +66,24 @@ pub struct EthStateCache { impl EthStateCache { /// Creates and returns both [`EthStateCache`] frontend and the memory bound service. - fn create( + fn create( provider: Provider, action_task_spawner: Tasks, - evm_config: EvmConfig, max_blocks: u32, max_receipts: u32, - max_envs: u32, + max_headers: u32, max_concurrent_db_operations: usize, - ) -> (Self, EthStateCacheService) { + ) -> (Self, EthStateCacheService) { let (to_service, rx) = unbounded_channel(); let service = EthStateCacheService { provider, full_block_cache: BlockLruCache::new(max_blocks, "blocks"), receipts_cache: ReceiptsLruCache::new(max_receipts, "receipts"), - evm_env_cache: EnvLruCache::new(max_envs, "evm_env"), + headers_cache: HeaderLruCache::new(max_headers, "headers"), action_tx: to_service.clone(), action_rx: UnboundedReceiverStream::new(rx), action_task_spawner, rate_limiter: Arc::new(Semaphore::new(max_concurrent_db_operations)), - evm_config, }; let cache = Self { to_service }; (cache, service) @@ -99,52 +93,46 @@ impl EthStateCache { /// [`tokio::spawn`]. 
/// /// See also [`Self::spawn_with`] - pub fn spawn( - provider: Provider, - config: EthStateCacheConfig, - evm_config: EvmConfig, - ) -> Self + pub fn spawn(provider: Provider, config: EthStateCacheConfig) -> Self where Provider: StateProviderFactory + BlockReader - + EvmEnvProvider + Clone + Unpin + 'static, - EvmConfig: ConfigureEvm
, { - Self::spawn_with(provider, config, TokioTaskExecutor::default(), evm_config) + Self::spawn_with(provider, config, TokioTaskExecutor::default()) } /// Creates a new async LRU backed cache service task and spawns it to a new task via the given /// spawner. /// /// The cache is memory limited by the given max bytes values. - pub fn spawn_with( + pub fn spawn_with( provider: Provider, config: EthStateCacheConfig, executor: Tasks, - evm_config: EvmConfig, ) -> Self where Provider: StateProviderFactory + BlockReader - + EvmEnvProvider + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm
, { - let EthStateCacheConfig { max_blocks, max_receipts, max_envs, max_concurrent_db_requests } = - config; + let EthStateCacheConfig { + max_blocks, + max_receipts, + max_headers, + max_concurrent_db_requests, + } = config; let (this, service) = Self::create( provider, executor.clone(), - evm_config, max_blocks, max_receipts, - max_envs, + max_headers, max_concurrent_db_requests, ); executor.spawn_critical("eth state cache", Box::pin(service)); @@ -188,16 +176,12 @@ impl EthStateCache { Ok(block.zip(receipts)) } - /// Requests the evm env config for the block hash. + /// Requests the header for the given hash. /// - /// Returns an error if the corresponding header (required for populating the envs) was not - /// found. - pub async fn get_evm_env( - &self, - block_hash: B256, - ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> { + /// Returns an error if the header is not found. + pub async fn get_header(&self, block_hash: B256) -> ProviderResult
{ let (response_tx, rx) = oneshot::channel(); - let _ = self.to_service.send(CacheAction::GetEnv { block_hash, response_tx }); + let _ = self.to_service.send(CacheAction::GetHeader { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? } } @@ -222,14 +206,13 @@ impl EthStateCache { pub(crate) struct EthStateCacheService< Provider, Tasks, - EvmConfig, LimitBlocks = ByLength, LimitReceipts = ByLength, - LimitEnvs = ByLength, + LimitHeaders = ByLength, > where LimitBlocks: Limiter>, LimitReceipts: Limiter>>, - LimitEnvs: Limiter, + LimitHeaders: Limiter, { /// The type used to lookup data from disk provider: Provider, @@ -237,8 +220,11 @@ pub(crate) struct EthStateCacheService< full_block_cache: BlockLruCache, /// The LRU cache for full blocks grouped by their hash. receipts_cache: ReceiptsLruCache, - /// The LRU cache for revm environments - evm_env_cache: EnvLruCache, + /// The LRU cache for headers. + /// + /// Headers are cached because they are required to populate the environment for execution + /// (evm). + headers_cache: HeaderLruCache, /// Sender half of the action channel. action_tx: UnboundedSender, /// Receiver half of the action channel. @@ -247,15 +233,12 @@ pub(crate) struct EthStateCacheService< action_task_spawner: Tasks, /// Rate limiter rate_limiter: Arc, - /// The type that determines how to configure the EVM. - evm_config: EvmConfig, } -impl EthStateCacheService +impl EthStateCacheService where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + BlockReader + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm
, { fn on_new_block( &mut self, @@ -341,20 +324,18 @@ where fn update_cached_metrics(&self) { self.full_block_cache.update_cached_metrics(); self.receipts_cache.update_cached_metrics(); - self.evm_env_cache.update_cached_metrics(); + self.headers_cache.update_cached_metrics(); } } -impl Future for EthStateCacheService +impl Future for EthStateCacheService where Provider: StateProviderFactory + BlockReader - + EvmEnvProvider + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm
, { type Output = (); @@ -421,39 +402,30 @@ where })); } } - CacheAction::GetEnv { block_hash, response_tx } => { - // check if env data is cached - if let Some(env) = this.evm_env_cache.get(&block_hash).cloned() { - let _ = response_tx.send(Ok(env)); + CacheAction::GetHeader { block_hash, response_tx } => { + // check if the header is cached + if let Some(header) = this.headers_cache.get(&block_hash).cloned() { + let _ = response_tx.send(Ok(header)); continue } - // env data is not in the cache, request it if this is the first + // header is not in the cache, request it if this is the first // consumer - if this.evm_env_cache.queue(block_hash, response_tx) { + if this.headers_cache.queue(block_hash, response_tx) { let provider = this.provider.clone(); let action_tx = this.action_tx.clone(); let rate_limiter = this.rate_limiter.clone(); - let evm_config = this.evm_config.clone(); this.action_task_spawner.spawn_blocking(Box::pin(async move { // Acquire permit let _permit = rate_limiter.acquire().await; - let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id( - CfgEnv::default(), - SpecId::LATEST, - ); - let mut block_env = BlockEnv::default(); - let res = provider - .fill_env_at( - &mut cfg, - &mut block_env, - block_hash.into(), - evm_config, - ) - .map(|_| (cfg, block_env)); - let _ = action_tx.send(CacheAction::EnvResult { + let header = provider.header(&block_hash).and_then(|header| { + header.ok_or_else(|| { + ProviderError::HeaderNotFound(block_hash.into()) + }) + }); + let _ = action_tx.send(CacheAction::HeaderResult { block_hash, - res: Box::new(res), + res: Box::new(header), }); })); } @@ -472,18 +444,18 @@ where this.on_new_block(block_hash, Err(e)); } }, - CacheAction::EnvResult { block_hash, res } => { + CacheAction::HeaderResult { block_hash, res } => { let res = *res; - if let Some(queued) = this.evm_env_cache.remove(&block_hash) { + if let Some(queued) = this.headers_cache.remove(&block_hash) { // send the response to queued senders for tx in queued { 
let _ = tx.send(res.clone()); } } - // cache good env data + // cache good header if let Ok(data) = res { - this.evm_env_cache.insert(block_hash, data); + this.headers_cache.insert(block_hash, data); } } CacheAction::CacheNewCanonicalChain { chain_change } => { @@ -528,9 +500,9 @@ enum CacheAction { block_hash: B256, response_tx: BlockWithSendersResponseSender, }, - GetEnv { + GetHeader { block_hash: B256, - response_tx: EnvResponseSender, + response_tx: HeaderResponseSender, }, GetReceipts { block_hash: B256, @@ -544,9 +516,9 @@ enum CacheAction { block_hash: B256, res: ProviderResult>>>, }, - EnvResult { + HeaderResult { block_hash: B256, - res: Box>, + res: Box>, }, CacheNewCanonicalChain { chain_change: ChainChange, diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index 48019745a34..89b496da0fc 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -113,8 +113,8 @@ pub mod cache { /// Default cache size for the receipts cache: 2000 receipts. pub const DEFAULT_RECEIPT_CACHE_MAX_LEN: u32 = 2000; - /// Default cache size for the env cache: 1000 envs. - pub const DEFAULT_ENV_CACHE_MAX_LEN: u32 = 1000; + /// Default cache size for the header cache: 1000 headers. + pub const DEFAULT_HEADER_CACHE_MAX_LEN: u32 = 1000; /// Default number of concurrent database requests. 
pub const DEFAULT_CONCURRENT_DB_REQUESTS: usize = 512; diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index b6b37c9f393..86e0f9f383c 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -450,7 +450,7 @@ mod tests { provider: P, ) -> EthApi { let evm_config = EthEvmConfig::new(provider.chain_spec()); - let cache = EthStateCache::spawn(provider.clone(), Default::default(), evm_config.clone()); + let cache = EthStateCache::spawn(provider.clone(), Default::default()); let fee_history_cache = FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index a3e909cf6f6..1b9c5bffd21 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -52,8 +52,7 @@ mod tests { let pool = testing_pool(); let evm_config = EthEvmConfig::new(MAINNET.clone()); - let cache = - EthStateCache::spawn(NoopProvider::default(), Default::default(), evm_config.clone()); + let cache = EthStateCache::spawn(NoopProvider::default(), Default::default()); EthApi::new( NoopProvider::default(), pool, @@ -79,8 +78,7 @@ mod tests { let evm_config = EthEvmConfig::new(mock_provider.chain_spec()); mock_provider.extend_accounts(accounts); - let cache = - EthStateCache::spawn(mock_provider.clone(), Default::default(), evm_config.clone()); + let cache = EthStateCache::spawn(mock_provider.clone(), Default::default()); EthApi::new( mock_provider.clone(), pool, diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 8ac0785b262..7f2ca4f772a 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -57,7 +57,7 @@ mod tests { let pool = testing_pool(); let evm_config = EthEvmConfig::new(noop_provider.chain_spec()); - let cache = EthStateCache::spawn(noop_provider, Default::default(), 
evm_config.clone()); + let cache = EthStateCache::spawn(noop_provider, Default::default()); let fee_history_cache = FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); let eth_api = EthApi::new( From 7f88e62781e1d33a04db90d4831b82f50487eed6 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 2 Dec 2024 04:55:17 +0400 Subject: [PATCH 801/970] feat: bump alloy (#12930) Co-authored-by: Ryan Schneider --- Cargo.lock | 174 +++++++++++------- Cargo.toml | 131 ++++++------- .../src/commands/debug_cmd/build_block.rs | 2 + crates/chain-state/src/in_memory.rs | 4 +- crates/consensus/common/src/validation.rs | 3 +- crates/engine/local/src/payload.rs | 2 + crates/engine/util/Cargo.toml | 1 + crates/engine/util/src/reorg.rs | 3 +- .../ethereum/engine-primitives/src/payload.rs | 6 + crates/ethereum/evm/src/execute.rs | 15 +- crates/ethereum/node/tests/e2e/rpc.rs | 3 +- crates/ethereum/node/tests/e2e/utils.rs | 2 + crates/ethereum/payload/src/lib.rs | 28 ++- .../execution-types/src/execution_outcome.rs | 2 +- crates/evm/src/system_calls/mod.rs | 24 ++- crates/net/eth-wire-types/src/blocks.rs | 12 +- crates/net/eth-wire-types/src/header.rs | 5 +- crates/optimism/evm/Cargo.toml | 1 + crates/optimism/node/src/engine.rs | 2 + crates/optimism/node/src/utils.rs | 2 + crates/optimism/payload/src/builder.rs | 1 + crates/optimism/payload/src/payload.rs | 4 +- crates/optimism/primitives/src/bedrock.rs | 1 + .../primitives/src/transaction/mod.rs | 4 + crates/primitives-traits/src/receipt.rs | 2 +- crates/primitives/src/alloy_compat.rs | 2 + crates/primitives/src/receipt.rs | 2 + crates/primitives/src/traits.rs | 18 +- crates/primitives/src/transaction/mod.rs | 18 +- crates/primitives/src/transaction/pooled.rs | 10 + crates/revm/src/batch.rs | 6 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 15 +- .../rpc-eth-api/src/helpers/pending_block.rs | 1 + .../rpc-eth-api/src/helpers/transaction.rs | 2 +- crates/rpc/rpc-eth-types/src/receipt.rs | 2 +- 
.../rpc-types-compat/src/engine/payload.rs | 4 +- crates/rpc/rpc/src/debug.rs | 35 +++- crates/rpc/rpc/src/eth/bundle.rs | 16 +- crates/rpc/rpc/src/validation.rs | 10 +- crates/storage/codecs/src/alloy/header.rs | 10 +- testing/ef-tests/src/models.rs | 3 + 41 files changed, 385 insertions(+), 203 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4da864b1bea..8f68f71bffb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,14 +112,15 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae09ffd7c29062431dd86061deefe4e3c6f07fa0d674930095f8dcedb0baf02c" +checksum = "3a1ff8439834ab71a4b0ecd1a8ff80b3921c87615f158940c3364f399c732786" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", + "alloy-trie", "arbitrary", "auto_impl", "c-kzg", @@ -129,11 +130,25 @@ dependencies = [ "serde_with", ] +[[package]] +name = "alloy-consensus-any" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "519a86faaa6729464365a90c04eba68539b6d3a30f426edb4b3dafd78920d42f" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + [[package]] name = "alloy-contract" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66430a72d5bf5edead101c8c2f0a24bada5ec9f3cf9909b3e08b6d6899b4803e" +checksum = "cca2b353d8b7f160dc930dfa174557acefece6deab5ecd7e6230d38858579eea" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -198,9 +213,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b6aa3961694b30ba53d41006131a2fca3bdab22e4c344e46db2c639e7c2dfdd" +checksum = "8dedb328c2114284f767e075589ca9de8d5e9c8a91333402f4804a584ed71a38" dependencies = [ "alloy-eip2930", 
"alloy-eip7702", @@ -219,9 +234,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e53f7877ded3921d18a0a9556d55bedf84535567198c9edab2aa23106da91855" +checksum = "4841e8dd4e0f53d76b501fd4c6bc21d95d688bc8ebf0ea359fc6c7ab65b48742" dependencies = [ "alloy-primitives", "alloy-serde", @@ -242,9 +257,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3694b7e480728c0b3e228384f223937f14c10caef5a4c766021190fc8f283d35" +checksum = "254f770918f96dc4ec88a15e6e2e243358e1719d66b40ef814428e7697079d25" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -256,15 +271,17 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea94b8ceb5c75d7df0a93ba0acc53b55a22b47b532b600a800a87ef04eb5b0b4" +checksum = "931dd176c6e33355f3dc0170ec69cf5b951f4d73870b276e2c837ab35f9c5136" dependencies = [ "alloy-consensus", + "alloy-consensus-any", "alloy-eips", "alloy-json-rpc", "alloy-network-primitives", "alloy-primitives", + "alloy-rpc-types-any", "alloy-rpc-types-eth", "alloy-serde", "alloy-signer", @@ -279,9 +296,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9f3e281005943944d15ee8491534a1c7b3cbf7a7de26f8c433b842b93eb5f9" +checksum = "fa6ec0f23be233e851e31c5e4badfedfa9c7bc177bc37f4e03616072cd40a806" dependencies = [ "alloy-consensus", "alloy-eips", @@ -292,9 +309,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c9805d126f24be459b958973c0569c73e1aadd27d4535eee82b2b6764aa03616" +checksum = "e3bce85f0f67b2248c2eb42941bb75079ac53648569a668e8bfd7de5a831ec64" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -341,9 +358,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40c1f9eede27bf4c13c099e8e64d54efd7ce80ef6ea47478aa75d5d74e2dba3b" +checksum = "5545e2cbf2f8f24c68bb887ba0294fa12a2f816b9e72c4f226cd137b77d0e294" dependencies = [ "alloy-chains", "alloy-consensus", @@ -382,9 +399,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f1f34232f77341076541c405482e4ae12f0ee7153d8f9969fc1691201b2247" +checksum = "b633f7731a3df2f4f334001bf80436565113816c5aa5c136c1ded563051e049b" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -423,9 +440,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374dbe0dc3abdc2c964f36b3d3edf9cdb3db29d16bda34aa123f03d810bec1dd" +checksum = "aed9e40c2a73265ebf70f1e48303ee55920282e1ea5971e832873fb2d32cea74" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -448,9 +465,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c74832aa474b670309c20fffc2a869fa141edab7c79ff7963fad0a08de60bae1" +checksum = "42dea20fa715a6f39ec7adc735cfd9567342870737270ac67795d55896527772" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -461,9 +478,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6bfd9b2cc3a1985f1f6da5afc41120256f9f9316fcd89e054cea99dbb10172f6" +checksum = "c9eab93eabf53697b4b9095c0f9203fca3702e78d083c77a5c677bdc02bebab8" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -473,9 +490,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca97963132f78ddfc60e43a017348e6d52eea983925c23652f5b330e8e02291" +checksum = "2750f4f694b27461915b9794df60177198bf733da38dde71aadfbe2946a3c0be" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -483,15 +500,28 @@ dependencies = [ "serde", ] +[[package]] +name = "alloy-rpc-types-any" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79d7620e22d6ed7c58451dd303d0501ade5a8bec9dc8daef0fbc48ceffabbae1" +dependencies = [ + "alloy-consensus", + "alloy-consensus-any", + "alloy-rpc-types-eth", + "alloy-serde", +] + [[package]] name = "alloy-rpc-types-beacon" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "922fa76678d2f9f07ea1b19309b5cfbf244c6029dcba3515227b515fdd6ed4a7" +checksum = "fdbfc1b5ee81b1ef6d5e770f3bd6018eab66c7ac2ee1e897f88973b327e2fc20" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "alloy-serde", "serde", "serde_with", "thiserror 1.0.69", @@ -499,9 +529,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba2253bee958658ebd614c07a61c40580e09dd1fad3f017684314442332ab753" +checksum = "51d2d4a265fb1198272cc43d8d418c0423cdfc1aebcd283be9105464874a1dda" dependencies = [ "alloy-primitives", "serde", @@ -509,9 +539,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3f56294dce86af23ad6ee8df46cf8b0d292eb5d1ff67dc88a0886051e32b1faf" +checksum = "9fb843daa6feb011475f0db8c499fff5ac62e1e6012fc01d97477ddb3217a83f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -530,11 +560,12 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a477281940d82d29315846c7216db45b15e90bcd52309da9f54bcf7ad94a11" +checksum = "df34b88df4deeac9ecfc80ad7cbb26a33e57437b9db8be5b952792feef6134bc" dependencies = [ "alloy-consensus", + "alloy-consensus-any", "alloy-eips", "alloy-network-primitives", "alloy-primitives", @@ -551,9 +582,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8647f8135ee3d5de1cf196706c905c05728a4e38bb4a5b61a7214bd1ba8f60a6" +checksum = "be601847f0b13112249ed577eaa7501755e7dd3df7b037088f8b8236a4602d59" dependencies = [ "alloy-eips", "alloy-primitives", @@ -564,9 +595,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd8b4877ef520c138af702097477cdd19504a8e1e4675ba37e92ba40f2d3c6f" +checksum = "db32f30a55ea4fa9d893127a84eef52fc54d23acb34c1a5a39bfe9bd95fbc149" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -578,9 +609,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d4ab49acf90a71f7fb894dc5fd485f1f07a1e348966c714c4d1e0b7478850a8" +checksum = "af1588d8d799095a9bd55d9045b76add042ab725c37316a77da933683754aa4b" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -590,9 +621,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.6.4" +version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dfa4a7ccf15b2492bb68088692481fd6b2604ccbee1d0d6c44c21427ae4df83" +checksum = "43a89fd4cc3f96b3c5c0dd1cebeb63323e4659bbdc837117fa3fd5ac168df7d9" dependencies = [ "alloy-primitives", "arbitrary", @@ -602,9 +633,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e10aec39d60dc27edcac447302c7803d2371946fb737245320a05b78eb2fafd" +checksum = "532010243a96d1f8593c2246ec3971bc52303884fa1e43ca0a776798ba178910" dependencies = [ "alloy-primitives", "async-trait", @@ -616,9 +647,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8396f6dff60700bc1d215ee03d86ff56de268af96e2bf833a14d0bafcab9882" +checksum = "e8080c0ab2dc729b0cbb183843d08e78d2a1629140c9fc16234d2272abb483bd" dependencies = [ "alloy-consensus", "alloy-network", @@ -704,9 +735,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f99acddb34000d104961897dbb0240298e8b775a7efffb9fda2a1a3efedd65b3" +checksum = "b6f295f4b745fb9e4e663d70bc57aed991288912c7aaaf25767def921050ee43" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -724,9 +755,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc013132e34eeadaa0add7e74164c1503988bfba8bae885b32e0918ba85a8a6" +checksum = "39139015a5ec127d9c895b49b484608e27fe4538544f84cdf5eae0bd36339bc6" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -739,9 +770,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.6.4" +version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "063edc0660e81260653cc6a95777c29d54c2543a668aa5da2359fb450d25a1ba" +checksum = "d9b4f865b13bb8648e93f812b19b74838b9165212a2beb95fc386188c443a5e3" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -758,9 +789,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abd170e600801116d5efe64f74a4fc073dbbb35c807013a7d0a388742aeebba0" +checksum = "6af91e3521b8b3eac26809b1c6f9b86e3ed455dfab812f036836aabdf709b921" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -5297,9 +5328,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.8" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce158d886815d419222daa67fcdf949a34f7950653a4498ebeb4963331f70ed" +checksum = "75353c94e7515fac7d3c280bae56bff3375784a05cb44b317260606292ff6ba9" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5315,9 +5346,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.6.8" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2734e9a65efb90fe4520303f984c124766b7d2f2e5dd51cbe54d6269c85a3c91" +checksum = "f24feef0404861c836b8fc0a3eb0cf6f20507e63ab59a61eeb1491c0f57bc352" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5330,9 +5361,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.6.8" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e4aef8ed017004a176ab1de49df419f59c0fb4a6ce3b693a10fe099fe1afe7" +checksum = "3dda5a5c4bc786f35f6c53ba611863a889790cc40a07c8160465072026795cba" dependencies = [ "alloy-consensus", "alloy-network", @@ -5345,9 +5376,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.8" +version = 
"0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c68a3e2770890da3ad2fd20d7fe0c8e15672707577b4168a60e388c8eceaca0" +checksum = "ef2ab185601941f4ed04418d71e42b220a9c59353c8fb98ba8993c42590c6742" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5357,6 +5388,7 @@ dependencies = [ "alloy-serde", "async-trait", "brotli", + "cfg-if", "miniz_oxide", "op-alloy-consensus", "op-alloy-genesis", @@ -5368,9 +5400,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.6.8" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060ebeaea8c772e396215f69bb86d231ec8b7f36aca0dd6ce367ceaa9a8c33e6" +checksum = "680a86b63fe4c45fbd5dbf1ac6779409565211c4b234d20af94cf1f79d11f23a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5387,9 +5419,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.8" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864dbd5511ef4ef00b6c2c980739259b25b24048007b7751ca0069b30b1e3fee" +checksum = "eeff9cf6fcdf8ef7183f254f9ad59b6e87af5084f21dfa17ba00c4448a84ddf1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7312,6 +7344,7 @@ dependencies = [ "futures", "itertools 0.13.0", "pin-project", + "reth-consensus-common", "reth-engine-primitives", "reth-errors", "reth-ethereum-forks", @@ -8301,6 +8334,7 @@ dependencies = [ "op-alloy-consensus", "reth-chainspec", "reth-consensus", + "reth-consensus-common", "reth-ethereum-forks", "reth-evm", "reth-execution-errors", @@ -9519,9 +9553,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747291a18ad6726a08dd73f8b6a6b3a844db582ecae2063ccf0a04880c44f482" +checksum = "41bbeb6004cc4ed48d27756f0479011df91a6f5642a3abab9309eda5ce67c4ad" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", diff --git 
a/Cargo.toml b/Cargo.toml index 0f6193812c4..d0f32114944 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -423,7 +423,7 @@ reth-trie-sparse = { path = "crates/trie/sparse" } # revm revm = { version = "18.0.0", features = ["std"], default-features = false } -revm-inspectors = "0.11.0" +revm-inspectors = "0.12.0" revm-primitives = { version = "14.0.0", features = [ "std", ], default-features = false } @@ -436,46 +436,46 @@ alloy-rlp = { version = "0.3.4", default-features = false } alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.6.4", default-features = false } -alloy-contract = { version = "0.6.4", default-features = false } -alloy-eips = { version = "0.6.4", default-features = false } -alloy-genesis = { version = "0.6.4", default-features = false } -alloy-json-rpc = { version = "0.6.4", default-features = false } -alloy-network = { version = "0.6.4", default-features = false } -alloy-network-primitives = { version = "0.6.4", default-features = false } -alloy-node-bindings = { version = "0.6.4", default-features = false } -alloy-provider = { version = "0.6.4", features = [ +alloy-consensus = { version = "0.7.0", default-features = false } +alloy-contract = { version = "0.7.0", default-features = false } +alloy-eips = { version = "0.7.0", default-features = false } +alloy-genesis = { version = "0.7.0", default-features = false } +alloy-json-rpc = { version = "0.7.0", default-features = false } +alloy-network = { version = "0.7.0", default-features = false } +alloy-network-primitives = { version = "0.7.0", default-features = false } +alloy-node-bindings = { version = "0.7.0", default-features = false } +alloy-provider = { version = "0.7.0", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.6.4", default-features = false } -alloy-rpc-client = { version = "0.6.4", default-features = false } -alloy-rpc-types = { version = "0.6.4", features = [ +alloy-pubsub = { version = 
"0.7.0", default-features = false } +alloy-rpc-client = { version = "0.7.0", default-features = false } +alloy-rpc-types = { version = "0.7.0", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.6.4", default-features = false } -alloy-rpc-types-anvil = { version = "0.6.4", default-features = false } -alloy-rpc-types-beacon = { version = "0.6.4", default-features = false } -alloy-rpc-types-debug = { version = "0.6.4", default-features = false } -alloy-rpc-types-engine = { version = "0.6.4", default-features = false } -alloy-rpc-types-eth = { version = "0.6.4", default-features = false } -alloy-rpc-types-mev = { version = "0.6.4", default-features = false } -alloy-rpc-types-trace = { version = "0.6.4", default-features = false } -alloy-rpc-types-txpool = { version = "0.6.4", default-features = false } -alloy-serde = { version = "0.6.4", default-features = false } -alloy-signer = { version = "0.6.4", default-features = false } -alloy-signer-local = { version = "0.6.4", default-features = false } -alloy-transport = { version = "0.6.4" } -alloy-transport-http = { version = "0.6.4", features = [ +alloy-rpc-types-admin = { version = "0.7.0", default-features = false } +alloy-rpc-types-anvil = { version = "0.7.0", default-features = false } +alloy-rpc-types-beacon = { version = "0.7.0", default-features = false } +alloy-rpc-types-debug = { version = "0.7.0", default-features = false } +alloy-rpc-types-engine = { version = "0.7.0", default-features = false } +alloy-rpc-types-eth = { version = "0.7.0", default-features = false } +alloy-rpc-types-mev = { version = "0.7.0", default-features = false } +alloy-rpc-types-trace = { version = "0.7.0", default-features = false } +alloy-rpc-types-txpool = { version = "0.7.0", default-features = false } +alloy-serde = { version = "0.7.0", default-features = false } +alloy-signer = { version = "0.7.0", default-features = false } +alloy-signer-local = { version = "0.7.0", default-features = false } 
+alloy-transport = { version = "0.7.0" } +alloy-transport-http = { version = "0.7.0", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.6.4", default-features = false } -alloy-transport-ws = { version = "0.6.4", default-features = false } +alloy-transport-ipc = { version = "0.7.0", default-features = false } +alloy-transport-ws = { version = "0.7.0", default-features = false } # op -op-alloy-rpc-types = "0.6.7" -op-alloy-rpc-types-engine = "0.6.7" -op-alloy-network = "0.6.7" -op-alloy-consensus = "0.6.7" +op-alloy-rpc-types = "0.7.1" +op-alloy-rpc-types-engine = "0.7.1" +op-alloy-network = "0.7.1" +op-alloy-consensus = "0.7.1" # misc aquamarine = "0.6" @@ -611,35 +611,36 @@ tikv-jemalloc-ctl = "0.6" tikv-jemallocator = "0.6" tracy-client = "0.17.3" -#[patch.crates-io] -#alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = 
"a971b3a" } -#alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-serde = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } - -#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } +# [patch.crates-io] +# alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-network-primitives = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-node-bindings = { git = 
"https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-serde = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } + +# op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } +# op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } +# op-alloy-rpc-types = { 
git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } +# op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index e08c32b93a4..c7e1be893af 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -224,6 +224,8 @@ impl> Command { suggested_fee_recipient: self.suggested_fee_recipient, // TODO: add support for withdrawals withdrawals: None, + target_blobs_per_block: None, + max_blobs_per_block: None, }; let payload_config = PayloadConfig::new( Arc::new(SealedHeader::new(best_block.header().clone(), best_block.hash())), diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 349758725aa..3cd6f464562 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -12,8 +12,8 @@ use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, EthPrimitives, HeaderExt, NodePrimitives, Receipts, SealedBlock, - SealedBlockFor, SealedBlockWithSenders, SealedHeader, TransactionMeta, + BlockWithSenders, EthPrimitives, NodePrimitives, Receipts, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, TransactionMeta, }; use reth_primitives_traits::{Block, BlockBody as _, SignedTransaction}; use reth_storage_api::StateProviderBox; diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 8035f8bf61c..dce7d257954 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -495,7 +495,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }; // size: 0x9b5 diff --git 
a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 5111360d5bf..6355a2a00af 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -39,6 +39,8 @@ where .chain_spec .is_cancun_active_at_timestamp(timestamp) .then(B256::random), + target_blobs_per_block: None, + max_blobs_per_block: None, } } } diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 6eb22340ec1..54f9321f239 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -14,6 +14,7 @@ workspace = true # reth reth-primitives.workspace = true reth-errors.workspace = true +reth-consensus-common.workspace = true reth-fs-util.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 20e2b21446a..8e9a195a181 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -415,11 +415,12 @@ where transactions_root: proofs::calculate_transaction_root(&transactions), receipts_root: outcome.receipts_root_slow(reorg_target.header.number).unwrap(), logs_bloom: outcome.block_logs_bloom(reorg_target.header.number).unwrap(), - requests_hash: None, // TODO(prague) gas_used: cumulative_gas_used, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), state_root: state_provider.state_root(hashed_state)?, + requests_hash: None, // TODO(prague) + target_blobs_per_block: None, // TODO(prague) }, body: BlockBody { transactions, diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 094a1df2657..ff07856f1ca 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -332,6 +332,8 @@ mod tests { .unwrap(), withdrawals: None, parent_beacon_block_root: None, + target_blobs_per_block: None, + max_blobs_per_block: 
None, }; // Verify that the generated payload ID matches the expected value @@ -369,6 +371,8 @@ mod tests { }, ]), parent_beacon_block_root: None, + target_blobs_per_block: None, + max_blobs_per_block: None, }; // Verify that the generated payload ID matches the expected value @@ -401,6 +405,8 @@ mod tests { ) .unwrap(), ), + target_blobs_per_block: None, + max_blobs_per_block: None, }; // Verify that the generated payload ID matches the expected value diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index fdbc9eefd3d..3bfc3cb2ed6 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -4,7 +4,7 @@ use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; -use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; use alloy_eips::eip7685::Requests; use core::fmt::Display; @@ -232,7 +232,12 @@ where let deposit_requests = crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, receipts)?; - let mut requests = Requests::new(vec![deposit_requests]); + let mut requests = Requests::default(); + + if !deposit_requests.is_empty() { + requests.push_request(core::iter::once(0).chain(deposit_requests).collect()); + } + requests.extend(self.system_caller.apply_post_execution_changes(&mut evm)?); requests } else { @@ -1127,9 +1132,9 @@ mod tests { let receipt = receipts.first().unwrap(); assert!(receipt.success); - assert!(requests[0].is_empty(), "there should be no deposits"); - assert!(!requests[1].is_empty(), "there should be a withdrawal"); - assert!(requests[2].is_empty(), "there should be no consolidations"); + // There should be exactly one entry with withdrawal requests + assert_eq!(requests.len(), 1); + assert_eq!(requests[0][0], 1); } #[test] diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index 54bfbc8205e..664f447cf25 100644 --- 
a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -1,5 +1,5 @@ use crate::utils::eth_payload_attributes; -use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718}; +use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718, eip4844}; use alloy_primitives::{Address, B256, U256}; use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx}; use alloy_rpc_types_beacon::relay::{ @@ -240,6 +240,7 @@ async fn test_flashbots_validate_v4() -> eyre::Result<()> { execution_payload: block_to_payload_v3(payload.block().clone()), blobs_bundle: BlobsBundleV1::new([]), execution_requests: payload.requests().unwrap_or_default().to_vec(), + target_blobs_per_block: eip4844::TARGET_BLOBS_PER_BLOCK, signature: Default::default(), }, parent_beacon_block_root: attrs.parent_beacon_block_root.unwrap(), diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index ee451b8f3c5..84741a46aa6 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -26,6 +26,8 @@ pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttribu suggested_fee_recipient: Address::ZERO, withdrawals: Some(vec![]), parent_beacon_block_root: Some(B256::ZERO), + target_blobs_per_block: None, + max_blobs_per_block: None, }; EthPayloadBuilderAttributes::new(B256::ZERO, attributes) } diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 43bb0450488..a5c6434310e 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -10,7 +10,10 @@ #![allow(clippy::useless_let_if_seq)] use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; -use alloy_eips::{eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::Requests, merge::BEACON_NONCE}; +use alloy_eips::{ + eip4844::MAX_DATA_GAS_PER_BLOCK, eip7002::WITHDRAWAL_REQUEST_TYPE, + eip7251::CONSOLIDATION_REQUEST_TYPE, eip7685::Requests, 
merge::BEACON_NONCE, +}; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, @@ -365,7 +368,27 @@ where ) .map_err(|err| PayloadBuilderError::Internal(err.into()))?; - Some(Requests::new(vec![deposit_requests, withdrawal_requests, consolidation_requests])) + let mut requests = Requests::default(); + + if !deposit_requests.is_empty() { + requests.push_request(core::iter::once(0).chain(deposit_requests).collect()); + } + + if !withdrawal_requests.is_empty() { + requests.push_request( + core::iter::once(WITHDRAWAL_REQUEST_TYPE).chain(withdrawal_requests).collect(), + ); + } + + if !consolidation_requests.is_empty() { + requests.push_request( + core::iter::once(CONSOLIDATION_REQUEST_TYPE) + .chain(consolidation_requests) + .collect(), + ); + } + + Some(requests) } else { None }; @@ -452,6 +475,7 @@ where blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), requests_hash, + target_blobs_per_block: None, }; let withdrawals = chain_spec diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 412269ace9c..7acbfea3366 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -334,7 +334,7 @@ impl ExecutionOutcome { } } -impl ExecutionOutcome { +impl> ExecutionOutcome { /// Returns an iterator over all block logs. pub fn logs(&self, block_number: BlockNumber) -> Option> { let index = self.block_number_to_index(block_number)?; diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index fd9a1bf5f22..2a5b80ad66d 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -1,9 +1,11 @@ //! System contract call functions. 
use crate::ConfigureEvm; -use alloc::{boxed::Box, sync::Arc, vec}; +use alloc::{boxed::Box, sync::Arc}; use alloy_consensus::Header; -use alloy_eips::eip7685::Requests; +use alloy_eips::{ + eip7002::WITHDRAWAL_REQUEST_TYPE, eip7251::CONSOLIDATION_REQUEST_TYPE, eip7685::Requests, +}; use alloy_primitives::Bytes; use core::fmt::Display; use reth_chainspec::EthereumHardforks; @@ -127,13 +129,27 @@ where DB: Database + DatabaseCommit, DB::Error: Display, { - // todo + let mut requests = Requests::default(); + // Collect all EIP-7685 requests let withdrawal_requests = self.apply_withdrawal_requests_contract_call(evm)?; + if !withdrawal_requests.is_empty() { + requests.push_request( + core::iter::once(WITHDRAWAL_REQUEST_TYPE).chain(withdrawal_requests).collect(), + ); + } // Collect all EIP-7251 requests let consolidation_requests = self.apply_consolidation_requests_contract_call(evm)?; - Ok(Requests::new(vec![withdrawal_requests, consolidation_requests])) + if !consolidation_requests.is_empty() { + requests.push_request( + core::iter::once(CONSOLIDATION_REQUEST_TYPE) + .chain(consolidation_requests) + .collect(), + ); + } + + Ok(requests) } /// Applies the pre-block call to the EIP-2935 blockhashes contract. 
diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 97bbe36b3d6..e6506e86ad7 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -258,7 +258,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }, ]), }.encode(&mut data); @@ -293,7 +294,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }, ]), }; @@ -393,7 +395,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }, ], withdrawals: None, @@ -468,7 +471,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }, ], withdrawals: None, diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs index 9fa3b150d9e..883db625c6e 100644 --- a/crates/net/eth-wire-types/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -142,7 +142,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }; assert_eq!(header.hash_slow(), expected_hash); } @@ -256,6 +257,7 @@ mod tests { excess_blob_gas: Some(0), parent_beacon_block_root: None, requests_hash: None, + target_blobs_per_block: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); @@ -296,6 +298,7 @@ mod tests { blob_gas_used: Some(0), excess_blob_gas: Some(0x1600000), requests_hash: None, + target_blobs_per_block: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); diff --git a/crates/optimism/evm/Cargo.toml 
b/crates/optimism/evm/Cargo.toml index b5d6fac5073..d03a1c6490c 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -21,6 +21,7 @@ reth-execution-errors.workspace = true reth-execution-types.workspace = true reth-prune-types.workspace = true reth-consensus.workspace = true +reth-consensus-common.workspace = true # ethereum alloy-eips.workspace = true diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 57b76b904bd..063ac3617af 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -229,6 +229,8 @@ mod test { suggested_fee_recipient: Address::ZERO, withdrawals: Some(vec![]), parent_beacon_block_root: Some(B256::ZERO), + target_blobs_per_block: None, + max_blobs_per_block: None, }, } } diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs index 9cadcdcf7a1..147aaac59dc 100644 --- a/crates/optimism/node/src/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -55,6 +55,8 @@ pub fn optimism_payload_attributes(timestamp: u64) -> OpPayloadBuilderAttributes suggested_fee_recipient: Address::ZERO, withdrawals: Some(vec![]), parent_beacon_block_root: Some(B256::ZERO), + target_blobs_per_block: None, + max_blobs_per_block: None, }; OpPayloadBuilderAttributes { diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index fbf99c78d9e..aeaa8ef4079 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -410,6 +410,7 @@ where blob_gas_used, excess_blob_gas, requests_hash: None, + target_blobs_per_block: None, }; // seal the block diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 1a951abadca..e243745cea6 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -370,7 +370,9 @@ mod tests { prev_randao: b256!("9158595abbdab2c90635087619aa7042bbebe47642dfab3c9bfb934f6b082765"), 
suggested_fee_recipient: address!("4200000000000000000000000000000000000011"), withdrawals: Some([].into()), - parent_beacon_block_root: b256!("8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into() + parent_beacon_block_root: b256!("8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into(), + target_blobs_per_block: None, + max_blobs_per_block: None, }, transactions: Some([bytes!("7ef8f8a0dc19cfa777d90980e4875d0a548a881baaa3f83f14d1bc0d3038bc329350e54194deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f424000000000000000000000000300000000670d6d890000000000000125000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000014bf9181db6e381d4384bbf69c48b0ee0eed23c6ca26143c6d2544f9d39997a590000000000000000000000007f83d659683caf2767fd3c720981d51f5bc365bc")].into()), no_tx_pool: None, diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index 204b34d3378..3a345abe20a 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ b/crates/optimism/primitives/src/bedrock.rs @@ -85,6 +85,7 @@ pub const BEDROCK_HEADER: Header = Header { excess_blob_gas: None, parent_beacon_block_root: None, requests_hash: None, + target_blobs_per_block: None, }; /// Bedrock total difficulty on Optimism Mainnet. 
diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs index 070b3d984e0..5861a3229fe 100644 --- a/crates/optimism/primitives/src/transaction/mod.rs +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -123,6 +123,10 @@ impl alloy_consensus::Transaction for OpTransaction { self.0.kind() } + fn is_create(&self) -> bool { + self.0.is_create() + } + fn value(&self) -> Uint<256, 4> { self.0.value() } diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index e2af40c447e..435748c4e1b 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -22,7 +22,7 @@ pub trait Receipt: + Clone + Default + fmt::Debug - + TxReceipt + + TxReceipt + alloy_rlp::Encodable + alloy_rlp::Decodable + MaybeSerde diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index a72c83996c0..06451c30b9e 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -58,6 +58,7 @@ impl TryFrom for Block { excess_blob_gas, parent_beacon_block_root, requests_hash, + target_blobs_per_block, } = block.header.inner; Ok(Self { @@ -84,6 +85,7 @@ impl TryFrom for Block { excess_blob_gas, parent_beacon_block_root, requests_hash, + target_blobs_per_block, }, body: BlockBody { transactions, diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 95d707d1b2d..79e15b89d7d 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -73,6 +73,8 @@ impl Receipt { } impl TxReceipt for Receipt { + type Log = Log; + fn status_or_post_state(&self) -> Eip658Value { self.success.into() } diff --git a/crates/primitives/src/traits.rs b/crates/primitives/src/traits.rs index ec4e75c8c6d..73eabd8ec98 100644 --- a/crates/primitives/src/traits.rs +++ b/crates/primitives/src/traits.rs @@ -3,8 +3,8 @@ use crate::{ BlockWithSenders, SealedBlock, }; use 
alloc::vec::Vec; -use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; -use reth_primitives_traits::{Block, BlockBody, BlockHeader, SealedHeader, SignedTransaction}; +use alloy_eips::eip2718::Encodable2718; +use reth_primitives_traits::{Block, BlockBody, SealedHeader, SignedTransaction}; use revm_primitives::{Address, B256}; /// Extension trait for [`reth_primitives_traits::Block`] implementations @@ -121,17 +121,3 @@ pub trait BlockBodyTxExt: BlockBody { } impl BlockBodyTxExt for T {} - -/// Extension trait for [`BlockHeader`] adding useful helper methods. -pub trait HeaderExt: BlockHeader { - /// TODO: remove once is released - /// - /// Returns the parent block's number and hash - /// - /// Note: for the genesis block the parent number is 0 and the parent hash is the zero hash. - fn parent_num_hash(&self) -> BlockNumHash { - BlockNumHash::new(self.number().saturating_sub(1), self.parent_hash()) - } -} - -impl HeaderExt for T {} diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index e1524aa1dc8..f4c4a0f2997 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -308,7 +308,7 @@ impl Transaction { set_code_tx.eip2718_encode(signature, out); } #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.eip2718_encode(out), + Self::Deposit(deposit_tx) => deposit_tx.encode_2718(out), } } @@ -675,6 +675,18 @@ impl alloy_consensus::Transaction for Transaction { } } + fn is_create(&self) -> bool { + match self { + Self::Legacy(tx) => tx.is_create(), + Self::Eip2930(tx) => tx.is_create(), + Self::Eip1559(tx) => tx.is_create(), + Self::Eip4844(tx) => tx.is_create(), + Self::Eip7702(tx) => tx.is_create(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.is_create(), + } + } + fn value(&self) -> U256 { match self { Self::Legacy(tx) => tx.value(), @@ -1392,6 +1404,10 @@ impl alloy_consensus::Transaction for TransactionSigned { self.deref().kind() } + fn 
is_create(&self) -> bool { + self.deref().is_create() + } + fn value(&self) -> U256 { self.deref().value() } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 979a55f2739..5015f5b8e46 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -525,6 +525,16 @@ impl alloy_consensus::Transaction for PooledTransactionsElement { } } + fn is_create(&self) -> bool { + match self { + Self::Legacy(tx) => tx.tx().is_create(), + Self::Eip2930(tx) => tx.tx().is_create(), + Self::Eip1559(tx) => tx.tx().is_create(), + Self::Eip7702(tx) => tx.tx().is_create(), + Self::BlobTransaction(tx) => tx.tx().is_create(), + } + } + fn value(&self) -> U256 { match self { Self::Legacy(tx) => tx.tx().value(), diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 36708b5ff32..c980bdc987c 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -3,7 +3,7 @@ use alloc::vec::Vec; use alloy_eips::eip7685::Requests; -use alloy_primitives::{map::HashSet, Address, BlockNumber}; +use alloy_primitives::{map::HashSet, Address, BlockNumber, Log}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::Receipts; use reth_primitives_traits::Receipt; @@ -131,7 +131,7 @@ impl BlockBatchRecord { /// Save receipts to the executor. pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> where - T: Receipt, + T: Receipt, { let mut receipts = receipts.into_iter().map(Some).collect(); // Prune receipts if necessary. @@ -144,7 +144,7 @@ impl BlockBatchRecord { /// Prune receipts according to the pruning configuration. 
fn prune_receipts(&mut self, receipts: &mut Vec>) -> Result<(), PruneSegmentError> where - T: Receipt, + T: Receipt, { let (Some(first_block), Some(tip)) = (self.first_block, self.tip) else { return Ok(()) }; diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 1062363eafb..8b57cb1f19e 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1,12 +1,17 @@ use crate::{ capabilities::EngineCapabilities, metrics::EngineApiMetrics, EngineApiError, EngineApiResult, }; -use alloy_eips::{eip1898::BlockHashOrNumber, eip4844::BlobAndProofV1, eip7685::Requests}; +use alloy_eips::{ + eip1898::BlockHashOrNumber, + eip4844::BlobAndProofV1, + eip7685::{Requests, RequestsOrHash}, +}; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV3, - ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, PraguePayloadFields, + TransitionConfiguration, }; use async_trait::async_trait; use jsonrpsee_core::RpcResult; @@ -279,7 +284,11 @@ where payload, ExecutionPayloadSidecar::v4( CancunPayloadFields { versioned_hashes, parent_beacon_block_root }, - execution_requests, + PraguePayloadFields { + requests: RequestsOrHash::Requests(execution_requests), + // TODO: add as an argument and handle in `try_into_block` + target_blobs_per_block: 0, + }, ), ) .await diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index e1cd8f5c3c2..e3ef6621bcb 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -467,6 +467,7 @@ pub trait LoadPendingBlock: extra_data: 
Default::default(), parent_beacon_block_root, requests_hash, + target_blobs_per_block: None, }; // Convert Vec> to Vec diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 5bf6b0c02ba..43ac03adaf7 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -282,7 +282,7 @@ pub trait EthTransactions: LoadTransaction { block .transactions_with_sender() .enumerate() - .find(|(_, (signer, tx))| **signer == sender && tx.nonce() == nonce) + .find(|(_, (signer, tx))| **signer == sender && (*tx).nonce() == nonce) .map(|(index, (signer, tx))| { let tx_info = TransactionInfo { hash: Some(tx.hash()), diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 3136d42e958..b7f82782b0b 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -14,7 +14,7 @@ pub fn build_receipt( meta: TransactionMeta, receipt: &Receipt, all_receipts: &[Receipt], - build_envelope: impl FnOnce(ReceiptWithBloom) -> T, + build_envelope: impl FnOnce(ReceiptWithBloom>) -> T, ) -> EthResult> { // Note: we assume this transaction is valid, because it's mined (or part of pending block) // and we don't need to check for pre EIP-2 diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index afe50d3af5a..f504d57addc 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -5,7 +5,6 @@ use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header, EMPTY_OMMER_RO use alloy_eips::{ eip2718::{Decodable2718, Encodable2718}, eip4895::Withdrawals, - eip7685::Requests, }; use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ @@ -77,6 +76,7 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result let db = db.0; + let tx_info = TransactionInfo { + 
block_number: Some( + env.block.number.try_into().unwrap_or_default(), + ), + base_fee: Some( + env.block.basefee.try_into().unwrap_or_default(), + ), + hash: None, + block_hash: None, + index: None, + }; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector - .try_into_mux_frame(&res, db) + .try_into_mux_frame(&res, db, tx_info) .map_err(Eth::Error::from_eth_err)?; Ok(frame.into()) }) @@ -658,6 +670,17 @@ where ) -> Result<(GethTrace, revm_primitives::EvmState), Eth::Error> { let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts; + let tx_info = TransactionInfo { + hash: transaction_context.as_ref().map(|c| c.tx_hash).unwrap_or_default(), + index: transaction_context + .as_ref() + .map(|c| c.tx_index.map(|i| i as u64)) + .unwrap_or_default(), + block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(), + block_number: Some(env.block.number.try_into().unwrap_or_default()), + base_fee: Some(env.block.basefee.try_into().unwrap_or_default()), + }; + if let Some(tracer) = tracer { return match tracer { GethDebugTracerType::BuiltInTracer(tracer) => match tracer { @@ -723,7 +746,7 @@ where let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector - .try_into_mux_frame(&res, db) + .try_into_mux_frame(&res, db, tx_info) .map_err(Eth::Error::from_eth_err)?; return Ok((frame.into(), res.state)) } @@ -738,14 +761,6 @@ where ); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; - - let tx_info = TransactionInfo { - hash: transaction_context.unwrap().tx_hash, - index: transaction_context.unwrap().tx_index.map(|index| index as u64), - block_hash: transaction_context.unwrap().block_hash, - block_number: Some(env.block.number.try_into().unwrap_or_default()), - base_fee: Some(env.block.basefee.try_into().unwrap_or_default()), - }; let frame: FlatCallFrame = inspector .with_transaction_gas_limit(env.tx.gas_limit) .into_parity_builder() diff --git 
a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 10eec4dbf97..a80060b3377 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -55,7 +55,9 @@ where let EthCallBundle { txs, block_number, + coinbase, state_block_number, + timeout: _, timestamp, gas_limit, difficulty, @@ -106,6 +108,10 @@ where // Note: the block number is considered the `parent` block: let (cfg, mut block_env, at) = self.eth_api().evm_env_at(block_id).await?; + if let Some(coinbase) = coinbase { + block_env.coinbase = coinbase; + } + // need to adjust the timestamp for the next block if let Some(timestamp) = timestamp { block_env.timestamp = U256::from(timestamp); @@ -117,8 +123,16 @@ where block_env.difficulty = U256::from(difficulty); } + // default to call gas limit unless user requests a smaller limit + block_env.gas_limit = U256::from(self.inner.eth_api.call_gas_limit()); if let Some(gas_limit) = gas_limit { - block_env.gas_limit = U256::from(gas_limit); + let gas_limit = U256::from(gas_limit); + if gas_limit > block_env.gas_limit { + return Err( + EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() + ) + } + block_env.gas_limit = gas_limit; } if let Some(base_fee) = base_fee { diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 1aa502c1f10..91a3dae2ce7 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -1,11 +1,12 @@ use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction, TxReceipt}; -use alloy_eips::eip4844::kzg_to_versioned_hash; +use alloy_eips::{eip4844::kzg_to_versioned_hash, eip7685::RequestsOrHash}; use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, }; use alloy_rpc_types_engine::{ BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, PayloadError, + 
PraguePayloadFields, }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; @@ -386,7 +387,12 @@ where versioned_hashes: self .validate_blobs_bundle(request.request.blobs_bundle)?, }, - request.request.execution_requests.into(), + PraguePayloadFields { + requests: RequestsOrHash::Requests( + request.request.execution_requests.into(), + ), + target_blobs_per_block: request.request.target_blobs_per_block, + }, ), )? .try_seal_with_senders() diff --git a/crates/storage/codecs/src/alloy/header.rs b/crates/storage/codecs/src/alloy/header.rs index 04b7d6ab718..418b8b9032b 100644 --- a/crates/storage/codecs/src/alloy/header.rs +++ b/crates/storage/codecs/src/alloy/header.rs @@ -58,6 +58,7 @@ pub(crate) struct Header { #[reth_codecs(crate = "crate")] pub(crate) struct HeaderExt { requests_hash: Option, + target_blobs_per_block: Option, } impl HeaderExt { @@ -65,7 +66,7 @@ impl HeaderExt { /// /// Required since [`Header`] uses `Option` as a field. const fn into_option(self) -> Option { - if self.requests_hash.is_some() { + if self.requests_hash.is_some() || self.target_blobs_per_block.is_some() { Some(self) } else { None @@ -78,7 +79,7 @@ impl Compact for AlloyHeader { where B: bytes::BufMut + AsMut<[u8]>, { - let extra_fields = HeaderExt { requests_hash: self.requests_hash }; + let extra_fields = HeaderExt { requests_hash: self.requests_hash, target_blobs_per_block: self.target_blobs_per_block }; let header = Header { parent_hash: self.parent_hash, @@ -128,8 +129,9 @@ impl Compact for AlloyHeader { blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: header.parent_beacon_block_root, - requests_hash: header.extra_fields.and_then(|h| h.requests_hash), + requests_hash: header.extra_fields.as_ref().and_then(|h| h.requests_hash), extra_data: header.extra_data, + target_blobs_per_block: header.extra_fields.as_ref().and_then(|h| h.target_blobs_per_block), }; (alloy_header, buf) } @@ -188,7 +190,7 @@ mod tests { #[test] 
fn test_extra_fields() { let mut header = HOLESKY_BLOCK; - header.extra_fields = Some(HeaderExt { requests_hash: Some(B256::random()) }); + header.extra_fields = Some(HeaderExt { requests_hash: Some(B256::random()), target_blobs_per_block: Some(3) }); let mut encoded_header = vec![]; let len = header.to_compact(&mut encoded_header); diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 292b32e8ce0..742498e81bf 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -88,6 +88,8 @@ pub struct Header { pub parent_beacon_block_root: Option, /// Requests root. pub requests_hash: Option, + /// Target blobs per block. + pub target_blobs_per_block: Option, } impl From
for SealedHeader { @@ -114,6 +116,7 @@ impl From
for SealedHeader { excess_blob_gas: value.excess_blob_gas.map(|v| v.to::()), parent_beacon_block_root: value.parent_beacon_block_root, requests_hash: value.requests_hash, + target_blobs_per_block: value.target_blobs_per_block.map(|v| v.to::()), }; Self::new(header, value.hash) } From 3f93f35c20a9e01350a21b288d4fc917e96d4854 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 2 Dec 2024 04:56:00 +0400 Subject: [PATCH 802/970] feat: add `Header` AT to `EthChainSpec` (#13046) --- crates/chainspec/src/api.rs | 9 +++++++-- crates/cli/commands/src/common.rs | 19 +++++++++++-------- crates/cli/commands/src/stage/drop.rs | 9 ++++++--- crates/exex/exex/src/dyn_context.rs | 9 +++++---- crates/node/types/src/lib.rs | 8 ++++---- crates/optimism/chainspec/src/lib.rs | 4 +++- crates/storage/db-common/src/init.rs | 5 +++-- 7 files changed, 39 insertions(+), 24 deletions(-) diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 94b4285f92d..348051bef9c 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -14,6 +14,9 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { // todo: make chain spec type generic over hardfork //type Hardfork: Clone + Copy + 'static; + /// The header type of the network. + type Header; + /// Returns the [`Chain`] object this spec targets. fn chain(&self) -> Chain; @@ -41,7 +44,7 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { fn display_hardforks(&self) -> Box; /// The genesis header. - fn genesis_header(&self) -> &Header; + fn genesis_header(&self) -> &Self::Header; /// The genesis block specification. 
fn genesis(&self) -> &Genesis; @@ -64,6 +67,8 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { } impl EthChainSpec for ChainSpec { + type Header = Header; + fn chain(&self) -> Chain { self.chain } @@ -92,7 +97,7 @@ impl EthChainSpec for ChainSpec { Box::new(Self::display_hardforks(self)) } - fn genesis_header(&self) -> &Header { + fn genesis_header(&self) -> &Self::Header { self.genesis_header() } diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index b2ad1452aa4..174eeffa396 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -3,7 +3,7 @@ use alloy_primitives::B256; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_config::{config::EtlConfig, Config}; use reth_db::{init_db, open_db_read_only, DatabaseEnv}; @@ -54,13 +54,13 @@ pub struct EnvironmentArgs { pub db: DatabaseArgs, } -impl> EnvironmentArgs { +impl EnvironmentArgs { /// Initializes environment according to [`AccessRights`] and returns an instance of /// [`Environment`]. - pub fn init>( - &self, - access: AccessRights, - ) -> eyre::Result> { + pub fn init(&self, access: AccessRights) -> eyre::Result> + where + C: ChainSpecParser, + { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); let db_path = data_dir.db(); let sf_path = data_dir.static_files(); @@ -109,12 +109,15 @@ impl> Environmen /// If it's a read-write environment and an issue is found, it will attempt to heal (including a /// pipeline unwind). Otherwise, it will print out an warning, advising the user to restart the /// node to heal. 
- fn create_provider_factory>( + fn create_provider_factory( &self, config: &Config, db: Arc, static_file_provider: StaticFileProvider, - ) -> eyre::Result>>> { + ) -> eyre::Result>>> + where + C: ChainSpecParser, + { let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); let prune_modes = config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 49bbc55ec24..b93ab1a3c40 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -2,7 +2,7 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use itertools::Itertools; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_db::{mdbx::tx::Tx, static_file::iter_static_files, tables, DatabaseError}; use reth_db_api::transaction::{DbTx, DbTxMut}; @@ -27,9 +27,12 @@ pub struct Command { stage: StageEnum, } -impl> Command { +impl Command { /// Execute `db` command - pub async fn execute>(self) -> eyre::Result<()> { + pub async fn execute(self) -> eyre::Result<()> + where + C: ChainSpecParser, + { let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RW)?; let tool = DbTool::new(provider_factory)?; diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs index 12efa5f069b..8bda75cac45 100644 --- a/crates/exex/exex/src/dyn_context.rs +++ b/crates/exex/exex/src/dyn_context.rs @@ -4,7 +4,7 @@ use std::fmt::Debug; use reth_chainspec::{EthChainSpec, Head}; -use reth_node_api::{FullNodeComponents, NodePrimitives, NodeTypes}; +use reth_node_api::{FullNodeComponents, HeaderTy, NodePrimitives, NodeTypes}; use reth_node_core::node_config::NodeConfig; use reth_primitives::EthPrimitives; use reth_provider::BlockReader; @@ -18,7 +18,7 @@ pub struct ExExContextDyn { /// The current head of the blockchain at launch. pub head: Head, /// The config of the node - pub config: NodeConfig>, + pub config: NodeConfig + 'static>>, /// The loaded node config pub reth_config: reth_config::Config, /// Channel used to send [`ExExEvent`]s to the rest of the node. @@ -57,8 +57,9 @@ where Node::Executor: Debug, { fn from(ctx: ExExContext) -> Self { - let config = - ctx.config.map_chainspec(|chainspec| Box::new(chainspec) as Box); + let config = ctx.config.map_chainspec(|chainspec| { + Box::new(chainspec) as Box>> + }); let notifications = Box::new(ctx.notifications) as Box<_>; Self { diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index c0d266e5775..6e1eb81a0c8 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -31,7 +31,7 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static { /// The node's primitive types, defining basic operations and structures. type Primitives: NodePrimitives; /// The type used for configuration of the EVM. - type ChainSpec: EthChainSpec; + type ChainSpec: EthChainSpec
::BlockHeader>; /// The type used to perform state commitment operations. type StateCommitment: StateCommitment; /// The type responsible for writing chain primitives to storage. @@ -151,7 +151,7 @@ impl AnyNodeTypes { impl NodeTypes for AnyNodeTypes where P: NodePrimitives + Send + Sync + Unpin + 'static, - C: EthChainSpec + 'static, + C: EthChainSpec
+ 'static, SC: StateCommitment, S: Default + Send + Sync + Unpin + Debug + 'static, { @@ -212,7 +212,7 @@ impl NodeTypes for AnyNodeTypesWithEngine where P: NodePrimitives + Send + Sync + Unpin + 'static, E: EngineTypes + Send + Sync + Unpin, - C: EthChainSpec + 'static, + C: EthChainSpec
+ 'static, SC: StateCommitment, S: Default + Send + Sync + Unpin + Debug + 'static, { @@ -226,7 +226,7 @@ impl NodeTypesWithEngine for AnyNodeTypesWithEngine + 'static, SC: StateCommitment, S: Default + Send + Sync + Unpin + Debug + 'static, { diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index d552d08f18c..f3450e87324 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -253,6 +253,8 @@ pub fn decode_holocene_1559_params(extra_data: Bytes) -> Result<(u32, u32), Deco } impl EthChainSpec for OpChainSpec { + type Header = Header; + fn chain(&self) -> alloy_chains::Chain { self.inner.chain() } @@ -281,7 +283,7 @@ impl EthChainSpec for OpChainSpec { Box::new(ChainSpec::display_hardforks(self)) } - fn genesis_header(&self) -> &Header { + fn genesis_header(&self) -> &Self::Header { self.inner.genesis_header() } diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index ec31edd0682..9d4fb4ff02c 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -70,7 +70,7 @@ impl From for InitDatabaseError { pub fn init_genesis(factory: &PF) -> Result where PF: DatabaseProviderFactory + StaticFileProviderFactory + ChainSpecProvider + BlockHashReader, - PF::ProviderRW: StaticFileProviderFactory + PF::ProviderRW: StaticFileProviderFactory + StageCheckpointWriter + HistoryWriter + HeaderProvider @@ -78,6 +78,7 @@ where + StateWriter + StateWriter + AsRef, + PF::ChainSpec: EthChainSpec
, { let chain = factory.chain_spec(); @@ -307,7 +308,7 @@ pub fn insert_genesis_header( ) -> ProviderResult<()> where Provider: StaticFileProviderFactory + DBProvider, - Spec: EthChainSpec, + Spec: EthChainSpec
, { let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash()); let static_file_provider = provider.static_file_provider(); From c2ab690ad1a826c519dacc818222b47074c57a13 Mon Sep 17 00:00:00 2001 From: Tien Nguyen Date: Mon, 2 Dec 2024 17:02:05 +0700 Subject: [PATCH 803/970] feat: introduce EthMessage variant to NetworkHandleMessage (#13033) --- crates/net/network/src/manager.rs | 3 +++ crates/net/network/src/network.rs | 16 ++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index c1db91773e3..e123377acd2 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -644,6 +644,9 @@ impl NetworkManager { let _ = tx.send(None); } } + NetworkHandleMessage::EthMessage { peer_id, message } => { + self.swarm.sessions_mut().send_message(&peer_id, message) + } } } diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index eadeccb1549..7e0b000cf34 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,6 +1,6 @@ use crate::{ - config::NetworkMode, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, - transactions::TransactionsHandle, FetchClient, + config::NetworkMode, message::PeerMessage, protocol::RlpxSubProtocol, + swarm::NetworkConnectionState, transactions::TransactionsHandle, FetchClient, }; use alloy_primitives::B256; use enr::Enr; @@ -136,6 +136,11 @@ impl NetworkHandle { }) } + /// Send eth message to the peer. + pub fn send_eth_message(&self, peer_id: PeerId, message: PeerMessage) { + self.send_message(NetworkHandleMessage::EthMessage { peer_id, message }) + } + /// Send message to get the [`TransactionsHandle`]. /// /// Returns `None` if no transaction task is installed. @@ -481,6 +486,13 @@ pub(crate) enum NetworkHandleMessage, }, + /// Sends an `eth` protocol message to the peer. + EthMessage { + /// The peer to send the message to. 
+ peer_id: PeerId, + /// The message to send to the peer's sessions. + message: PeerMessage, + }, /// Applies a reputation change to the given peer. ReputationChange(PeerId, ReputationChangeKind), /// Returns the client that can be used to interact with the network. From 04f8c58485d07a40c85d34f45a4be5610567e836 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 2 Dec 2024 11:07:32 +0100 Subject: [PATCH 804/970] perf(trie): storage multiproof overallocation (#12959) --- crates/trie/parallel/src/proof.rs | 6 +++++- crates/trie/trie/src/proof.rs | 12 ++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index f285079f252..94138823e00 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -203,7 +203,11 @@ where account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - storages.insert(hashed_address, storage_multiproof); + + // We might be adding leaves that are not necessarily our proof targets. 
+ if targets.contains_key(&hashed_address) { + storages.insert(hashed_address, storage_multiproof); + } } } } diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index 34315416cb8..3cb0ff6f2f7 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -115,19 +115,20 @@ where hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } TrieElement::Leaf(hashed_address, account) => { + let proof_targets = targets.remove(&hashed_address); + let leaf_is_proof_target = proof_targets.is_some(); let storage_prefix_set = self .prefix_sets .storage_prefix_sets .remove(&hashed_address) .unwrap_or_default(); - let proof_targets = targets.remove(&hashed_address).unwrap_or_default(); let storage_multiproof = StorageProof::new_hashed( self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone(), hashed_address, ) .with_prefix_set_mut(storage_prefix_set) - .storage_multiproof(proof_targets)?; + .storage_multiproof(proof_targets.unwrap_or_default())?; // Encode account account_rlp.clear(); @@ -136,8 +137,11 @@ where hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - // Overwrite storage multiproof. - storages.insert(hashed_address, storage_multiproof); + // We might be adding leaves that are not necessarily our proof targets. + if leaf_is_proof_target { + // Overwrite storage multiproof. 
+ storages.insert(hashed_address, storage_multiproof); + } } } } From ae3b3ddf427233db05f1654bdde9ee567ef3cb4e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 11:09:24 +0100 Subject: [PATCH 805/970] chore: use alloy's blockwithparent (#13052) --- .../src/headers/reverse_headers.rs | 3 +-- crates/net/p2p/src/headers/downloader.rs | 3 +-- crates/primitives-traits/src/header/mod.rs | 2 +- crates/primitives-traits/src/header/sealed.rs | 19 +++---------------- crates/primitives-traits/src/lib.rs | 2 +- 5 files changed, 7 insertions(+), 22 deletions(-) diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 63a20ff27f5..be359134e79 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -1225,11 +1225,10 @@ mod tests { use super::*; use crate::headers::test_utils::child_header; use alloy_consensus::Header; - use alloy_eips::BlockNumHash; + use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; use assert_matches::assert_matches; use reth_consensus::test_utils::TestConsensus; use reth_network_p2p::test_utils::TestHeadersClient; - use reth_primitives_traits::BlockWithParent; /// Tests that `replace_number` works the same way as `Option::replace` #[test] diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index eca03bdb4e7..1bc76924a6c 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -1,12 +1,11 @@ use super::error::HeadersDownloaderResult; use crate::error::{DownloadError, DownloadResult}; use alloy_consensus::BlockHeader; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{eip1898::BlockWithParent, BlockHashOrNumber}; use alloy_primitives::B256; use futures::Stream; use reth_consensus::HeaderValidator; use reth_primitives::SealedHeader; -use reth_primitives_traits::BlockWithParent; use std::fmt::Debug; 
/// A downloader capable of fetching and yielding block headers. diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index ea5f7eafb51..abcdf4ee0cc 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -1,5 +1,5 @@ mod sealed; -pub use sealed::{BlockWithParent, Header, SealedHeader}; +pub use sealed::{Header, SealedHeader}; mod error; pub use error::HeaderError; diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index f167ffbf284..1a5163e6ba3 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,26 +1,13 @@ +use crate::InMemorySize; pub use alloy_consensus::Header; - -use core::mem; - use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; -use alloy_primitives::{keccak256, BlockHash, Sealable, B256}; +use alloy_primitives::{keccak256, BlockHash, Sealable}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; +use core::mem; use derive_more::{AsRef, Deref}; -use crate::InMemorySize; - -/// A helper struct to store the block number/hash and its parent hash. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct BlockWithParent { - /// Parent hash. - pub parent: B256, - /// Block number/hash. - pub block: BlockNumHash, -} - /// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want /// to modify header. 
#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref)] diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 5bdfd01eb9c..c88da5ad7a7 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -58,7 +58,7 @@ pub use storage::StorageEntry; pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{BlockWithParent, Header, HeaderError, SealedHeader}; +pub use header::{Header, HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. /// From b91d0f871198812f8bb390437af5c43424e6314b Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 2 Dec 2024 11:10:37 +0100 Subject: [PATCH 806/970] feat(trie): blinded node provider (#13027) --- crates/trie/sparse/src/blinded.rs | 58 ++++ crates/trie/sparse/src/errors.rs | 5 + crates/trie/sparse/src/lib.rs | 2 + crates/trie/sparse/src/state.rs | 168 +++++----- crates/trie/sparse/src/trie.rs | 488 ++++++++++++++++++------------ 5 files changed, 456 insertions(+), 265 deletions(-) create mode 100644 crates/trie/sparse/src/blinded.rs diff --git a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/blinded.rs new file mode 100644 index 00000000000..3875e819da6 --- /dev/null +++ b/crates/trie/sparse/src/blinded.rs @@ -0,0 +1,58 @@ +//! Traits and default implementations related to retrieval of blinded trie nodes. + +use crate::SparseTrieError; +use alloy_primitives::Bytes; +use reth_trie_common::Nibbles; +use std::convert::Infallible; + +/// Factory for instantiating blinded node providers. +pub trait BlindedProviderFactory { + /// Type capable of fetching blinded account nodes. + type AccountNodeProvider: BlindedProvider; + /// Type capable of fetching blinded storage nodes. + type StorageNodeProvider: BlindedProvider; + + /// Returns blinded account node provider. 
+ fn account_node_provider(&self) -> Self::AccountNodeProvider; + + /// Returns blinded storage node provider. + fn storage_node_provider(&self) -> Self::StorageNodeProvider; +} + +/// Trie node provider for retrieving blinded nodes. +pub trait BlindedProvider { + /// The error type for the provider. + type Error: Into; + + /// Retrieve blinded node by path. + fn blinded_node(&mut self, path: Nibbles) -> Result, Self::Error>; +} + +/// Default blinded node provider factory that creates [`DefaultBlindedProvider`]. +#[derive(PartialEq, Eq, Clone, Default, Debug)] +pub struct DefaultBlindedProviderFactory; + +impl BlindedProviderFactory for DefaultBlindedProviderFactory { + type AccountNodeProvider = DefaultBlindedProvider; + type StorageNodeProvider = DefaultBlindedProvider; + + fn account_node_provider(&self) -> Self::AccountNodeProvider { + DefaultBlindedProvider + } + + fn storage_node_provider(&self) -> Self::StorageNodeProvider { + DefaultBlindedProvider + } +} + +/// Default blinded node provider that always returns `Ok(None)`. +#[derive(PartialEq, Eq, Clone, Default, Debug)] +pub struct DefaultBlindedProvider; + +impl BlindedProvider for DefaultBlindedProvider { + type Error = Infallible; + + fn blinded_node(&mut self, _path: Nibbles) -> Result, Self::Error> { + Ok(None) + } +} diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs index a38a92395d9..ca3b279ce42 100644 --- a/crates/trie/sparse/src/errors.rs +++ b/crates/trie/sparse/src/errors.rs @@ -1,5 +1,7 @@ //! Errors for sparse trie. +use std::convert::Infallible; + use alloy_primitives::{Bytes, B256}; use reth_trie_common::Nibbles; use thiserror::Error; @@ -56,4 +58,7 @@ pub enum SparseTrieError { /// RLP error. #[error(transparent)] Rlp(#[from] alloy_rlp::Error), + /// Infallible. 
+ #[error(transparent)] + Infallible(#[from] Infallible), } diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index b3cb2c5fdff..ec5117fdbc1 100644 --- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -8,3 +8,5 @@ pub use trie::*; mod errors; pub use errors::*; + +pub mod blinded; diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 6444c7cd2c4..877744954ae 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,4 +1,5 @@ use crate::{ + blinded::{BlindedProvider, BlindedProviderFactory, DefaultBlindedProviderFactory}, RevealedSparseTrie, SparseStateTrieError, SparseStateTrieResult, SparseTrie, SparseTrieError, }; use alloy_primitives::{ @@ -16,11 +17,13 @@ use std::iter::Peekable; /// Sparse state trie representing lazy-loaded Ethereum state trie. #[derive(Debug)] -pub struct SparseStateTrie { +pub struct SparseStateTrie { + /// Blinded node provider factory. + provider_factory: F, /// Sparse account trie. - state: SparseTrie, + state: SparseTrie, /// Sparse storage tries. - storages: HashMap, + storages: HashMap>, /// Collection of revealed account and storage keys. revealed: HashMap>, /// Flag indicating whether trie updates should be retained. @@ -35,6 +38,7 @@ impl Default for SparseStateTrie { state: Default::default(), storages: Default::default(), revealed: Default::default(), + provider_factory: Default::default(), retain_updates: false, account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), } @@ -46,7 +50,9 @@ impl SparseStateTrie { pub fn from_state(state: SparseTrie) -> Self { Self { state, ..Default::default() } } +} +impl SparseStateTrie { /// Set the retention of branch node updates and deletions. pub const fn with_updates(mut self, retain_updates: bool) -> Self { self.retain_updates = retain_updates; @@ -64,7 +70,10 @@ impl SparseStateTrie { } /// Returns mutable reference to storage sparse trie if it was revealed. 
- pub fn storage_trie_mut(&mut self, account: &B256) -> Option<&mut RevealedSparseTrie> { + pub fn storage_trie_mut( + &mut self, + account: &B256, + ) -> Option<&mut RevealedSparseTrie> { self.storages.get_mut(account).and_then(|e| e.as_revealed_mut()) } @@ -84,7 +93,11 @@ impl SparseStateTrie { let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. - let trie = self.state.reveal_root(root_node, self.retain_updates)?; + let trie = self.state.reveal_root_with_provider( + self.provider_factory.account_node_provider(), + root_node, + self.retain_updates, + )?; // Reveal the remaining proof nodes. for (path, bytes) in proof { @@ -115,11 +128,11 @@ impl SparseStateTrie { let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. - let trie = self - .storages - .entry(account) - .or_default() - .reveal_root(root_node, self.retain_updates)?; + let trie = self.storages.entry(account).or_default().reveal_root_with_provider( + self.provider_factory.storage_node_provider(), + root_node, + self.retain_updates, + )?; // Reveal the remaining proof nodes. for (path, bytes) in proof { @@ -145,7 +158,11 @@ impl SparseStateTrie { if let Some(root_node) = self.validate_root_node(&mut account_nodes)? { // Reveal root node if it wasn't already. - let trie = self.state.reveal_root(root_node, self.retain_updates)?; + let trie = self.state.reveal_root_with_provider( + self.provider_factory.account_node_provider(), + root_node, + self.retain_updates, + )?; // Reveal the remaining proof nodes. for (path, bytes) in account_nodes { @@ -161,11 +178,11 @@ impl SparseStateTrie { if let Some(root_node) = self.validate_root_node(&mut storage_nodes)? { // Reveal root node if it wasn't already. 
- let trie = self - .storages - .entry(account) - .or_default() - .reveal_root(root_node, self.retain_updates)?; + let trie = self.storages.entry(account).or_default().reveal_root_with_provider( + self.provider_factory.storage_node_provider(), + root_node, + self.retain_updates, + )?; // Reveal the remaining proof nodes. for (path, bytes) in storage_nodes { @@ -205,41 +222,6 @@ impl SparseStateTrie { Ok(Some(root_node)) } - /// Update or remove trie account based on new account info. This method will either recompute - /// the storage root based on update storage trie or look it up from existing leaf value. - /// - /// If the new account info and storage trie are empty, the account leaf will be removed. - pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> { - let nibbles = Nibbles::unpack(address); - let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { - trace!(target: "trie::sparse", ?address, "Calculating storage root to update account"); - storage_trie.root().ok_or(SparseTrieError::Blind)? - } else if self.revealed.contains_key(&address) { - trace!(target: "trie::sparse", ?address, "Retrieving storage root from account leaf to update account"); - let state = self.state.as_revealed_mut().ok_or(SparseTrieError::Blind)?; - // The account was revealed, either... - if let Some(value) = state.get_leaf_value(&nibbles) { - // ..it exists and we should take it's current storage root or... - TrieAccount::decode(&mut &value[..])?.storage_root - } else { - // ...the account is newly created and the storage trie is empty. 
- EMPTY_ROOT_HASH - } - } else { - return Err(SparseTrieError::Blind.into()) - }; - - if account.is_empty() && storage_root == EMPTY_ROOT_HASH { - trace!(target: "trie::sparse", ?address, "Removing account"); - self.remove_account_leaf(&nibbles) - } else { - trace!(target: "trie::sparse", ?address, "Updating account"); - self.account_rlp_buf.clear(); - TrieAccount::from((account, storage_root)).encode(&mut self.account_rlp_buf); - self.update_account_leaf(nibbles, self.account_rlp_buf.clone()) - } - } - /// Update the account leaf node. pub fn update_account_leaf( &mut self, @@ -250,12 +232,6 @@ impl SparseStateTrie { Ok(()) } - /// Remove the account leaf node. - pub fn remove_account_leaf(&mut self, path: &Nibbles) -> SparseStateTrieResult<()> { - self.state.remove_leaf(path)?; - Ok(()) - } - /// Update the leaf node of a storage trie at the provided address. pub fn update_storage_leaf( &mut self, @@ -263,18 +239,11 @@ impl SparseStateTrie { slot: Nibbles, value: Vec, ) -> SparseStateTrieResult<()> { - self.storages.entry(address).or_default().update_leaf(slot, value)?; - Ok(()) - } - - /// Update the leaf node of a storage trie at the provided address. - pub fn remove_storage_leaf( - &mut self, - address: B256, - slot: &Nibbles, - ) -> SparseStateTrieResult<()> { - self.storages.entry(address).or_default().remove_leaf(slot)?; - Ok(()) + if let Some(storage_trie) = self.storages.get_mut(&address) { + Ok(storage_trie.update_leaf(slot, value)?) + } else { + Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) + } } /// Wipe the storage trie at the provided address. @@ -329,6 +298,67 @@ impl SparseStateTrie { } } +impl SparseStateTrie +where + F: BlindedProviderFactory, + SparseTrieError: From<::Error> + + From<::Error>, +{ + /// Update or remove trie account based on new account info. This method will either recompute + /// the storage root based on update storage trie or look it up from existing leaf value. 
+ /// + /// If the new account info and storage trie are empty, the account leaf will be removed. + pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> { + let nibbles = Nibbles::unpack(address); + let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { + trace!(target: "trie::sparse", ?address, "Calculating storage root to update account"); + storage_trie.root().ok_or(SparseTrieError::Blind)? + } else if self.revealed.contains_key(&address) { + trace!(target: "trie::sparse", ?address, "Retrieving storage root from account leaf to update account"); + let state = self.state.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + // The account was revealed, either... + if let Some(value) = state.get_leaf_value(&nibbles) { + // ..it exists and we should take it's current storage root or... + TrieAccount::decode(&mut &value[..])?.storage_root + } else { + // ...the account is newly created and the storage trie is empty. + EMPTY_ROOT_HASH + } + } else { + return Err(SparseTrieError::Blind.into()) + }; + + if account.is_empty() && storage_root == EMPTY_ROOT_HASH { + trace!(target: "trie::sparse", ?address, "Removing account"); + self.remove_account_leaf(&nibbles) + } else { + trace!(target: "trie::sparse", ?address, "Updating account"); + self.account_rlp_buf.clear(); + TrieAccount::from((account, storage_root)).encode(&mut self.account_rlp_buf); + self.update_account_leaf(nibbles, self.account_rlp_buf.clone()) + } + } + + /// Remove the account leaf node. + pub fn remove_account_leaf(&mut self, path: &Nibbles) -> SparseStateTrieResult<()> { + self.state.remove_leaf(path)?; + Ok(()) + } + + /// Update the leaf node of a storage trie at the provided address. + pub fn remove_storage_leaf( + &mut self, + address: B256, + slot: &Nibbles, + ) -> SparseStateTrieResult<()> { + if let Some(storage_trie) = self.storages.get_mut(&address) { + Ok(storage_trie.remove_leaf(slot)?) 
+ } else { + Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 12a0f87e129..0dd6dc989c1 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,4 +1,7 @@ -use crate::{SparseTrieError, SparseTrieResult}; +use crate::{ + blinded::{BlindedProvider, DefaultBlindedProvider}, + SparseTrieError, SparseTrieResult, +}; use alloy_primitives::{ hex, keccak256, map::{HashMap, HashSet}, @@ -16,28 +19,53 @@ use std::{borrow::Cow, fmt}; /// Inner representation of the sparse trie. /// Sparse trie is blind by default until nodes are revealed. -#[derive(PartialEq, Eq, Default, Debug)] -pub enum SparseTrie { +#[derive(PartialEq, Eq, Debug)] +pub enum SparseTrie

{ /// None of the trie nodes are known. - #[default] Blind, /// The trie nodes have been revealed. - Revealed(Box), + Revealed(Box>), +} + +impl

Default for SparseTrie

{ + fn default() -> Self { + Self::Blind + } } impl SparseTrie { + /// Creates new blind trie. + pub const fn blind() -> Self { + Self::Blind + } + /// Creates new revealed empty trie. pub fn revealed_empty() -> Self { Self::Revealed(Box::default()) } + /// Reveals the root node if the trie is blinded. + /// + /// # Returns + /// + /// Mutable reference to [`RevealedSparseTrie`]. + pub fn reveal_root( + &mut self, + root: TrieNode, + retain_updates: bool, + ) -> SparseTrieResult<&mut RevealedSparseTrie> { + self.reveal_root_with_provider(Default::default(), root, retain_updates) + } +} + +impl

SparseTrie

{ /// Returns `true` if the sparse trie has no revealed nodes. pub const fn is_blind(&self) -> bool { matches!(self, Self::Blind) } /// Returns mutable reference to revealed sparse trie if the trie is not blind. - pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie> { + pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie

> { if let Self::Revealed(revealed) = self { Some(revealed) } else { @@ -50,13 +78,18 @@ impl SparseTrie { /// # Returns /// /// Mutable reference to [`RevealedSparseTrie`]. - pub fn reveal_root( + pub fn reveal_root_with_provider( &mut self, + provider: P, root: TrieNode, retain_updates: bool, - ) -> SparseTrieResult<&mut RevealedSparseTrie> { + ) -> SparseTrieResult<&mut RevealedSparseTrie

> { if self.is_blind() { - *self = Self::Revealed(Box::new(RevealedSparseTrie::from_root(root, retain_updates)?)) + *self = Self::Revealed(Box::new(RevealedSparseTrie::from_provider_and_root( + provider, + root, + retain_updates, + )?)) } Ok(self.as_revealed_mut().unwrap()) } @@ -68,13 +101,6 @@ impl SparseTrie { Ok(()) } - /// Remove the leaf node. - pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { - let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; - revealed.remove_leaf(path)?; - Ok(()) - } - /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. pub fn wipe(&mut self) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; @@ -93,6 +119,19 @@ impl SparseTrie { } } +impl

SparseTrie

+where + P: BlindedProvider, + SparseTrieError: From, +{ + /// Remove the leaf node. + pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.remove_leaf(path)?; + Ok(()) + } +} + /// The representation of revealed sparse trie. /// /// ## Invariants @@ -102,27 +141,29 @@ impl SparseTrie { /// The opposite is also true. /// - All keys in `values` collection are full leaf paths. #[derive(Clone, PartialEq, Eq)] -pub struct RevealedSparseTrie { +pub struct RevealedSparseTrie

{ + /// Blinded node provider. + provider: P, /// All trie nodes. nodes: HashMap, /// All leaf values. values: HashMap>, /// Prefix set. prefix_set: PrefixSetMut, - /// Reusable buffer for RLP encoding of nodes. - rlp_buf: Vec, /// Retained trie updates. updates: Option, + /// Reusable buffer for RLP encoding of nodes. + rlp_buf: Vec, } -impl fmt::Debug for RevealedSparseTrie { +impl

fmt::Debug for RevealedSparseTrie

{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RevealedSparseTrie") .field("nodes", &self.nodes) .field("values", &self.values) .field("prefix_set", &self.prefix_set) - .field("rlp_buf", &hex::encode(&self.rlp_buf)) .field("updates", &self.updates) + .field("rlp_buf", &hex::encode(&self.rlp_buf)) .finish() } } @@ -130,11 +171,12 @@ impl fmt::Debug for RevealedSparseTrie { impl Default for RevealedSparseTrie { fn default() -> Self { Self { + provider: Default::default(), nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), values: HashMap::default(), prefix_set: PrefixSetMut::default(), - rlp_buf: Vec::new(), updates: None, + rlp_buf: Vec::new(), } } } @@ -143,6 +185,28 @@ impl RevealedSparseTrie { /// Create new revealed sparse trie from the given root node. pub fn from_root(node: TrieNode, retain_updates: bool) -> SparseTrieResult { let mut this = Self { + provider: Default::default(), + nodes: HashMap::default(), + values: HashMap::default(), + prefix_set: PrefixSetMut::default(), + rlp_buf: Vec::new(), + updates: None, + } + .with_updates(retain_updates); + this.reveal_node(Nibbles::default(), node)?; + Ok(this) + } +} + +impl

RevealedSparseTrie

{ + /// Create new revealed sparse trie from the given root node. + pub fn from_provider_and_root( + provider: P, + node: TrieNode, + retain_updates: bool, + ) -> SparseTrieResult { + let mut this = Self { + provider, nodes: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), @@ -154,6 +218,18 @@ impl RevealedSparseTrie { Ok(this) } + /// Set new blinded node provider on sparse trie. + pub fn with_provider(self, provider: BP) -> RevealedSparseTrie { + RevealedSparseTrie { + provider, + nodes: self.nodes, + values: self.values, + prefix_set: self.prefix_set, + updates: self.updates, + rlp_buf: self.rlp_buf, + } + } + /// Set the retention of branch node updates and deletions. pub fn with_updates(mut self, retain_updates: bool) -> Self { if retain_updates { @@ -357,176 +433,6 @@ impl RevealedSparseTrie { Ok(()) } - /// Remove leaf node from the trie. - pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { - self.prefix_set.insert(path.clone()); - self.values.remove(path); - - // If the path wasn't present in `values`, we still need to walk the trie and ensure that - // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry - // in `nodes`, but not in the `values`. - - // If the path wasn't present in `values`, we still need to walk the trie and ensure that - // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry - // in `nodes`, but not in the `values`. - - let mut removed_nodes = self.take_nodes_for_path(path)?; - trace!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); - // Pop the first node from the stack which is the leaf node we want to remove. - let mut child = removed_nodes.pop().expect("leaf exists"); - #[cfg(debug_assertions)] - { - let mut child_path = child.path.clone(); - let SparseNode::Leaf { key, .. 
} = &child.node else { panic!("expected leaf node") }; - child_path.extend_from_slice_unchecked(key); - assert_eq!(&child_path, path); - } - - // If we don't have any other removed nodes, insert an empty node at the root. - if removed_nodes.is_empty() { - debug_assert!(self.nodes.is_empty()); - self.nodes.insert(Nibbles::default(), SparseNode::Empty); - - return Ok(()) - } - - // Walk the stack of removed nodes from the back and re-insert them back into the trie, - // adjusting the node type as needed. - while let Some(removed_node) = removed_nodes.pop() { - let removed_path = removed_node.path; - - let new_node = match &removed_node.node { - SparseNode::Empty => return Err(SparseTrieError::Blind), - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { path: removed_path, hash: *hash }) - } - SparseNode::Leaf { .. } => { - unreachable!("we already popped the leaf node") - } - SparseNode::Extension { key, .. } => { - // If the node is an extension node, we need to look at its child to see if we - // need to merge them. - match &child.node { - SparseNode::Empty => return Err(SparseTrieError::Blind), - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { - path: child.path, - hash: *hash, - }) - } - // For a leaf node, we collapse the extension node into a leaf node, - // extending the key. While it's impossible to encounter an extension node - // followed by a leaf node in a complete trie, it's possible here because we - // could have downgraded the extension node's child into a leaf node from - // another node type. - SparseNode::Leaf { key: leaf_key, .. } => { - self.nodes.remove(&child.path); - - let mut new_key = key.clone(); - new_key.extend_from_slice_unchecked(leaf_key); - SparseNode::new_leaf(new_key) - } - // For an extension node, we collapse them into one extension node, - // extending the key - SparseNode::Extension { key: extension_key, .. 
} => { - self.nodes.remove(&child.path); - - let mut new_key = key.clone(); - new_key.extend_from_slice_unchecked(extension_key); - SparseNode::new_ext(new_key) - } - // For a branch node, we just leave the extension node as-is. - SparseNode::Branch { .. } => removed_node.node, - } - } - SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => { - // If the node is a branch node, we need to check the number of children left - // after deleting the child at the given nibble. - - if let Some(removed_nibble) = removed_node.unset_branch_nibble { - state_mask.unset_bit(removed_nibble); - } - - // If only one child is left set in the branch node, we need to collapse it. - if state_mask.count_bits() == 1 { - let child_nibble = - state_mask.first_set_bit_index().expect("state mask is not empty"); - - // Get full path of the only child node left. - let mut child_path = removed_path.clone(); - child_path.push_unchecked(child_nibble); - - // Remove the only child node. - let child = self.nodes.get(&child_path).unwrap(); - - trace!(target: "trie::sparse", ?removed_path, ?child_path, ?child, "Branch node has only one child"); - - let mut delete_child = false; - let new_node = match child { - SparseNode::Empty => return Err(SparseTrieError::Blind), - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { - path: child_path, - hash: *hash, - }) - } - // If the only child is a leaf node, we downgrade the branch node into a - // leaf node, prepending the nibble to the key, and delete the old - // child. - SparseNode::Leaf { key, .. } => { - delete_child = true; - - let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); - new_key.extend_from_slice_unchecked(key); - SparseNode::new_leaf(new_key) - } - // If the only child node is an extension node, we downgrade the branch - // node into an even longer extension node, prepending the nibble to the - // key, and delete the old child. - SparseNode::Extension { key, .. 
} => { - delete_child = true; - - let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); - new_key.extend_from_slice_unchecked(key); - SparseNode::new_ext(new_key) - } - // If the only child is a branch node, we downgrade the current branch - // node into a one-nibble extension node. - SparseNode::Branch { .. } => { - SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble])) - } - }; - - if delete_child { - self.nodes.remove(&child_path); - } - - if let Some(updates) = self.updates.as_mut() { - updates.removed_nodes.insert(removed_path.clone()); - } - - new_node - } - // If more than one child is left set in the branch, we just re-insert it - // as-is. - else { - SparseNode::new_branch(state_mask) - } - } - }; - - child = RemovedSparseNode { - path: removed_path.clone(), - node: new_node.clone(), - unset_branch_nibble: None, - }; - trace!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); - self.nodes.insert(removed_path, new_node); - } - - Ok(()) - } - /// Traverse trie nodes down to the leaf node and collect all nodes along the path. fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult> { let mut current = Nibbles::default(); // Start traversal from the root @@ -621,10 +527,10 @@ impl RevealedSparseTrie { /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. pub fn wipe(&mut self) { - let updates_retained = self.updates.is_some(); - *self = Self::default(); + self.nodes = HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]); + self.values = HashMap::default(); self.prefix_set = PrefixSetMut::all(); - self.updates = updates_retained.then(SparseTrieUpdates::wiped); + self.updates = self.updates.is_some().then(SparseTrieUpdates::wiped); } /// Return the root of the sparse trie. @@ -901,6 +807,191 @@ impl RevealedSparseTrie { } } +impl

RevealedSparseTrie

+where + P: BlindedProvider, + SparseTrieError: From, +{ + /// Remove leaf node from the trie. + pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + self.prefix_set.insert(path.clone()); + self.values.remove(path); + + // If the path wasn't present in `values`, we still need to walk the trie and ensure that + // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry + // in `nodes`, but not in the `values`. + + // If the path wasn't present in `values`, we still need to walk the trie and ensure that + // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry + // in `nodes`, but not in the `values`. + + let mut removed_nodes = self.take_nodes_for_path(path)?; + trace!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); + // Pop the first node from the stack which is the leaf node we want to remove. + let mut child = removed_nodes.pop().expect("leaf exists"); + #[cfg(debug_assertions)] + { + let mut child_path = child.path.clone(); + let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") }; + child_path.extend_from_slice_unchecked(key); + assert_eq!(&child_path, path); + } + + // If we don't have any other removed nodes, insert an empty node at the root. + if removed_nodes.is_empty() { + debug_assert!(self.nodes.is_empty()); + self.nodes.insert(Nibbles::default(), SparseNode::Empty); + + return Ok(()) + } + + // Walk the stack of removed nodes from the back and re-insert them back into the trie, + // adjusting the node type as needed. + while let Some(removed_node) = removed_nodes.pop() { + let removed_path = removed_node.path; + + let new_node = match &removed_node.node { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { path: removed_path, hash: *hash }) + } + SparseNode::Leaf { .. 
} => { + unreachable!("we already popped the leaf node") + } + SparseNode::Extension { key, .. } => { + // If the node is an extension node, we need to look at its child to see if we + // need to merge them. + match &child.node { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { + path: child.path, + hash: *hash, + }) + } + // For a leaf node, we collapse the extension node into a leaf node, + // extending the key. While it's impossible to encounter an extension node + // followed by a leaf node in a complete trie, it's possible here because we + // could have downgraded the extension node's child into a leaf node from + // another node type. + SparseNode::Leaf { key: leaf_key, .. } => { + self.nodes.remove(&child.path); + + let mut new_key = key.clone(); + new_key.extend_from_slice_unchecked(leaf_key); + SparseNode::new_leaf(new_key) + } + // For an extension node, we collapse them into one extension node, + // extending the key + SparseNode::Extension { key: extension_key, .. } => { + self.nodes.remove(&child.path); + + let mut new_key = key.clone(); + new_key.extend_from_slice_unchecked(extension_key); + SparseNode::new_ext(new_key) + } + // For a branch node, we just leave the extension node as-is. + SparseNode::Branch { .. } => removed_node.node, + } + } + SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => { + // If the node is a branch node, we need to check the number of children left + // after deleting the child at the given nibble. + + if let Some(removed_nibble) = removed_node.unset_branch_nibble { + state_mask.unset_bit(removed_nibble); + } + + // If only one child is left set in the branch node, we need to collapse it. + if state_mask.count_bits() == 1 { + let child_nibble = + state_mask.first_set_bit_index().expect("state mask is not empty"); + + // Get full path of the only child node left. 
+ let mut child_path = removed_path.clone(); + child_path.push_unchecked(child_nibble); + + trace!(target: "trie::sparse", ?removed_path, ?child_path, ?child, "Branch node has only one child"); + + if self.nodes.get(&child_path).unwrap().is_hash() { + trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child"); + if let Some(node) = self.provider.blinded_node(child_path.clone())? { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!(target: "trie::sparse", ?child_path, ?decoded, "Revealing remaining blinded branch child"); + self.reveal_node(child_path.clone(), decoded)?; + } + } + + // Get the only child node. + let child = self.nodes.get(&child_path).unwrap(); + + let mut delete_child = false; + let new_node = match child { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { + path: child_path, + hash: *hash, + }) + } + // If the only child is a leaf node, we downgrade the branch node into a + // leaf node, prepending the nibble to the key, and delete the old + // child. + SparseNode::Leaf { key, .. } => { + delete_child = true; + + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend_from_slice_unchecked(key); + SparseNode::new_leaf(new_key) + } + // If the only child node is an extension node, we downgrade the branch + // node into an even longer extension node, prepending the nibble to the + // key, and delete the old child. + SparseNode::Extension { key, .. } => { + delete_child = true; + + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend_from_slice_unchecked(key); + SparseNode::new_ext(new_key) + } + // If the only child is a branch node, we downgrade the current branch + // node into a one-nibble extension node. + SparseNode::Branch { .. 
} => { + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble])) + } + }; + + if delete_child { + self.nodes.remove(&child_path); + } + + if let Some(updates) = self.updates.as_mut() { + updates.removed_nodes.insert(removed_path.clone()); + } + + new_node + } + // If more than one child is left set in the branch, we just re-insert it + // as-is. + else { + SparseNode::new_branch(state_mask) + } + } + }; + + child = RemovedSparseNode { + path: removed_path.clone(), + node: new_node.clone(), + unset_branch_nibble: None, + }; + trace!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); + self.nodes.insert(removed_path, new_node); + } + + Ok(()) + } +} + /// Enum representing sparse trie node type. #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum SparseNodeType { @@ -1007,6 +1098,11 @@ impl SparseNode { pub const fn new_leaf(key: Nibbles) -> Self { Self::Leaf { key, hash: None } } + + /// Returns `true` if the node is a hash node. + pub const fn is_hash(&self) -> bool { + matches!(self, Self::Hash(_)) + } } #[derive(Debug)] @@ -1190,7 +1286,7 @@ mod tests { #[test] fn sparse_trie_is_blind() { - assert!(SparseTrie::default().is_blind()); + assert!(SparseTrie::blind().is_blind()); assert!(!SparseTrie::revealed_empty().is_blind()); } From 1134c5d61ac4e8d596b2eaa41b5d569eb39d0ee3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 11:12:31 +0100 Subject: [PATCH 807/970] chore: mark ethereum-forks as rv32imac (#13053) --- .github/assets/check_rv32imac.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh index 0112e5cec17..9a66da9fe3b 100755 --- a/.github/assets/check_rv32imac.sh +++ b/.github/assets/check_rv32imac.sh @@ -4,6 +4,7 @@ set +e # Disable immediate exit on error # Array of crates to check crates_to_check=( reth-codecs-derive + reth-ethereum-forks # reth-evm # reth-primitives # reth-primitives-traits From 
4f19fc67b13abe7c4823a806392ef380c5efb357 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 10:15:55 +0000 Subject: [PATCH 808/970] chore(deps): weekly `cargo update` (#13043) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> Co-authored-by: Roman Krasiuk --- Cargo.lock | 349 +++++++++++++++++++------------------- crates/tracing/Cargo.toml | 2 +- 2 files changed, 176 insertions(+), 175 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f68f71bffb..0116e251cca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -166,9 +166,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2364c782a245cf8725ea6dbfca5f530162702b5d685992ea03ce64529136cc" +checksum = "80759b3f57b3b20fa7cd8fef6479930fc95461b58ff8adea6e87e618449c8a1d" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -197,9 +197,9 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6cee6a35793f3db8a5ffe60e86c695f321d081a567211245f503e8c498fce8" +checksum = "4c986539255fb839d1533c128e190e557e52ff652c9ef62939e233a81dd93f7e" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -245,9 +245,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84c506bf264110fa7e90d9924f742f40ef53c6572ea56a0b0bd714a567ed389" +checksum = "ac4b22b3e51cac09fd2adfcc73b55f447b4df669f983c13f7894ec82b607c63f" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -326,9 +326,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9fce5dbd6a4f118eecc4719eaa9c7ffc31c315e6c5ccde3642db927802312425" +checksum = "9db948902dfbae96a73c2fbf1f7abec62af034ab883e4c777c3fd29702bd6e2c" dependencies = [ "alloy-rlp", "arbitrary", @@ -339,7 +339,7 @@ dependencies = [ "derive_more 1.0.0", "foldhash", "getrandom 0.2.15", - "hashbrown 0.15.1", + "hashbrown 0.15.2", "hex-literal", "indexmap 2.6.0", "itoa", @@ -350,7 +350,7 @@ dependencies = [ "proptest-derive", "rand 0.8.5", "ruint", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "sha3", "tiny-keccak", @@ -435,7 +435,7 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -665,23 +665,23 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9343289b4a7461ed8bab8618504c995c049c082b70c7332efd7b32125633dc05" +checksum = "3bfd7853b65a2b4f49629ec975fee274faf6dff15ab8894c620943398ef283c0" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4222d70bec485ceccc5d8fd4f2909edd65b5d5e43d4aca0b5dcee65d519ae98f" +checksum = "82ec42f342d9a9261699f8078e57a7a4fda8aaa73c1a212ed3987080e6a9cd13" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -690,31 +690,31 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e17f2677369571b976e51ea1430eb41c3690d344fef567b840bfc0b01b6f83a" +checksum = 
"ed2c50e6a62ee2b4f7ab3c6d0366e5770a21cad426e109c2f40335a1b3aff3df" dependencies = [ "const-hex", "dunce", "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa64d80ae58ffaafdff9d5d84f58d03775f66c84433916dc9a64ed16af5755da" +checksum = "ac17c6e89a50fb4a758012e4b409d9a0ba575228e69b539fe37d7a1bd507ca4a" dependencies = [ "serde", "winnow", @@ -722,9 +722,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6520d427d4a8eb7aa803d852d7a52ceb0c519e784c292f64bb339e636918cf27" +checksum = "c9dc0fffe397aa17628160e16b89f704098bf3c9d74d5d369ebc239575936de5" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -912,7 +912,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1135,7 +1135,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1146,7 +1146,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1184,7 +1184,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1290,7 +1290,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1387,7 +1387,7 @@ dependencies = [ "boa_macros", "indexmap 2.6.0", "num-bigint", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", ] [[package]] @@ -1423,7 +1423,7 @@ dependencies = [ "portable-atomic", "rand 0.8.5", "regress", - "rustc-hash 2.0.0", + "rustc-hash 
2.1.0", "ryu-js", "serde", "serde_json", @@ -1460,7 +1460,7 @@ dependencies = [ "indexmap 2.6.0", "once_cell", "phf", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "static_assertions", ] @@ -1472,7 +1472,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -1492,7 +1492,7 @@ dependencies = [ "num-bigint", "num-traits", "regress", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", ] [[package]] @@ -1509,7 +1509,7 @@ checksum = "ae85205289bab1f2c7c8a30ddf0541cf89ba2ff7dbd144feef50bbfa664288d4" dependencies = [ "fast-float", "paste", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "sptr", "static_assertions", ] @@ -1594,7 +1594,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1605,9 +1605,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" dependencies = [ "serde", ] @@ -1638,9 +1638,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" dependencies = [ "serde", ] @@ -1682,9 +1682,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" +checksum = 
"f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" dependencies = [ "jobserver", "libc", @@ -1812,7 +1812,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1946,9 +1946,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487981fa1af147182687064d0a2c336586d337a606595ced9ffb0c685c250c73" +checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" dependencies = [ "cfg-if", "cpufeatures", @@ -2156,7 +2156,7 @@ checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ "bitflags 2.6.0", "crossterm_winapi", - "mio 1.0.2", + "mio 1.0.3", "parking_lot", "rustix", "signal-hook", @@ -2266,7 +2266,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2290,7 +2290,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2301,7 +2301,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2423,7 +2423,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2434,7 +2434,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2455,7 +2455,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "unicode-xid", ] @@ -2569,7 +2569,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] 
[[package]] @@ -2719,7 +2719,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2730,7 +2730,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2741,12 +2741,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2787,7 +2787,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3348,7 +3348,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3584,9 +3584,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.1" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ "allocator-api2", "equivalent", @@ -3732,9 +3732,9 @@ dependencies = [ [[package]] name = "http-range-header" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a397c49fec283e3d6211adbe480be95aae5f304cfb923e9970e08956d5168a" +checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c" [[package]] name = "http-types" @@ -3874,7 +3874,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4024,7 +4024,7 @@ 
checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4091,7 +4091,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4138,7 +4138,7 @@ checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.1", + "hashbrown 0.15.2", "serde", ] @@ -4213,7 +4213,7 @@ dependencies = [ "pretty_assertions", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4314,9 +4314,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jni" @@ -4349,10 +4349,11 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "a865e038f7f6ed956f788f0d7d60c541fff74c7bd74272c5d4cf15c63743e705" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -4416,7 +4417,7 @@ dependencies = [ "parking_lot", "pin-project", "rand 0.8.5", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "serde_json", "thiserror 1.0.69", @@ -4461,7 +4462,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4621,15 +4622,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.165" +version = "0.2.167" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb4d3d38eab6c5239a362fa8bae48c03baf980a6e7079f063942d563ef3533e" +checksum = 
"09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -4794,7 +4795,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.1", + "hashbrown 0.15.2", ] [[package]] @@ -4862,9 +4863,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae428771d17306715c5091d446327d1cfdedc82185c65ba8423ab404e45bf10" +checksum = "7a7deb012b3b2767169ff203fadb4c6b0b82b947512e5eb9e0b78c2e186ad9e3" dependencies = [ "ahash", "portable-atomic", @@ -4879,7 +4880,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4898,9 +4899,9 @@ dependencies = [ [[package]] name = "metrics-process" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ca8ecd85575fbb143b2678cb123bb818779391ec0f745b1c4a9dbabadde407" +checksum = "4a82c8add4382f29a122fa64fff1891453ed0f6b2867d971e7d60cb8dfa322ff" dependencies = [ "libc", "libproc", @@ -4920,7 +4921,7 @@ checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.15.1", + "hashbrown 0.15.2", "indexmap 2.6.0", "metrics", "ordered-float", @@ -4993,11 +4994,10 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi 0.3.9", "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", @@ -5027,7 +5027,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5275,7 +5275,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5537,7 +5537,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5652,7 +5652,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5681,7 +5681,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5853,7 +5853,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5904,7 +5904,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -6002,7 +6002,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -6054,7 +6054,7 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "rustls", "socket2", "thiserror 2.0.3", @@ -6072,7 +6072,7 @@ dependencies = [ "getrandom 0.2.15", "rand 0.8.5", "ring", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "rustls", "rustls-pki-types", "slab", @@ -6815,7 +6815,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -6918,7 +6918,7 @@ dependencies = [ 
"reth-storage-errors", "reth-tracing", "reth-trie-common", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "serde_json", "strum", @@ -7498,7 +7498,7 @@ dependencies = [ "once_cell", "proptest", "proptest-derive", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "thiserror 2.0.3", ] @@ -7884,7 +7884,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "reth-transaction-pool", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "schnellru", "secp256k1", "serde", @@ -8751,7 +8751,7 @@ dependencies = [ "reth-testing-utils", "reth-tokio-util", "reth-tracing", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "thiserror 2.0.3", "tokio", "tracing", @@ -9388,7 +9388,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "revm", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "schnellru", "serde", "serde_json", @@ -9712,9 +9712,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4b84ba6e838ceb47b41de5194a60244fac43d9fe03b71dbe8c5a201081d6d1" +checksum = "f81dc953b2244ddd5e7860cb0bb2a790494b898ef321d4aff8e260efab60cc88" dependencies = [ "bytemuck", "byteorder", @@ -9762,7 +9762,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.89", + "syn 2.0.90", "unicode-ident", ] @@ -9811,9 +9811,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" dependencies = [ "rand 0.8.5", ] @@ -9857,9 +9857,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.18" +version = "0.23.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" 
+checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" dependencies = [ "log", "once_cell", @@ -10164,7 +10164,7 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10199,7 +10199,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10250,7 +10250,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10273,7 +10273,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10372,7 +10372,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", - "mio 1.0.2", + "mio 1.0.3", "signal-hook", ] @@ -10467,9 +10467,9 @@ checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -10556,7 +10556,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10580,9 +10580,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.12.1" +version = "12.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d4d73159efebfb389d819fd479afb2dbd57dcb3e3f4b7fcfa0e675f5a46c1cb" +checksum = 
"e5ba5365997a4e375660bed52f5b42766475d5bc8ceb1bb13fea09c469ea0f49" dependencies = [ "debugid", "memmap2", @@ -10592,9 +10592,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.1" +version = "12.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a767859f6549c665011970874c3f541838b4835d5aaaa493d3ee383918be9f10" +checksum = "beff338b2788519120f38c59ff4bb15174f52a183e547bac3d6072c2c0aa48aa" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -10614,9 +10614,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.89" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -10625,14 +10625,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f76fe0a3e1476bdaa0775b9aec5b869ed9520c2b2fedfe9c6df3618f8ea6290b" +checksum = "da0523f59468a2696391f2a772edc089342aacd53c3caa2ac3264e598edf119b" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10658,7 +10658,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10735,7 +10735,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10783,7 +10783,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10794,7 +10794,7 @@ checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] 
[[package]] @@ -10934,7 +10934,7 @@ dependencies = [ "backtrace", "bytes", "libc", - "mio 1.0.2", + "mio 1.0.3", "parking_lot", "pin-project-lite", "signal-hook-registry", @@ -10951,7 +10951,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11122,9 +11122,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -11146,20 +11146,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -11177,9 +11177,9 @@ dependencies = [ [[package]] name = "tracing-journald" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba316a74e8fc3c3896a850dba2375928a9fa171b085ecddfc7c054d39970f3fd" +checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657" dependencies = [ "libc", "tracing-core", @@ -11211,9 +11211,9 @@ dependencies = [ [[package]] name = 
"tracing-serde" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" dependencies = [ "serde", "tracing-core", @@ -11221,9 +11221,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -11550,7 +11550,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11601,9 +11601,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "d15e63b4482863c109d70a7b8706c1e364eb6ea449b201a76c5b89cedcec2d5c" dependencies = [ "cfg-if", "once_cell", @@ -11612,36 +11612,37 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "8d36ef12e3aaca16ddd3f67922bc63e48e953f126de60bd33ccc0101ef9998cd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "9dfaf8f50e5f293737ee323940c7d8b08a66a95a419223d9f41610ca08b0833d" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "705440e08b42d3e4b36de7d66c944be628d579796b8090bfa3471478a2260051" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -11649,22 +11650,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "98c9ae5a76e46f4deecd0f0255cc223cfa18dc9b261213b8aa0c7b36f61b3f1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "6ee99da9c5ba11bd675621338ef6fa52296b76b83305e9b6e5c77d4c286d6d49" [[package]] name = "wasm-streams" @@ -11695,9 +11696,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.72" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "a98bc3c33f0fe7e59ad7cd041b89034fa82a7c2d4365ca538dda6cdaf513863c" dependencies = [ "js-sys", "wasm-bindgen", @@ -11821,7 +11822,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11832,7 +11833,7 @@ checksum = 
"2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11843,7 +11844,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11854,7 +11855,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -12129,7 +12130,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -12151,7 +12152,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -12171,7 +12172,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -12192,7 +12193,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -12214,7 +12215,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index 59631365d60..d944b5eeeb6 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] tracing.workspace = true -tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt", "json"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt", "ansi", "json"] } tracing-appender.workspace = true 
tracing-journald = "0.3" tracing-logfmt = "0.3.3" From cccbd90e7b09163a14971a26ced9bba179267eed Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 2 Dec 2024 11:09:03 +0000 Subject: [PATCH 809/970] docs(trie): duplicate comment in sparse trie (#13056) --- crates/trie/sparse/src/trie.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 0dd6dc989c1..dd609a77c99 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -821,10 +821,6 @@ where // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry // in `nodes`, but not in the `values`. - // If the path wasn't present in `values`, we still need to walk the trie and ensure that - // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry - // in `nodes`, but not in the `values`. - let mut removed_nodes = self.take_nodes_for_path(path)?; trace!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); // Pop the first node from the stack which is the leaf node we want to remove. 
From 3855519a5083c4cb3631e4dcc7505e79cda2aba0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 12:19:20 +0100 Subject: [PATCH 810/970] chore: disable revm primitives workspace default features (#13055) --- Cargo.toml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d0f32114944..001a66bc4f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -424,9 +424,7 @@ reth-trie-sparse = { path = "crates/trie/sparse" } # revm revm = { version = "18.0.0", features = ["std"], default-features = false } revm-inspectors = "0.12.0" -revm-primitives = { version = "14.0.0", features = [ - "std", -], default-features = false } +revm-primitives = { version = "14.0.0", default-features = false } # eth alloy-chains = { version = "0.1.32", default-features = false } From 1b34f3e78d5fcd01a3701645b4a8364c7f2c5a2f Mon Sep 17 00:00:00 2001 From: Querty <98064975+Quertyy@users.noreply.github.com> Date: Mon, 2 Dec 2024 20:19:34 +0900 Subject: [PATCH 811/970] feat(rpc): enhance `eth_getLogs` error handling with block range feedback (#12790) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/eth/filter.rs | 38 +++++++++++++++++++------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 132d99a5c1a..200afadaa2e 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,14 +1,5 @@ //! 
`eth_` `Filter` RPC handler implementation -use std::{ - collections::HashMap, - fmt, - iter::StepBy, - ops::RangeInclusive, - sync::Arc, - time::{Duration, Instant}, -}; - use alloy_primitives::TxHash; use alloy_rpc_types_eth::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, FilteredParams, Log, @@ -30,6 +21,14 @@ use reth_rpc_server_types::{result::rpc_error_with_code, ToRpcResult}; use reth_rpc_types_compat::transaction::from_recovered; use reth_tasks::TaskSpawner; use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool}; +use std::{ + collections::HashMap, + fmt, + iter::StepBy, + ops::RangeInclusive, + sync::Arc, + time::{Duration, Instant}, +}; use tokio::{ sync::{mpsc::Receiver, Mutex}, time::MissedTickBehavior, @@ -517,9 +516,11 @@ where // logs of a single block let is_multi_block_range = from_block != to_block; if is_multi_block_range && all_logs.len() > self.max_logs_per_response { - return Err(EthFilterError::QueryExceedsMaxResults( - self.max_logs_per_response, - )) + return Err(EthFilterError::QueryExceedsMaxResults { + max_logs: self.max_logs_per_response, + from_block, + to_block: num_hash.number.saturating_sub(1), + }); } } } @@ -724,8 +725,15 @@ pub enum EthFilterError { #[error("query exceeds max block range {0}")] QueryExceedsMaxBlocks(u64), /// Query result is too large. - #[error("query exceeds max results {0}")] - QueryExceedsMaxResults(usize), + #[error("query exceeds max results {max_logs}, retry with the range {from_block}-{to_block}")] + QueryExceedsMaxResults { + /// Maximum number of logs allowed per response + max_logs: usize, + /// Start block of the suggested retry range + from_block: u64, + /// End block of the suggested retry range (last successfully processed block) + to_block: u64, + }, /// Error serving request in `eth_` namespace. 
#[error(transparent)] EthAPIError(#[from] EthApiError), @@ -747,7 +755,7 @@ impl From for jsonrpsee::types::error::ErrorObject<'static> { EthFilterError::EthAPIError(err) => err.into(), err @ (EthFilterError::InvalidBlockRangeParams | EthFilterError::QueryExceedsMaxBlocks(_) | - EthFilterError::QueryExceedsMaxResults(_)) => { + EthFilterError::QueryExceedsMaxResults { .. }) => { rpc_error_with_code(jsonrpsee::types::error::INVALID_PARAMS_CODE, err.to_string()) } } From 088461166831ee87083d2b82f58bda9595ea55b8 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 2 Dec 2024 12:25:40 +0100 Subject: [PATCH 812/970] chore(trie): remove infallible variant (#13057) --- crates/trie/sparse/src/blinded.rs | 3 +-- crates/trie/sparse/src/errors.rs | 8 +------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/blinded.rs index 3875e819da6..592f5e7bcd0 100644 --- a/crates/trie/sparse/src/blinded.rs +++ b/crates/trie/sparse/src/blinded.rs @@ -3,7 +3,6 @@ use crate::SparseTrieError; use alloy_primitives::Bytes; use reth_trie_common::Nibbles; -use std::convert::Infallible; /// Factory for instantiating blinded node providers. pub trait BlindedProviderFactory { @@ -50,7 +49,7 @@ impl BlindedProviderFactory for DefaultBlindedProviderFactory { pub struct DefaultBlindedProvider; impl BlindedProvider for DefaultBlindedProvider { - type Error = Infallible; + type Error = SparseTrieError; fn blinded_node(&mut self, _path: Nibbles) -> Result, Self::Error> { Ok(None) diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs index ca3b279ce42..20545957e1c 100644 --- a/crates/trie/sparse/src/errors.rs +++ b/crates/trie/sparse/src/errors.rs @@ -1,13 +1,10 @@ //! Errors for sparse trie. 
-use std::convert::Infallible; - +use crate::SparseNode; use alloy_primitives::{Bytes, B256}; use reth_trie_common::Nibbles; use thiserror::Error; -use crate::SparseNode; - /// Result type with [`SparseStateTrieError`] as error. pub type SparseStateTrieResult = Result; @@ -58,7 +55,4 @@ pub enum SparseTrieError { /// RLP error. #[error(transparent)] Rlp(#[from] alloy_rlp::Error), - /// Infallible. - #[error(transparent)] - Infallible(#[from] Infallible), } From 8d5668a23ac9b4ec828f8b78e518b6d4858d9499 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 12:29:54 +0100 Subject: [PATCH 813/970] chore: disable reth-ethereum-forks default features in workspace (#13054) --- Cargo.lock | 1 + Cargo.toml | 2 +- crates/chainspec/Cargo.toml | 3 +- crates/ethereum/evm/Cargo.toml | 3 +- crates/evm/Cargo.toml | 23 ++++++++------- crates/net/discv4/Cargo.toml | 3 +- crates/net/dns/Cargo.toml | 3 +- crates/net/eth-wire-types/Cargo.toml | 39 +++++++++++++------------ crates/net/eth-wire-types/src/status.rs | 2 +- crates/net/eth-wire/Cargo.toml | 1 + crates/net/network-api/Cargo.toml | 3 +- crates/net/network-types/Cargo.toml | 6 +++- crates/net/network/Cargo.toml | 1 + crates/optimism/evm/Cargo.toml | 3 +- crates/optimism/hardforks/Cargo.toml | 6 ++-- crates/primitives/Cargo.toml | 3 +- crates/revm/Cargo.toml | 26 +++++++++-------- 17 files changed, 75 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0116e251cca..9a3521dccb2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7431,6 +7431,7 @@ dependencies = [ "rand 0.8.5", "reth-chainspec", "reth-codecs-derive", + "reth-ethereum-forks", "reth-primitives", "reth-primitives-traits", "serde", diff --git a/Cargo.toml b/Cargo.toml index 001a66bc4f8..521cfd88f8a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -339,7 +339,7 @@ reth-eth-wire-types = { path = "crates/net/eth-wire-types" } reth-ethereum-cli = { path = "crates/ethereum/cli" } reth-ethereum-consensus = { path = "crates/ethereum/consensus" } 
reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } -reth-ethereum-forks = { path = "crates/ethereum-forks" } +reth-ethereum-forks = { path = "crates/ethereum-forks", default-features = false } reth-ethereum-payload-builder = { path = "crates/ethereum/payload" } reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index 58b2f62b1a6..e5f6c058c62 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -48,7 +48,8 @@ std = [ "reth-primitives-traits/std", "alloy-consensus/std", "once_cell/std", - "alloy-rlp/std" + "alloy-rlp/std", + "reth-ethereum-forks/std" ] arbitrary = [ "alloy-chains/arbitrary", diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 17e870e6111..4ee07259918 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -50,5 +50,6 @@ std = [ "alloy-genesis/std", "alloy-primitives/std", "revm-primitives/std", - "secp256k1/std" + "secp256k1/std", + "reth-ethereum-forks/std" ] diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 9d6a616af98..fe5505b52bd 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -46,17 +46,18 @@ metrics-util = { workspace = true, features = ["debugging"] } [features] default = ["std"] std = [ - "dep:metrics", - "dep:reth-metrics", - "reth-consensus/std", - "reth-primitives/std", - "reth-primitives-traits/std", - "reth-revm/std", - "alloy-eips/std", - "alloy-primitives/std", - "alloy-consensus/std", - "revm-primitives/std", - "revm/std", + "dep:metrics", + "dep:reth-metrics", + "reth-consensus/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "reth-revm/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-consensus/std", + "revm-primitives/std", + "revm/std", + "reth-ethereum-forks/std" ] test-utils = [ "dep:parking_lot", diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml 
index 1030825a91d..70946c6dce8 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -60,6 +60,7 @@ serde = [ "generic-array/serde", "parking_lot/serde", "rand?/serde", - "secp256k1/serde" + "secp256k1/serde", + "reth-ethereum-forks/serde" ] test-utils = ["dep:rand"] diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index a52f6505744..2f71354a7dd 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -58,5 +58,6 @@ serde = [ "parking_lot/serde", "rand/serde", "secp256k1/serde", - "trust-dns-resolver/serde" + "trust-dns-resolver/serde", + "reth-ethereum-forks/serde" ] diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 8b89603167d..1fe97f236de 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -17,6 +17,7 @@ reth-chainspec.workspace = true reth-codecs-derive.workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true +reth-ethereum-forks.workspace = true # ethereum alloy-chains = { workspace = true, features = ["rlp"] } @@ -46,24 +47,26 @@ rand.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", - "alloy-chains/arbitrary", - "dep:arbitrary", - "dep:proptest", - "dep:proptest-arbitrary-interop", - "reth-chainspec/arbitrary", - "alloy-consensus/arbitrary", - "alloy-eips/arbitrary", - "alloy-primitives/arbitrary", - "reth-primitives-traits/arbitrary", + "reth-primitives/arbitrary", + "alloy-chains/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-ethereum-forks/arbitrary" ] serde = [ - "dep:serde", - "alloy-chains/serde", - "alloy-consensus/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "bytes/serde", - "rand/serde", - "reth-primitives-traits/serde", + 
"dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "reth-primitives-traits/serde", + "reth-ethereum-forks/serde" ] diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index fa73d0907fe..e19912481e4 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -4,7 +4,7 @@ use alloy_primitives::{hex, B256, U256}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_chainspec::{EthChainSpec, Hardforks, MAINNET}; use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{EthereumHardfork, ForkId, Head}; +use reth_ethereum_forks::{EthereumHardfork, ForkId, Head}; use std::fmt::{Debug, Display}; /// The status message is used in the eth protocol handshake to ensure that peers are on the same diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index ffbd3017fa6..3dd632de5c0 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -89,6 +89,7 @@ serde = [ "reth-codecs/serde", "alloy-chains/serde", "reth-primitives-traits/serde", + "reth-ethereum-forks/serde" ] [[test]] diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 6d410e9db23..efb0257fc8e 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -45,5 +45,6 @@ serde = [ "reth-eth-wire-types/serde", "reth-network-types/serde", "alloy-primitives/serde", - "enr/serde" + "enr/serde", + "reth-ethereum-forks/serde" ] diff --git a/crates/net/network-types/Cargo.toml b/crates/net/network-types/Cargo.toml index c9b8fdd5bf2..932527b91c6 100644 --- a/crates/net/network-types/Cargo.toml +++ b/crates/net/network-types/Cargo.toml @@ -26,5 +26,9 @@ serde_json = { workspace = true } tracing.workspace = true [features] -serde = ["dep:serde", "dep:humantime-serde"] +serde = [ + "dep:serde", + "dep:humantime-serde", + 
"reth-ethereum-forks/serde" +] test-utils = [] diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index ab9e89c2ca8..a4eff9d3a90 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -123,6 +123,7 @@ serde = [ "smallvec/serde", "url/serde", "reth-primitives-traits/serde", + "reth-ethereum-forks/serde" ] test-utils = [ "dep:reth-provider", diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index d03a1c6490c..149aa2e953b 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -63,7 +63,8 @@ std = [ "alloy-genesis/std", "alloy-primitives/std", "revm-primitives/std", - "revm/std" + "revm/std", + "reth-ethereum-forks/std" ] optimism = [ "reth-primitives/optimism", diff --git a/crates/optimism/hardforks/Cargo.toml b/crates/optimism/hardforks/Cargo.toml index 67a04a8aa5f..1ea23069a68 100644 --- a/crates/optimism/hardforks/Cargo.toml +++ b/crates/optimism/hardforks/Cargo.toml @@ -31,10 +31,12 @@ std = [ "alloy-primitives/std", "once_cell/std", "serde?/std", - "alloy-chains/std" + "alloy-chains/std", + "reth-ethereum-forks/std" ] serde = [ "dep:serde", "alloy-chains/serde", - "alloy-primitives/serde" + "alloy-primitives/serde", + "reth-ethereum-forks/serde" ] diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 7a38e79b1c1..80299a06db6 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -105,7 +105,8 @@ std = [ "serde/std", "alloy-trie/std", "serde_with?/std", - "alloy-rlp/std" + "alloy-rlp/std", + "reth-ethereum-forks/std" ] reth-codec = [ "dep:reth-codecs", diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 95def23a443..cc1c8edcb8d 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -37,12 +37,13 @@ alloy-consensus.workspace = true [features] default = ["std"] std = [ - "reth-primitives/std", - "alloy-primitives/std", - "revm/std", - "alloy-eips/std", - "alloy-consensus/std", - 
"reth-primitives-traits/std", + "reth-primitives/std", + "alloy-primitives/std", + "revm/std", + "alloy-eips/std", + "alloy-consensus/std", + "reth-primitives-traits/std", + "reth-ethereum-forks/std" ] witness = ["dep:reth-trie"] test-utils = [ @@ -54,10 +55,11 @@ test-utils = [ "reth-primitives-traits/test-utils", ] serde = [ - "revm/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "alloy-consensus/serde", - "reth-primitives-traits/serde", - "reth-trie?/serde", + "revm/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "reth-primitives-traits/serde", + "reth-trie?/serde", + "reth-ethereum-forks/serde" ] From df4673ef80dff722666f4597c8c8548afea6a365 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 2 Dec 2024 13:09:39 +0100 Subject: [PATCH 814/970] trie: init storage blinded provider with account (#13058) --- crates/trie/sparse/src/blinded.rs | 6 +++--- crates/trie/sparse/src/state.rs | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/blinded.rs index 592f5e7bcd0..f82bdcd95bb 100644 --- a/crates/trie/sparse/src/blinded.rs +++ b/crates/trie/sparse/src/blinded.rs @@ -1,7 +1,7 @@ //! Traits and default implementations related to retrieval of blinded trie nodes. use crate::SparseTrieError; -use alloy_primitives::Bytes; +use alloy_primitives::{Bytes, B256}; use reth_trie_common::Nibbles; /// Factory for instantiating blinded node providers. @@ -15,7 +15,7 @@ pub trait BlindedProviderFactory { fn account_node_provider(&self) -> Self::AccountNodeProvider; /// Returns blinded storage node provider. - fn storage_node_provider(&self) -> Self::StorageNodeProvider; + fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider; } /// Trie node provider for retrieving blinded nodes. 
@@ -39,7 +39,7 @@ impl BlindedProviderFactory for DefaultBlindedProviderFactory { DefaultBlindedProvider } - fn storage_node_provider(&self) -> Self::StorageNodeProvider { + fn storage_node_provider(&self, _account: B256) -> Self::StorageNodeProvider { DefaultBlindedProvider } } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 877744954ae..0ca290e2d0c 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -129,7 +129,7 @@ impl SparseStateTrie { // Reveal root node if it wasn't already. let trie = self.storages.entry(account).or_default().reveal_root_with_provider( - self.provider_factory.storage_node_provider(), + self.provider_factory.storage_node_provider(account), root_node, self.retain_updates, )?; @@ -179,7 +179,7 @@ impl SparseStateTrie { if let Some(root_node) = self.validate_root_node(&mut storage_nodes)? { // Reveal root node if it wasn't already. let trie = self.storages.entry(account).or_default().reveal_root_with_provider( - self.provider_factory.storage_node_provider(), + self.provider_factory.storage_node_provider(account), root_node, self.retain_updates, )?; From dd055a4615c2e8fc42f63103bb3c3d369349a85d Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 2 Dec 2024 13:11:08 +0100 Subject: [PATCH 815/970] feat(executor): call hook with state changes after post block balance increments (#13050) --- crates/ethereum/evm/src/execute.rs | 78 +++++++++++++++-- crates/evm/src/execute.rs | 131 ++++++++++++++++++++++++++++- crates/optimism/evm/src/execute.rs | 10 ++- 3 files changed, 208 insertions(+), 11 deletions(-) diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 3bfc3cb2ed6..65fbbdd2568 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -13,8 +13,9 @@ use reth_consensus::ConsensusError; use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ - BasicBlockExecutorProvider, 
BlockExecutionError, BlockExecutionStrategy, - BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, ProviderError, + balance_increment_state, BasicBlockExecutorProvider, BlockExecutionError, + BlockExecutionStrategy, BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, + ProviderError, }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, @@ -263,8 +264,11 @@ where } // increment balances self.state - .increment_balances(balance_increments) + .increment_balances(balance_increments.clone()) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + // call state hook with changes due to balance increments. + let balance_state = balance_increment_state(&balance_increments, &mut self.state)?; + self.system_caller.on_state(&balance_state); Ok(requests) } @@ -317,6 +321,7 @@ mod tests { use alloy_eips::{ eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, + eip4895::Withdrawal, eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, eip7685::EMPTY_REQUESTS_HASH, }; @@ -333,9 +338,9 @@ mod tests { database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; - use revm_primitives::BLOCKHASH_SERVE_WINDOW; + use revm_primitives::{address, EvmState, BLOCKHASH_SERVE_WINDOW}; use secp256k1::{Keypair, Secp256k1}; - use std::collections::HashMap; + use std::{collections::HashMap, sync::mpsc}; fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { let mut db = StateProviderTest::default(); @@ -1220,4 +1225,67 @@ mod tests { ), } } + + #[test] + fn test_balance_increment_not_duplicated() { + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) + .build(), + ); + + let withdrawal_recipient = 
address!("1000000000000000000000000000000000000000"); + + let mut db = StateProviderTest::default(); + let initial_balance = 100; + db.insert_account( + withdrawal_recipient, + Account { balance: U256::from(initial_balance), nonce: 1, bytecode_hash: None }, + None, + HashMap::default(), + ); + + let withdrawal = + Withdrawal { index: 0, validator_index: 0, address: withdrawal_recipient, amount: 1 }; + + let header = Header { timestamp: 1, number: 1, ..Header::default() }; + + let block = BlockWithSenders { + block: Block { + header, + body: BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: Some(vec![withdrawal].into()), + }, + }, + senders: vec![], + }; + + let provider = executor_provider(chain_spec); + let executor = provider.executor(StateProviderDatabase::new(&db)); + + let (tx, rx) = mpsc::channel(); + let tx_clone = tx.clone(); + + let _output = executor + .execute_with_state_hook((&block, U256::ZERO).into(), move |state: &EvmState| { + if let Some(account) = state.get(&withdrawal_recipient) { + let _ = tx_clone.send(account.info.balance); + } + }) + .expect("Block execution should succeed"); + + drop(tx); + let balance_changes: Vec = rx.try_iter().collect(); + + if let Some(final_balance) = balance_changes.last() { + let expected_final_balance = U256::from(initial_balance) + U256::from(1_000_000_000); // initial + 1 Gwei in Wei + assert_eq!( + *final_balance, expected_final_balance, + "Final balance should match expected value after withdrawal" + ); + } + } } diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 7d477d2195a..8c3e0108fcc 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -12,7 +12,10 @@ pub use reth_storage_errors::provider::ProviderError; use crate::{system_calls::OnStateHook, TxEnvOverrides}; use alloc::{boxed::Box, vec::Vec}; use alloy_eips::eip7685::Requests; -use alloy_primitives::BlockNumber; +use alloy_primitives::{ + map::{DefaultHashBuilder, HashMap}, + Address, 
BlockNumber, +}; use core::fmt::Display; use reth_consensus::ConsensusError; use reth_primitives::{BlockWithSenders, NodePrimitives, Receipt}; @@ -22,7 +25,7 @@ use revm::{ db::{states::bundle_state::BundleRetention, BundleState}, State, }; -use revm_primitives::{db::Database, U256}; +use revm_primitives::{db::Database, Account, AccountStatus, EvmState, U256}; /// A general purpose executor trait that executes an input (e.g. block) and produces an output /// (e.g. state changes and receipts). @@ -499,6 +502,42 @@ where } } +/// Creates an `EvmState` from a map of balance increments and the current state +/// to load accounts from. No balance increment is done in the function. +/// Zero balance increments are ignored and won't create state entries. +pub fn balance_increment_state( + balance_increments: &HashMap, + state: &mut State, +) -> Result +where + DB: Database, +{ + let mut load_account = |address: &Address| -> Result<(Address, Account), BlockExecutionError> { + let cache_account = state.load_cache_account(*address).map_err(|_| { + BlockExecutionError::msg("could not load account for balance increment") + })?; + + let account = cache_account.account.as_ref().ok_or_else(|| { + BlockExecutionError::msg("could not load account for balance increment") + })?; + + Ok(( + *address, + Account { + info: account.info.clone(), + storage: Default::default(), + status: AccountStatus::Touched, + }, + )) + }; + + balance_increments + .iter() + .filter(|(_, &balance)| balance != 0) + .map(|(addr, _)| load_account(addr)) + .collect::>() +} + #[cfg(test)] mod tests { use super::*; @@ -507,7 +546,7 @@ mod tests { use reth_chainspec::{ChainSpec, MAINNET}; use reth_primitives::EthPrimitives; use revm::db::{CacheDB, EmptyDBTyped}; - use revm_primitives::{bytes, TxEnv}; + use revm_primitives::{address, bytes, AccountInfo, TxEnv, KECCAK_EMPTY}; use std::sync::Arc; #[derive(Clone, Default)] @@ -760,4 +799,90 @@ mod tests { let result = 
executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); assert!(result.is_ok()); } + + fn setup_state_with_account( + addr: Address, + balance: u128, + nonce: u64, + ) -> State>> { + let db = CacheDB::>::default(); + let mut state = State::builder().with_database(db).with_bundle_update().build(); + + let account_info = AccountInfo { + balance: U256::from(balance), + nonce, + code_hash: KECCAK_EMPTY, + code: None, + }; + state.insert_account(addr, account_info); + state + } + + #[test] + fn test_balance_increment_state_zero() { + let addr = address!("1000000000000000000000000000000000000000"); + let mut state = setup_state_with_account(addr, 100, 1); + + let mut increments = HashMap::::default(); + increments.insert(addr, 0); + + let result = balance_increment_state(&increments, &mut state).unwrap(); + assert!(result.is_empty(), "Zero increments should be ignored"); + } + + #[test] + fn test_balance_increment_state_empty_increments_map() { + let mut state = State::builder() + .with_database(CacheDB::>::default()) + .with_bundle_update() + .build(); + + let increments = HashMap::::default(); + let result = balance_increment_state(&increments, &mut state).unwrap(); + assert!(result.is_empty(), "Empty increments map should return empty state"); + } + + #[test] + fn test_balance_increment_state_multiple_valid_increments() { + let addr1 = address!("1000000000000000000000000000000000000000"); + let addr2 = address!("2000000000000000000000000000000000000000"); + + let mut state = setup_state_with_account(addr1, 100, 1); + + let account2 = + AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + state.insert_account(addr2, account2); + + let mut increments = HashMap::::default(); + increments.insert(addr1, 50); + increments.insert(addr2, 100); + + let result = balance_increment_state(&increments, &mut state).unwrap(); + + assert_eq!(result.len(), 2); + assert_eq!(result.get(&addr1).unwrap().info.balance, 
U256::from(100)); + assert_eq!(result.get(&addr2).unwrap().info.balance, U256::from(200)); + } + + #[test] + fn test_balance_increment_state_mixed_zero_and_nonzero_increments() { + let addr1 = address!("1000000000000000000000000000000000000000"); + let addr2 = address!("2000000000000000000000000000000000000000"); + + let mut state = setup_state_with_account(addr1, 100, 1); + + let account2 = + AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + state.insert_account(addr2, account2); + + let mut increments = HashMap::::default(); + increments.insert(addr1, 0); + increments.insert(addr2, 100); + + let result = balance_increment_state(&increments, &mut state).unwrap(); + + assert_eq!(result.len(), 1, "Only non-zero increments should be included"); + assert!(!result.contains_key(&addr1), "Zero increment account should not be included"); + assert_eq!(result.get(&addr2).unwrap().info.balance, U256::from(200)); + } } diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index a333978f096..549f52c89de 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -10,8 +10,9 @@ use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_evm::{ execute::{ - BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, - BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, ProviderError, + balance_increment_state, BasicBlockExecutorProvider, BlockExecutionError, + BlockExecutionStrategy, BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, + ProviderError, }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, @@ -260,8 +261,11 @@ where post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); // increment balances self.state - .increment_balances(balance_increments) + .increment_balances(balance_increments.clone()) .map_err(|_| 
BlockValidationError::IncrementBalanceFailed)?; + // call state hook with changes due to balance increments. + let balance_state = balance_increment_state(&balance_increments, &mut self.state)?; + self.system_caller.on_state(&balance_state); Ok(Requests::default()) } From 519a10ae99338f58a4f443dea295057469f807c1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 14:24:21 +0100 Subject: [PATCH 816/970] chore: remove OpTxType new type (#12715) --- Cargo.lock | 2 +- crates/optimism/primitives/Cargo.toml | 8 +- .../primitives/src/transaction/tx_type.rs | 289 +----------------- crates/primitives-traits/Cargo.toml | 27 +- crates/primitives-traits/src/size.rs | 7 + .../src/transaction/tx_type.rs | 29 +- .../codecs/src/alloy/transaction/optimism.rs | 52 +++- 7 files changed, 118 insertions(+), 296 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a3521dccb2..cf011d0f639 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8461,7 +8461,6 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "alloy-rlp", "arbitrary", "bytes", "derive_more 1.0.0", @@ -8664,6 +8663,7 @@ dependencies = [ "bytes", "derive_more 1.0.0", "modular-bitfield", + "op-alloy-consensus", "proptest", "proptest-arbitrary-interop", "rand 0.8.5", diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 92e02f1d2dd..075cd0d13f4 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -14,14 +14,13 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["op"] } reth-codecs = { workspace = true, optional = true, features = ["optimism"] } # ethereum alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-eips.workspace = true -alloy-rlp.workspace = true # op op-alloy-consensus.workspace = true @@ -37,7 +36,7 @@ derive_more.workspace = true arbitrary = { workspace = 
true, features = ["derive"], optional = true } [dev-dependencies] -reth-codecs = { workspace = true, features = ["test-utils"] } +reth-codecs = { workspace = true, features = ["test-utils", "optimism"] } rstest.workspace = true arbitrary.workspace = true @@ -51,12 +50,13 @@ std = [ "alloy-eips/std", "alloy-primitives/std", "serde/std", - "alloy-rlp/std" ] reth-codec = [ "dep:reth-codecs", "reth-primitives/reth-codec", "reth-primitives-traits/reth-codec", + "reth-codecs?/optimism", + "reth-primitives/reth-codec" ] serde = [ "dep:serde", diff --git a/crates/optimism/primitives/src/transaction/tx_type.rs b/crates/optimism/primitives/src/transaction/tx_type.rs index 9976221b424..8be5f3a3d5e 100644 --- a/crates/optimism/primitives/src/transaction/tx_type.rs +++ b/crates/optimism/primitives/src/transaction/tx_type.rs @@ -1,286 +1,21 @@ -//! newtype pattern on `op_alloy_consensus::OpTxType`. -//! `OpTxType` implements `reth_primitives_traits::TxType`. -//! This type is required because a `Compact` impl is needed on the deposit tx type. +//! Optimism transaction type. -use core::fmt::Debug; - -use alloy_primitives::{U64, U8}; -use alloy_rlp::{Decodable, Encodable, Error}; -use bytes::BufMut; -use derive_more::{ - derive::{From, Into}, - Display, -}; -use op_alloy_consensus::OpTxType as AlloyOpTxType; -use reth_primitives_traits::{InMemorySize, TxType}; - -/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement -/// [`TxType`] trait. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[into(u8)] -pub struct OpTxType(AlloyOpTxType); - -impl TxType for OpTxType { - #[inline] - fn is_legacy(&self) -> bool { - matches!(self.0, AlloyOpTxType::Legacy) - } - - #[inline] - fn is_eip2930(&self) -> bool { - matches!(self.0, AlloyOpTxType::Eip2930) - } - - #[inline] - fn is_eip1559(&self) -> bool { - matches!(self.0, AlloyOpTxType::Eip1559) - } - - #[inline] - fn is_eip4844(&self) -> bool { - false - } - - #[inline] - fn is_eip7702(&self) -> bool { - matches!(self.0, AlloyOpTxType::Eip7702) - } -} - -impl InMemorySize for OpTxType { - /// Calculates a heuristic for the in-memory size of the [`OpTxType`]. - #[inline] - fn size(&self) -> usize { - core::mem::size_of::() - } -} - -impl From for U8 { - fn from(tx_type: OpTxType) -> Self { - Self::from(u8::from(tx_type)) - } -} - -impl TryFrom for OpTxType { - type Error = Error; - - fn try_from(value: u8) -> Result { - AlloyOpTxType::try_from(value) - .map(OpTxType) - .map_err(|_| Error::Custom("Invalid transaction type")) - } -} - -impl Default for OpTxType { - fn default() -> Self { - Self(AlloyOpTxType::Legacy) - } -} - -impl PartialEq for OpTxType { - fn eq(&self, other: &u8) -> bool { - let self_as_u8: u8 = (*self).into(); - &self_as_u8 == other - } -} - -impl TryFrom for OpTxType { - type Error = Error; - - fn try_from(value: u64) -> Result { - if value > u8::MAX as u64 { - return Err(Error::Custom("value out of range")); - } - Self::try_from(value as u8) - } -} - -impl TryFrom for OpTxType { - type Error = Error; - - fn try_from(value: U64) -> Result { - let u64_value: u64 = value.try_into().map_err(|_| Error::Custom("value out of range"))?; - Self::try_from(u64_value) - } -} - -impl Encodable for OpTxType { - fn length(&self) -> usize { - let value: u8 = (*self).into(); - value.length() - } - - fn encode(&self, out: &mut dyn BufMut) { - 
let value: u8 = (*self).into(); - value.encode(out); - } -} - -impl Decodable for OpTxType { - fn decode(buf: &mut &[u8]) -> Result { - // Decode the u8 value from RLP - let value = if buf.is_empty() { - return Err(alloy_rlp::Error::InputTooShort); - } else if buf[0] == 0x80 { - 0 // Special case: RLP encoding for integer 0 is `b"\x80"` - } else { - u8::decode(buf)? - }; - - Self::try_from(value).map_err(|_| alloy_rlp::Error::Custom("Invalid transaction type")) - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for OpTxType { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - use reth_codecs::txtype::*; - match self.0 { - AlloyOpTxType::Legacy => COMPACT_IDENTIFIER_LEGACY, - AlloyOpTxType::Eip2930 => COMPACT_IDENTIFIER_EIP2930, - AlloyOpTxType::Eip1559 => COMPACT_IDENTIFIER_EIP1559, - AlloyOpTxType::Eip7702 => { - buf.put_u8(alloy_consensus::constants::EIP7702_TX_TYPE_ID); - COMPACT_EXTENDED_IDENTIFIER_FLAG - } - AlloyOpTxType::Deposit => { - buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); - COMPACT_EXTENDED_IDENTIFIER_FLAG - } - } - } - - fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { - use bytes::Buf; - ( - match identifier { - reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => Self(AlloyOpTxType::Legacy), - reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => Self(AlloyOpTxType::Eip2930), - reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => Self(AlloyOpTxType::Eip1559), - reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { - let extended_identifier = buf.get_u8(); - match extended_identifier { - alloy_consensus::constants::EIP7702_TX_TYPE_ID => { - Self(AlloyOpTxType::Eip7702) - } - op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self(AlloyOpTxType::Deposit), - _ => panic!("Unsupported OpTxType identifier: {extended_identifier}"), - } - } - _ => panic!("Unknown identifier for OpTxType: {identifier}"), - }, - buf, - ) - } -} +pub use op_alloy_consensus::OpTxType; 
#[cfg(test)] mod tests { use super::*; use alloy_consensus::constants::EIP7702_TX_TYPE_ID; - use bytes::BytesMut; use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; use reth_codecs::{txtype::*, Compact}; use rstest::rstest; - #[test] - fn test_from_alloy_op_tx_type() { - let alloy_tx = AlloyOpTxType::Legacy; - let op_tx: OpTxType = OpTxType::from(alloy_tx); - assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_from_op_tx_type_to_u8() { - let op_tx = OpTxType(AlloyOpTxType::Legacy); - let tx_type_u8: u8 = op_tx.into(); - assert_eq!(tx_type_u8, AlloyOpTxType::Legacy as u8); - } - - #[test] - fn test_from_op_tx_type_to_u8_u8() { - let op_tx = OpTxType(AlloyOpTxType::Legacy); - let tx_type_u8: U8 = op_tx.into(); - assert_eq!(tx_type_u8, U8::from(AlloyOpTxType::Legacy as u8)); - } - - #[test] - fn test_try_from_u8() { - let op_tx = OpTxType::try_from(AlloyOpTxType::Legacy as u8).unwrap(); - assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_try_from_invalid_u8() { - let invalid_value: u8 = 255; - let result = OpTxType::try_from(invalid_value); - assert_eq!(result, Err(Error::Custom("Invalid transaction type"))); - } - - #[test] - fn test_try_from_u64() { - let op_tx = OpTxType::try_from(AlloyOpTxType::Legacy as u64).unwrap(); - assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_try_from_u64_out_of_range() { - let result = OpTxType::try_from(u64::MAX); - assert_eq!(result, Err(Error::Custom("value out of range"))); - } - - #[test] - fn test_try_from_u64_within_range() { - let valid_value: U64 = U64::from(AlloyOpTxType::Legacy as u64); - let op_tx = OpTxType::try_from(valid_value).unwrap(); - assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_default() { - let default_tx = OpTxType::default(); - assert_eq!(default_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_partial_eq_u8() { - let op_tx = OpTxType(AlloyOpTxType::Legacy); - assert_eq!(op_tx, 
AlloyOpTxType::Legacy as u8); - } - - #[test] - fn test_encodable() { - let op_tx = OpTxType(AlloyOpTxType::Legacy); - let mut buf = BytesMut::new(); - op_tx.encode(&mut buf); - assert_eq!(buf, BytesMut::from(&[0x80][..])); - } - - #[test] - fn test_decodable_success() { - // Using the RLP-encoded form of 0, which is `b"\x80"` - let mut buf: &[u8] = &[0x80]; - let decoded_tx = OpTxType::decode(&mut buf).unwrap(); - assert_eq!(decoded_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_decodable_invalid() { - let mut buf: &[u8] = &[255]; - let result = OpTxType::decode(&mut buf); - assert!(result.is_err()); - } - #[rstest] - #[case(OpTxType(AlloyOpTxType::Legacy), COMPACT_IDENTIFIER_LEGACY, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip2930), COMPACT_IDENTIFIER_EIP2930, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip1559), COMPACT_IDENTIFIER_EIP1559, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip7702), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] - #[case(OpTxType(AlloyOpTxType::Deposit), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] + #[case(OpTxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(OpTxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(OpTxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(OpTxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[case(OpTxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] fn test_txtype_to_compact( #[case] tx_type: OpTxType, #[case] expected_identifier: usize, @@ -297,11 +32,11 @@ mod tests { } #[rstest] - #[case(OpTxType(AlloyOpTxType::Legacy), COMPACT_IDENTIFIER_LEGACY, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip2930), COMPACT_IDENTIFIER_EIP2930, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip1559), COMPACT_IDENTIFIER_EIP1559, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip7702), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] - #[case(OpTxType(AlloyOpTxType::Deposit), 
COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] + #[case(OpTxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(OpTxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(OpTxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(OpTxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[case(OpTxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] fn test_txtype_from_compact( #[case] expected_type: OpTxType, #[case] identifier: usize, diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index d56fd5bc0f2..ceee1e26cec 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -23,6 +23,9 @@ alloy-primitives.workspace = true alloy-rlp.workspace = true revm-primitives.workspace = true +# op +op-alloy-consensus = { workspace = true, optional = true } + # misc byteorder = { workspace = true, optional = true } bytes.workspace = true @@ -78,28 +81,34 @@ arbitrary = [ "dep:proptest-arbitrary-interop", "alloy-eips/arbitrary", "revm-primitives/arbitrary", - "reth-codecs?/arbitrary" + "reth-codecs?/arbitrary", + "op-alloy-consensus?/arbitrary" ] serde-bincode-compat = [ "serde", "serde_with", "alloy-consensus/serde-bincode-compat", - "alloy-eips/serde-bincode-compat" + "alloy-eips/serde-bincode-compat", + "op-alloy-consensus?/serde-bincode-compat" ] serde = [ "dep:serde", "alloy-consensus/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "bytes/serde", - "rand/serde", - "reth-codecs?/serde", - "revm-primitives/serde", - "roaring/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "reth-codecs?/serde", + "revm-primitives/serde", + "roaring/serde", "revm-primitives/serde", + "op-alloy-consensus?/serde" ] reth-codec = [ "dep:reth-codecs", "dep:modular-bitfield", "dep:byteorder", ] +op = [ + "dep:op-alloy-consensus", +] diff --git a/crates/primitives-traits/src/size.rs 
b/crates/primitives-traits/src/size.rs index 4d721dd00b3..f9065cda24a 100644 --- a/crates/primitives-traits/src/size.rs +++ b/crates/primitives-traits/src/size.rs @@ -46,6 +46,13 @@ macro_rules! impl_in_mem_size { impl_in_mem_size!(Header, TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844); +#[cfg(feature = "op")] +impl InMemorySize for op_alloy_consensus::OpTxType { + fn size(&self) -> usize { + 1 + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs index d2caebe4c9f..c2f2e04899d 100644 --- a/crates/primitives-traits/src/transaction/tx_type.rs +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -1,10 +1,8 @@ //! Abstraction of transaction envelope type ID. -use core::fmt; - -use alloy_primitives::{U64, U8}; - use crate::{InMemorySize, MaybeArbitrary, MaybeCompact}; +use alloy_primitives::{U64, U8}; +use core::fmt; /// Helper trait that unifies all behaviour required by transaction type ID to support full node /// operations. @@ -60,3 +58,26 @@ pub trait TxType: !self.is_eip4844() } } + +#[cfg(feature = "op")] +impl TxType for op_alloy_consensus::OpTxType { + fn is_legacy(&self) -> bool { + matches!(self, Self::Legacy) + } + + fn is_eip2930(&self) -> bool { + matches!(self, Self::Eip2930) + } + + fn is_eip1559(&self) -> bool { + matches!(self, Self::Eip1559) + } + + fn is_eip4844(&self) -> bool { + false + } + + fn is_eip7702(&self) -> bool { + matches!(self, Self::Eip7702) + } +} diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index bb970b58177..631f5c406ee 100644 --- a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -1,9 +1,11 @@ //! 
Compact implementation for [`AlloyTxDeposit`] +use alloy_consensus::constants::EIP7702_TX_TYPE_ID; use crate::Compact; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; -use op_alloy_consensus::TxDeposit as AlloyTxDeposit; +use op_alloy_consensus::{OpTxType, TxDeposit as AlloyTxDeposit}; use reth_codecs_derive::add_arbitrary_tests; +use crate::txtype::{COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY}; /// Deposit transactions, also known as deposits are initiated on L1, and executed on L2. /// @@ -65,3 +67,51 @@ impl Compact for AlloyTxDeposit { (alloy_tx, buf) } } + + +impl crate::Compact for OpTxType { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + use crate::txtype::*; + + match self { + Self::Legacy => COMPACT_IDENTIFIER_LEGACY, + Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, + Self::Eip1559 => COMPACT_IDENTIFIER_EIP1559, + Self::Eip7702 => { + buf.put_u8(EIP7702_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + Self::Deposit => { + buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + } + } + + // For backwards compatibility purposes only 2 bits of the type are encoded in the identifier + // parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type + // is read from the buffer as a single byte. 
+ fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + ( + match identifier { + COMPACT_IDENTIFIER_LEGACY => Self::Legacy, + COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, + COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, + COMPACT_EXTENDED_IDENTIFIER_FLAG => { + let extended_identifier = buf.get_u8(); + match extended_identifier { + EIP7702_TX_TYPE_ID => Self::Eip7702, + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self::Deposit, + _ => panic!("Unsupported TxType identifier: {extended_identifier}"), + } + } + _ => panic!("Unknown identifier for TxType: {identifier}"), + }, + buf, + ) + } +} \ No newline at end of file From 332cce1f9b193e9fe52d4903a3f6858d7fc96130 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 14:24:48 +0100 Subject: [PATCH 817/970] feat: add header AT to provider (#13030) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 2 + .../commands/debug_cmd/in_memory_merkle.rs | 1 + bin/reth/src/commands/debug_cmd/merkle.rs | 1 + .../src/commands/debug_cmd/replay_engine.rs | 1 + crates/blockchain-tree/src/blockchain_tree.rs | 7 +- crates/chain-state/src/notifications.rs | 11 +- crates/cli/commands/Cargo.toml | 4 +- .../commands/src/init_state/without_evm.rs | 25 ++-- .../cli/commands/src/stage/dump/execution.rs | 3 + crates/cli/commands/src/stage/dump/merkle.rs | 28 +++-- .../beacon/src/engine/hooks/static_file.rs | 4 +- crates/consensus/beacon/src/engine/mod.rs | 17 ++- crates/consensus/common/src/validation.rs | 2 + crates/engine/local/Cargo.toml | 1 + crates/engine/local/src/miner.rs | 3 +- crates/engine/tree/src/tree/mod.rs | 12 +- crates/evm/src/provider.rs | 19 ++- crates/exex/exex/src/manager.rs | 6 +- crates/net/downloaders/src/bodies/bodies.rs | 13 +- crates/net/downloaders/src/bodies/task.rs | 4 +- crates/net/network/src/config.rs | 7 +- crates/net/network/src/eth_requests.rs | 14 +-- crates/net/network/src/test_utils/testnet.rs | 21 +++- crates/node/builder/src/builder/mod.rs | 14 ++- 
crates/node/builder/src/launch/common.rs | 2 + crates/node/builder/src/setup.rs | 4 +- crates/node/core/src/node_config.rs | 6 +- crates/node/events/src/cl.rs | 9 +- crates/optimism/rpc/src/eth/pending_block.rs | 1 + crates/optimism/rpc/src/witness.rs | 4 +- crates/payload/basic/src/lib.rs | 6 +- crates/primitives-traits/src/block/header.rs | 26 ++-- crates/rpc/rpc-builder/src/eth.rs | 7 +- crates/rpc/rpc-builder/src/lib.rs | 27 +++- crates/rpc/rpc-eth-api/src/helpers/fee.rs | 18 ++- .../rpc-eth-api/src/helpers/pending_block.rs | 5 +- crates/rpc/rpc-eth-types/src/cache/mod.rs | 21 +++- crates/rpc/rpc-eth-types/src/gas_oracle.rs | 6 +- crates/rpc/rpc/src/eth/core.rs | 10 +- crates/rpc/rpc/src/eth/filter.rs | 19 +-- .../rpc/rpc/src/eth/helpers/pending_block.rs | 1 + crates/rpc/rpc/src/validation.rs | 4 +- crates/stages/stages/src/stages/bodies.rs | 6 +- crates/stages/stages/src/stages/execution.rs | 11 +- .../stages/src/stages/hashing_account.rs | 6 +- crates/stages/stages/src/stages/headers.rs | 40 +++--- crates/stages/stages/src/stages/merkle.rs | 5 +- .../stages/src/stages/sender_recovery.rs | 5 +- crates/stages/stages/src/stages/utils.rs | 2 +- .../static-file/src/segments/headers.rs | 15 ++- .../static-file/src/static_file_producer.rs | 2 +- crates/storage/db-common/Cargo.toml | 1 + crates/storage/db-common/src/init.rs | 21 ++-- .../src/providers/blockchain_provider.rs | 80 +++++++----- .../provider/src/providers/consistent.rs | 67 +++++----- .../provider/src/providers/database/mod.rs | 44 ++++--- .../src/providers/database/provider.rs | 118 ++++++++++-------- crates/storage/provider/src/providers/mod.rs | 63 +++++----- .../provider/src/providers/static_file/jar.rs | 35 +++--- .../src/providers/static_file/manager.rs | 38 +++--- .../src/providers/static_file/writer.rs | 11 +- .../storage/provider/src/test_utils/mock.rs | 2 + .../storage/provider/src/test_utils/noop.rs | 4 + crates/storage/provider/src/traits/full.rs | 22 ++-- 
.../provider/src/traits/header_sync_gap.rs | 14 ++- crates/storage/storage-api/src/block.rs | 37 +++--- crates/storage/storage-api/src/chain_info.rs | 9 +- crates/storage/storage-api/src/header.rs | 32 +++-- crates/transaction-pool/src/maintain.rs | 9 +- examples/db-access/Cargo.toml | 1 + examples/db-access/src/main.rs | 3 +- 71 files changed, 667 insertions(+), 432 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf011d0f639..2a9d5222025 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2987,6 +2987,7 @@ dependencies = [ name = "example-db-access" version = "0.0.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-eth", "eyre", @@ -7202,6 +7203,7 @@ dependencies = [ name = "reth-engine-local" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-engine", "eyre", diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 870dc1ddf23..6fbfa33b891 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -63,6 +63,7 @@ impl> Command { Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, >( diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 78e32df5266..16a1f111272 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -62,6 +62,7 @@ impl> Command { Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, >( diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index f0016a129bd..4b98fc85d0b 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -61,6 +61,7 @@ impl> Command 
{ Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, >( diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index ec9beb20a07..757729d5416 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1424,7 +1424,12 @@ mod tests { } fn setup_genesis< - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + BlockBody = reth_primitives::BlockBody, + BlockHeader = reth_primitives::Header, + >, + >, >( factory: &ProviderFactory, mut genesis: SealedBlock, diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index c4e0415436a..498528813d6 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -162,19 +162,22 @@ pub struct ForkChoiceNotifications( /// A trait that allows to register to fork choice related events /// and get notified when a new fork choice is available. pub trait ForkChoiceSubscriptions: Send + Sync { + /// Block Header type. + type Header: Clone + Send + Sync + 'static; + /// Get notified when a new safe block of the chain is selected. - fn subscribe_safe_block(&self) -> ForkChoiceNotifications; + fn subscribe_safe_block(&self) -> ForkChoiceNotifications; /// Get notified when a new finalized block of the chain is selected. - fn subscribe_finalized_block(&self) -> ForkChoiceNotifications; + fn subscribe_finalized_block(&self) -> ForkChoiceNotifications; /// Convenience method to get a stream of the new safe blocks of the chain. - fn safe_block_stream(&self) -> ForkChoiceStream { + fn safe_block_stream(&self) -> ForkChoiceStream> { ForkChoiceStream::new(self.subscribe_safe_block().0) } /// Convenience method to get a stream of the new finalized blocks of the chain. 
- fn finalized_block_stream(&self) -> ForkChoiceStream { + fn finalized_block_stream(&self) -> ForkChoiceStream> { ForkChoiceStream::new(self.subscribe_finalized_block().0) } } diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 90acb82d71d..2220efda5c6 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -17,7 +17,7 @@ reth-cli.workspace = true reth-ethereum-cli.workspace = true reth-cli-runner.workspace = true reth-cli-util.workspace = true -reth-codecs = { workspace = true, optional = true } +reth-codecs.workspace = true reth-config.workspace = true reth-consensus.workspace = true reth-db = { workspace = true, features = ["mdbx"] } @@ -110,7 +110,7 @@ arbitrary = [ "reth-prune-types/test-utils", "reth-stages-types/test-utils", "reth-trie-common/test-utils", - "reth-codecs?/arbitrary", + "reth-codecs/arbitrary", "reth-prune-types?/arbitrary", "reth-stages-types?/arbitrary", "reth-trie-common?/arbitrary", diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index 22236d14c76..f8f72709a7e 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,7 +1,8 @@ use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::Decodable; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; +use reth_codecs::Compact; use reth_node_builder::NodePrimitives; use reth_primitives::{SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment}; use reth_provider::{ @@ -27,26 +28,26 @@ pub(crate) fn read_header_from_file(path: PathBuf) -> Result( provider_rw: &Provider, - header: SealedHeader, + header: SealedHeader<::BlockHeader>, total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + StageCheckpointWriter - + BlockWriter>, + + BlockWriter::Block>, { info!(target: "reth::cli", 
"Setting up dummy EVM chain before importing state."); let static_file_provider = provider_rw.static_file_provider(); // Write EVM dummy data up to `header - 1` block - append_dummy_chain(&static_file_provider, header.number - 1)?; + append_dummy_chain(&static_file_provider, header.number() - 1)?; info!(target: "reth::cli", "Appending first valid block."); append_first_block(provider_rw, &header, total_difficulty)?; for stage in StageId::ALL { - provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number))?; + provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number()))?; } info!(target: "reth::cli", "Set up finished."); @@ -60,12 +61,12 @@ where /// height. fn append_first_block( provider_rw: &Provider, - header: &SealedHeader, + header: &SealedHeader<::BlockHeader>, total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: BlockWriter> - + StaticFileProviderFactory, + Provider: BlockWriter::Block> + + StaticFileProviderFactory>, { provider_rw.insert_block( SealedBlockWithSenders::new(SealedBlock::new(header.clone(), Default::default()), vec![]) @@ -81,9 +82,9 @@ where &header.hash(), )?; - sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number)?; + sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number())?; - sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number)?; + sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number())?; Ok(()) } @@ -93,7 +94,7 @@ where /// * Headers: It will push an empty block. /// * Transactions: It will not push any tx, only increments the end block range. /// * Receipts: It will not push any receipt, only increments the end block range. 
-fn append_dummy_chain( +fn append_dummy_chain>( sf_provider: &StaticFileProvider, target_height: BlockNumber, ) -> Result<(), eyre::Error> { diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 70fd23f9847..73d2e8a9f8f 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -31,6 +31,7 @@ where Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, E: BlockExecutorProvider, @@ -143,6 +144,7 @@ fn unwind_and_copy< Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, >( @@ -186,6 +188,7 @@ where Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, E: BlockExecutorProvider, diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index ce187437218..59a25c492aa 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -25,21 +25,23 @@ use reth_stages::{ }; use tracing::info; -pub(crate) async fn dump_merkle_stage< +pub(crate) async fn dump_merkle_stage( + db_tool: &DbTool, + from: BlockNumber, + to: BlockNumber, + output_datadir: ChainPath, + should_run: bool, +) -> Result<()> +where N: ProviderNodeTypes< DB = Arc, Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, ->( - db_tool: &DbTool, - from: BlockNumber, - to: BlockNumber, - output_datadir: ChainPath, - should_run: bool, -) -> Result<()> { +{ let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; output_db.update(|tx| { @@ -81,6 +83,7 @@ fn unwind_and_copy< Primitives: NodePrimitives< Block = 
reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, >( @@ -161,11 +164,10 @@ fn unwind_and_copy< } /// Try to re-execute the stage straight away -fn dry_run( - output_provider_factory: ProviderFactory, - to: u64, - from: u64, -) -> eyre::Result<()> { +fn dry_run(output_provider_factory: ProviderFactory, to: u64, from: u64) -> eyre::Result<()> +where + N: ProviderNodeTypes>, +{ info!(target: "reth::cli", "Executing stage."); let provider = output_provider_factory.database_provider_rw()?; diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 7cd286f659c..b4b38239a03 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -36,7 +36,7 @@ where Provider: StaticFileProviderFactory + DatabaseProviderFactory< Provider: StaticFileProviderFactory< - Primitives: NodePrimitives, + Primitives: NodePrimitives, > + StageCheckpointReader + BlockReader + ChainStateBlockReader, @@ -152,7 +152,7 @@ where Provider: StaticFileProviderFactory + DatabaseProviderFactory< Provider: StaticFileProviderFactory< - Primitives: NodePrimitives, + Primitives: NodePrimitives, > + StageCheckpointReader + BlockReader + ChainStateBlockReader, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 7a894f08e1c..f188e495be4 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -21,7 +21,7 @@ use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, EthBlockClient, }; -use reth_node_types::{Block, BlockTy, NodeTypesWithEngine}; +use reth_node_types::{Block, BlockTy, HeaderTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; @@ -234,9 +234,9 @@ 
impl BeaconConsensusEngine where N: EngineNodeTypes, BT: BlockchainTreeEngine - + BlockReader> + + BlockReader, Header = HeaderTy> + BlockIdReader - + CanonChainTracker + + CanonChainTracker

> + StageCheckpointReader + ChainSpecProvider + 'static, @@ -1804,9 +1804,9 @@ where N: EngineNodeTypes, Client: EthBlockClient + 'static, BT: BlockchainTreeEngine - + BlockReader> + + BlockReader, Header = HeaderTy> + BlockIdReader - + CanonChainTracker + + CanonChainTracker
> + StageCheckpointReader + ChainSpecProvider + Unpin @@ -2179,7 +2179,12 @@ mod tests { fn insert_blocks< 'a, - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + BlockBody = reth_primitives::BlockBody, + BlockHeader = reth_primitives::Header, + >, + >, >( provider_factory: ProviderFactory, mut blocks: impl Iterator, diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index dce7d257954..2d681be449a 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -391,6 +391,8 @@ mod tests { } impl HeaderProvider for Provider { + type Header = Header; + fn is_known(&self, _block_hash: &BlockHash) -> ProviderResult { Ok(self.is_known) } diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index d8a66e65e04..b3ad169e318 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -29,6 +29,7 @@ reth-transaction-pool.workspace = true reth-stages-api.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index a5c7cf4d4c6..29418c0b714 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -1,5 +1,6 @@ //! Contains the implementation of the mining mode for the local engine. 
+use alloy_consensus::BlockHeader; use alloy_primitives::{TxHash, B256}; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar, ForkchoiceState}; use eyre::OptionExt; @@ -114,7 +115,7 @@ where to_engine, mode, payload_builder, - last_timestamp: latest_header.timestamp, + last_timestamp: latest_header.timestamp(), last_block_hashes: vec![latest_header.hash()], }; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index cdd066cdc24..16e07e51844 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -540,9 +540,13 @@ impl std::fmt::Debug impl EngineApiTreeHandler where - N: NodePrimitives, + N: NodePrimitives< + Block = reth_primitives::Block, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, P: DatabaseProviderFactory - + BlockReader + + BlockReader + StateProviderFactory + StateReader + Clone @@ -1357,7 +1361,7 @@ where // update the tracked chain height, after backfill sync both the canonical height and // persisted height are the same self.state.tree_state.set_canonical_head(new_head.num_hash()); - self.persistence_state.finish(new_head.hash(), new_head.number); + self.persistence_state.finish(new_head.hash(), new_head.number()); // update the tracked canonical head self.canonical_in_memory_state.set_canonical_head(new_head); @@ -1622,7 +1626,7 @@ where // the hash could belong to an unknown block or a persisted block if let Some(header) = self.provider.header(&hash)? 
{ - debug!(target: "engine::tree", %hash, number = %header.number, "found canonical state for block in database"); + debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database"); // the block is known and persisted let historical = self.provider.state_by_block_hash(hash)?; return Ok(Some(historical)) diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index 5f86eb74dd4..ec2f1803da0 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -3,7 +3,6 @@ use crate::ConfigureEvmEnv; use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; -use reth_primitives::NodePrimitives; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; @@ -13,7 +12,7 @@ use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; /// This type is mainly used to provide required data to configure the EVM environment that is /// usually stored on disk. #[auto_impl::auto_impl(&, Arc)] -pub trait EvmEnvProvider: Send + Sync { +pub trait EvmEnvProvider: Send + Sync { /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given /// [BlockHashOrNumber]. fn fill_env_at( @@ -24,17 +23,17 @@ pub trait EvmEnvProvider: Se evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
; + EvmConfig: ConfigureEvmEnv
; /// Fills the default [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the /// given block header. fn env_with_header( &self, - header: &N::BlockHeader, + header: &H, evm_config: EvmConfig, ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
, { let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); let mut block_env = BlockEnv::default(); @@ -48,11 +47,11 @@ pub trait EvmEnvProvider: Se &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &N::BlockHeader, + header: &H, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
; + EvmConfig: ConfigureEvmEnv
; /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given /// [BlockHashOrNumber]. @@ -63,15 +62,15 @@ pub trait EvmEnvProvider: Se evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
; + EvmConfig: ConfigureEvmEnv
; /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given block header. fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &N::BlockHeader, + header: &H, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
; + EvmConfig: ConfigureEvmEnv
; } diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index a3b92e9f17a..16a93052614 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -246,7 +246,7 @@ pub struct ExExManager { /// Write-Ahead Log for the [`ExExNotification`]s. wal: Wal, /// A stream of finalized headers. - finalized_header_stream: ForkChoiceStream, + finalized_header_stream: ForkChoiceStream>, /// A handle to the `ExEx` manager. handle: ExExManagerHandle, @@ -270,7 +270,7 @@ where handles: Vec>, max_capacity: usize, wal: Wal, - finalized_header_stream: ForkChoiceStream, + finalized_header_stream: ForkChoiceStream>, ) -> Self { let num_exexs = handles.len(); @@ -355,7 +355,7 @@ where /// /// This function checks if all ExExes are on the canonical chain and finalizes the WAL if /// necessary. - fn finalize_wal(&self, finalized_header: SealedHeader) -> eyre::Result<()> { + fn finalize_wal(&self, finalized_header: SealedHeader) -> eyre::Result<()> { debug!(target: "exex::manager", header = ?finalized_header.num_hash(), "Received finalized header"); // Check if all ExExes are on the canonical chain diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 82f45dd23bf..682995e7eb3 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -1,5 +1,6 @@ use super::queue::BodiesRequestQueue; use crate::{bodies::task::TaskDownloader, metrics::BodyDownloaderMetrics}; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use futures::Stream; use futures_util::StreamExt; @@ -14,7 +15,7 @@ use reth_network_p2p::{ error::{DownloadError, DownloadResult}, }; use reth_primitives::SealedHeader; -use reth_primitives_traits::size::InMemorySize; +use reth_primitives_traits::{size::InMemorySize, BlockHeader as _}; use reth_storage_api::HeaderProvider; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ @@ -71,7 +72,7 @@ where 
Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. - fn next_headers_request(&self) -> DownloadResult>> { + fn next_headers_request(&self) -> DownloadResult>>> { let start_at = match self.in_progress_queue.last_requested_block_number { Some(num) => num + 1, None => *self.download_range.start(), @@ -96,7 +97,7 @@ where &self, range: RangeInclusive, max_non_empty: u64, - ) -> DownloadResult>> { + ) -> DownloadResult>>> { if range.is_empty() || max_non_empty == 0 { return Ok(None) } @@ -109,7 +110,7 @@ where let mut collected = 0; let mut non_empty_headers = 0; let headers = self.provider.sealed_headers_while(range.clone(), |header| { - let should_take = range.contains(&header.number) && + let should_take = range.contains(&header.number()) && non_empty_headers < max_non_empty && collected < self.stream_batch_size; @@ -300,7 +301,7 @@ where impl BodyDownloader for BodiesDownloader where B: BodiesClient + 'static, - Provider: HeaderProvider + Unpin + 'static, + Provider: HeaderProvider
+ Unpin + 'static, { type Body = B::Body; @@ -350,7 +351,7 @@ where impl Stream for BodiesDownloader where B: BodiesClient + 'static, - Provider: HeaderProvider + Unpin + 'static, + Provider: HeaderProvider
+ Unpin + 'static, { type Item = BodyDownloaderResult; diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index a2b63c8ed18..89af9813e3c 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -52,10 +52,10 @@ impl TaskDownloader { /// /// fn t< /// B: BodiesClient + 'static, - /// Provider: HeaderProvider + Unpin + 'static, + /// Provider: HeaderProvider
+ Unpin + 'static, /// >( /// client: Arc, - /// consensus: Arc>, + /// consensus: Arc>, /// provider: Provider, /// ) { /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider); diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index a7d8a98fae6..bde2cf78d97 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -147,8 +147,11 @@ where impl NetworkConfig where - C: BlockReader - + HeaderProvider + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + HeaderProvider + Clone + Unpin + 'static, diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index bb45507bdbd..ee8640daaa9 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -4,7 +4,7 @@ use crate::{ budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, metrics::EthRequestHandlerMetrics, }; -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_rlp::Encodable; use futures::StreamExt; @@ -83,7 +83,7 @@ where C: BlockReader + HeaderProvider + ReceiptProvider, { /// Returns the list of requested headers - fn get_headers_response(&self, request: GetBlockHeaders) -> Vec
{ + fn get_headers_response(&self, request: GetBlockHeaders) -> Vec { let GetBlockHeaders { start_block, limit, skip, direction } = request; let mut headers = Vec::new(); @@ -105,7 +105,7 @@ where if let Some(header) = self.client.header_by_hash_or_number(block).unwrap_or_default() { match direction { HeadersDirection::Rising => { - if let Some(next) = (header.number + 1).checked_add(skip) { + if let Some(next) = (header.number() + 1).checked_add(skip) { block = next.into() } else { break @@ -116,14 +116,14 @@ where // prevent under flows for block.number == 0 and `block.number - skip < // 0` if let Some(next) = - header.number.checked_sub(1).and_then(|num| num.checked_sub(skip)) + header.number().checked_sub(1).and_then(|num| num.checked_sub(skip)) { block = next.into() } else { break } } else { - block = header.parent_hash.into() + block = header.parent_hash().into() } } } @@ -146,7 +146,7 @@ where &self, _peer_id: PeerId, request: GetBlockHeaders, - response: oneshot::Sender>>, + response: oneshot::Sender>>, ) { self.metrics.eth_headers_requests_received_total.increment(1); let headers = self.get_headers_response(request); @@ -225,7 +225,7 @@ where impl Future for EthRequestHandler where C: BlockReader - + HeaderProvider + + HeaderProvider
+ Unpin, { type Output = (); diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 9801ecf9293..bdd02118352 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -194,8 +194,11 @@ where impl Testnet where - C: BlockReader - + HeaderProvider + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + HeaderProvider + Clone + Unpin + 'static, @@ -257,8 +260,11 @@ impl fmt::Debug for Testnet { impl Future for Testnet where - C: BlockReader - + HeaderProvider + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + HeaderProvider + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, @@ -455,8 +461,11 @@ where impl Future for Peer where - C: BlockReader - + HeaderProvider + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + HeaderProvider + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 06d5294d800..b311cc4e2a0 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -651,8 +651,11 @@ impl BuilderContext { pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle where Pool: TransactionPool + Unpin + 'static, - Node::Provider: - BlockReader, + Node::Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, { self.start_network_with(builder, pool, Default::default()) } @@ -671,8 +674,11 @@ impl BuilderContext { ) -> NetworkHandle where Pool: TransactionPool + Unpin + 'static, - Node::Provider: - BlockReader, + Node::Provider: BlockReader< + Block = reth_primitives::Block, 
+ Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, { let (handle, network, txpool, eth) = builder .transactions(pool, tx_config) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 2d126266a25..25c81a8d5cf 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -385,6 +385,7 @@ where Block = reth_primitives::Block, BlockBody = reth_primitives::BlockBody, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, { let factory = ProviderFactory::new( @@ -456,6 +457,7 @@ where Block = reth_primitives::Block, BlockBody = reth_primitives::BlockBody, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, { let factory = self.create_provider_factory().await?; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index ec4ee4956dd..0a0e4f10dbc 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -14,7 +14,7 @@ use reth_exex::ExExManagerHandle; use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient, }; -use reth_node_api::{BodyTy, HeaderTy}; +use reth_node_api::{BodyTy, HeaderTy, NodePrimitives}; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -41,6 +41,7 @@ where N: ProviderNodeTypes, Client: BlockClient
, Body = BodyTy> + 'static, Executor: BlockExecutorProvider, + N::Primitives: NodePrimitives, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -88,6 +89,7 @@ where H: HeaderDownloader
> + 'static, B: BodyDownloader> + 'static, Executor: BlockExecutorProvider, + N::Primitives: NodePrimitives, { let mut builder = Pipeline::::builder(); diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 2fd39bde82f..861e47fc3cf 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -319,9 +319,9 @@ impl NodeConfig { Ok(Head { number: head, hash, - difficulty: header.difficulty, + difficulty: header.difficulty(), total_difficulty, - timestamp: header.timestamp, + timestamp: header.timestamp(), }) } @@ -344,7 +344,7 @@ impl NodeConfig { // try to look up the header in the database if let Some(header) = header { info!(target: "reth::cli", ?tip, "Successfully looked up tip block in the database"); - return Ok(header.number) + return Ok(header.number()) } Ok(self.fetch_tip_from_network(client, tip.into()).await.number()) diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index bf0d4a59b21..dac13fe0763 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -1,5 +1,6 @@ //! Events related to Consensus Layer health. +use alloy_consensus::Header; use futures::Stream; use reth_storage_api::CanonChainTracker; use std::{ @@ -20,9 +21,9 @@ const NO_TRANSITION_CONFIG_EXCHANGED_PERIOD: Duration = Duration::from_secs(120) const NO_FORKCHOICE_UPDATE_RECEIVED_PERIOD: Duration = Duration::from_secs(120); /// A Stream of [`ConsensusLayerHealthEvent`]. -pub struct ConsensusLayerHealthEvents { +pub struct ConsensusLayerHealthEvents { interval: Interval, - canon_chain: Box, + canon_chain: Box>, } impl fmt::Debug for ConsensusLayerHealthEvents { @@ -31,9 +32,9 @@ impl fmt::Debug for ConsensusLayerHealthEvents { } } -impl ConsensusLayerHealthEvents { +impl ConsensusLayerHealthEvents { /// Creates a new [`ConsensusLayerHealthEvents`] with the given canonical chain tracker. 
- pub fn new(canon_chain: Box) -> Self { + pub fn new(canon_chain: Box>) -> Self { // Skip the first tick to prevent the false `ConsensusLayerHealthEvent::NeverSeen` event. let interval = tokio::time::interval_at(Instant::now() + CHECK_INTERVAL, CHECK_INTERVAL); Self { interval, canon_chain } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 98ea65778d8..852c4454f06 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -27,6 +27,7 @@ where Provider: BlockReaderIdExt< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs index ed9d77e73e8..278c785cbe9 100644 --- a/crates/optimism/rpc/src/witness.rs +++ b/crates/optimism/rpc/src/witness.rs @@ -31,7 +31,7 @@ impl OpDebugWitnessApi { impl OpDebugWitnessApi where - Provider: BlockReaderIdExt, + Provider: BlockReaderIdExt
, { /// Fetches the parent header by hash. fn parent_header(&self, parent_block_hash: B256) -> ProviderResult { @@ -45,7 +45,7 @@ where impl DebugExecutionWitnessApiServer for OpDebugWitnessApi where - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt
+ StateProviderFactory + ChainSpecProvider + 'static, diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 0ab411d3e60..8e9c06865d0 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -132,7 +132,11 @@ impl BasicPayloadJobGenerator PayloadJobGenerator for BasicPayloadJobGenerator where - Client: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, + Client: StateProviderFactory + + BlockReaderIdExt
+ + Clone + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + Unpin + 'static, Builder: PayloadBuilder + Unpin + 'static, diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 6ac85d82caa..47d50a45bb5 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -30,27 +30,15 @@ pub trait BlockHeader: + MaybeSerde + MaybeArbitrary + MaybeSerdeBincodeCompat + + AsRef + 'static { + /// Returns whether this header corresponds to an empty block. + fn is_empty(&self) -> bool; } -impl BlockHeader for T where - T: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + alloy_rlp::Encodable - + alloy_rlp::Decodable - + alloy_consensus::BlockHeader - + Sealable - + InMemorySize - + MaybeSerde - + MaybeArbitrary - + MaybeSerdeBincodeCompat - + 'static -{ +impl BlockHeader for alloy_consensus::Header { + fn is_empty(&self) -> bool { + self.is_empty() + } } diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 2a781fc0859..283fba6e957 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -29,8 +29,11 @@ pub struct EthHandlers { impl EthHandlers where Provider: StateProviderFactory - + BlockReader - + EvmEnvProvider + + BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + EvmEnvProvider + Clone + Unpin + 'static, diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 3817c4d3b37..e2141dcf1ce 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -41,6 +41,7 @@ //! Transaction = TransactionSigned, //! Block = reth_primitives::Block, //! Receipt = reth_primitives::Receipt, +//! Header = reth_primitives::Header, //! > + AccountReader //! + ChangeSetReader, //! 
Pool: TransactionPool + Unpin + 'static, @@ -121,6 +122,7 @@ //! Transaction = TransactionSigned, //! Block = reth_primitives::Block, //! Receipt = reth_primitives::Receipt, +//! Header = reth_primitives::Header, //! > + AccountReader //! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, @@ -201,7 +203,7 @@ use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_primitives::{EthPrimitives, NodePrimitives}; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, FullRpcProvider, ReceiptProvider, StateProviderFactory, + EvmEnvProvider, FullRpcProvider, HeaderProvider, ReceiptProvider, StateProviderFactory, }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, @@ -269,8 +271,11 @@ pub async fn launch, ) -> Result where - Provider: FullRpcProvider - + AccountReader + Provider: FullRpcProvider< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, @@ -667,6 +672,7 @@ where Provider: BlockReader< Block = ::Block, Receipt = ::Receipt, + Header = ::Header, >, { let Self { @@ -743,7 +749,11 @@ where ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, - Provider: BlockReader, + Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, { let Self { provider, @@ -781,6 +791,7 @@ where Provider: BlockReader< Block = ::Block, Receipt = ::Receipt, + Header = ::Header, >, { let mut modules = TransportRpcModules::default(); @@ -940,8 +951,11 @@ impl RpcRegistryInner where Provider: StateProviderFactory - + BlockReader - + EvmEnvProvider + + BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + 
EvmEnvProvider + Clone + Unpin + 'static, @@ -1311,6 +1325,7 @@ where Provider: FullRpcProvider< Block = ::Block, Receipt = ::Receipt, + Header = ::Header, > + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 0099e0f6b16..5843e945b8c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -163,11 +163,11 @@ pub trait EthFees: LoadFee { for header in &headers { - base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128); - gas_used_ratio.push(header.gas_used as f64 / header.gas_limit as f64); + base_fee_per_gas.push(header.base_fee_per_gas().unwrap_or_default() as u128); + gas_used_ratio.push(header.gas_used() as f64 / header.gas_limit() as f64); base_fee_per_blob_gas.push(header.blob_fee().unwrap_or_default()); blob_gas_used_ratio.push( - header.blob_gas_used.unwrap_or_default() as f64 + header.blob_gas_used().unwrap_or_default() as f64 / alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, ); @@ -181,8 +181,8 @@ pub trait EthFees: LoadFee { rewards.push( calculate_reward_percentiles_for_block( percentiles, - header.gas_used, - header.base_fee_per_gas.unwrap_or_default(), + header.gas_used(), + header.base_fee_per_gas().unwrap_or_default(), &block.body.transactions, &receipts, ) @@ -198,14 +198,10 @@ pub trait EthFees: LoadFee { // The unwrap is safe since we checked earlier that we got at least 1 header. 
let last_header = headers.last().expect("is present"); base_fee_per_gas.push( + last_header.next_block_base_fee( self.provider() .chain_spec() - .base_fee_params_at_timestamp(last_header.timestamp) - .next_block_base_fee( - last_header.gas_used , - last_header.gas_limit, - last_header.base_fee_per_gas.unwrap_or_default() , - ) as u128, + .base_fee_params_at_timestamp(last_header.timestamp())).unwrap_or_default() as u128 ); // Same goes for the `base_fee_per_blob_gas`: diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index e3ef6621bcb..4394feb2834 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -3,7 +3,7 @@ use super::SpawnBlocking; use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; -use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{BlockHeader, Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, }; @@ -50,6 +50,7 @@ pub trait LoadPendingBlock: Provider: BlockReaderIdExt< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, @@ -88,7 +89,7 @@ pub trait LoadPendingBlock: let chain_spec = self.provider().chain_spec(); latest_header.base_fee_per_gas = latest_header.next_block_base_fee( - chain_spec.base_fee_params_at_timestamp(latest_header.timestamp), + chain_spec.base_fee_params_at_timestamp(latest_header.timestamp()), ); // update excess blob gas consumed above target diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 2dcabc0d184..7a0d9dfa0f0 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -96,8 +96,11 @@ impl EthStateCache { pub fn spawn(provider: Provider, config: 
EthStateCacheConfig) -> Self where Provider: StateProviderFactory - + BlockReader - + Clone + + BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + Clone + Unpin + 'static, { @@ -115,8 +118,11 @@ impl EthStateCache { ) -> Self where Provider: StateProviderFactory - + BlockReader - + Clone + + BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, @@ -331,8 +337,11 @@ where impl Future for EthStateCacheService where Provider: StateProviderFactory - + BlockReader - + Clone + + BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 3f8186ae150..73cab209fd5 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -1,7 +1,7 @@ //! An implementation of the eth gas price oracle, used for providing gas price estimates based on //! previous blocks. 
-use alloy_consensus::constants::GWEI_TO_WEI; +use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader}; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockId; @@ -142,8 +142,8 @@ where let mut populated_blocks = 0; // we only check a maximum of 2 * max_block_history, or the number of blocks in the chain - let max_blocks = if self.oracle_config.max_block_history * 2 > header.number { - header.number + let max_blocks = if self.oracle_config.max_block_history * 2 > header.number() { + header.number() } else { self.oracle_config.max_block_history * 2 }; diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 86e0f9f383c..092d9485162 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -3,6 +3,7 @@ use std::sync::Arc; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; use alloy_network::Ethereum; use alloy_primitives::U256; @@ -286,7 +287,7 @@ where .header_by_number_or_tag(BlockNumberOrTag::Latest) .ok() .flatten() - .map(|header| header.number) + .map(|header| header.number()) .unwrap_or_default(), ); @@ -438,8 +439,11 @@ mod tests { use crate::EthApi; fn build_test_eth_api< - P: BlockReaderIdExt - + BlockReader + P: BlockReaderIdExt< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + BlockReader + ChainSpecProvider + EvmEnvProvider + StateProviderFactory diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 200afadaa2e..b16b370b2c0 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,5 +1,6 @@ //! `eth_` `Filter` RPC handler implementation +use alloy_consensus::BlockHeader; use alloy_primitives::TxHash; use alloy_rpc_types_eth::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, FilteredParams, Log, @@ -380,7 +381,7 @@ where .header_by_hash_or_number(block_hash.into())? 
.ok_or_else(|| ProviderError::HeaderNotFound(block_hash.into()))?; - let block_num_hash = BlockNumHash::new(header.number, block_hash); + let block_num_hash = BlockNumHash::new(header.number(), block_hash); // we also need to ensure that the receipts are available and return an error if // not, in case the block hash been reorged @@ -402,7 +403,7 @@ where block_num_hash, &receipts, false, - header.timestamp, + header.timestamp(), )?; Ok(all_logs) @@ -483,20 +484,20 @@ where for (idx, header) in headers.iter().enumerate() { // only if filter matches - if FilteredParams::matches_address(header.logs_bloom, &address_filter) && - FilteredParams::matches_topics(header.logs_bloom, &topics_filter) + if FilteredParams::matches_address(header.logs_bloom(), &address_filter) && + FilteredParams::matches_topics(header.logs_bloom(), &topics_filter) { // these are consecutive headers, so we can use the parent hash of the next // block to get the current header's hash let block_hash = match headers.get(idx + 1) { - Some(parent) => parent.parent_hash, + Some(parent) => parent.parent_hash(), None => self .provider - .block_hash(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?, + .block_hash(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?, }; - let num_hash = BlockNumHash::new(header.number, block_hash); + let num_hash = BlockNumHash::new(header.number(), block_hash); if let Some((receipts, maybe_block)) = self.receipts_and_maybe_block(&num_hash, chain_info.best_number).await? 
{ @@ -509,7 +510,7 @@ where num_hash, &receipts, false, - header.timestamp, + header.timestamp(), )?; // size check but only if range is multiple blocks, so we always return all diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index a67522ce032..794b9dde82f 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -21,6 +21,7 @@ where Provider: BlockReaderIdExt< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 91a3dae2ce7..d862bc5f30d 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -89,7 +89,7 @@ where impl ValidationApi where - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt
+ ChainSpecProvider + StateProviderFactory + HeaderProvider @@ -410,7 +410,7 @@ where #[async_trait] impl BlockSubmissionValidationApiServer for ValidationApi where - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt
+ ChainSpecProvider + StateProviderFactory + HeaderProvider diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index c1fde11c235..0f90ff69e46 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -75,7 +75,9 @@ impl BodyStage { unwind_block: Option, ) -> Result<(), StageError> where - Provider: DBProvider + BlockReader + StaticFileProviderFactory, + Provider: DBProvider + + BlockReader
+ + StaticFileProviderFactory, { // Get id for the next tx_num of zero if there are no transactions. let next_tx_num = provider @@ -152,7 +154,7 @@ where Provider: DBProvider + StaticFileProviderFactory + StatsReader - + BlockReader + + BlockReader
+ BlockWriter>, D: BodyDownloader>, { diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index f7832dd788e..c8cc8908086 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -193,7 +193,10 @@ where unwind_to: Option, ) -> Result<(), StageError> where - Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, + Provider: StaticFileProviderFactory + + DBProvider + + BlockReader + + HeaderProvider
, { // If thre's any receipts pruning configured, receipts are written directly to database and // inconsistencies are expected. @@ -265,8 +268,10 @@ impl Stage for ExecutionStage where E: BlockExecutorProvider>, Provider: DBProvider - + BlockReader::Block> - + StaticFileProviderFactory + + BlockReader< + Block = ::Block, + Header = ::BlockHeader, + > + StaticFileProviderFactory + StatsReader + BlockHashReader + StateWriter::Receipt> diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index e6b1e548455..551c10d7711 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -63,8 +63,10 @@ impl AccountHashingStage { opts: SeedOpts, ) -> Result, StageError> where - N::Primitives: - reth_primitives_traits::FullNodePrimitives, + N::Primitives: reth_primitives_traits::FullNodePrimitives< + BlockBody = reth_primitives::BlockBody, + BlockHeader = reth_primitives::Header, + >, { use alloy_primitives::U256; use reth_db_api::models::AccountBeforeTx; diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 100fe4e979a..7b9b394b561 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -1,3 +1,4 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; @@ -10,7 +11,7 @@ use reth_db_api::{ }; use reth_etl::Collector; use reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}; -use reth_primitives::{SealedHeader, StaticFileSegment}; +use reth_primitives::{NodePrimitives, SealedHeader, StaticFileSegment}; use reth_primitives_traits::serde_bincode_compat; use reth_provider::{ providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, @@ -50,7 +51,7 @@ pub struct HeaderStage { /// 
Consensus client implementation consensus: Arc>, /// Current sync gap. - sync_gap: Option, + sync_gap: Option>, /// ETL collector with `HeaderHash` -> `BlockNumber` hash_collector: Collector, /// ETL collector with `BlockNumber` -> `BincodeSealedHeader` @@ -63,7 +64,7 @@ pub struct HeaderStage { impl HeaderStage where - Downloader: HeaderDownloader
, + Downloader: HeaderDownloader, { /// Create a new header stage pub fn new( @@ -89,10 +90,14 @@ where /// /// Writes to static files ( `Header | HeaderTD | HeaderHash` ) and [`tables::HeaderNumbers`] /// database table. - fn write_headers + StaticFileProviderFactory>( - &mut self, - provider: &P, - ) -> Result { + fn write_headers

(&mut self, provider: &P) -> Result + where + P: DBProvider + + StaticFileProviderFactory< + Primitives: NodePrimitives, + >, + Downloader: HeaderDownloader

::BlockHeader>, + { let total_headers = self.header_collector.len(); info!(target: "sync::stages::headers", total = total_headers, "Writing headers"); @@ -121,19 +126,19 @@ where info!(target: "sync::stages::headers", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers"); } - let sealed_header: SealedHeader = - bincode::deserialize::>(&header_buf) + let sealed_header: SealedHeader = + bincode::deserialize::>(&header_buf) .map_err(|err| StageError::Fatal(Box::new(err)))? .into(); let (header, header_hash) = sealed_header.split(); - if header.number == 0 { + if header.number() == 0 { continue } - last_header_number = header.number; + last_header_number = header.number(); // Increase total difficulty - td += header.difficulty; + td += header.difficulty(); // Header validation self.consensus.validate_header_with_total_difficulty(&header, td).map_err(|error| { @@ -193,9 +198,10 @@ where impl Stage for HeaderStage where - P: HeaderSyncGapProvider, - D: HeaderDownloader
, Provider: DBProvider + StaticFileProviderFactory, + Provider::Primitives: NodePrimitives, + P: HeaderSyncGapProvider
::BlockHeader>, + D: HeaderDownloader
::BlockHeader>, { /// Return the id of the stage fn id(&self) -> StageId { @@ -232,7 +238,7 @@ where } debug!(target: "sync::stages::headers", ?tip, head = ?gap.local_head.hash(), "Commencing sync"); - let local_head_number = gap.local_head.number; + let local_head_number = gap.local_head.number(); // let the downloader know what to sync self.downloader.update_sync_gap(gap.local_head, gap.target); @@ -241,9 +247,9 @@ where loop { match ready!(self.downloader.poll_next_unpin(cx)) { Some(Ok(headers)) => { - info!(target: "sync::stages::headers", total = headers.len(), from_block = headers.first().map(|h| h.number), to_block = headers.last().map(|h| h.number), "Received headers"); + info!(target: "sync::stages::headers", total = headers.len(), from_block = headers.first().map(|h| h.number()), to_block = headers.last().map(|h| h.number()), "Received headers"); for header in headers { - let header_number = header.number; + let header_number = header.number(); self.hash_collector.insert(header.hash(), header_number)?; self.header_collector.insert( diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 2d2503b5391..8095dfed904 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -1,3 +1,4 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256}; use reth_codecs::Compact; use reth_consensus::ConsensusError; @@ -135,7 +136,7 @@ where Provider: DBProvider + TrieWriter + StatsReader - + HeaderProvider + + HeaderProvider
+ StageCheckpointReader + StageCheckpointWriter, { @@ -168,7 +169,7 @@ where let target_block = provider .header_by_number(to_block)? .ok_or_else(|| ProviderError::HeaderNotFound(to_block.into()))?; - let target_block_root = target_block.state_root; + let target_block_root = target_block.state_root(); let mut checkpoint = self.get_execution_checkpoint(provider)?; let (trie_root, entities_checkpoint) = if range.is_empty() { diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index 674d035021d..d34a4b07921 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -59,7 +59,7 @@ impl Default for SenderRecoveryStage { impl Stage for SenderRecoveryStage where Provider: DBProvider - + BlockReader + + BlockReader
+ StaticFileProviderFactory> + StatsReader + PruneCheckpointReader, @@ -146,7 +146,8 @@ fn recover_range( senders_cursor: &mut CURSOR, ) -> Result<(), StageError> where - Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, + Provider: + DBProvider + HeaderProvider
+ StaticFileProviderFactory, CURSOR: DbCursorRW, { debug!(target: "sync::stages::sender_recovery", ?tx_range, "Sending batch for processing"); diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 5aa1f3f880c..34aaeee44be 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -258,7 +258,7 @@ pub(crate) fn missing_static_data_error( segment: StaticFileSegment, ) -> Result where - Provider: BlockReader + StaticFileProviderFactory, + Provider: BlockReader
+ StaticFileProviderFactory, { let mut last_block = static_file_provider.get_highest_static_file_block(segment).unwrap_or_default(); diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index e06e1f09a17..dff80a23f83 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -1,7 +1,9 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; -use reth_db::tables; +use reth_codecs::Compact; +use reth_db::{table::Value, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; @@ -11,7 +13,11 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Headers; -impl Segment for Headers { +impl Segment for Headers +where + Provider: StaticFileProviderFactory> + + DBProvider, +{ fn segment(&self) -> StaticFileSegment { StaticFileSegment::Headers } @@ -25,7 +31,10 @@ impl Segment for Hea let mut static_file_writer = static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Headers)?; - let mut headers_cursor = provider.tx_ref().cursor_read::()?; + let mut headers_cursor = provider + .tx_ref() + .cursor_read::::BlockHeader>>( + )?; let headers_walker = headers_cursor.walk_range(block_range.clone())?; let mut header_td_cursor = diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 371a344d872..30a72561b23 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -90,7 +90,7 @@ where Provider: StaticFileProviderFactory + DatabaseProviderFactory< Provider: StaticFileProviderFactory< - Primitives: 
NodePrimitives, + Primitives: NodePrimitives, > + StageCheckpointReader + BlockReader, >, diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 9e4954357f8..28dbc33e90d 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -24,6 +24,7 @@ reth-fs-util.workspace = true reth-node-types.workspace = true # eth +alloy-consensus.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 9d4fb4ff02c..d738aaec439 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -1,5 +1,6 @@ //! Reth genesis initialization utility functions. +use alloy_consensus::BlockHeader; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, B256, U256}; use reth_chainspec::EthChainSpec; @@ -8,7 +9,9 @@ use reth_config::config::EtlConfig; use reth_db::tables; use reth_db_api::{transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; -use reth_primitives::{Account, Bytecode, GotExpected, Receipts, StaticFileSegment, StorageEntry}; +use reth_primitives::{ + Account, Bytecode, GotExpected, NodePrimitives, Receipts, StaticFileSegment, StorageEntry, +}; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, writer::UnifiedStorageWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, @@ -69,7 +72,10 @@ impl From for InitDatabaseError { /// Write the genesis block if it has not already been written pub fn init_genesis(factory: &PF) -> Result where - PF: DatabaseProviderFactory + StaticFileProviderFactory + ChainSpecProvider + BlockHashReader, + PF: DatabaseProviderFactory + + StaticFileProviderFactory> + + ChainSpecProvider + + BlockHashReader, PF::ProviderRW: StaticFileProviderFactory + StageCheckpointWriter + HistoryWriter @@ -78,7 +84,7 @@ where + StateWriter + StateWriter + AsRef, - 
PF::ChainSpec: EthChainSpec
, + PF::ChainSpec: EthChainSpec
::BlockHeader>, { let chain = factory.chain_spec(); @@ -307,15 +313,16 @@ pub fn insert_genesis_header( chain: &Spec, ) -> ProviderResult<()> where - Provider: StaticFileProviderFactory + DBProvider, - Spec: EthChainSpec
, + Provider: StaticFileProviderFactory> + + DBProvider, + Spec: EthChainSpec
::BlockHeader>, { let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash()); let static_file_provider = provider.static_file_provider(); match static_file_provider.block_hash(0) { Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { - let (difficulty, hash) = (header.difficulty, block_hash); + let (difficulty, hash) = (header.difficulty(), block_hash); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; writer.append_header(header, difficulty, &hash)?; } @@ -359,7 +366,7 @@ where let expected_state_root = provider_rw .header_by_number(block)? .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))? - .state_root; + .state_root(); // first line can be state root let dump_state_root = parse_state_root(&mut reader)?; diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 08f5e4680a2..1bb65a6e4fb 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -25,7 +25,7 @@ use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; -use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, @@ -89,7 +89,10 @@ impl BlockchainProvider2 { /// /// This returns a `ProviderResult` since it tries the retrieve the last finalized header from /// `database`. 
- pub fn with_latest(storage: ProviderFactory, latest: SealedHeader) -> ProviderResult { + pub fn with_latest( + storage: ProviderFactory, + latest: SealedHeader>, + ) -> ProviderResult { let provider = storage.provider()?; let finalized_header = provider .last_finalized_block_number()? @@ -175,11 +178,13 @@ impl StaticFileProviderFactory for BlockchainProvider2 } impl HeaderProvider for BlockchainProvider2 { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.consistent_provider()?.header(block_hash) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.consistent_provider()?.header_by_number(num) } @@ -191,26 +196,32 @@ impl HeaderProvider for BlockchainProvider2 { self.consistent_provider()?.header_td_by_number(number) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.consistent_provider()?.headers_range(range) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { self.consistent_provider()?.sealed_header(number) } fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_headers_range(range) } fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.consistent_provider()?.sealed_headers_while(range, predicate) } } @@ -292,7 +303,7 @@ impl BlockReader for BlockchainProvider2 { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: 
BlockHashOrNumber) -> ProviderResult>> { self.consistent_provider()?.ommers(id) } @@ -470,7 +481,7 @@ impl StageCheckpointReader for BlockchainProvider2 { } } -impl EvmEnvProvider for BlockchainProvider2 { +impl EvmEnvProvider> for BlockchainProvider2 { fn fill_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -479,7 +490,7 @@ impl EvmEnvProvider for BlockchainProvider2 { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.consistent_provider()?.fill_env_at(cfg, block_env, at, evm_config) } @@ -488,11 +499,11 @@ impl EvmEnvProvider for BlockchainProvider2 { &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.consistent_provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } @@ -504,7 +515,7 @@ impl EvmEnvProvider for BlockchainProvider2 { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.consistent_provider()?.fill_cfg_env_at(cfg, at, evm_config) } @@ -512,11 +523,11 @@ impl EvmEnvProvider for BlockchainProvider2 { fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.consistent_provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } @@ -652,10 +663,9 @@ impl StateProviderFactory for BlockchainProvider2 { } } -impl CanonChainTracker for BlockchainProvider2 -where - Self: BlockReader, -{ +impl CanonChainTracker for BlockchainProvider2 { + type Header = HeaderTy; + fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { // update timestamp self.canonical_in_memory_state.on_forkchoice_update_received(); @@ -673,15 +683,15 @@ where self.canonical_in_memory_state.last_exchanged_transition_configuration_timestamp() } - fn set_canonical_head(&self, header: SealedHeader) { + fn set_canonical_head(&self, header: SealedHeader) { self.canonical_in_memory_state.set_canonical_head(header); } - fn set_safe(&self, header: SealedHeader) { + fn set_safe(&self, header: SealedHeader) { self.canonical_in_memory_state.set_safe(header); } - fn set_finalized(&self, header: SealedHeader) { + fn set_finalized(&self, header: SealedHeader) { self.canonical_in_memory_state.set_finalized(header); } } @@ -694,26 +704,32 @@ where self.consistent_provider()?.block_by_id(id) } - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { self.consistent_provider()?.header_by_number_or_tag(id) } fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_header_by_number_or_tag(id) } - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>> { self.consistent_provider()?.sealed_header_by_id(id) } - fn header_by_id(&self, id: BlockId) -> ProviderResult> { + fn header_by_id(&self, id: BlockId) -> ProviderResult> { self.consistent_provider()?.header_by_id(id) } - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { + fn ommers_by_id(&self, id: BlockId) -> 
ProviderResult>> { self.consistent_provider()?.ommers_by_id(id) } } @@ -727,12 +743,14 @@ impl> CanonStateSubscriptions } impl ForkChoiceSubscriptions for BlockchainProvider2 { - fn subscribe_safe_block(&self) -> ForkChoiceNotifications { + type Header = HeaderTy; + + fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.canonical_in_memory_state.subscribe_safe_block(); ForkChoiceNotifications(receiver) } - fn subscribe_finalized_block(&self) -> ForkChoiceNotifications { + fn subscribe_finalized_block(&self) -> ForkChoiceNotifications { let receiver = self.canonical_in_memory_state.subscribe_finalized_block(); ForkChoiceNotifications(receiver) } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index e70f4b4e5e1..5aea5be27d4 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -6,7 +6,7 @@ use crate::{ StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_eips::{ eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, @@ -19,7 +19,7 @@ use reth_db::models::BlockNumberAddress; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; -use reth_node_types::{BlockTy, ReceiptTy, TxTy}; +use reth_node_types::{BlockTy, HeaderTy, ReceiptTy, TxTy}; use reth_primitives::{ Account, BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, @@ -628,7 +628,9 @@ impl StaticFileProviderFactory for ConsistentProvider { } impl HeaderProvider for ConsistentProvider { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> 
ProviderResult> { self.get_in_memory_or_storage_by_block( (*block_hash).into(), |db_provider| db_provider.header(block_hash), @@ -636,7 +638,7 @@ impl HeaderProvider for ConsistentProvider { ) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_block( num.into(), |db_provider| db_provider.header_by_number(num), @@ -675,7 +677,10 @@ impl HeaderProvider for ConsistentProvider { self.storage_provider.header_td_by_number(number) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.headers_range(range), @@ -684,7 +689,10 @@ impl HeaderProvider for ConsistentProvider { ) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( number.into(), |db_provider| db_provider.sealed_header(number), @@ -695,7 +703,7 @@ impl HeaderProvider for ConsistentProvider { fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_headers_range(range), @@ -707,8 +715,8 @@ impl HeaderProvider for ConsistentProvider { fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), @@ -832,7 +840,7 @@ impl BlockReader for ConsistentProvider { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } - fn ommers(&self, id: 
BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.ommers(id), @@ -868,7 +876,7 @@ impl BlockReader for ConsistentProvider { // Iterate from the lowest block in memory until our target block for state in block_state.chain().collect::>().into_iter().rev() { let block_tx_count = state.block_ref().block.body.transactions().len() as u64; - if state.block_ref().block().number == number { + if state.block_ref().block().number() == number { stored_indices.tx_count = block_tx_count; } else { stored_indices.first_tx_num += block_tx_count; @@ -1017,7 +1025,7 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.transaction_block(id), - |_, _, block_state| Ok(Some(block_state.block_ref().block().number)), + |_, _, block_state| Ok(Some(block_state.block_ref().block().number())), ) } @@ -1222,7 +1230,7 @@ impl StageCheckpointReader for ConsistentProvider { } } -impl EvmEnvProvider for ConsistentProvider { +impl EvmEnvProvider> for ConsistentProvider { fn fill_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -1231,7 +1239,7 @@ impl EvmEnvProvider for ConsistentProvider { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1242,15 +1250,15 @@ impl EvmEnvProvider for ConsistentProvider { &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); Ok(()) } @@ -1262,7 +1270,7 @@ impl EvmEnvProvider for ConsistentProvider { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1272,15 +1280,15 @@ impl EvmEnvProvider for ConsistentProvider { fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; evm_config.fill_cfg_env(cfg, header, total_difficulty); Ok(()) } @@ -1326,7 +1334,7 @@ impl BlockReaderIdExt for ConsistentProvider { } } - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult>> { Ok(match id { BlockNumberOrTag::Latest => { Some(self.canonical_in_memory_state.get_canonical_head().unseal()) @@ -1347,7 +1355,7 @@ impl BlockReaderIdExt for ConsistentProvider { fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult>>> { match id { BlockNumberOrTag::Latest => { Ok(Some(self.canonical_in_memory_state.get_canonical_head())) @@ -1366,21 +1374,24 @@ impl BlockReaderIdExt for ConsistentProvider { } } - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>>> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } - fn header_by_id(&self, id: BlockId) -> ProviderResult> { + fn header_by_id(&self, id: BlockId) -> ProviderResult>> { Ok(match id { BlockId::Number(num) => self.header_by_number_or_tag(num)?, BlockId::Hash(hash) => self.header(&hash.block_hash)?, }) } - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>> { match id { BlockId::Number(num) => self.ommers_by_number_or_tag(num), BlockId::Hash(hash) => { diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 3c22a1a73a2..d34d67d9dc8 100644 --- 
a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -7,7 +7,6 @@ use crate::{ PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, @@ -19,7 +18,7 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, @@ -228,21 +227,24 @@ impl StaticFileProviderFactory for ProviderFactory { } impl HeaderSyncGapProvider for ProviderFactory { + type Header = HeaderTy; fn sync_gap( &self, tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, - ) -> ProviderResult { + ) -> ProviderResult> { self.provider()?.sync_gap(tip, highest_uninterrupted_block) } } impl HeaderProvider for ProviderFactory { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.provider()?.header(block_hash) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, num, @@ -270,7 +272,10 @@ impl HeaderProvider for ProviderFactory { ) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { 
self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), @@ -280,7 +285,10 @@ impl HeaderProvider for ProviderFactory { ) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, number, @@ -292,15 +300,15 @@ impl HeaderProvider for ProviderFactory { fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.sealed_headers_while(range, |_| true) } fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), @@ -385,7 +393,7 @@ impl BlockReader for ProviderFactory { self.provider()?.pending_block_and_receipts() } - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { self.provider()?.ommers(id) } @@ -570,7 +578,7 @@ impl StageCheckpointReader for ProviderFactory { } } -impl EvmEnvProvider for ProviderFactory { +impl EvmEnvProvider> for ProviderFactory { fn fill_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -579,7 +587,7 @@ impl EvmEnvProvider for ProviderFactory { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.provider()?.fill_env_at(cfg, block_env, at, evm_config) } @@ -588,11 +596,11 @@ impl EvmEnvProvider for ProviderFactory { &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } @@ -604,7 +612,7 @@ impl EvmEnvProvider for ProviderFactory { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.provider()?.fill_cfg_env_at(cfg, at, evm_config) } @@ -612,11 +620,11 @@ impl EvmEnvProvider for ProviderFactory { fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index cfbe20cf4b4..9dddbb9c0a7 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -19,7 +19,7 @@ use crate::{ StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::{ eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, @@ -50,10 +50,11 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; -use reth_node_types::{BlockTy, BodyTy, NodeTypes, ReceiptTy, TxTy}; +use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives::{ - Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, SealedBlock, SealedBlockFor, - SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, + Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, NodePrimitives, SealedBlock, + SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, + TransactionMeta, }; use reth_primitives_traits::{Block as _, BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; @@ -326,7 +327,7 @@ impl DatabaseProvider StateCommitmentProvider for DatabaseProvi type StateCommitment = N::StateCommitment; } -impl DatabaseProvider { +impl< + Tx: DbTx + DbTxMut + 'static, + N: NodeTypesForProvider>, + > DatabaseProvider +{ // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] /// Inserts an historical block. 
**Used for setting up test environments** @@ -560,8 +565,7 @@ impl DatabaseProvider { construct_block: BF, ) -> ProviderResult> where - N::ChainSpec: EthereumHardforks, - H: AsRef
, + H: AsRef>, HF: FnOnce(BlockNumber) -> ProviderResult>, BF: FnOnce(H, BodyTy, Vec
) -> ProviderResult>, { @@ -610,8 +614,7 @@ impl DatabaseProvider { mut assemble_block: F, ) -> ProviderResult> where - N::ChainSpec: EthereumHardforks, - H: AsRef
, + H: AsRef>, HF: FnOnce(RangeInclusive) -> ProviderResult>, F: FnMut(H, BodyTy, Range) -> ProviderResult, { @@ -634,7 +637,7 @@ impl DatabaseProvider { // have enough information to return the block anyways, so // we skip the block. if let Some((_, block_body_indices)) = - block_body_cursor.seek_exact(header.as_ref().number)? + block_body_cursor.seek_exact(header.as_ref().number())? { let tx_range = block_body_indices.tx_num_range(); present_headers.push((header, tx_range)); @@ -678,8 +681,7 @@ impl DatabaseProvider { assemble_block: BF, ) -> ProviderResult> where - N::ChainSpec: EthereumHardforks, - H: AsRef
, + H: AsRef>, HF: Fn(RangeInclusive) -> ProviderResult>, BF: Fn(H, BodyTy, Vec
) -> ProviderResult, { @@ -943,12 +945,16 @@ impl ChangeSetReader for DatabaseProvider { } } -impl HeaderSyncGapProvider for DatabaseProvider { +impl HeaderSyncGapProvider + for DatabaseProvider +{ + type Header = HeaderTy; + fn sync_gap( &self, tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, - ) -> ProviderResult { + ) -> ProviderResult> { let static_file_provider = self.static_file_provider(); // Make sure Headers static file is at the same height. If it's further, this @@ -987,10 +993,10 @@ impl HeaderSyncGapProvider for DatabaseProvide } } -impl> HeaderProvider - for DatabaseProvider -{ - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { +impl HeaderProvider for DatabaseProvider { + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? { Ok(self.header_by_number(num)?) } else { @@ -998,12 +1004,12 @@ impl> HeaderProvi } } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, num, |static_file| static_file.header_by_number(num), - || Ok(self.tx.get::(num)?), + || Ok(self.tx.get::>(num)?), ) } @@ -1030,17 +1036,25 @@ impl> HeaderProvi ) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), |static_file, range, _| static_file.headers_range(range), - |range, _| self.cursor_read_collect::(range).map_err(Into::into), + |range, _| { + self.cursor_read_collect::>(range).map_err(Into::into) + }, |_| true, ) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { 
self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, number, @@ -1061,15 +1075,17 @@ impl> HeaderProvi fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), |static_file, range, predicate| static_file.sealed_headers_while(range, predicate), |range, mut predicate| { let mut headers = vec![]; - for entry in self.tx.cursor_read::()?.walk_range(range)? { + for entry in + self.tx.cursor_read::>()?.walk_range(range)? + { let (number, header) = entry?; let hash = self .block_hash(number)? @@ -1210,7 +1226,7 @@ impl BlockReader for DatabaseProvid /// /// If the block is not found, this returns `None`. /// If the block exists, but doesn't contain ommers, this returns `None`. - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { if let Some(number) = self.convert_hash_or_number(id)? { // If the Paris (Merge) hardfork block is known and block is after it, return empty // ommers. 
@@ -1218,7 +1234,8 @@ impl BlockReader for DatabaseProvid return Ok(Some(Vec::new())) } - let ommers = self.tx.get::(number)?.map(|o| o.ommers); + let ommers = + self.tx.get::>(number)?.map(|o| o.ommers); return Ok(ommers) } @@ -1450,9 +1467,9 @@ impl TransactionsProvider for Datab index, block_hash, block_number, - base_fee: header.base_fee_per_gas, - excess_blob_gas: header.excess_blob_gas, - timestamp: header.timestamp, + base_fee: header.base_fee_per_gas(), + excess_blob_gas: header.excess_blob_gas(), + timestamp: header.timestamp(), }; return Ok(Some((transaction, meta))) @@ -1618,7 +1635,7 @@ impl> Withdrawals } } -impl> EvmEnvProvider +impl EvmEnvProvider> for DatabaseProvider { fn fill_env_at( @@ -1629,7 +1646,7 @@ impl> EvmEnvProvi evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1640,15 +1657,15 @@ impl> EvmEnvProvi &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); Ok(()) } @@ -1660,7 +1677,7 @@ impl> EvmEnvProvi evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1670,15 +1687,15 @@ impl> EvmEnvProvi fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; evm_config.fill_cfg_env(cfg, header, total_difficulty); Ok(()) } @@ -2813,18 +2830,18 @@ impl BlockWrite block: SealedBlockWithSenders, write_to: StorageLocation, ) -> ProviderResult { - let block_number = block.number; + let block_number = block.number(); let mut durations_recorder = metrics::DurationsRecorder::default(); // total difficulty let ttd = if block_number == 0 { - block.difficulty + block.difficulty() } else { let parent_block_number = block_number - 1; let parent_ttd = self.header_td_by_number(parent_block_number)?.unwrap_or_default(); durations_recorder.record_relative(metrics::Action::GetParentTD); - parent_ttd + block.difficulty + parent_ttd + block.difficulty() }; if write_to.database() { @@ -2832,7 +2849,8 @@ impl BlockWrite durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); // Put header with canonical hashes. 
- self.tx.put::(block_number, block.header.as_ref().clone())?; + self.tx + .put::>>(block_number, block.header.as_ref().clone())?; durations_recorder.record_relative(metrics::Action::InsertHeaders); self.tx.put::(block_number, ttd.into())?; @@ -2979,7 +2997,7 @@ impl BlockWrite self.tx.delete::(hash, None)?; rev_headers.delete_current()?; } - self.remove::(block + 1..)?; + self.remove::>>(block + 1..)?; self.remove::(block + 1..)?; // First transaction to be removed @@ -3063,10 +3081,10 @@ impl BlockWrite return Ok(()) } - let first_number = blocks.first().unwrap().number; + let first_number = blocks.first().unwrap().number(); let last = blocks.last().unwrap(); - let last_block_number = last.number; + let last_block_number = last.number(); let mut durations_recorder = metrics::DurationsRecorder::default(); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 6631b5b1b31..44cd5554bee 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,6 +1,6 @@ use crate::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, + BlockSource, BlockchainTreePendingStateProvider, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, NodePrimitivesProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, @@ -14,6 +14,7 @@ use alloy_eips::{ BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, }; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_rpc_types_engine::ForkchoiceState; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, @@ 
-24,13 +25,16 @@ use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::{BlockTy, FullNodePrimitives, NodeTypes, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_node_types::{ + BlockTy, FullNodePrimitives, HeaderTy, NodeTypes, NodeTypesWithDB, ReceiptTy, TxTy, +}; use reth_primitives::{ Account, BlockWithSenders, EthPrimitives, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::CanonChainTracker; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -61,7 +65,6 @@ mod bundle_state_provider; pub use bundle_state_provider::BundleStateProvider; mod consistent_view; -use alloy_rpc_types_engine::ForkchoiceState; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; mod blockchain_provider; @@ -77,11 +80,7 @@ where Self: NodeTypes< ChainSpec: EthereumHardforks, Storage: ChainStorage, - Primitives: FullNodePrimitives< - SignedTx: Value, - Receipt: Value, - BlockHeader = alloy_consensus::Header, - >, + Primitives: FullNodePrimitives, >, { } @@ -90,11 +89,7 @@ impl NodeTypesForProvider for T where T: NodeTypes< ChainSpec: EthereumHardforks, Storage: ChainStorage, - Primitives: FullNodePrimitives< - SignedTx: Value, - Receipt: Value, - BlockHeader = alloy_consensus::Header, - >, + Primitives: FullNodePrimitives, > { } @@ -151,7 +146,7 @@ impl BlockchainProvider { } } -impl BlockchainProvider { +impl BlockchainProvider { /// Create new provider instance that wraps the database and the blockchain tree, using the /// provided latest header to initialize the chain info tracker, alongside the finalized header /// if it exists. 
@@ -261,7 +256,9 @@ impl StaticFileProviderFactory for BlockchainProvider { } } -impl HeaderProvider for BlockchainProvider { +impl HeaderProvider for BlockchainProvider { + type Header = Header; + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.database.header(block_hash) } @@ -593,7 +590,7 @@ impl StageCheckpointReader for BlockchainProvider { } } -impl EvmEnvProvider for BlockchainProvider { +impl EvmEnvProvider for BlockchainProvider { fn fill_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -666,7 +663,7 @@ impl ChainSpecProvider for BlockchainProvider { } } -impl StateProviderFactory for BlockchainProvider { +impl StateProviderFactory for BlockchainProvider { /// Storage provider for latest block fn latest(&self) -> ProviderResult { trace!(target: "providers::blockchain", "Getting latest block state provider"); @@ -840,10 +837,9 @@ impl BlockchainTreeViewer for BlockchainProvider { } } -impl CanonChainTracker for BlockchainProvider -where - Self: BlockReader, -{ +impl CanonChainTracker for BlockchainProvider { + type Header = HeaderTy; + fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { // update timestamp self.chain_info.on_forkchoice_update_received(); @@ -874,10 +870,7 @@ where } } -impl BlockReaderIdExt for BlockchainProvider -where - Self: BlockReader + ReceiptProviderIdExt, -{ +impl BlockReaderIdExt for BlockchainProvider { fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), @@ -896,7 +889,10 @@ where } } - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { Ok(match id { BlockNumberOrTag::Latest => Some(self.chain_info.get_canonical_head().unseal()), BlockNumberOrTag::Finalized => { @@ -912,7 +908,7 @@ where fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult>> { match id { 
BlockNumberOrTag::Latest => Ok(Some(self.chain_info.get_canonical_head())), BlockNumberOrTag::Finalized => Ok(self.chain_info.get_finalized_header()), @@ -927,21 +923,24 @@ where } } - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } - fn header_by_id(&self, id: BlockId) -> ProviderResult> { + fn header_by_id(&self, id: BlockId) -> ProviderResult> { Ok(match id { BlockId::Number(num) => self.header_by_number_or_tag(num)?, BlockId::Hash(hash) => self.header(&hash.block_hash)?, }) } - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { match id { BlockId::Number(num) => self.ommers_by_number_or_tag(num), BlockId::Hash(hash) => { @@ -968,7 +967,9 @@ impl CanonStateSubscriptions for BlockchainProvider { } } -impl ForkChoiceSubscriptions for BlockchainProvider { +impl ForkChoiceSubscriptions for BlockchainProvider { + type Header = HeaderTy; + fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.chain_info.subscribe_safe_block(); ForkChoiceNotifications(receiver) diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 659b093d9d6..8f2d002ab89 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -6,7 +6,6 @@ use crate::{ to_range, BlockHashReader, BlockNumReader, HeaderProvider, ReceiptProvider, TransactionsProvider, }; -use alloy_consensus::Header; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; @@ -15,7 +14,7 @@ use reth_db::{ 
BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, TDWithHashMask, TotalDifficultyMask, TransactionMask, }, - table::Decompress, + table::{Decompress, Value}, }; use reth_node_types::NodePrimitives; use reth_primitives::{transaction::recover_signers, SealedHeader, TransactionMeta}; @@ -90,17 +89,19 @@ impl<'a, N: NodePrimitives> StaticFileJarProvider<'a, N> { } } -impl HeaderProvider for StaticFileJarProvider<'_, N> { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { +impl> HeaderProvider for StaticFileJarProvider<'_, N> { + type Header = N::BlockHeader; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? .filter(|(_, hash)| hash == block_hash) .map(|(header, _)| header)) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } fn header_td(&self, block_hash: &BlockHash) -> ProviderResult> { @@ -115,14 +116,17 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { Ok(self.cursor()?.get_one::(num.into())?.map(Into::into)) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut headers = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(header) = cursor.get_one::>(num.into())? { + if let Some(header) = cursor.get_one::>(num.into())? { headers.push(header); } } @@ -130,18 +134,21 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { Ok(headers) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { Ok(self .cursor()? - .get_two::>(number.into())? 
+ .get_two::>(number.into())? .map(|(header, hash)| SealedHeader::new(header, hash))) } fn sealed_headers_while( &self, range: impl RangeBounds, - mut predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + mut predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { let range = to_range(range); let mut cursor = self.cursor()?; @@ -149,7 +156,7 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { for number in range { if let Some((header, hash)) = - cursor.get_two::>(number.into())? + cursor.get_two::>(number.into())? { let sealed = SealedHeader::new(header, hash); if !predicate(&sealed) { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 3b49f8d401f..eca382af76c 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1235,12 +1235,14 @@ impl StaticFileWriter for StaticFileProvider { } } -impl HeaderProvider for StaticFileProvider { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { +impl> HeaderProvider for StaticFileProvider { + type Header = N::BlockHeader; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? 
.and_then(|(header, hash)| { if &hash == block_hash { return Some(header) @@ -1250,7 +1252,7 @@ impl HeaderProvider for StaticFileProvider { }) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.header_by_number(num)) .or_else(|err| { @@ -1283,16 +1285,22 @@ impl HeaderProvider for StaticFileProvider { }) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Headers, to_range(range), - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } - fn sealed_header(&self, num: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + num: BlockNumber, + ) -> ProviderResult>> { self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.sealed_header(num)) .or_else(|err| { @@ -1307,14 +1315,14 @@ impl HeaderProvider for StaticFileProvider { fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.fetch_range_with_predicate( StaticFileSegment::Headers, to_range(range), |cursor, number| { Ok(cursor - .get_two::>(number.into())? + .get_two::>(number.into())? 
.map(|(header, hash)| SealedHeader::new(header, hash))) }, predicate, @@ -1385,8 +1393,8 @@ impl> Rec } } -impl> TransactionsProviderExt - for StaticFileProvider +impl> + TransactionsProviderExt for StaticFileProvider { fn transaction_hashes_by_range( &self, @@ -1582,7 +1590,9 @@ impl BlockNumReader for StaticFileProvider { } } -impl> BlockReader for StaticFileProvider { +impl> BlockReader + for StaticFileProvider +{ type Block = N::Block; fn find_block_by_hash( @@ -1618,7 +1628,7 @@ impl> BlockReader for Sta Err(ProviderError::UnsupportedProvider) } - fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 6f5335ec665..b7f60c16442 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -2,7 +2,7 @@ use super::{ manager::StaticFileProviderInner, metrics::StaticFileProviderMetrics, StaticFileProvider, }; use crate::providers::static_file::metrics::StaticFileProviderOperation; -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256}; use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock, RwLock}; use reth_codecs::Compact; @@ -526,16 +526,19 @@ impl StaticFileProviderRW { /// Returns the current [`BlockNumber`] as seen in the static file. 
pub fn append_header( &mut self, - header: &Header, + header: &N::BlockHeader, total_difficulty: U256, hash: &BlockHash, - ) -> ProviderResult<()> { + ) -> ProviderResult<()> + where + N::BlockHeader: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Headers); - self.increment_block(header.number)?; + self.increment_block(header.number())?; self.append_column(header)?; self.append_column(CompactU256::from(total_difficulty))?; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 12c0330ac0e..385e5e8205d 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -179,6 +179,8 @@ impl DatabaseProviderFactory for MockEthProvider { } impl HeaderProvider for MockEthProvider { + type Header = Header; + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { let lock = self.headers.lock(); Ok(lock.get(block_hash).cloned()) diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index ff6b3fccbe1..5120afffa85 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -284,6 +284,8 @@ impl ReceiptProvider for NoopProvider { impl ReceiptProviderIdExt for NoopProvider {} impl HeaderProvider for NoopProvider { + type Header = Header; + fn header(&self, _block_hash: &BlockHash) -> ProviderResult> { Ok(None) } @@ -586,6 +588,8 @@ impl CanonStateSubscriptions for NoopProvider { } impl ForkChoiceSubscriptions for NoopProvider { + type Header = Header; + fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let (_, rx) = watch::channel(None); ForkChoiceNotifications(rx) diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 0d28f83739b..be485839f00 100644 --- 
a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -7,7 +7,7 @@ use crate::{ }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_chainspec::EthereumHardforks; -use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_storage_api::NodePrimitivesProvider; /// Helper trait to unify all provider traits for simplicity. @@ -15,14 +15,18 @@ pub trait FullProvider: DatabaseProviderFactory + NodePrimitivesProvider + StaticFileProviderFactory - + BlockReaderIdExt, Block = BlockTy, Receipt = ReceiptTy> - + AccountReader + + BlockReaderIdExt< + Transaction = TxTy, + Block = BlockTy, + Receipt = ReceiptTy, + Header = HeaderTy, + > + AccountReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions - + ForkChoiceSubscriptions + + ForkChoiceSubscriptions
> + StageCheckpointReader + Clone + Unpin @@ -34,14 +38,18 @@ impl FullProvider for T where T: DatabaseProviderFactory + NodePrimitivesProvider + StaticFileProviderFactory - + BlockReaderIdExt, Block = BlockTy, Receipt = ReceiptTy> - + AccountReader + + BlockReaderIdExt< + Transaction = TxTy, + Block = BlockTy, + Receipt = ReceiptTy, + Header = HeaderTy, + > + AccountReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions - + ForkChoiceSubscriptions + + ForkChoiceSubscriptions
> + StageCheckpointReader + Clone + Unpin diff --git a/crates/storage/provider/src/traits/header_sync_gap.rs b/crates/storage/provider/src/traits/header_sync_gap.rs index 5ce7e119730..b572750d4a2 100644 --- a/crates/storage/provider/src/traits/header_sync_gap.rs +++ b/crates/storage/provider/src/traits/header_sync_gap.rs @@ -1,3 +1,4 @@ +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use reth_network_p2p::headers::downloader::SyncTarget; @@ -7,21 +8,21 @@ use tokio::sync::watch; /// Represents a gap to sync: from `local_head` to `target` #[derive(Clone, Debug)] -pub struct HeaderSyncGap { +pub struct HeaderSyncGap { /// The local head block. Represents lower bound of sync range. - pub local_head: SealedHeader, + pub local_head: SealedHeader, /// The sync target. Represents upper bound of sync range. pub target: SyncTarget, } -impl HeaderSyncGap { +impl HeaderSyncGap { /// Returns `true` if the gap from the head to the target was closed #[inline] pub fn is_closed(&self) -> bool { match self.target.tip() { BlockHashOrNumber::Hash(hash) => self.local_head.hash() == hash, - BlockHashOrNumber::Number(num) => self.local_head.number == num, + BlockHashOrNumber::Number(num) => self.local_head.number() == num, } } } @@ -29,6 +30,9 @@ impl HeaderSyncGap { /// Client trait for determining the current headers sync gap. #[auto_impl::auto_impl(&, Arc)] pub trait HeaderSyncGapProvider: Send + Sync { + /// The header type. + type Header: Send + Sync; + /// Find a current sync gap for the headers depending on the last /// uninterrupted block number. Last uninterrupted block represents the block number before /// which there are no gaps. 
It's up to the caller to ensure that last uninterrupted block is @@ -37,5 +41,5 @@ pub trait HeaderSyncGapProvider: Send + Sync { &self, tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, - ) -> ProviderResult; + ) -> ProviderResult>; } diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 204e9027da2..917796038e9 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -2,7 +2,6 @@ use crate::{ BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; @@ -57,6 +56,7 @@ pub trait BlockReader: /// The block type this provider reads. type Block: reth_primitives_traits::Block< Body: reth_primitives_traits::BlockBody, + Header = Self::Header, >; /// Tries to find in the given block source. @@ -98,7 +98,7 @@ pub trait BlockReader: /// Returns the ommers/uncle headers of the given block from the database. /// /// Returns `None` if block is not found. - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; /// Returns the block with matching hash from the database. 
/// @@ -187,7 +187,7 @@ impl BlockReader for std::sync::Arc { ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { T::ommers(self, id) } fn block_by_hash(&self, hash: B256) -> ProviderResult> { @@ -256,7 +256,7 @@ impl BlockReader for &T { ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { T::ommers(self, id) } fn block_by_hash(&self, hash: B256) -> ProviderResult> { @@ -321,7 +321,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn pending_header(&self) -> ProviderResult> { + fn pending_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Pending.into()) } @@ -329,7 +329,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn latest_header(&self) -> ProviderResult> { + fn latest_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Latest.into()) } @@ -337,7 +337,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. 
- fn safe_header(&self) -> ProviderResult> { + fn safe_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Safe.into()) } @@ -345,7 +345,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn finalized_header(&self) -> ProviderResult> { + fn finalized_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Finalized.into()) } @@ -378,7 +378,10 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the header with matching tag from the database /// /// Returns `None` if header is not found. - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into())) } @@ -389,7 +392,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))? .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) @@ -398,22 +401,28 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the sealed header with the matching `BlockId` from the database. /// /// Returns `None` if header is not found. - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult>; + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>>; /// Returns the header with the matching `BlockId` from the database. /// /// Returns `None` if header is not found. 
- fn header_by_id(&self, id: BlockId) -> ProviderResult>; + fn header_by_id(&self, id: BlockId) -> ProviderResult>; /// Returns the ommers with the matching tag from the database. - fn ommers_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult>> { + fn ommers_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult>> { self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.ommers(num.into())) } /// Returns the ommers with the matching `BlockId` from the database. /// /// Returns `None` if block is not found. - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; } /// Functionality to read the last known chain blocks from the database. diff --git a/crates/storage/storage-api/src/chain_info.rs b/crates/storage/storage-api/src/chain_info.rs index 39f8639dd27..b6f58b7e73f 100644 --- a/crates/storage/storage-api/src/chain_info.rs +++ b/crates/storage/storage-api/src/chain_info.rs @@ -4,6 +4,9 @@ use std::time::Instant; /// A type that can track updates related to fork choice updates. pub trait CanonChainTracker: Send + Sync { + /// The header type. + type Header: Send + Sync; + /// Notify the tracker about a received fork choice update. fn on_forkchoice_update_received(&self, update: &ForkchoiceState); @@ -19,11 +22,11 @@ pub trait CanonChainTracker: Send + Sync { fn last_exchanged_transition_configuration_timestamp(&self) -> Option; /// Sets the canonical head of the chain. - fn set_canonical_head(&self, header: SealedHeader); + fn set_canonical_head(&self, header: SealedHeader); /// Sets the safe block of the chain. - fn set_safe(&self, header: SealedHeader); + fn set_safe(&self, header: SealedHeader); /// Sets the finalized block of the chain. 
- fn set_finalized(&self, header: SealedHeader); + fn set_finalized(&self, header: SealedHeader); } diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index c068f7c1d29..2f1c9750edb 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,34 +1,40 @@ -use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, U256}; use reth_primitives::SealedHeader; +use reth_primitives_traits::BlockHeader; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; /// Client trait for fetching `Header` related data. #[auto_impl::auto_impl(&, Arc)] pub trait HeaderProvider: Send + Sync { + /// The header type this provider supports. + type Header: BlockHeader; + /// Check if block is known fn is_known(&self, block_hash: &BlockHash) -> ProviderResult { self.header(block_hash).map(|header| header.is_some()) } /// Get header by block hash - fn header(&self, block_hash: &BlockHash) -> ProviderResult>; + fn header(&self, block_hash: &BlockHash) -> ProviderResult>; /// Retrieves the header sealed by the given block hash. 
- fn sealed_header_by_hash(&self, block_hash: BlockHash) -> ProviderResult> { + fn sealed_header_by_hash( + &self, + block_hash: BlockHash, + ) -> ProviderResult>> { Ok(self.header(&block_hash)?.map(|header| SealedHeader::new(header, block_hash))) } /// Get header by block number - fn header_by_number(&self, num: u64) -> ProviderResult>; + fn header_by_number(&self, num: u64) -> ProviderResult>; /// Get header by block number or hash fn header_by_hash_or_number( &self, hash_or_num: BlockHashOrNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { match hash_or_num { BlockHashOrNumber::Hash(hash) => self.header(&hash), BlockHashOrNumber::Number(num) => self.header_by_number(num), @@ -42,16 +48,22 @@ pub trait HeaderProvider: Send + Sync { fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult>; /// Get headers in range of block numbers - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult>; + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>; /// Get a single sealed header by block number. - fn sealed_header(&self, number: BlockNumber) -> ProviderResult>; + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>>; /// Get headers in range of block numbers. 
fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.sealed_headers_while(range, |_| true) } @@ -59,6 +71,6 @@ pub trait HeaderProvider: Send + Sync { fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult>; + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>>; } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index cb75af7db17..1a5fd839926 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -7,6 +7,7 @@ use crate::{ traits::{CanonicalStateUpdate, TransactionPool, TransactionPoolExt}, BlockInfo, PoolTransaction, PoolUpdateKind, }; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{Address, BlockHash, BlockNumber}; use futures_util::{ @@ -110,11 +111,13 @@ pub async fn maintain_transaction_pool( let latest = SealedHeader::seal(latest); let chain_spec = client.chain_spec(); let info = BlockInfo { - block_gas_limit: latest.gas_limit, + block_gas_limit: latest.gas_limit(), last_seen_block_hash: latest.hash(), - last_seen_block_number: latest.number, + last_seen_block_number: latest.number(), pending_basefee: latest - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(latest.timestamp + 12)) + .next_block_base_fee( + chain_spec.base_fee_params_at_timestamp(latest.timestamp() + 12), + ) .unwrap_or_default(), pending_blob_fee: latest.next_block_blob_fee(), }; diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml index 3310d1cbd67..ec278ac1cc1 100644 --- a/examples/db-access/Cargo.toml +++ b/examples/db-access/Cargo.toml @@ -14,6 +14,7 @@ reth-provider.workspace = true reth-node-ethereum.workspace = true reth-node-types.workspace = true +alloy-consensus.workspace = true alloy-rpc-types-eth.workspace = true alloy-primitives.workspace = true diff --git 
a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 9f95fb51d91..727bd1bfff3 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,3 +1,4 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{Address, B256}; use alloy_rpc_types_eth::{Filter, FilteredParams}; use reth_chainspec::ChainSpecBuilder; @@ -193,7 +194,7 @@ fn receipts_provider_example< // receipts and do something with the data // 1. get the bloom from the header let header = provider.header_by_number(header_num)?.unwrap(); - let bloom = header.logs_bloom; + let bloom = header.logs_bloom(); // 2. Construct the address/topics filters // For a hypothetical address, we'll want to filter down for a specific indexed topic (e.g. From 6789ff4a1ecbf8d92f076817abdc0f48b00b794b Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 2 Dec 2024 14:56:55 +0000 Subject: [PATCH 818/970] chore: add `OpEthApiBuilder` and `OpEthApiInner` (#13009) --- Cargo.lock | 1 - crates/optimism/bin/src/main.rs | 9 +- crates/optimism/node/src/node.rs | 47 +++++-- crates/optimism/node/tests/it/builder.rs | 9 +- crates/optimism/rpc/Cargo.toml | 1 - crates/optimism/rpc/src/eth/block.rs | 2 +- crates/optimism/rpc/src/eth/call.rs | 4 +- crates/optimism/rpc/src/eth/mod.rs | 130 +++++++++++++------ crates/optimism/rpc/src/eth/pending_block.rs | 2 +- crates/optimism/rpc/src/eth/receipt.rs | 4 +- crates/optimism/rpc/src/eth/transaction.rs | 5 +- 11 files changed, 145 insertions(+), 69 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2a9d5222025..2b02dc2e3e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8483,7 +8483,6 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-debug", "alloy-rpc-types-eth", - "derive_more 1.0.0", "jsonrpsee-core", "jsonrpsee-types", "op-alloy-consensus", diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 6494298ba39..82fb3c24195 100644 --- a/crates/optimism/bin/src/main.rs +++ 
b/crates/optimism/bin/src/main.rs @@ -3,9 +3,9 @@ #![cfg(feature = "optimism")] use clap::Parser; -use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher}; +use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher, Node}; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; -use reth_optimism_node::{args::RollupArgs, node::OpAddOns, OpNode}; +use reth_optimism_node::{args::RollupArgs, OpNode}; use reth_provider::providers::BlockchainProvider2; use tracing as _; @@ -27,7 +27,6 @@ fn main() { tracing::warn!(target: "reth::cli", "Experimental engine is default now, and the --engine.experimental flag is deprecated. To enable the legacy functionality, use --engine.legacy."); } let use_legacy_engine = rollup_args.legacy; - let sequencer_http_arg = rollup_args.sequencer_http.clone(); match use_legacy_engine { false => { let engine_tree_config = TreeConfig::default() @@ -35,8 +34,8 @@ fn main() { .with_memory_block_buffer_target(rollup_args.memory_block_buffer_target); let handle = builder .with_types_and_provider::>() - .with_components(OpNode::components(rollup_args)) - .with_add_ons(OpAddOns::new(sequencer_http_arg)) + .with_components(OpNode::components(rollup_args.clone())) + .with_add_ons(OpNode::new(rollup_args).add_ons()) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( builder.task_executor().clone(), diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index bdf8c3f58ee..73adcb43f18 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -31,7 +31,7 @@ use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_primitives::OpPrimitives; use reth_optimism_rpc::{ witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, - OpEthApi, + OpEthApi, SequencerClient, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::BlockBody; @@ -178,12 +178,11 @@ where 
OpAddOns>::Components>>; fn components_builder(&self) -> Self::ComponentsBuilder { - let Self { args } = self; - Self::components(args.clone()) + Self::components(self.args.clone()) } fn add_ons(&self) -> Self::AddOns { - OpAddOns::new(self.args.sequencer_http.clone()) + Self::AddOns::builder().with_sequencer(self.args.sequencer_http.clone()).build() } } @@ -204,14 +203,14 @@ pub struct OpAddOns(pub RpcAddOns, OpEngin impl>> Default for OpAddOns { fn default() -> Self { - Self::new(None) + Self::builder().build() } } impl>> OpAddOns { - /// Create a new instance with the given `sequencer_http` URL. - pub fn new(sequencer_http: Option) -> Self { - Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http), Default::default())) + /// Build a [`OpAddOns`] using [`OpAddOnsBuilder`]. + pub fn builder() -> OpAddOnsBuilder { + OpAddOnsBuilder::default() } } @@ -270,6 +269,38 @@ where } } +/// A regular optimism evm and executor builder. +#[derive(Debug, Default, Clone)] +#[non_exhaustive] +pub struct OpAddOnsBuilder { + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, +} + +impl OpAddOnsBuilder { + /// With a [`SequencerClient`]. + pub fn with_sequencer(mut self, sequencer_client: Option) -> Self { + self.sequencer_client = sequencer_client.map(SequencerClient::new); + self + } +} + +impl OpAddOnsBuilder { + /// Builds an instance of [`OpAddOns`]. + pub fn build(self) -> OpAddOns + where + N: FullNodeComponents>, + { + let Self { sequencer_client, .. } = self; + + OpAddOns(RpcAddOns::new( + move |ctx| OpEthApi::::builder().with_sequencer(sequencer_client).build(ctx), + Default::default(), + )) + } +} + /// A regular optimism evm and executor builder. 
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index 67cac17d398..875b282e0ad 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -2,20 +2,21 @@ use reth_db::test_utils::create_test_rw_db; use reth_node_api::FullNodeComponents; -use reth_node_builder::{NodeBuilder, NodeConfig}; +use reth_node_builder::{Node, NodeBuilder, NodeConfig}; use reth_optimism_chainspec::BASE_MAINNET; -use reth_optimism_node::{node::OpAddOns, OpNode}; +use reth_optimism_node::{args::RollupArgs, OpNode}; #[test] fn test_basic_setup() { // parse CLI -> config let config = NodeConfig::new(BASE_MAINNET.clone()); let db = create_test_rw_db(); + let args = RollupArgs::default(); let _builder = NodeBuilder::new(config) .with_database(db) .with_types::() - .with_components(OpNode::components(Default::default())) - .with_add_ons(OpAddOns::new(None)) + .with_components(OpNode::components(args.clone())) + .with_add_ons(OpNode::new(args).add_ons()) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); Ok(()) diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 50194f39aa3..4b25066d675 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -61,7 +61,6 @@ serde_json.workspace = true # misc thiserror.workspace = true tracing.workspace = true -derive_more = { workspace = true, features = ["constructor", "deref"] } [dev-dependencies] reth-optimism-chainspec.workspace = true diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 64a55496993..92b4353ec9e 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -57,7 +57,7 @@ where }; Ok(OpReceiptBuilder::new( - &self.inner.provider().chain_spec(), + &self.inner.eth_api.provider().chain_spec(), tx, meta, receipt, diff --git 
a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 9495a359e32..c5e96bb87d1 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -33,12 +33,12 @@ where { #[inline] fn call_gas_limit(&self) -> u64 { - self.inner.gas_cap() + self.inner.eth_api.gas_cap() } #[inline] fn max_simulate_blocks(&self) -> u64 { - self.inner.max_simulate_blocks() + self.inner.eth_api.max_simulate_blocks() } fn create_txn_env( diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 6b909f012c5..27672804839 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -14,7 +14,6 @@ use std::{fmt, sync::Arc}; use alloy_consensus::Header; use alloy_primitives::U256; -use derive_more::Deref; use op_alloy_network::Optimism; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; @@ -59,14 +58,10 @@ pub type EthApiNodeBackend = EthApiInner< /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. -#[derive(Deref, Clone)] +#[derive(Clone)] pub struct OpEthApi { /// Gateway to node's core components. - #[deref] - inner: Arc>, - /// Sequencer client, configured to forward submitted transactions to sequencer of given OP - /// network. - sequencer_client: Option, + inner: Arc>, } impl OpEthApi @@ -79,28 +74,9 @@ where + 'static, >, { - /// Creates a new instance for given context. 
- pub fn new(ctx: &EthApiBuilderCtx, sequencer_http: Option) -> Self { - let blocking_task_pool = - BlockingTaskPool::build().expect("failed to build blocking task pool"); - - let inner = EthApiInner::new( - ctx.provider.clone(), - ctx.pool.clone(), - ctx.network.clone(), - ctx.cache.clone(), - ctx.new_gas_price_oracle(), - ctx.config.rpc_gas_cap, - ctx.config.rpc_max_simulate_blocks, - ctx.config.eth_proof_window, - blocking_task_pool, - ctx.new_fee_history_cache(), - ctx.evm_config.clone(), - ctx.executor.clone(), - ctx.config.proof_permits, - ); - - Self { inner: Arc::new(inner), sequencer_client: sequencer_http.map(SequencerClient::new) } + /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. + pub const fn builder() -> OpEthApiBuilder { + OpEthApiBuilder::new() } } @@ -130,17 +106,17 @@ where #[inline] fn pool(&self) -> &Self::Pool { - self.inner.pool() + self.inner.eth_api.pool() } #[inline] fn evm_config(&self) -> &Self::Evm { - self.inner.evm_config() + self.inner.eth_api.evm_config() } #[inline] fn network(&self) -> &Self::Network { - self.inner.network() + self.inner.eth_api.network() } #[inline] @@ -150,7 +126,7 @@ where #[inline] fn provider(&self) -> &Self::Provider { - self.inner.provider() + self.inner.eth_api.provider() } } @@ -160,7 +136,7 @@ where { #[inline] fn cache(&self) -> &EthStateCache { - self.inner.cache() + self.inner.eth_api.cache() } } @@ -175,12 +151,12 @@ where { #[inline] fn starting_block(&self) -> U256 { - self.inner.starting_block() + self.inner.eth_api.starting_block() } #[inline] fn signers(&self) -> &parking_lot::RwLock>> { - self.inner.signers() + self.inner.eth_api.signers() } } @@ -191,17 +167,17 @@ where { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { - self.inner.task_spawner() + self.inner.eth_api.task_spawner() } #[inline] fn tracing_task_pool(&self) -> &BlockingTaskPool { - self.inner.blocking_task_pool() + self.inner.eth_api.blocking_task_pool() } #[inline] fn tracing_task_guard(&self) -> &BlockingTaskGuard 
{ - self.inner.blocking_task_guard() + self.inner.eth_api.blocking_task_guard() } } @@ -217,12 +193,12 @@ where { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { - self.inner.gas_oracle() + self.inner.eth_api.gas_oracle() } #[inline] fn fee_history_cache(&self) -> &FeeHistoryCache { - self.inner.fee_history_cache() + self.inner.eth_api.fee_history_cache() } } @@ -241,7 +217,7 @@ where { #[inline] fn max_proof_window(&self) -> u64 { - self.inner.eth_proof_window() + self.inner.eth_api.eth_proof_window() } } @@ -264,7 +240,7 @@ where N: RpcNodeCore, { fn with_dev_accounts(&self) { - *self.inner.signers().write() = DevSigner::random_signers(20) + *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) } } @@ -273,3 +249,71 @@ impl fmt::Debug for OpEthApi { f.debug_struct("OpEthApi").finish_non_exhaustive() } } + +/// Container type `OpEthApi` +#[allow(missing_debug_implementations)] +struct OpEthApiInner { + /// Gateway to node's core components. + eth_api: EthApiNodeBackend, + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, +} + +/// A type that knows how to build a [`OpEthApi`]. +#[derive(Debug, Default)] +pub struct OpEthApiBuilder { + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, +} + +impl OpEthApiBuilder { + /// Creates a [`OpEthApiBuilder`] instance from [`EthApiBuilderCtx`]. + pub const fn new() -> Self { + Self { sequencer_client: None } + } + + /// With a [`SequencerClient`]. 
+ pub fn with_sequencer(mut self, sequencer_client: Option) -> Self { + self.sequencer_client = sequencer_client; + self + } +} + +impl OpEthApiBuilder { + /// Builds an instance of [`OpEthApi`] + pub fn build(self, ctx: &EthApiBuilderCtx) -> OpEthApi + where + N: RpcNodeCore< + Provider: BlockReaderIdExt + + ChainSpecProvider + + CanonStateSubscriptions + + Clone + + 'static, + >, + { + let blocking_task_pool = + BlockingTaskPool::build().expect("failed to build blocking task pool"); + + let eth_api = EthApiInner::new( + ctx.provider.clone(), + ctx.pool.clone(), + ctx.network.clone(), + ctx.cache.clone(), + ctx.new_gas_price_oracle(), + ctx.config.rpc_gas_cap, + ctx.config.rpc_max_simulate_blocks, + ctx.config.eth_proof_window, + blocking_task_pool, + ctx.new_fee_history_cache(), + ctx.evm_config.clone(), + ctx.executor.clone(), + ctx.config.proof_permits, + ); + + OpEthApi { + inner: Arc::new(OpEthApiInner { eth_api, sequencer_client: self.sequencer_client }), + } + } +} diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 852c4454f06..fec610bb1e9 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -37,7 +37,7 @@ where { #[inline] fn pending_block(&self) -> &tokio::sync::Mutex> { - self.inner.pending_block() + self.inner.eth_api.pending_block() } /// Returns the locally built pending block diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index e803ea21019..2a4df1ada49 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -31,6 +31,8 @@ where receipt: Receipt, ) -> Result, Self::Error> { let (block, receipts) = self + .inner + .eth_api .cache() .get_block_and_receipts(meta.block_hash) .await @@ -43,7 +45,7 @@ where reth_optimism_evm::extract_l1_info(&block.body).map_err(OpEthApiError::from)?; Ok(OpReceiptBuilder::new( - &self.inner.provider().chain_spec(), + 
&self.inner.eth_api.provider().chain_spec(), &tx, meta, &receipt, diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 3202dc46ad1..2b92927f649 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -23,7 +23,7 @@ where N: RpcNodeCore, { fn signers(&self) -> &parking_lot::RwLock>> { - self.inner.signers() + self.inner.eth_api.signers() } /// Decodes and recovers the transaction and submits it to the pool. @@ -68,7 +68,7 @@ where { /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { - self.sequencer_client.clone() + self.inner.sequencer_client.clone() } } @@ -106,6 +106,7 @@ where } reth_primitives::Transaction::Deposit(tx) => { self.inner + .eth_api .provider() .receipt_by_hash(hash) .map_err(Self::Error::from_eth_err)? From 8a047ed6e437a87454c3d6ab5f19f5029b6af9de Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 17:02:19 +0100 Subject: [PATCH 819/970] chore: move Integerlist to db-api (#13062) --- Cargo.lock | 3 +- crates/net/ecies/Cargo.toml | 2 +- crates/primitives-traits/Cargo.toml | 2 - crates/primitives-traits/src/integer_list.rs | 196 ------------------ crates/primitives-traits/src/lib.rs | 3 - crates/storage/db-api/Cargo.toml | 1 + .../storage/db-api/src/models/integer_list.rs | 181 +++++++++++++++- crates/storage/db-api/src/models/mod.rs | 6 +- crates/storage/db-common/src/init.rs | 3 +- .../storage/db/src/implementation/mdbx/mod.rs | 5 +- .../db/src/tables/codecs/fuzz/inputs.rs | 2 +- .../storage/db/src/tables/codecs/fuzz/mod.rs | 3 - crates/storage/db/src/tables/mod.rs | 8 +- crates/storage/libmdbx-rs/Cargo.toml | 2 +- 14 files changed, 195 insertions(+), 222 deletions(-) delete mode 100644 crates/primitives-traits/src/integer_list.rs diff --git a/Cargo.lock b/Cargo.lock index 2b02dc2e3e2..b39cd76e09b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6953,6 +6953,7 @@ dependencies = [ 
"reth-stages-types", "reth-storage-errors", "reth-trie-common", + "roaring", "serde", "test-fuzz", ] @@ -8670,7 +8671,6 @@ dependencies = [ "rand 0.8.5", "reth-codecs", "revm-primitives", - "roaring", "serde", "serde_json", "serde_with", @@ -9720,7 +9720,6 @@ checksum = "f81dc953b2244ddd5e7860cb0bb2a790494b898ef321d4aff8e260efab60cc88" dependencies = [ "bytemuck", "byteorder", - "serde", ] [[package]] diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index eb2a0b023b3..ec34e3e7a32 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -28,7 +28,7 @@ tracing.workspace = true # HeaderBytes generic-array.workspace = true typenum = "1.15.0" -byteorder = "1.4.3" +byteorder.workspace = true # crypto rand.workspace = true diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index ceee1e26cec..9265c878d6c 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -30,7 +30,6 @@ op-alloy-consensus = { workspace = true, optional = true } byteorder = { workspace = true, optional = true } bytes.workspace = true derive_more.workspace = true -roaring = "0.10.2" serde_with = { workspace = true, optional = true } auto_impl.workspace = true @@ -100,7 +99,6 @@ serde = [ "rand/serde", "reth-codecs?/serde", "revm-primitives/serde", - "roaring/serde", "revm-primitives/serde", "op-alloy-consensus?/serde" ] diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs deleted file mode 100644 index 6fc6d75899c..00000000000 --- a/crates/primitives-traits/src/integer_list.rs +++ /dev/null @@ -1,196 +0,0 @@ -use alloc::vec::Vec; -use core::fmt; - -use bytes::BufMut; -use derive_more::Deref; -use roaring::RoaringTreemap; - -/// A data structure that uses Roaring Bitmaps to efficiently store a list of integers. 
-/// -/// This structure provides excellent compression while allowing direct access to individual -/// elements without the need for full decompression. -/// -/// Key features: -/// - Efficient compression: the underlying Roaring Bitmaps significantly reduce memory usage. -/// - Direct access: elements can be accessed or queried without needing to decode the entire list. -/// - [`RoaringTreemap`] backing: internally backed by [`RoaringTreemap`], which supports 64-bit -/// integers. -#[derive(Clone, PartialEq, Default, Deref)] -pub struct IntegerList(pub RoaringTreemap); - -impl fmt::Debug for IntegerList { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("IntegerList")?; - f.debug_list().entries(self.0.iter()).finish() - } -} - -impl IntegerList { - /// Creates a new empty [`IntegerList`]. - pub fn empty() -> Self { - Self(RoaringTreemap::new()) - } - - /// Creates an [`IntegerList`] from a list of integers. - /// - /// Returns an error if the list is not pre-sorted. - pub fn new(list: impl IntoIterator) -> Result { - RoaringTreemap::from_sorted_iter(list) - .map(Self) - .map_err(|_| IntegerListError::UnsortedInput) - } - - /// Creates an [`IntegerList`] from a pre-sorted list of integers. - /// - /// # Panics - /// - /// Panics if the list is not pre-sorted. - #[inline] - #[track_caller] - pub fn new_pre_sorted(list: impl IntoIterator) -> Self { - Self::new(list).expect("IntegerList must be pre-sorted and non-empty") - } - - /// Appends a list of integers to the current list. - pub fn append(&mut self, list: impl IntoIterator) -> Result { - self.0.append(list).map_err(|_| IntegerListError::UnsortedInput) - } - - /// Pushes a new integer to the list. - pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> { - self.0.push(value).then_some(()).ok_or(IntegerListError::UnsortedInput) - } - - /// Clears the list. - pub fn clear(&mut self) { - self.0.clear(); - } - - /// Serializes a [`IntegerList`] into a sequence of bytes. 
- pub fn to_bytes(&self) -> Vec { - let mut vec = Vec::with_capacity(self.0.serialized_size()); - self.0.serialize_into(&mut vec).expect("not able to encode IntegerList"); - vec - } - - /// Serializes a [`IntegerList`] into a sequence of bytes. - pub fn to_mut_bytes(&self, buf: &mut B) { - self.0.serialize_into(buf.writer()).unwrap(); - } - - /// Deserializes a sequence of bytes into a proper [`IntegerList`]. - pub fn from_bytes(data: &[u8]) -> Result { - RoaringTreemap::deserialize_from(data) - .map(Self) - .map_err(|_| IntegerListError::FailedToDeserialize) - } -} - -#[cfg(feature = "serde")] -impl serde::Serialize for IntegerList { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - use serde::ser::SerializeSeq; - - let mut seq = serializer.serialize_seq(Some(self.len() as usize))?; - for e in &self.0 { - seq.serialize_element(&e)?; - } - seq.end() - } -} - -#[cfg(feature = "serde")] -struct IntegerListVisitor; - -#[cfg(feature = "serde")] -impl<'de> serde::de::Visitor<'de> for IntegerListVisitor { - type Value = IntegerList; - - fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("a usize array") - } - - fn visit_seq(self, mut seq: E) -> Result - where - E: serde::de::SeqAccess<'de>, - { - let mut list = IntegerList::empty(); - while let Some(item) = seq.next_element()? 
{ - list.push(item).map_err(serde::de::Error::custom)?; - } - Ok(list) - } -} - -#[cfg(feature = "serde")] -impl<'de> serde::Deserialize<'de> for IntegerList { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - deserializer.deserialize_byte_buf(IntegerListVisitor) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -use arbitrary::{Arbitrary, Unstructured}; - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> Arbitrary<'a> for IntegerList { - fn arbitrary(u: &mut Unstructured<'a>) -> Result { - let mut nums: Vec = Vec::arbitrary(u)?; - nums.sort_unstable(); - Self::new(nums).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -/// Primitives error type. -#[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum IntegerListError { - /// The provided input is unsorted. - #[display("the provided input is unsorted")] - UnsortedInput, - /// Failed to deserialize data into type. - #[display("failed to deserialize data into type")] - FailedToDeserialize, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn empty_list() { - assert_eq!(IntegerList::empty().len(), 0); - assert_eq!(IntegerList::new_pre_sorted(std::iter::empty()).len(), 0); - } - - #[test] - fn test_integer_list() { - let original_list = [1, 2, 3]; - let ef_list = IntegerList::new(original_list).unwrap(); - assert_eq!(ef_list.iter().collect::>(), original_list); - } - - #[test] - fn test_integer_list_serialization() { - let original_list = [1, 2, 3]; - let ef_list = IntegerList::new(original_list).unwrap(); - - let blist = ef_list.to_bytes(); - assert_eq!(IntegerList::from_bytes(&blist).unwrap(), ef_list) - } - - #[test] - fn serde_serialize_deserialize() { - let original_list = [1, 2, 3]; - let ef_list = IntegerList::new(original_list).unwrap(); - - let serde_out = serde_json::to_string(&ef_list).unwrap(); - let serde_ef_list = serde_json::from_str::(&serde_out).unwrap(); - assert_eq!(serde_ef_list, ef_list); - } -} diff --git 
a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index c88da5ad7a7..04d02be0b7d 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -31,9 +31,6 @@ pub use transaction::{ FullTransaction, Transaction, }; -mod integer_list; -pub use integer_list::{IntegerList, IntegerListError}; - pub mod block; pub use block::{ body::{BlockBody, FullBlockBody}, diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 3aa908a6009..05581b9725d 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -29,6 +29,7 @@ alloy-consensus.workspace = true # codecs modular-bitfield.workspace = true +roaring = "0.10.2" parity-scale-codec = { version = "3.2.1", features = ["bytes"] } serde = { workspace = true, default-features = false } diff --git a/crates/storage/db-api/src/models/integer_list.rs b/crates/storage/db-api/src/models/integer_list.rs index 480b52a9e2c..5301ec303e5 100644 --- a/crates/storage/db-api/src/models/integer_list.rs +++ b/crates/storage/db-api/src/models/integer_list.rs @@ -4,7 +4,159 @@ use crate::{ table::{Compress, Decompress}, DatabaseError, }; -use reth_primitives_traits::IntegerList; +use bytes::BufMut; +use core::fmt; +use derive_more::Deref; +use roaring::RoaringTreemap; + +/// A data structure that uses Roaring Bitmaps to efficiently store a list of integers. +/// +/// This structure provides excellent compression while allowing direct access to individual +/// elements without the need for full decompression. +/// +/// Key features: +/// - Efficient compression: the underlying Roaring Bitmaps significantly reduce memory usage. +/// - Direct access: elements can be accessed or queried without needing to decode the entire list. +/// - [`RoaringTreemap`] backing: internally backed by [`RoaringTreemap`], which supports 64-bit +/// integers. 
+#[derive(Clone, PartialEq, Default, Deref)] +pub struct IntegerList(pub RoaringTreemap); + +impl fmt::Debug for IntegerList { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("IntegerList")?; + f.debug_list().entries(self.0.iter()).finish() + } +} + +impl IntegerList { + /// Creates a new empty [`IntegerList`]. + pub fn empty() -> Self { + Self(RoaringTreemap::new()) + } + + /// Creates an [`IntegerList`] from a list of integers. + /// + /// Returns an error if the list is not pre-sorted. + pub fn new(list: impl IntoIterator) -> Result { + RoaringTreemap::from_sorted_iter(list) + .map(Self) + .map_err(|_| IntegerListError::UnsortedInput) + } + + /// Creates an [`IntegerList`] from a pre-sorted list of integers. + /// + /// # Panics + /// + /// Panics if the list is not pre-sorted. + #[inline] + #[track_caller] + pub fn new_pre_sorted(list: impl IntoIterator) -> Self { + Self::new(list).expect("IntegerList must be pre-sorted and non-empty") + } + + /// Appends a list of integers to the current list. + pub fn append(&mut self, list: impl IntoIterator) -> Result { + self.0.append(list).map_err(|_| IntegerListError::UnsortedInput) + } + + /// Pushes a new integer to the list. + pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> { + self.0.push(value).then_some(()).ok_or(IntegerListError::UnsortedInput) + } + + /// Clears the list. + pub fn clear(&mut self) { + self.0.clear(); + } + + /// Serializes a [`IntegerList`] into a sequence of bytes. + pub fn to_bytes(&self) -> Vec { + let mut vec = Vec::with_capacity(self.0.serialized_size()); + self.0.serialize_into(&mut vec).expect("not able to encode IntegerList"); + vec + } + + /// Serializes a [`IntegerList`] into a sequence of bytes. + pub fn to_mut_bytes(&self, buf: &mut B) { + self.0.serialize_into(buf.writer()).unwrap(); + } + + /// Deserializes a sequence of bytes into a proper [`IntegerList`]. 
+ pub fn from_bytes(data: &[u8]) -> Result { + RoaringTreemap::deserialize_from(data) + .map(Self) + .map_err(|_| IntegerListError::FailedToDeserialize) + } +} + +impl serde::Serialize for IntegerList { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + use serde::ser::SerializeSeq; + + let mut seq = serializer.serialize_seq(Some(self.len() as usize))?; + for e in &self.0 { + seq.serialize_element(&e)?; + } + seq.end() + } +} + +struct IntegerListVisitor; + +impl<'de> serde::de::Visitor<'de> for IntegerListVisitor { + type Value = IntegerList; + + fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("a usize array") + } + + fn visit_seq(self, mut seq: E) -> Result + where + E: serde::de::SeqAccess<'de>, + { + let mut list = IntegerList::empty(); + while let Some(item) = seq.next_element()? { + list.push(item).map_err(serde::de::Error::custom)?; + } + Ok(list) + } +} + +impl<'de> serde::Deserialize<'de> for IntegerList { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_byte_buf(IntegerListVisitor) + } +} + +#[cfg(any(test, feature = "arbitrary"))] +use arbitrary::{Arbitrary, Unstructured}; + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> Arbitrary<'a> for IntegerList { + fn arbitrary(u: &mut Unstructured<'a>) -> Result { + let mut nums: Vec = Vec::arbitrary(u)?; + nums.sort_unstable(); + Self::new(nums).map_err(|_| arbitrary::Error::IncorrectFormat) + } +} + +/// Primitives error type. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum IntegerListError { + /// The provided input is unsorted. + #[display("the provided input is unsorted")] + UnsortedInput, + /// Failed to deserialize data into type. 
+ #[display("failed to deserialize data into type")] + FailedToDeserialize, +} impl Compress for IntegerList { type Compressed = Vec; @@ -23,3 +175,30 @@ impl Decompress for IntegerList { Self::from_bytes(value).map_err(|_| DatabaseError::Decode) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn empty_list() { + assert_eq!(IntegerList::empty().len(), 0); + assert_eq!(IntegerList::new_pre_sorted(std::iter::empty()).len(), 0); + } + + #[test] + fn test_integer_list() { + let original_list = [1, 2, 3]; + let ef_list = IntegerList::new(original_list).unwrap(); + assert_eq!(ef_list.iter().collect::>(), original_list); + } + + #[test] + fn test_integer_list_serialization() { + let original_list = [1, 2, 3]; + let ef_list = IntegerList::new(original_list).unwrap(); + + let blist = ef_list.to_bytes(); + assert_eq!(IntegerList::from_bytes(&blist).unwrap(), ef_list) + } +} diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 614dc598bdb..0a008bb88a5 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -8,9 +8,8 @@ use alloy_consensus::Header; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::{ - Account, Bytecode, Receipt, StorageEntry, TransactionSigned, TransactionSignedNoHash, TxType, -}; +use reth_primitives::{Receipt, StorageEntry, TransactionSigned, TransactionSignedNoHash, TxType}; +use reth_primitives_traits::{Account, Bytecode}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; @@ -24,6 +23,7 @@ pub mod storage_sharded_key; pub use accounts::*; pub use blocks::*; +pub use integer_list::IntegerList; pub use reth_db_models::{ AccountBeforeTx, ClientVersion, StoredBlockBodyIndices, StoredBlockWithdrawals, }; diff --git 
a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index d738aaec439..493b27be780 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -609,12 +609,11 @@ mod tests { use reth_db::DatabaseEnv; use reth_db_api::{ cursor::DbCursorRO, - models::{storage_sharded_key::StorageShardedKey, ShardedKey}, + models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey}, table::{Table, TableRow}, transaction::DbTx, Database, }; - use reth_primitives_traits::IntegerList; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, ProviderFactory, diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 006213e4cb9..8a6811b1539 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -507,12 +507,11 @@ mod tests { use alloy_primitives::{Address, B256, U256}; use reth_db_api::{ cursor::{DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker}, - models::{AccountBeforeTx, ShardedKey}, + models::{AccountBeforeTx, IntegerList, ShardedKey}, table::{Encode, Table}, }; use reth_libmdbx::Error; - use reth_primitives::{Account, StorageEntry}; - use reth_primitives_traits::IntegerList; + use reth_primitives_traits::{Account, StorageEntry}; use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use std::str::FromStr; use tempfile::TempDir; diff --git a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs index bb26e8b9e21..da15c112e62 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs @@ -1,6 +1,6 @@ //! Curates the input coming from the fuzzer for certain types. 
-use reth_primitives_traits::IntegerList; +use reth_db_api::models::IntegerList; use serde::{Deserialize, Serialize}; /// Makes sure that the list provided by the fuzzer is not empty and pre-sorted diff --git a/crates/storage/db/src/tables/codecs/fuzz/mod.rs b/crates/storage/db/src/tables/codecs/fuzz/mod.rs index e64a3841df4..f6b68897e34 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/mod.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/mod.rs @@ -16,9 +16,6 @@ macro_rules! impl_fuzzer_with_input { pub mod $name { use reth_db_api::table; - #[allow(unused_imports)] - - #[allow(unused_imports)] use reth_primitives_traits::*; diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 940bb3aa259..9ff21261eee 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -26,13 +26,13 @@ use reth_db_api::{ accounts::BlockNumberAddress, blocks::{HeaderHash, StoredBlockOmmers}, storage_sharded_key::StorageShardedKey, - AccountBeforeTx, ClientVersion, CompactU256, ShardedKey, StoredBlockBodyIndices, - StoredBlockWithdrawals, + AccountBeforeTx, ClientVersion, CompactU256, IntegerList, ShardedKey, + StoredBlockBodyIndices, StoredBlockWithdrawals, }, table::{Decode, DupSort, Encode, Table}, }; -use reth_primitives::{Account, Bytecode, Receipt, StorageEntry, TransactionSignedNoHash}; -use reth_primitives_traits::IntegerList; +use reth_primitives::{Receipt, StorageEntry, TransactionSignedNoHash}; +use reth_primitives_traits::{Account, Bytecode}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey}; diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index fa10a73cb33..4679f4fe914 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -15,7 +15,7 @@ workspace = true reth-mdbx-sys.workspace = 
true bitflags.workspace = true -byteorder = "1" +byteorder.workspace = true derive_more.workspace = true indexmap = "2" parking_lot.workspace = true From 30800af6ec81521625db40cd6d3881f0e2d3f9e6 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Mon, 2 Dec 2024 23:28:47 +0700 Subject: [PATCH 820/970] perf(validate-tx-pool): fast non-allocating `is_create` (#13063) --- crates/transaction-pool/src/test_utils/mock.rs | 10 ++++++++++ crates/transaction-pool/src/traits.rs | 12 +++++++++++- crates/transaction-pool/src/validate/eth.rs | 2 +- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index afa1638c851..78982cb4657 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -719,6 +719,16 @@ impl PoolTransaction for MockTransaction { } } + /// Returns true if the transaction is a contract creation. + fn is_create(&self) -> bool { + match self { + Self::Legacy { to, .. } | Self::Eip1559 { to, .. } | Self::Eip2930 { to, .. } => { + to.is_create() + } + Self::Eip4844 { .. } => false, + } + } + /// Returns the input data associated with the transaction. fn input(&self) -> &[u8] { self.get_input() diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 6adb81729e1..11c8db225b0 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1061,6 +1061,11 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// [`TxKind::Create`] if the transaction is a contract creation. fn kind(&self) -> TxKind; + /// Returns true if the transaction is a contract creation. + /// We don't provide a default implementation via `kind` as it copies the 21-byte + /// [`TxKind`] for this simple check. A proper implementation shouldn't allocate. 
+ fn is_create(&self) -> bool; + /// Returns the recipient of the transaction if it is not a [`TxKind::Create`] /// transaction. fn to(&self) -> Option
{ @@ -1109,7 +1114,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { &self, max_init_code_size: usize, ) -> Result<(), InvalidPoolTransactionError> { - if self.kind().is_create() && self.input().len() > max_init_code_size { + if self.is_create() && self.input().len() > max_init_code_size { Err(InvalidPoolTransactionError::ExceedsMaxInitCodeSize( self.size(), max_init_code_size, @@ -1328,6 +1333,11 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.kind() } + /// Returns true if the transaction is a contract creation. + fn is_create(&self) -> bool { + self.transaction.is_create() + } + fn input(&self) -> &[u8] { self.transaction.input() } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index ca745222575..5249c1befa2 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -815,7 +815,7 @@ pub fn ensure_intrinsic_gas( let gas_after_merge = validate_initial_tx_gas( spec_id, transaction.input(), - transaction.kind().is_create(), + transaction.is_create(), transaction.access_list().map(|list| list.0.as_slice()).unwrap_or(&[]), transaction.authorization_count() as u64, ); From aacf5d13d29d32fef3163f93763c686efbcabbad Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 18:03:33 +0100 Subject: [PATCH 821/970] chore: disable more default features (#13065) --- .github/assets/check_rv32imac.sh | 2 +- Cargo.toml | 4 ++-- crates/chainspec/Cargo.toml | 3 ++- crates/consensus/consensus/Cargo.toml | 3 ++- crates/evm/execution-errors/Cargo.toml | 3 ++- crates/net/p2p/Cargo.toml | 3 ++- crates/optimism/chainspec/Cargo.toml | 23 ++++++++++++----------- crates/optimism/evm/Cargo.toml | 3 ++- crates/optimism/primitives/Cargo.toml | 2 ++ crates/primitives-traits/Cargo.toml | 4 +++- crates/primitives/Cargo.toml | 4 +++- crates/storage/errors/Cargo.toml | 3 ++- 12 files changed, 35 insertions(+), 22 deletions(-) diff --git 
a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh index 9a66da9fe3b..9032c05b9d6 100755 --- a/.github/assets/check_rv32imac.sh +++ b/.github/assets/check_rv32imac.sh @@ -5,9 +5,9 @@ set +e # Disable immediate exit on error crates_to_check=( reth-codecs-derive reth-ethereum-forks + reth-primitives-traits # reth-evm # reth-primitives - # reth-primitives-traits # reth-optimism-forks # reth-optimism-chainspec ) diff --git a/Cargo.toml b/Cargo.toml index 521cfd88f8a..dcb5510e79f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -485,12 +485,12 @@ backon = { version = "1.2", default-features = false, features = [ bincode = "1.3" bitflags = "2.4" boyer-moore-magiclen = "0.2.16" -bytes = "1.5" +bytes = { version = "1.5", default-features = false } cfg-if = "1.0" clap = "4" const_format = { version = "0.2.32", features = ["rust_1_64"] } dashmap = "6.0" -derive_more = { version = "1", features = ["full"] } +derive_more = { version = "1", default-features = false, features = ["full"] } dyn-clone = "1.0.17" eyre = "0.6" fdlimit = "0.3.0" diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index e5f6c058c62..67968ee0a88 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -49,7 +49,8 @@ std = [ "alloy-consensus/std", "once_cell/std", "alloy-rlp/std", - "reth-ethereum-forks/std" + "reth-ethereum-forks/std", + "derive_more/std" ] arbitrary = [ "alloy-chains/arbitrary", diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 55188dd8472..36356a4de36 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -32,7 +32,8 @@ std = [ "alloy-primitives/std", "alloy-eips/std", "alloy-consensus/std", - "reth-primitives-traits/std" + "reth-primitives-traits/std", + "derive_more/std" ] test-utils = [ "reth-primitives/test-utils", diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index 3368eb06503..01727cd70b5 
100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -31,5 +31,6 @@ std = [ "alloy-eips/std", "alloy-primitives/std", "revm-primitives/std", - "alloy-rlp/std" + "alloy-rlp/std", + "derive_more/std" ] diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index 9348bf2d041..a72110647c4 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -57,5 +57,6 @@ std = [ "alloy-eips/std", "alloy-primitives/std", "reth-primitives-traits/std", - "alloy-consensus/std", + "alloy-consensus/std", + "derive_more/std" ] diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 4e573ce2994..7f74156b885 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -46,15 +46,16 @@ op-alloy-rpc-types.workspace = true [features] default = ["std"] std = [ - "alloy-chains/std", - "alloy-genesis/std", - "alloy-primitives/std", - "alloy-eips/std", - "op-alloy-rpc-types/std", - "reth-chainspec/std", - "reth-ethereum-forks/std", - "reth-primitives-traits/std", - "reth-optimism-forks/std", - "alloy-consensus/std", - "once_cell/std", + "alloy-chains/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-eips/std", + "op-alloy-rpc-types/std", + "reth-chainspec/std", + "reth-ethereum-forks/std", + "reth-primitives-traits/std", + "reth-optimism-forks/std", + "alloy-consensus/std", + "once_cell/std", + "derive_more/std" ] diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 149aa2e953b..c640b130841 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -64,7 +64,8 @@ std = [ "alloy-primitives/std", "revm-primitives/std", "revm/std", - "reth-ethereum-forks/std" + "reth-ethereum-forks/std", + "derive_more/std" ] optimism = [ "reth-primitives/optimism", diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 075cd0d13f4..9f370511d49 100644 --- 
a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -50,6 +50,8 @@ std = [ "alloy-eips/std", "alloy-primitives/std", "serde/std", + "bytes/std", + "derive_more/std" ] reth-codec = [ "dep:reth-codecs", diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 9265c878d6c..459fdbde1a7 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -65,7 +65,9 @@ std = [ "revm-primitives/std", "serde?/std", "serde_with?/std", - "alloy-rlp/std" + "alloy-rlp/std", + "bytes/std", + "derive_more/std" ] test-utils = [ "arbitrary", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 80299a06db6..3bfaefb39ed 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -106,7 +106,9 @@ std = [ "alloy-trie/std", "serde_with?/std", "alloy-rlp/std", - "reth-ethereum-forks/std" + "reth-ethereum-forks/std", + "bytes/std", + "derive_more/std" ] reth-codec = [ "dep:reth-codecs", diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index 9a31177662f..0f2cf03f652 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -29,5 +29,6 @@ std = [ "reth-primitives/std", "alloy-eips/std", "alloy-primitives/std", - "alloy-rlp/std" + "alloy-rlp/std", + "derive_more/std" ] From 675410def1b85e1938e62d088d6952d4e647389d Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Mon, 2 Dec 2024 12:10:27 -0500 Subject: [PATCH 822/970] add replace and remove methods (#13059) Co-authored-by: dkathiriya --- crates/rpc/rpc-builder/src/auth.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 25626e4f12d..f22fd554ca6 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -221,6 +221,30 @@ impl AuthRpcModule { 
self.module_mut().merge(other.into()).map(|_| true) } + /// Removes the method with the given name from the configured authenticated methods. + /// + /// Returns `true` if the method was found and removed, `false` otherwise. + pub fn remove_auth_method(&mut self, method_name: &'static str) -> bool { + self.module_mut().remove_method(method_name).is_some() + } + + /// Removes the given methods from the configured authenticated methods. + pub fn remove_auth_methods(&mut self, methods: impl IntoIterator) { + for name in methods { + self.remove_auth_method(name); + } + } + + /// Replace the given [Methods] in the configured authenticated methods. + pub fn replace_auth_methods( + &mut self, + other: impl Into, + ) -> Result { + let other = other.into(); + self.remove_auth_methods(other.method_names()); + self.merge_auth_methods(other) + } + /// Convenience function for starting a server pub async fn start_server( self, From c61a0713b44c9566c4e8157ea1e27d98756d79fb Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 2 Dec 2024 22:34:13 +0400 Subject: [PATCH 823/970] feat: bump alloy (#13070) --- Cargo.lock | 166 +++++++++++++++---------------- Cargo.toml | 64 ++++++------ crates/rpc/rpc/src/eth/bundle.rs | 1 + 3 files changed, 116 insertions(+), 115 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b39cd76e09b..66cd14d1e2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1ff8439834ab71a4b0ecd1a8ff80b3921c87615f158940c3364f399c732786" +checksum = "73dd0ab7003dfa3efd252e423873cd3bc241d1456147e752f995cc8aabd1d1f6" dependencies = [ "alloy-eips", "alloy-primitives", @@ -132,9 +132,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"519a86faaa6729464365a90c04eba68539b6d3a30f426edb4b3dafd78920d42f" +checksum = "d08234c0eece0e08602db5095a16dc942cad91967cccfcfc2c6a42c25563964f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -146,9 +146,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cca2b353d8b7f160dc930dfa174557acefece6deab5ecd7e6230d38858579eea" +checksum = "6a01f5593f6878452c6dde102ece391b60cba79801c5f606f8fe898ff57cd5d7" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -161,7 +161,7 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] @@ -213,9 +213,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dedb328c2114284f767e075589ca9de8d5e9c8a91333402f4804a584ed71a38" +checksum = "50c242de43a1869bcb2fbce3b377130959d10dfd562b87ac7aa2f04d98baac51" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -234,9 +234,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4841e8dd4e0f53d76b501fd4c6bc21d95d688bc8ebf0ea359fc6c7ab65b48742" +checksum = "9dd39b72f860cb0c542fac925f91d1939c2b14a0970b39d0ae304b5b7574a0ac" dependencies = [ "alloy-primitives", "alloy-serde", @@ -257,23 +257,23 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "254f770918f96dc4ec88a15e6e2e243358e1719d66b40ef814428e7697079d25" +checksum = "6c15c11661571a19a06896663c93e804ccf013159275a89a98e892014df514d8" dependencies = [ "alloy-primitives", "alloy-sol-types", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", "tracing", ] [[package]] name = "alloy-network" 
-version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "931dd176c6e33355f3dc0170ec69cf5b951f4d73870b276e2c837ab35f9c5136" +checksum = "60dd0b99eaa5e715dd90d42021f7f08a0a70976ea84f41a0ad233770e0c1962b" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -291,14 +291,14 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] name = "alloy-network-primitives" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa6ec0f23be233e851e31c5e4badfedfa9c7bc177bc37f4e03616072cd40a806" +checksum = "18abfc73ce48f074c8bc6e05c1f08ef0b1ddc9b04f191a821d0beb9470a42a29" dependencies = [ "alloy-consensus", "alloy-eips", @@ -309,9 +309,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3bce85f0f67b2248c2eb42941bb75079ac53648569a668e8bfd7de5a831ec64" +checksum = "f9a04cf8f3a19b024b2bc71b5774d423cd2edda7f67df6029daa1368c5c02da5" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -319,7 +319,7 @@ dependencies = [ "rand 0.8.5", "serde_json", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.3", "tracing", "url", ] @@ -358,9 +358,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5545e2cbf2f8f24c68bb887ba0294fa12a2f816b9e72c4f226cd137b77d0e294" +checksum = "4933c761f10e44d5e901804b56efb2ce6e0945e6c57d2fa1e5ace303fae6f74a" dependencies = [ "alloy-chains", "alloy-consensus", @@ -390,7 +390,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tracing", "url", @@ -399,9 +399,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.7.0" +version = "0.7.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b633f7731a3df2f4f334001bf80436565113816c5aa5c136c1ded563051e049b" +checksum = "808719714bfb2aa24b0eb2a38411ce8e654ba11c0ebf2a6648fcbe9fabfe696d" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -440,9 +440,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aed9e40c2a73265ebf70f1e48303ee55920282e1ea5971e832873fb2d32cea74" +checksum = "6ce26c25efb8290b6ba559ae6c40bf6630d337e107ae242e5790501420dba7b7" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -465,9 +465,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42dea20fa715a6f39ec7adc735cfd9567342870737270ac67795d55896527772" +checksum = "41080ce2640928f0df45c41d2af629b88db3cb31af3abbe614964ae10001ddac" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -478,9 +478,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9eab93eabf53697b4b9095c0f9203fca3702e78d083c77a5c677bdc02bebab8" +checksum = "db981579da4d597d9d35f56ad7641b929bf8f551ab696715132f554863c83540" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -490,9 +490,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2750f4f694b27461915b9794df60177198bf733da38dde71aadfbe2946a3c0be" +checksum = "252b7433e731e5d24f7eb7a54a368bc813a1086aaf84643ab10e99599a6ff16c" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -502,11 +502,10 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "0.7.0" +version = "0.7.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "79d7620e22d6ed7c58451dd303d0501ade5a8bec9dc8daef0fbc48ceffabbae1" +checksum = "abca110e59f760259e26d0c84912121468008aba48dd227af0f306cfd7bce9ae" dependencies = [ - "alloy-consensus", "alloy-consensus-any", "alloy-rpc-types-eth", "alloy-serde", @@ -514,9 +513,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdbfc1b5ee81b1ef6d5e770f3bd6018eab66c7ac2ee1e897f88973b327e2fc20" +checksum = "45c8db5fb70d2fece7bc1cd5adf42e72fc8a23547adeff8f558d9063f1e7788c" dependencies = [ "alloy-eips", "alloy-primitives", @@ -524,14 +523,14 @@ dependencies = [ "alloy-serde", "serde", "serde_with", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] name = "alloy-rpc-types-debug" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d2d4a265fb1198272cc43d8d418c0423cdfc1aebcd283be9105464874a1dda" +checksum = "ea3a662ced0bfbe582d26ed85d6a0092310787331555c8f7a86f843c7ca272ef" dependencies = [ "alloy-primitives", "serde", @@ -539,9 +538,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb843daa6feb011475f0db8c499fff5ac62e1e6012fc01d97477ddb3217a83f" +checksum = "d3b000c7f3469e7faa575ba70207294cf07e91dfd6ce4d04d5d5d8069f974a66" dependencies = [ "alloy-consensus", "alloy-eips", @@ -560,9 +559,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df34b88df4deeac9ecfc80ad7cbb26a33e57437b9db8be5b952792feef6134bc" +checksum = "3468e7385fbb86b0fde5497d685c02f765ea09d36f7e07c5d1c9a52b077d38e2" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -582,12 +581,13 @@ 
dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be601847f0b13112249ed577eaa7501755e7dd3df7b037088f8b8236a4602d59" +checksum = "26988fb56d87414c96b8fd9b69ad6ce3768bc9acc953ed02c18a66f74ab98c66" dependencies = [ "alloy-eips", "alloy-primitives", + "alloy-rpc-types-eth", "alloy-serde", "serde", "serde_json", @@ -595,23 +595,23 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db32f30a55ea4fa9d893127a84eef52fc54d23acb34c1a5a39bfe9bd95fbc149" +checksum = "7a90be1bc8e3659db1c9512191873a268a917efbc62b8bd39a92c12bf613b193" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] name = "alloy-rpc-types-txpool" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1588d8d799095a9bd55d9045b76add042ab725c37316a77da933683754aa4b" +checksum = "beade2858d292442f5be6fce452c923072a7ac4d3898d333abf42703945444d0" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -621,9 +621,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a89fd4cc3f96b3c5c0dd1cebeb63323e4659bbdc837117fa3fd5ac168df7d9" +checksum = "42de6002e2154b50b3568aea27e26bd9caf7b754658f43065f2e9b6ee0a8c839" dependencies = [ "alloy-primitives", "arbitrary", @@ -633,23 +633,23 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532010243a96d1f8593c2246ec3971bc52303884fa1e43ca0a776798ba178910" +checksum = "f288a9a25e2578dab17845fd8d2be1d32de33565783ed185ded161a65f92381b" 
dependencies = [ "alloy-primitives", "async-trait", "auto_impl", "elliptic-curve", "k256", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] name = "alloy-signer-local" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8080c0ab2dc729b0cbb183843d08e78d2a1629140c9fc16234d2272abb483bd" +checksum = "0d8081f589ddc11a959605e30c723d51cad2562d9072305f8e3ef311f077e5eb" dependencies = [ "alloy-consensus", "alloy-network", @@ -660,7 +660,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 1.0.69", + "thiserror 2.0.3", ] [[package]] @@ -735,9 +735,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6f295f4b745fb9e4e663d70bc57aed991288912c7aaaf25767def921050ee43" +checksum = "90352f4cf78017905c3244f48b38fadc345970bbc9095087c0f985a580550488" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -745,7 +745,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.3", "tokio", "tower 0.5.1", "tracing", @@ -755,9 +755,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39139015a5ec127d9c895b49b484608e27fe4538544f84cdf5eae0bd36339bc6" +checksum = "7d26c94d51fa8b1aee3d15db113dd0773776c02bb36dbaa2590b900dadd7e7d0" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -770,9 +770,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b4f865b13bb8648e93f812b19b74838b9165212a2beb95fc386188c443a5e3" +checksum = "14c498fcdec50650be6b6a22ce7928a1b2738086b4f94f31b132e83498d45bbb" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -789,9 +789,9 @@ dependencies = [ 
[[package]] name = "alloy-transport-ws" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6af91e3521b8b3eac26809b1c6f9b86e3ed455dfab812f036836aabdf709b921" +checksum = "cd7b21335b55c9f715e2acca0228dc1d6880d961756916c13a9ce70f9f413e70" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -5329,9 +5329,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75353c94e7515fac7d3c280bae56bff3375784a05cb44b317260606292ff6ba9" +checksum = "77284451ec70602f148f4f3bc6d1106fdfefd57c11ff459c4b2985e400ed1a18" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5347,9 +5347,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f24feef0404861c836b8fc0a3eb0cf6f20507e63ab59a61eeb1491c0f57bc352" +checksum = "c912ec93ec839076e8bbaaf7bd3d80aeedbe38cd5e8e3e76dfc67d217637e651" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5362,9 +5362,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dda5a5c4bc786f35f6c53ba611863a889790cc40a07c8160465072026795cba" +checksum = "bef4620ba6309ecc18e1aaa339836ca839b001a420ca245add040a3bde1ae9b1" dependencies = [ "alloy-consensus", "alloy-network", @@ -5377,9 +5377,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2ab185601941f4ed04418d71e42b220a9c59353c8fb98ba8993c42590c6742" +checksum = "9ab24c1b9c21cedd691938b5667c951b04ae8b89429d7cb7a88f30afb79cbbf1" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5401,9 +5401,9 @@ 
dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "680a86b63fe4c45fbd5dbf1ac6779409565211c4b234d20af94cf1f79d11f23a" +checksum = "2bdc32eba4d43bbd23f1f16dece7afd991d41ab4ffc2494a72b048e9f38db622" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5420,9 +5420,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeff9cf6fcdf8ef7183f254f9ad59b6e87af5084f21dfa17ba00c4448a84ddf1" +checksum = "b07175fcfd9d03a587ece7ce79fc288331e6d9ae523464eb677c751d5737713b" dependencies = [ "alloy-eips", "alloy-primitives", diff --git a/Cargo.toml b/Cargo.toml index dcb5510e79f..75feb6636c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -434,46 +434,46 @@ alloy-rlp = { version = "0.3.4", default-features = false } alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.7.0", default-features = false } -alloy-contract = { version = "0.7.0", default-features = false } -alloy-eips = { version = "0.7.0", default-features = false } -alloy-genesis = { version = "0.7.0", default-features = false } -alloy-json-rpc = { version = "0.7.0", default-features = false } -alloy-network = { version = "0.7.0", default-features = false } -alloy-network-primitives = { version = "0.7.0", default-features = false } -alloy-node-bindings = { version = "0.7.0", default-features = false } -alloy-provider = { version = "0.7.0", features = [ +alloy-consensus = { version = "0.7.2", default-features = false } +alloy-contract = { version = "0.7.2", default-features = false } +alloy-eips = { version = "0.7.2", default-features = false } +alloy-genesis = { version = "0.7.2", default-features = false } +alloy-json-rpc = { version = "0.7.2", default-features = false } +alloy-network = { version = "0.7.2", 
default-features = false } +alloy-network-primitives = { version = "0.7.2", default-features = false } +alloy-node-bindings = { version = "0.7.2", default-features = false } +alloy-provider = { version = "0.7.2", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.7.0", default-features = false } -alloy-rpc-client = { version = "0.7.0", default-features = false } -alloy-rpc-types = { version = "0.7.0", features = [ +alloy-pubsub = { version = "0.7.2", default-features = false } +alloy-rpc-client = { version = "0.7.2", default-features = false } +alloy-rpc-types = { version = "0.7.2", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.7.0", default-features = false } -alloy-rpc-types-anvil = { version = "0.7.0", default-features = false } -alloy-rpc-types-beacon = { version = "0.7.0", default-features = false } -alloy-rpc-types-debug = { version = "0.7.0", default-features = false } -alloy-rpc-types-engine = { version = "0.7.0", default-features = false } -alloy-rpc-types-eth = { version = "0.7.0", default-features = false } -alloy-rpc-types-mev = { version = "0.7.0", default-features = false } -alloy-rpc-types-trace = { version = "0.7.0", default-features = false } -alloy-rpc-types-txpool = { version = "0.7.0", default-features = false } -alloy-serde = { version = "0.7.0", default-features = false } -alloy-signer = { version = "0.7.0", default-features = false } -alloy-signer-local = { version = "0.7.0", default-features = false } -alloy-transport = { version = "0.7.0" } -alloy-transport-http = { version = "0.7.0", features = [ +alloy-rpc-types-admin = { version = "0.7.2", default-features = false } +alloy-rpc-types-anvil = { version = "0.7.2", default-features = false } +alloy-rpc-types-beacon = { version = "0.7.2", default-features = false } +alloy-rpc-types-debug = { version = "0.7.2", default-features = false } +alloy-rpc-types-engine = { version = "0.7.2", default-features = false } 
+alloy-rpc-types-eth = { version = "0.7.2", default-features = false } +alloy-rpc-types-mev = { version = "0.7.2", default-features = false } +alloy-rpc-types-trace = { version = "0.7.2", default-features = false } +alloy-rpc-types-txpool = { version = "0.7.2", default-features = false } +alloy-serde = { version = "0.7.2", default-features = false } +alloy-signer = { version = "0.7.2", default-features = false } +alloy-signer-local = { version = "0.7.2", default-features = false } +alloy-transport = { version = "0.7.2" } +alloy-transport-http = { version = "0.7.2", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.7.0", default-features = false } -alloy-transport-ws = { version = "0.7.0", default-features = false } +alloy-transport-ipc = { version = "0.7.2", default-features = false } +alloy-transport-ws = { version = "0.7.2", default-features = false } # op -op-alloy-rpc-types = "0.7.1" -op-alloy-rpc-types-engine = "0.7.1" -op-alloy-network = "0.7.1" -op-alloy-consensus = "0.7.1" +op-alloy-rpc-types = "0.7.2" +op-alloy-rpc-types-engine = "0.7.2" +op-alloy-network = "0.7.2" +op-alloy-consensus = "0.7.2" # misc aquamarine = "0.6" diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index a80060b3377..3a748f529a0 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -62,6 +62,7 @@ where gas_limit, difficulty, base_fee, + .. 
} = bundle; if txs.is_empty() { return Err(EthApiError::InvalidParams( From 6cea9955b6d510d830510c6331e1a076d70bb7e6 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Mon, 2 Dec 2024 12:55:00 -0600 Subject: [PATCH 824/970] Check holocene activation based on the parent's timestamp (#13060) Co-authored-by: Matthias Seitz --- crates/optimism/chainspec/src/lib.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index f3450e87324..a3dab80705e 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -186,14 +186,20 @@ pub struct OpChainSpec { impl OpChainSpec { /// Read from parent to determine the base fee for the next block + /// + /// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) pub fn next_block_base_fee( &self, parent: &Header, timestamp: u64, ) -> Result { - let is_holocene_activated = self - .inner - .is_fork_active_at_timestamp(reth_optimism_forks::OpHardfork::Holocene, timestamp); + // > if Holocene is active in parent_header.timestamp, then the parameters from + // > parent_header.extraData are used. + let is_holocene_activated = self.inner.is_fork_active_at_timestamp( + reth_optimism_forks::OpHardfork::Holocene, + parent.timestamp, + ); + // If we are in the Holocene, we need to use the base fee params // from the parent block's extra data. 
// Else, use the base fee params (default values) from chainspec From 8d10b9329531bfd224000026eb4f9c3f2e990e8d Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 2 Dec 2024 14:28:20 -0500 Subject: [PATCH 825/970] chore: rename codecs optimism feature to op (#13067) Co-authored-by: Matthias Seitz --- crates/consensus/beacon/Cargo.toml | 2 +- crates/optimism/primitives/Cargo.toml | 6 +++--- crates/optimism/storage/Cargo.toml | 2 +- crates/primitives/Cargo.toml | 2 +- crates/storage/codecs/Cargo.toml | 2 +- crates/storage/codecs/src/alloy/transaction/mod.rs | 10 +++++----- crates/storage/db-api/Cargo.toml | 2 +- crates/storage/provider/Cargo.toml | 2 +- 8 files changed, 14 insertions(+), 14 deletions(-) diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index a7e32684839..b937eb2b468 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -83,7 +83,7 @@ assert_matches.workspace = true [features] optimism = [ "reth-blockchain-tree/optimism", - "reth-codecs/optimism", + "reth-codecs/op", "reth-chainspec", "reth-db-api/optimism", "reth-db/optimism", diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 9f370511d49..bdc423e1498 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-primitives-traits = { workspace = true, features = ["op"] } -reth-codecs = { workspace = true, optional = true, features = ["optimism"] } +reth-codecs = { workspace = true, optional = true, features = ["op"] } # ethereum alloy-primitives.workspace = true @@ -36,7 +36,7 @@ derive_more.workspace = true arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] -reth-codecs = { workspace = true, features = ["test-utils", "optimism"] } +reth-codecs = { workspace = true, features = 
["test-utils", "op"] } rstest.workspace = true arbitrary.workspace = true @@ -57,7 +57,7 @@ reth-codec = [ "dep:reth-codecs", "reth-primitives/reth-codec", "reth-primitives-traits/reth-codec", - "reth-codecs?/optimism", + "reth-codecs?/op", "reth-primitives/reth-codec" ] serde = [ diff --git a/crates/optimism/storage/Cargo.toml b/crates/optimism/storage/Cargo.toml index 2b18897d94a..b72e9c287df 100644 --- a/crates/optimism/storage/Cargo.toml +++ b/crates/optimism/storage/Cargo.toml @@ -22,6 +22,6 @@ reth-stages-types.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-codecs/optimism", + "reth-codecs/op", "reth-db-api/optimism" ] diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 3bfaefb39ed..2f8f37bcd35 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -146,7 +146,7 @@ c-kzg = [ ] optimism = [ "dep:op-alloy-consensus", - "reth-codecs?/optimism", + "reth-codecs?/op", "revm-primitives/optimism", ] alloy-compat = [ diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 57fe9f726c7..8fbf1632403 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -67,7 +67,7 @@ alloy = [ "dep:modular-bitfield", "dep:alloy-trie", ] -optimism = ["alloy", "dep:op-alloy-consensus"] +op = ["alloy", "dep:op-alloy-consensus"] test-utils = [ "std", "alloy", diff --git a/crates/storage/codecs/src/alloy/transaction/mod.rs b/crates/storage/codecs/src/alloy/transaction/mod.rs index dc27eacfacc..fe31293cd18 100644 --- a/crates/storage/codecs/src/alloy/transaction/mod.rs +++ b/crates/storage/codecs/src/alloy/transaction/mod.rs @@ -9,9 +9,9 @@ cond_mod!( ); -#[cfg(all(feature = "test-utils", feature = "optimism"))] +#[cfg(all(feature = "test-utils", feature = "op"))] pub mod optimism; -#[cfg(all(not(feature = "test-utils"), feature = "optimism"))] +#[cfg(all(not(feature = "test-utils"), feature = "op"))] mod optimism; #[cfg(test)] @@ -41,7 +41,7 @@ mod 
tests { assert_eq!(TxEip7702::bitflag_encoded_bytes(), 4); } - #[cfg(feature = "optimism")] + #[cfg(feature = "op")] #[test] fn test_ensure_backwards_compatibility_optimism() { assert_eq!(crate::alloy::transaction::optimism::TxDeposit::bitflag_encoded_bytes(), 2); @@ -89,11 +89,11 @@ mod tests { )); } - #[cfg(feature = "optimism")] + #[cfg(feature = "op")] #[test] fn test_decode_deposit() { test_decode::(&hex!( "8108ac8f15983d59b6ae4911a00ff7bfcd2e53d2950926f8c82c12afad02861c46fcb293e776204052725e1c08ff2e9ff602ca916357601fa972a14094891fe3598b718758f22c46f163c18bcaa6296ce87e5267ef3fd932112842fbbf79011548cdf067d93ce6098dfc0aaf5a94531e439f30d6dfd0c6" - )); + )); } } diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 05581b9725d..4f9c2d76b3f 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -82,4 +82,4 @@ arbitrary = [ "reth-stages-types/arbitrary", "alloy-consensus/arbitrary", ] -optimism = ["reth-primitives/optimism", "reth-codecs/optimism"] +optimism = ["reth-primitives/optimism", "reth-codecs/op"] diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 2875b91149c..5a9595794d9 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -92,7 +92,7 @@ optimism = [ "reth-primitives/optimism", "reth-execution-types/optimism", "reth-optimism-primitives", - "reth-codecs/optimism", + "reth-codecs/op", "reth-db/optimism", "reth-db-api/optimism", "revm/optimism", From d7f5846a37366a506c0192a4aeb571d22ae6ed5a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 20:53:41 +0100 Subject: [PATCH 826/970] chore: add tx trait bounds to primitives (#13075) --- crates/primitives-traits/src/node.rs | 27 ++++----------------------- 1 file changed, 4 insertions(+), 23 deletions(-) diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index e610c094ba2..5b3691d2fdf 100644 --- 
a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,9 +1,8 @@ -use core::fmt; - use crate::{ Block, BlockBody, BlockHeader, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, - FullSignedTx, FullTxType, MaybeArbitrary, MaybeSerde, Receipt, + FullSignedTx, FullTxType, Receipt, SignedTransaction, TxType, }; +use core::fmt; /// Configures all the primitive types of the node. pub trait NodePrimitives: @@ -16,27 +15,9 @@ pub trait NodePrimitives: /// Block body primitive. type BlockBody: BlockBody; /// Signed version of the transaction type. - type SignedTx: Send - + Sync - + Unpin - + Clone - + fmt::Debug - + PartialEq - + Eq - + MaybeSerde - + MaybeArbitrary - + 'static; + type SignedTx: SignedTransaction + 'static; /// Transaction envelope type ID. - type TxType: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + MaybeArbitrary - + 'static; + type TxType: TxType + 'static; /// A receipt. type Receipt: Receipt; } From 756eafa1aa05b98d393aadc1d01666814a33b372 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 21:20:45 +0100 Subject: [PATCH 827/970] chore: disable default features op forks (#13073) --- .github/assets/check_rv32imac.sh | 2 +- Cargo.toml | 2 +- crates/optimism/evm/Cargo.toml | 3 ++- crates/optimism/hardforks/src/dev.rs | 1 + crates/optimism/hardforks/src/lib.rs | 1 + 5 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh index 9032c05b9d6..0556fa31dea 100755 --- a/.github/assets/check_rv32imac.sh +++ b/.github/assets/check_rv32imac.sh @@ -6,9 +6,9 @@ crates_to_check=( reth-codecs-derive reth-ethereum-forks reth-primitives-traits + reth-optimism-forks # reth-evm # reth-primitives - # reth-optimism-forks # reth-optimism-chainspec ) diff --git a/Cargo.toml b/Cargo.toml index 75feb6636c4..06f59719d61 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -375,7 +375,7 @@ reth-node-types = { path = 
"crates/node/types" } reth-optimism-chainspec = { path = "crates/optimism/chainspec" } reth-optimism-cli = { path = "crates/optimism/cli" } reth-optimism-consensus = { path = "crates/optimism/consensus" } -reth-optimism-forks = { path = "crates/optimism/hardforks" } +reth-optimism-forks = { path = "crates/optimism/hardforks", default-features = false } reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-optimism-primitives = { path = "crates/optimism/primitives" } reth-optimism-rpc = { path = "crates/optimism/rpc" } diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index c640b130841..95657e0ff20 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -65,7 +65,8 @@ std = [ "revm-primitives/std", "revm/std", "reth-ethereum-forks/std", - "derive_more/std" + "derive_more/std", + "reth-optimism-forks/std" ] optimism = [ "reth-primitives/optimism", diff --git a/crates/optimism/hardforks/src/dev.rs b/crates/optimism/hardforks/src/dev.rs index 5fe77a31402..6dcd28c46c9 100644 --- a/crates/optimism/hardforks/src/dev.rs +++ b/crates/optimism/hardforks/src/dev.rs @@ -1,3 +1,4 @@ +use alloc::vec; use alloy_primitives::U256; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition}; diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 3915bcf6cbd..bf6ca98ce4e 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -6,6 +6,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; From 156984b377690ad651212fec3e769199b4434047 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 21:36:30 +0100 Subject: [PATCH 828/970] chore: misc direct imports (#13079) --- .../evm/execution-types/src/execution_outcome.rs | 16 ++++++++-------- 1 file changed, 8 
insertions(+), 8 deletions(-) diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 7acbfea3366..c9e85ae444f 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,16 +1,14 @@ -use std::collections::HashMap; - +use crate::BlockExecutionOutput; use alloy_eips::eip7685::Requests; -use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; -use reth_primitives::{logs_bloom, Account, Bytecode, Receipts, StorageEntry}; -use reth_primitives_traits::{receipt::ReceiptExt, Receipt}; +use alloy_primitives::{logs_bloom, Address, BlockNumber, Bloom, Log, B256, U256}; +use reth_primitives::Receipts; +use reth_primitives_traits::{receipt::ReceiptExt, Account, Bytecode, Receipt, StorageEntry}; use reth_trie::HashedPostState; use revm::{ db::{states::BundleState, BundleAccount}, primitives::AccountInfo, }; - -use crate::BlockExecutionOutput; +use std::collections::HashMap; /// Represents a changed account #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -376,10 +374,12 @@ mod tests { use super::*; #[cfg(not(feature = "optimism"))] use alloy_primitives::bytes; + #[cfg(not(feature = "optimism"))] + use alloy_primitives::LogData; use alloy_primitives::{Address, B256}; use reth_primitives::Receipts; #[cfg(not(feature = "optimism"))] - use reth_primitives::{LogData, TxType}; + use reth_primitives::TxType; #[test] #[cfg(not(feature = "optimism"))] From 65193bdaf34bf7b2c89a53a32798150b06579d01 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 21:56:21 +0100 Subject: [PATCH 829/970] feat: add std feature to network-peers (#13078) --- .github/assets/check_wasm.sh | 1 + crates/chainspec/Cargo.toml | 3 ++- crates/net/p2p/Cargo.toml | 3 ++- crates/net/peers/Cargo.toml | 8 ++++++++ crates/net/peers/src/bootnodes/mod.rs | 1 + crates/net/peers/src/lib.rs | 17 ++++++++++++----- crates/net/peers/src/node_record.rs | 11 
+++++++---- crates/net/peers/src/trusted_peer.rs | 25 ++++++++++++++----------- crates/optimism/chainspec/Cargo.toml | 3 ++- 9 files changed, 49 insertions(+), 23 deletions(-) diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 11e5b5e00b9..971327f0cb2 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -70,6 +70,7 @@ exclude_crates=( reth-transaction-pool # c-kzg reth-trie-parallel # tokio reth-testing-utils + reth-network-peers ) # Array to hold the results diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index 67968ee0a88..0e56cf2d3d9 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -50,7 +50,8 @@ std = [ "once_cell/std", "alloy-rlp/std", "reth-ethereum-forks/std", - "derive_more/std" + "derive_more/std", + "reth-network-peers/std" ] arbitrary = [ "alloy-chains/arbitrary", diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index a72110647c4..2c61da75184 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -58,5 +58,6 @@ std = [ "alloy-primitives/std", "reth-primitives-traits/std", "alloy-consensus/std", - "derive_more/std" + "derive_more/std", + "reth-network-peers/std" ] diff --git a/crates/net/peers/Cargo.toml b/crates/net/peers/Cargo.toml index 5ac24edea75..8ca5faec93d 100644 --- a/crates/net/peers/Cargo.toml +++ b/crates/net/peers/Cargo.toml @@ -35,5 +35,13 @@ serde_json.workspace = true tokio = { workspace = true, features = ["net", "macros", "rt"] } [features] +default = ["std"] +std = [ + "alloy-primitives/std", + "alloy-rlp/std", + "secp256k1?/std", + "serde_with/std", + "thiserror/std" +] secp256k1 = ["dep:secp256k1", "enr/secp256k1"] net = ["dep:tokio", "tokio?/net"] diff --git a/crates/net/peers/src/bootnodes/mod.rs b/crates/net/peers/src/bootnodes/mod.rs index 31c91e5d1ce..b149c108a96 100644 --- a/crates/net/peers/src/bootnodes/mod.rs +++ b/crates/net/peers/src/bootnodes/mod.rs @@ -1,6 +1,7 @@ //! 
Bootnodes for the network use crate::NodeRecord; +use alloc::vec::Vec; mod ethereum; pub use ethereum::*; diff --git a/crates/net/peers/src/lib.rs b/crates/net/peers/src/lib.rs index 1d60994d8e1..3e2777c2df8 100644 --- a/crates/net/peers/src/lib.rs +++ b/crates/net/peers/src/lib.rs @@ -52,9 +52,16 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{ + format, + string::{String, ToString}, +}; use alloy_primitives::B512; -use std::str::FromStr; +use core::str::FromStr; // Re-export PeerId for ease of use. pub use enr::Enr; @@ -137,8 +144,8 @@ impl AnyNode { let node_record = NodeRecord { address: enr .ip4() - .map(std::net::IpAddr::from) - .or_else(|| enr.ip6().map(std::net::IpAddr::from))?, + .map(core::net::IpAddr::from) + .or_else(|| enr.ip6().map(core::net::IpAddr::from))?, tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, udp_port: enr.udp4().or_else(|| enr.udp6())?, id: pk2id(&enr.public_key()), @@ -186,8 +193,8 @@ impl FromStr for AnyNode { } } -impl std::fmt::Display for AnyNode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl core::fmt::Display for AnyNode { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::NodeRecord(record) => write!(f, "{record}"), #[cfg(feature = "secp256k1")] diff --git a/crates/net/peers/src/node_record.rs b/crates/net/peers/src/node_record.rs index ed48e242c1d..15ef5ad8522 100644 --- a/crates/net/peers/src/node_record.rs +++ b/crates/net/peers/src/node_record.rs @@ -1,15 +1,18 @@ //! Commonly used `NodeRecord` type for peers. 
-use std::{ +use crate::PeerId; +use alloc::{ + format, + string::{String, ToString}, +}; +use alloy_rlp::{RlpDecodable, RlpEncodable}; +use core::{ fmt, fmt::Write, net::{IpAddr, Ipv4Addr, SocketAddr}, num::ParseIntError, str::FromStr, }; - -use crate::PeerId; -use alloy_rlp::{RlpDecodable, RlpEncodable}; use serde_with::{DeserializeFromStr, SerializeDisplay}; #[cfg(feature = "secp256k1")] diff --git a/crates/net/peers/src/trusted_peer.rs b/crates/net/peers/src/trusted_peer.rs index aa7e0a01533..b87c4d6da2f 100644 --- a/crates/net/peers/src/trusted_peer.rs +++ b/crates/net/peers/src/trusted_peer.rs @@ -1,14 +1,14 @@ //! `NodeRecord` type that uses a domain instead of an IP. use crate::{NodeRecord, PeerId}; -use serde_with::{DeserializeFromStr, SerializeDisplay}; -use std::{ +use alloc::string::{String, ToString}; +use core::{ fmt::{self, Write}, - io::Error, net::IpAddr, num::ParseIntError, str::FromStr, }; +use serde_with::{DeserializeFromStr, SerializeDisplay}; use url::Host; /// Represents the node record of a trusted peer. The only difference between this and a @@ -45,11 +45,13 @@ impl TrustedPeer { Self { host, tcp_port: port, udp_port: port, id } } + #[cfg(any(test, feature = "std"))] const fn to_node_record(&self, ip: IpAddr) -> NodeRecord { NodeRecord { address: ip, id: self.id, tcp_port: self.tcp_port, udp_port: self.udp_port } } /// Tries to resolve directly to a [`NodeRecord`] if the host is an IP address. + #[cfg(any(test, feature = "std"))] fn try_node_record(&self) -> Result { match &self.host { Host::Ipv4(ip) => Ok(self.to_node_record((*ip).into())), @@ -61,23 +63,24 @@ impl TrustedPeer { /// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`]. /// /// This use [`ToSocketAddr`](std::net::ToSocketAddrs) to resolve the host to an IP address. 
- pub fn resolve_blocking(&self) -> Result { + #[cfg(any(test, feature = "std"))] + pub fn resolve_blocking(&self) -> Result { let domain = match self.try_node_record() { Ok(record) => return Ok(record), Err(domain) => domain, }; // Resolve the domain to an IP address let mut ips = std::net::ToSocketAddrs::to_socket_addrs(&(domain, 0))?; - let ip = ips - .next() - .ok_or_else(|| Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found"))?; + let ip = ips.next().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found") + })?; Ok(self.to_node_record(ip.ip())) } /// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`]. #[cfg(any(test, feature = "net"))] - pub async fn resolve(&self) -> Result { + pub async fn resolve(&self) -> Result { let domain = match self.try_node_record() { Ok(record) => return Ok(record), Err(domain) => domain, @@ -85,9 +88,9 @@ impl TrustedPeer { // Resolve the domain to an IP address let mut ips = tokio::net::lookup_host(format!("{domain}:0")).await?; - let ip = ips - .next() - .ok_or_else(|| Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found"))?; + let ip = ips.next().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found") + })?; Ok(self.to_node_record(ip.ip())) } diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 7f74156b885..5ccf2660709 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -57,5 +57,6 @@ std = [ "reth-optimism-forks/std", "alloy-consensus/std", "once_cell/std", - "derive_more/std" + "derive_more/std", + "reth-network-peers/std" ] From 80d0fb0cda4a999098e5189ae00c3b3e999c739a Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 2 Dec 2024 21:03:00 +0000 Subject: [PATCH 830/970] chore: set event logs from `StaticFileProducer` and `Pruner` to `debug` (#13080) --- 
crates/node/events/src/node.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index edd85501ec0..86f1ea507ac 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -298,14 +298,14 @@ impl NodeState { fn handle_pruner_event(&self, event: PrunerEvent) { match event { PrunerEvent::Started { tip_block_number } => { - info!(tip_block_number, "Pruner started"); + debug!(tip_block_number, "Pruner started"); } PrunerEvent::Finished { tip_block_number, elapsed, stats } => { let stats = format!( "[{}]", stats.iter().map(|item| item.to_string()).collect::>().join(", ") ); - info!(tip_block_number, ?elapsed, %stats, "Pruner finished"); + debug!(tip_block_number, ?elapsed, pruned_segments = %stats, "Pruner finished"); } } } @@ -313,10 +313,10 @@ impl NodeState { fn handle_static_file_producer_event(&self, event: StaticFileProducerEvent) { match event { StaticFileProducerEvent::Started { targets } => { - info!(?targets, "Static File Producer started"); + debug!(?targets, "Static File Producer started"); } StaticFileProducerEvent::Finished { targets, elapsed } => { - info!(?targets, ?elapsed, "Static File Producer finished"); + debug!(?targets, ?elapsed, "Static File Producer finished"); } } } From bcfe9ebb25ff1318f64d3446ce3d971c9e5d15ed Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 2 Dec 2024 22:07:18 +0100 Subject: [PATCH 831/970] feat(trie): `SparseStateTrie::new` (#13068) --- crates/trie/sparse/src/state.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 0ca290e2d0c..cf0bc20abe4 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -53,6 +53,18 @@ impl SparseStateTrie { } impl SparseStateTrie { + /// Create new [`SparseStateTrie`] with blinded node provider factory. 
+ pub fn new(provider_factory: F) -> Self { + Self { + provider_factory, + state: Default::default(), + storages: Default::default(), + revealed: Default::default(), + retain_updates: false, + account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), + } + } + /// Set the retention of branch node updates and deletions. pub const fn with_updates(mut self, retain_updates: bool) -> Self { self.retain_updates = retain_updates; From 2c5a1a743a43a2fd2cc194fd7df1825989bee35d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 22:17:54 +0100 Subject: [PATCH 832/970] chore: disable nybbles default feature (#13081) --- Cargo.toml | 2 +- crates/evm/execution-errors/Cargo.toml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 06f59719d61..550a7135206 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -503,7 +503,7 @@ modular-bitfield = "0.11.2" notify = { version = "6.1.1", default-features = false, features = [ "macos_fsevent", ] } -nybbles = "0.2.1" +nybbles = { version = "0.2.1", default-features = false } once_cell = { version = "1.19", default-features = false, features = [ "critical-section", ] } diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index 01727cd70b5..5e1755c0c55 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -32,5 +32,6 @@ std = [ "alloy-primitives/std", "revm-primitives/std", "alloy-rlp/std", - "derive_more/std" + "derive_more/std", + "nybbles/std" ] From 039f1215d05b736f7ca23384cd12e566cd00d639 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 2 Dec 2024 16:46:54 -0500 Subject: [PATCH 833/970] chore: make `has_eip4844` generic over `SignedTransaction` (#13083) --- crates/net/eth-wire-types/src/broadcast.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/eth-wire-types/src/broadcast.rs 
b/crates/net/eth-wire-types/src/broadcast.rs index b54fd0df2db..72a1116c392 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -8,7 +8,7 @@ use alloy_rlp::{ use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; use reth_primitives::TransactionSigned; -use reth_primitives_traits::SignedTransaction; +use reth_primitives_traits::{SignedTransaction, Transaction}; use std::{ collections::{HashMap, HashSet}, mem, @@ -94,7 +94,7 @@ pub struct Transactions( pub Vec, ); -impl Transactions { +impl Transactions { /// Returns `true` if the list of transactions contains any blob transactions. pub fn has_eip4844(&self) -> bool { self.0.iter().any(|tx| tx.is_eip4844()) From 98319537814fe689e487090d2b3083375684c581 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 2 Dec 2024 22:55:56 +0100 Subject: [PATCH 834/970] chore: flatten reth-primitives dep (#13082) --- Cargo.lock | 3 ++- crates/storage/errors/Cargo.toml | 7 ++++--- crates/storage/errors/src/provider.rs | 4 ++-- crates/storage/errors/src/writer.rs | 2 +- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66cd14d1e2a..8142bbc8387 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9298,7 +9298,8 @@ dependencies = [ "alloy-rlp", "derive_more 1.0.0", "reth-fs-util", - "reth-primitives", + "reth-primitives-traits", + "reth-static-file-types", ] [[package]] diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index 0f2cf03f652..2e864e09d43 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -12,8 +12,9 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-fs-util.workspace = true +reth-static-file-types.workspace = true # ethereum alloy-eips.workspace = true @@ -26,9 +27,9 @@ derive_more.workspace = true [features] default = 
["std"] std = [ - "reth-primitives/std", "alloy-eips/std", "alloy-primitives/std", "alloy-rlp/std", - "derive_more/std" + "derive_more/std", + "reth-primitives-traits/std" ] diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index e69c0343f56..d4b69cffb08 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -3,7 +3,8 @@ use alloc::{boxed::Box, string::String}; use alloy_eips::{BlockHashOrNumber, HashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; use derive_more::Display; -use reth_primitives::{GotExpected, StaticFileSegment}; +use reth_primitives_traits::GotExpected; +use reth_static_file_types::StaticFileSegment; /// Provider result type. pub type ProviderResult = Result; @@ -165,7 +166,6 @@ impl core::error::Error for ProviderError { fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { Self::Database(source) => core::error::Error::source(source), - Self::Rlp(source) => core::error::Error::source(source), Self::StorageLockError(source) => core::error::Error::source(source), Self::UnifiedStorageWriterError(source) => core::error::Error::source(source), _ => Option::None, diff --git a/crates/storage/errors/src/writer.rs b/crates/storage/errors/src/writer.rs index 10d4ad96ed3..3e060d7005d 100644 --- a/crates/storage/errors/src/writer.rs +++ b/crates/storage/errors/src/writer.rs @@ -1,5 +1,5 @@ use crate::db::DatabaseError; -use reth_primitives::StaticFileSegment; +use reth_static_file_types::StaticFileSegment; /// `UnifiedStorageWriter` related errors /// `StorageWriter` related errors From 9ed9fa241d66caadc9e6b6065cbbc5984b57b97a Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 3 Dec 2024 03:36:09 +0400 Subject: [PATCH 835/970] refactor: rename `TransactionSignedEcRecovered` to `RecoveredTx` (#13074) --- crates/blockchain-tree/src/blockchain_tree.rs | 6 +- crates/chain-state/src/test_utils.rs | 7 +- 
crates/evm/execution-types/src/chain.rs | 9 +- crates/optimism/node/src/txpool.rs | 5 +- crates/optimism/node/tests/it/priority.rs | 4 +- crates/optimism/payload/src/builder.rs | 2 +- crates/optimism/payload/src/error.rs | 4 +- crates/optimism/rpc/src/eth/transaction.rs | 4 +- crates/payload/util/src/traits.rs | 4 +- crates/payload/util/src/transaction.rs | 8 +- crates/primitives/src/block.rs | 14 +-- crates/primitives/src/lib.rs | 4 +- crates/primitives/src/transaction/error.rs | 2 +- crates/primitives/src/transaction/mod.rs | 92 +++++++++++-------- crates/primitives/src/transaction/pooled.rs | 92 +++++-------------- .../rpc-eth-api/src/helpers/pending_block.rs | 4 +- crates/rpc/rpc-eth-types/src/transaction.rs | 12 +-- .../rpc/rpc-types-compat/src/transaction.rs | 12 +-- crates/rpc/rpc/src/eth/bundle.rs | 2 +- crates/rpc/rpc/src/eth/filter.rs | 6 +- crates/rpc/rpc/src/eth/helpers/types.rs | 4 +- crates/rpc/rpc/src/eth/sim_bundle.rs | 2 +- crates/rpc/rpc/src/txpool.rs | 8 +- crates/transaction-pool/src/maintain.rs | 6 +- crates/transaction-pool/src/pool/best.rs | 10 +- .../transaction-pool/src/test_utils/mock.rs | 16 ++-- crates/transaction-pool/src/traits.rs | 46 ++++------ crates/transaction-pool/src/validate/mod.rs | 8 +- 28 files changed, 170 insertions(+), 223 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 757729d5416..91ddd75f2a7 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1390,7 +1390,7 @@ mod tests { use reth_node_types::FullNodePrimitives; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, - Account, BlockBody, Transaction, TransactionSigned, TransactionSignedEcRecovered, + Account, BlockBody, RecoveredTx, Transaction, TransactionSigned, }; use reth_provider::{ providers::ProviderNodeTypes, @@ -1574,7 +1574,7 @@ mod tests { } let single_tx_cost = U256::from(INITIAL_BASE_FEE 
* MIN_TRANSACTION_GAS); - let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { + let mock_tx = |nonce: u64| -> RecoveredTx { TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), @@ -1591,7 +1591,7 @@ mod tests { let mock_block = |number: u64, parent: Option, - body: Vec, + body: Vec, num_of_signer_txs: u64| -> SealedBlockWithSenders { let signed_body = diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index f6b0a4f1772..1cd9f2df96b 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -14,9 +14,8 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, SealedBlock, + BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, RecoveredTx, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, - TransactionSignedEcRecovered, }; use reth_storage_api::NodePrimitivesProvider; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; @@ -91,7 +90,7 @@ impl TestBlockBuilder { ) -> SealedBlockWithSenders { let mut rng = thread_rng(); - let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { + let mock_tx = |nonce: u64| -> RecoveredTx { let tx = Transaction::Eip1559(TxEip1559 { chain_id: self.chain_spec.chain.id(), nonce, @@ -109,7 +108,7 @@ impl TestBlockBuilder { let num_txs = rng.gen_range(0..5); let signer_balance_decrease = Self::single_tx_cost() * U256::from(num_txs); - let transactions: Vec = (0..num_txs) + let transactions: Vec = (0..num_txs) .map(|_| { let tx = mock_tx(self.signer_build_account_info.nonce); self.signer_build_account_info.nonce += 1; diff --git a/crates/evm/execution-types/src/chain.rs 
b/crates/evm/execution-types/src/chain.rs index 20bf5c6d24d..cbdb2296bf6 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -8,8 +8,8 @@ use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ - transaction::SignedTransactionIntoRecoveredExt, SealedBlockFor, SealedBlockWithSenders, - SealedHeader, TransactionSignedEcRecovered, + transaction::SignedTransactionIntoRecoveredExt, RecoveredTx, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, }; use reth_primitives_traits::{Block, BlockBody, NodePrimitives, SignedTransaction}; use reth_trie::updates::TrieUpdates; @@ -436,14 +436,13 @@ impl>> ChainBlocks<'_, self.blocks.values().flat_map(|block| block.transactions_with_sender()) } - /// Returns an iterator over all [`TransactionSignedEcRecovered`] in the blocks + /// Returns an iterator over all [`RecoveredTx`] in the blocks /// /// Note: This clones the transactions since it is assumed this is part of a shared [Chain]. 
#[inline] pub fn transactions_ecrecovered( &self, - ) -> impl Iterator::Transaction>> + '_ - { + ) -> impl Iterator::Transaction>> + '_ { self.transactions_with_sender().map(|(signer, tx)| tx.clone().with_signer(*signer)) } diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 6d269d361d8..d8246aeb7db 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -237,7 +237,7 @@ mod tests { use alloy_primitives::{PrimitiveSignature as Signature, TxKind, U256}; use op_alloy_consensus::TxDeposit; use reth_chainspec::MAINNET; - use reth_primitives::{Transaction, TransactionSigned, TransactionSignedEcRecovered}; + use reth_primitives::{RecoveredTx, Transaction, TransactionSigned}; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, @@ -266,8 +266,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(deposit_tx, signature); - let signed_recovered = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, signer); + let signed_recovered = RecoveredTx::from_signed_transaction(signed_tx, signer); let len = signed_recovered.encode_2718_len(); let pooled_tx = EthPooledTransaction::new(signed_recovered, len); let outcome = validator.validate_one(origin, pooled_tx); diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index 35be3dfd3ee..b5487987f6a 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -27,7 +27,7 @@ use reth_optimism_node::{ use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_primitives::OpPrimitives; use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; -use reth_primitives::{SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered}; +use 
reth_primitives::{RecoveredTx, SealedBlock, Transaction, TransactionSigned}; use reth_provider::providers::BlockchainProvider2; use reth_tasks::TaskManager; use reth_transaction_pool::pool::BestPayloadTransactions; @@ -64,7 +64,7 @@ impl OpPayloadTransactions for CustomTxPriority { ..Default::default() }; let signature = sender.sign_transaction_sync(&mut end_of_block_tx).unwrap(); - let end_of_block_tx = TransactionSignedEcRecovered::from_signed_transaction( + let end_of_block_tx = RecoveredTx::from_signed_transaction( TransactionSigned::new_unhashed(Transaction::Eip1559(end_of_block_tx), signature), sender.address(), ); diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index aeaa8ef4079..6ae52188d18 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -749,7 +749,7 @@ where )) } - // Convert the transaction to a [TransactionSignedEcRecovered]. This is + // Convert the transaction to a [RecoveredTx]. This is // purely for the purposes of utilizing the `evm_config.tx_env`` function. // Deposit transactions do not have signatures, so if the tx is a deposit, this // will just pull in its `from` address. diff --git a/crates/optimism/payload/src/error.rs b/crates/optimism/payload/src/error.rs index 8a254e9835c..6b2a85e7a97 100644 --- a/crates/optimism/payload/src/error.rs +++ b/crates/optimism/payload/src/error.rs @@ -4,8 +4,8 @@ #[derive(Debug, thiserror::Error)] pub enum OpPayloadBuilderError { /// Thrown when a transaction fails to convert to a - /// [`reth_primitives::TransactionSignedEcRecovered`]. - #[error("failed to convert deposit transaction to TransactionSignedEcRecovered")] + /// [`reth_primitives::RecoveredTx`]. + #[error("failed to convert deposit transaction to RecoveredTx")] TransactionEcRecoverFailed, /// Thrown when the L1 block info could not be parsed from the calldata of the /// first transaction supplied in the payload attributes. 
diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 2b92927f649..3ba5edead55 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -6,7 +6,7 @@ use alloy_rpc_types_eth::TransactionInfo; use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; use reth_node_api::FullNodeComponents; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, TransactionSigned}; use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, @@ -81,7 +81,7 @@ where fn fill( &self, - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, tx_info: TransactionInfo, ) -> Result { let from = tx.signer(); diff --git a/crates/payload/util/src/traits.rs b/crates/payload/util/src/traits.rs index 52dad511169..5c1eb38bea3 100644 --- a/crates/payload/util/src/traits.rs +++ b/crates/payload/util/src/traits.rs @@ -1,5 +1,5 @@ use alloy_primitives::Address; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::RecoveredTx; /// Iterator that returns transactions for the block building process in the order they should be /// included in the block. @@ -12,7 +12,7 @@ pub trait PayloadTransactions { &mut self, // In the future, `ctx` can include access to state for block building purposes. ctx: (), - ) -> Option; + ) -> Option; /// Exclude descendants of the transaction with given sender and nonce from the iterator, /// because this transaction won't be included in the block. 
diff --git a/crates/payload/util/src/transaction.rs b/crates/payload/util/src/transaction.rs index a45e177d4d3..ebd3b079626 100644 --- a/crates/payload/util/src/transaction.rs +++ b/crates/payload/util/src/transaction.rs @@ -1,7 +1,7 @@ use crate::PayloadTransactions; use alloy_consensus::Transaction; use alloy_primitives::Address; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::RecoveredTx; /// An implementation of [`crate::traits::PayloadTransactions`] that yields /// a pre-defined set of transactions. @@ -26,8 +26,8 @@ impl PayloadTransactionsFixed { } } -impl PayloadTransactions for PayloadTransactionsFixed { - fn next(&mut self, _ctx: ()) -> Option { +impl PayloadTransactions for PayloadTransactionsFixed { + fn next(&mut self, _ctx: ()) -> Option { (self.index < self.transactions.len()).then(|| { let tx = self.transactions[self.index].clone(); self.index += 1; @@ -92,7 +92,7 @@ where B: PayloadTransactions, A: PayloadTransactions, { - fn next(&mut self, ctx: ()) -> Option { + fn next(&mut self, ctx: ()) -> Option { while let Some(tx) = self.before.next(ctx) { if let Some(before_max_gas) = self.before_max_gas { if self.before_gas + tx.transaction.gas_limit() <= before_max_gas { diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 9e00a2e582c..9edbb2471ef 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,6 +1,6 @@ use crate::{ traits::BlockExt, transaction::SignedTransactionIntoRecoveredExt, BlockBodyTxExt, GotExpected, - SealedHeader, TransactionSigned, TransactionSignedEcRecovered, + RecoveredTx, SealedHeader, TransactionSigned, }; use alloc::vec::Vec; use alloy_consensus::Header; @@ -206,11 +206,7 @@ impl BlockWithSenders { #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator< - Item = TransactionSignedEcRecovered< - ::Transaction, - >, - > + ) -> impl Iterator::Transaction>> where ::Transaction: SignedTransaction, { @@ -560,11 +556,7 @@ impl 
SealedBlockWithSenders { #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator< - Item = TransactionSignedEcRecovered< - ::Transaction, - >, - > + ) -> impl Iterator::Transaction>> where ::Transaction: SignedTransaction, { diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 224e025f39d..97407ba610c 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -52,8 +52,8 @@ pub use static_file::StaticFileSegment; pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, BlobTransaction, InvalidTransactionError, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, Transaction, TransactionMeta, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, + PooledTransactionsElementEcRecovered, RecoveredTx, Transaction, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, }; // Re-exports diff --git a/crates/primitives/src/transaction/error.rs b/crates/primitives/src/transaction/error.rs index 790292cd82b..78f6cf5e5fd 100644 --- a/crates/primitives/src/transaction/error.rs +++ b/crates/primitives/src/transaction/error.rs @@ -76,7 +76,7 @@ pub enum TransactionConversionError { } /// Represents error variants than can happen when trying to convert a -/// [`TransactionSignedEcRecovered`](crate::TransactionSignedEcRecovered) transaction. +/// [`RecoveredTx`](crate::RecoveredTx) transaction. #[derive(Debug, Clone, Eq, PartialEq, derive_more::Display)] pub enum TryFromRecoveredTransactionError { /// Thrown if the transaction type is unsupported. 
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f4c4a0f2997..0eeaf310853 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1107,56 +1107,56 @@ impl TransactionSigned { } } - /// Returns the [`TransactionSignedEcRecovered`] transaction with the given sender. + /// Returns the [`RecoveredTx`] transaction with the given sender. #[inline] - pub const fn with_signer(self, signer: Address) -> TransactionSignedEcRecovered { - TransactionSignedEcRecovered::from_signed_transaction(self, signer) + pub const fn with_signer(self, signer: Address) -> RecoveredTx { + RecoveredTx::from_signed_transaction(self, signer) } - /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] + /// Consumes the type, recover signer and return [`RecoveredTx`] /// /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`]. - pub fn into_ecrecovered(self) -> Option { + pub fn into_ecrecovered(self) -> Option { let signer = self.recover_signer()?; - Some(TransactionSignedEcRecovered { signed_transaction: self, signer }) + Some(RecoveredTx { signed_transaction: self, signer }) } - /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] _without + /// Consumes the type, recover signer and return [`RecoveredTx`] _without /// ensuring that the signature has a low `s` value_ (EIP-2). /// /// Returns `None` if the transaction's signature is invalid, see also /// [`Self::recover_signer_unchecked`]. - pub fn into_ecrecovered_unchecked(self) -> Option { + pub fn into_ecrecovered_unchecked(self) -> Option { let signer = self.recover_signer_unchecked()?; - Some(TransactionSignedEcRecovered { signed_transaction: self, signer }) + Some(RecoveredTx { signed_transaction: self, signer }) } - /// Tries to recover signer and return [`TransactionSignedEcRecovered`] by cloning the type. 
- pub fn try_ecrecovered(&self) -> Option { + /// Tries to recover signer and return [`RecoveredTx`] by cloning the type. + pub fn try_ecrecovered(&self) -> Option { let signer = self.recover_signer()?; - Some(TransactionSignedEcRecovered { signed_transaction: self.clone(), signer }) + Some(RecoveredTx { signed_transaction: self.clone(), signer }) } - /// Tries to recover signer and return [`TransactionSignedEcRecovered`]. + /// Tries to recover signer and return [`RecoveredTx`]. /// /// Returns `Err(Self)` if the transaction's signature is invalid, see also /// [`Self::recover_signer`]. - pub fn try_into_ecrecovered(self) -> Result { + pub fn try_into_ecrecovered(self) -> Result { match self.recover_signer() { None => Err(self), - Some(signer) => Ok(TransactionSignedEcRecovered { signed_transaction: self, signer }), + Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), } } - /// Tries to recover signer and return [`TransactionSignedEcRecovered`]. _without ensuring that + /// Tries to recover signer and return [`RecoveredTx`]. _without ensuring that /// the signature has a low `s` value_ (EIP-2). /// /// Returns `Err(Self)` if the transaction's signature is invalid, see also /// [`Self::recover_signer_unchecked`]. 
- pub fn try_into_ecrecovered_unchecked(self) -> Result { + pub fn try_into_ecrecovered_unchecked(self) -> Result { match self.recover_signer_unchecked() { None => Err(self), - Some(signer) => Ok(TransactionSignedEcRecovered { signed_transaction: self, signer }), + Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), } } @@ -1433,8 +1433,8 @@ impl alloy_consensus::Transaction for TransactionSigned { } } -impl From for TransactionSigned { - fn from(recovered: TransactionSignedEcRecovered) -> Self { +impl From for TransactionSigned { + fn from(recovered: RecoveredTx) -> Self { recovered.signed_transaction } } @@ -1620,9 +1620,12 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { } } +/// Type alias kept for backward compatibility. +pub type TransactionSignedEcRecovered = RecoveredTx; + /// Signed transaction with recovered signer. #[derive(Debug, Clone, PartialEq, Hash, Eq, AsRef, Deref)] -pub struct TransactionSignedEcRecovered { +pub struct RecoveredTx { /// Signer of the transaction signer: Address, /// Signed transaction @@ -1631,9 +1634,9 @@ pub struct TransactionSignedEcRecovered { signed_transaction: T, } -// === impl TransactionSignedEcRecovered === +// === impl RecoveredTx === -impl TransactionSignedEcRecovered { +impl RecoveredTx { /// Signer of transaction recovered from signature pub const fn signer(&self) -> Address { self.signer @@ -1654,7 +1657,7 @@ impl TransactionSignedEcRecovered { (self.signed_transaction, self.signer) } - /// Create [`TransactionSignedEcRecovered`] from [`TransactionSigned`] and [`Address`] of the + /// Create [`RecoveredTx`] from [`TransactionSigned`] and [`Address`] of the /// signer. #[inline] pub const fn from_signed_transaction(signed_transaction: T, signer: Address) -> Self { @@ -1662,7 +1665,7 @@ impl TransactionSignedEcRecovered { } } -impl Encodable for TransactionSignedEcRecovered { +impl Encodable for RecoveredTx { /// This encodes the transaction _with_ the signature, and an rlp header. 
/// /// Refer to docs for [`TransactionSigned::encode`] for details on the exact format. @@ -1675,7 +1678,7 @@ impl Encodable for TransactionSignedEcRecovered { } } -impl Decodable for TransactionSignedEcRecovered { +impl Decodable for RecoveredTx { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { let signed_transaction = T::decode(buf)?; let signer = signed_transaction @@ -1685,20 +1688,38 @@ impl Decodable for TransactionSignedEcRecovered { } } -/// Extension trait for [`SignedTransaction`] to convert it into [`TransactionSignedEcRecovered`]. +impl Encodable2718 for RecoveredTx { + fn type_flag(&self) -> Option { + self.signed_transaction.type_flag() + } + + fn encode_2718_len(&self) -> usize { + self.signed_transaction.encode_2718_len() + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + self.signed_transaction.encode_2718(out) + } + + fn trie_hash(&self) -> B256 { + self.signed_transaction.trie_hash() + } +} + +/// Extension trait for [`SignedTransaction`] to convert it into [`RecoveredTx`]. pub trait SignedTransactionIntoRecoveredExt: SignedTransaction { - /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] _without + /// Consumes the type, recover signer and return [`RecoveredTx`] _without /// ensuring that the signature has a low `s` value_ (EIP-2). /// /// Returns `None` if the transaction's signature is invalid. - fn into_ecrecovered_unchecked(self) -> Option> { + fn into_ecrecovered_unchecked(self) -> Option> { let signer = self.recover_signer_unchecked()?; - Some(TransactionSignedEcRecovered::from_signed_transaction(self, signer)) + Some(RecoveredTx::from_signed_transaction(self, signer)) } - /// Returns the [`TransactionSignedEcRecovered`] transaction with the given sender. - fn with_signer(self, signer: Address) -> TransactionSignedEcRecovered { - TransactionSignedEcRecovered::from_signed_transaction(self, signer) + /// Returns the [`RecoveredTx`] transaction with the given sender. 
+ fn with_signer(self, signer: Address) -> RecoveredTx { + RecoveredTx::from_signed_transaction(self, signer) } } @@ -1944,7 +1965,7 @@ where mod tests { use crate::{ transaction::{TxEip1559, TxKind, TxLegacy}, - Transaction, TransactionSigned, TransactionSignedEcRecovered, + RecoveredTx, Transaction, TransactionSigned, }; use alloy_consensus::Transaction as _; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; @@ -2205,8 +2226,7 @@ mod tests { let tx = TransactionSigned::decode(&mut &input[..]).unwrap(); let recovered = tx.into_ecrecovered().unwrap(); - let decoded = - TransactionSignedEcRecovered::decode(&mut &alloy_rlp::encode(&recovered)[..]).unwrap(); + let decoded = RecoveredTx::decode(&mut &alloy_rlp::encode(&recovered)[..]).unwrap(); assert_eq!(recovered, decoded) } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 5015f5b8e46..cdcc6b808dd 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -5,9 +5,7 @@ use super::{ error::TransactionConversionError, recover_signer_unchecked, signature::recover_signer, TxEip7702, }; -use crate::{ - BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, -}; +use crate::{BlobTransaction, RecoveredTx, Transaction, TransactionSigned, TxType}; use alloc::vec::Vec; use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, @@ -26,7 +24,6 @@ use alloy_primitives::{ use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; use core::hash::{Hash, Hasher}; -use derive_more::{AsRef, Deref}; use reth_primitives_traits::{InMemorySize, SignedTransaction}; use revm_primitives::keccak256; use serde::{Deserialize, Serialize}; @@ -77,7 +74,7 @@ impl PooledTransactionsElement { } } - /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a + /// Converts from an EIP-4844 [`RecoveredTx`] to a /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. 
/// /// Returns an `Err` containing the original `TransactionSigned` if the transaction is not @@ -151,7 +148,7 @@ impl PooledTransactionsElement { pub fn try_into_ecrecovered(self) -> Result { match self.recover_signer() { None => Err(self), - Some(signer) => Ok(PooledTransactionsElementEcRecovered { transaction: self, signer }), + Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), } } @@ -167,10 +164,10 @@ impl PooledTransactionsElement { } } - /// Create [`TransactionSignedEcRecovered`] by converting this transaction into + /// Create [`RecoveredTx`] by converting this transaction into /// [`TransactionSigned`] and [`Address`] of the signer. - pub fn into_ecrecovered_transaction(self, signer: Address) -> TransactionSignedEcRecovered { - TransactionSignedEcRecovered::from_signed_transaction(self.into_transaction(), signer) + pub fn into_ecrecovered_transaction(self, signer: Address) -> RecoveredTx { + RecoveredTx::from_signed_transaction(self.into_transaction(), signer) } /// Returns the inner [`TransactionSigned`]. @@ -645,7 +642,7 @@ impl InMemorySize for PooledTransactionsElement { impl From for PooledTransactionsElement { fn from(recovered: PooledTransactionsElementEcRecovered) -> Self { - recovered.into_transaction() + recovered.into_signed() } } @@ -691,92 +688,45 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { } /// A signed pooled transaction with recovered signer. -#[derive(Debug, Clone, PartialEq, Eq, AsRef, Deref)] -pub struct PooledTransactionsElementEcRecovered { - /// Signer of the transaction - signer: Address, - /// Signed transaction - #[deref] - #[as_ref] - transaction: T, -} - -impl PooledTransactionsElementEcRecovered { - /// Create an instance from the given transaction and the [`Address`] of the signer. 
- pub const fn from_signed_transaction(transaction: T, signer: Address) -> Self { - Self { transaction, signer } - } - - /// Signer of transaction recovered from signature - pub const fn signer(&self) -> Address { - self.signer - } +pub type PooledTransactionsElementEcRecovered = RecoveredTx; - /// Consume the type and return the transaction - pub fn into_transaction(self) -> T { - self.transaction - } - - /// Dissolve Self to its component - pub fn into_components(self) -> (T, Address) { - (self.transaction, self.signer) - } -} impl PooledTransactionsElementEcRecovered { - /// Transform back to [`TransactionSignedEcRecovered`] - pub fn into_ecrecovered_transaction(self) -> TransactionSignedEcRecovered { - let (tx, signer) = self.into_components(); + /// Transform back to [`RecoveredTx`] + pub fn into_ecrecovered_transaction(self) -> RecoveredTx { + let (tx, signer) = self.to_components(); tx.into_ecrecovered_transaction(signer) } - /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a + /// Converts from an EIP-4844 [`RecoveredTx`] to a /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. /// /// Returns the transaction is not an EIP-4844 transaction. pub fn try_from_blob_transaction( - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, sidecar: BlobTransactionSidecar, - ) -> Result { - let TransactionSignedEcRecovered { signer, signed_transaction } = tx; + ) -> Result { + let RecoveredTx { signer, signed_transaction } = tx; let transaction = PooledTransactionsElement::try_from_blob_transaction(signed_transaction, sidecar) - .map_err(|tx| TransactionSignedEcRecovered { signer, signed_transaction: tx })?; - Ok(Self { transaction, signer }) + .map_err(|tx| RecoveredTx { signer, signed_transaction: tx })?; + Ok(Self::from_signed_transaction(transaction, signer)) } } -/// Converts a `TransactionSignedEcRecovered` into a `PooledTransactionsElementEcRecovered`. 
-impl TryFrom for PooledTransactionsElementEcRecovered { +/// Converts a `Recovered` into a `PooledTransactionsElementEcRecovered`. +impl TryFrom for PooledTransactionsElementEcRecovered { type Error = TransactionConversionError; - fn try_from(tx: TransactionSignedEcRecovered) -> Result { + fn try_from(tx: RecoveredTx) -> Result { match PooledTransactionsElement::try_from(tx.signed_transaction) { Ok(pooled_transaction) => { - Ok(Self { transaction: pooled_transaction, signer: tx.signer }) + Ok(Self::from_signed_transaction(pooled_transaction, tx.signer)) } Err(_) => Err(TransactionConversionError::UnsupportedForP2P), } } } -impl Encodable2718 for PooledTransactionsElementEcRecovered { - fn type_flag(&self) -> Option { - self.transaction.type_flag() - } - - fn encode_2718_len(&self) -> usize { - self.transaction.encode_2718_len() - } - - fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - self.transaction.encode_2718(out) - } - - fn trie_hash(&self) -> B256 { - self.transaction.trie_hash() - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 4394feb2834..72d53a22a23 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -18,7 +18,7 @@ use reth_evm::{ use reth_execution_types::ExecutionOutcome; use reth_primitives::{ proofs::calculate_transaction_root, Block, BlockBody, BlockExt, InvalidTransactionError, - Receipt, SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, + Receipt, RecoveredTx, SealedBlockWithSenders, SealedHeader, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, @@ -194,7 +194,7 @@ pub trait LoadPendingBlock: /// Assembles a [`Receipt`] for a transaction, based on its [`ExecutionResult`]. 
fn assemble_receipt( &self, - tx: &TransactionSignedEcRecovered, + tx: &RecoveredTx, result: ExecutionResult, cumulative_gas_used: u64, ) -> Receipt { diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index 83ef97807de..f994638d3af 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -4,7 +4,7 @@ use alloy_primitives::B256; use alloy_rpc_types_eth::TransactionInfo; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, TransactionSigned}; use reth_primitives_traits::SignedTransaction; use reth_rpc_types_compat::{ transaction::{from_recovered, from_recovered_with_block_context}, @@ -15,13 +15,13 @@ use reth_rpc_types_compat::{ #[derive(Debug, Clone, Eq, PartialEq)] pub enum TransactionSource { /// Transaction exists in the pool (Pending) - Pool(TransactionSignedEcRecovered), + Pool(RecoveredTx), /// Transaction already included in a block /// /// This can be a historical block or a pending block (received from the CL) Block { /// Transaction fetched via provider - transaction: TransactionSignedEcRecovered, + transaction: RecoveredTx, /// Index of the transaction in the block index: u64, /// Hash of the block. @@ -37,7 +37,7 @@ pub enum TransactionSource { impl TransactionSource { /// Consumes the type and returns the wrapped transaction. 
- pub fn into_recovered(self) -> TransactionSignedEcRecovered { + pub fn into_recovered(self) -> RecoveredTx { self.into() } @@ -63,7 +63,7 @@ impl TransactionSource { } /// Returns the transaction and block related info, if not pending - pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { + pub fn split(self) -> (RecoveredTx, TransactionInfo) { match self { Self::Pool(tx) => { let hash = tx.trie_hash(); @@ -86,7 +86,7 @@ impl TransactionSource { } } -impl From> for TransactionSignedEcRecovered { +impl From> for RecoveredTx { fn from(value: TransactionSource) -> Self { match value { TransactionSource::Pool(tx) => tx, diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs index b439b61d44e..d6180ca1ee2 100644 --- a/crates/rpc/rpc-types-compat/src/transaction.rs +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_eth::{ request::{TransactionInput, TransactionRequest}, TransactionInfo, }; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, TransactionSigned}; use serde::{Deserialize, Serialize}; /// Create a new rpc transaction result for a mined transaction, using the given block hash, @@ -17,7 +17,7 @@ use serde::{Deserialize, Serialize}; /// The block hash, number, and tx index fields should be from the original block where the /// transaction was mined. pub fn from_recovered_with_block_context>( - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, tx_info: TransactionInfo, resp_builder: &T, ) -> Result { @@ -27,7 +27,7 @@ pub fn from_recovered_with_block_context>( /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. 
pub fn from_recovered>( - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, resp_builder: &T, ) -> Result { resp_builder.fill(tx, TransactionInfo::default()) @@ -53,7 +53,7 @@ pub trait TransactionCompat: /// environment related fields to `None`. fn fill( &self, - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, tx_inf: TransactionInfo, ) -> Result; @@ -63,8 +63,8 @@ pub trait TransactionCompat: fn otterscan_api_truncate_input(tx: &mut Self::Transaction); } -/// Convert [`TransactionSignedEcRecovered`] to [`TransactionRequest`] -pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> TransactionRequest { +/// Convert [`RecoveredTx`] to [`TransactionRequest`] +pub fn transaction_to_call_request(tx: RecoveredTx) -> TransactionRequest { let from = tx.signer(); let to = Some(tx.transaction.to().into()); let gas = tx.transaction.gas_limit(); diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 3a748f529a0..2924e6ea25f 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -82,7 +82,7 @@ where .map(recover_raw_transaction) .collect::, _>>()? 
.into_iter() - .map(|tx| tx.into_components()) + .map(|tx| tx.to_components()) .collect::>(); // Validate that the bundle does not contain more than MAX_BLOB_NUMBER_PER_BLOCK blob diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index b16b370b2c0..c1ef67d9b59 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -9,7 +9,7 @@ use alloy_rpc_types_eth::{ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; -use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSignedEcRecovered}; +use reth_primitives::{Receipt, RecoveredTx, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, ProviderError}; use reth_rpc_eth_api::{ EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat, @@ -621,7 +621,7 @@ where /// Returns all new pending transactions received since the last poll. async fn drain(&self) -> FilterChanges where - T: PoolTransaction>, + T: PoolTransaction>, { let mut pending_txs = Vec::new(); let mut prepared_stream = self.txs_stream.lock().await; @@ -651,7 +651,7 @@ trait FullTransactionsFilter: fmt::Debug + Send + Sync + Unpin + 'static { impl FullTransactionsFilter for FullTransactionsReceiver where - T: PoolTransaction> + 'static, + T: PoolTransaction> + 'static, TxCompat: TransactionCompat + 'static, { async fn drain(&self) -> FilterChanges { diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 157213b54e6..79fb6fcc907 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -3,7 +3,7 @@ use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; use alloy_network::{Ethereum, Network}; use alloy_rpc_types_eth::{Transaction, TransactionInfo}; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, TransactionSigned}; 
use reth_rpc_eth_api::EthApiTypes; use reth_rpc_eth_types::EthApiError; use reth_rpc_types_compat::TransactionCompat; @@ -37,7 +37,7 @@ where fn fill( &self, - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, tx_info: TransactionInfo, ) -> Result { let from = tx.signer(); diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index f77b7e79da0..87778ec6e65 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -172,7 +172,7 @@ where BundleItem::Tx { tx, can_revert } => { let recovered_tx = recover_raw_transaction(tx.clone()).map_err(EthApiError::from)?; - let (tx, signer) = recovered_tx.into_components(); + let (tx, signer) = recovered_tx.to_components(); let tx = tx.into_transaction(); let refund_percent = diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 442e28ffc4c..b12e8e7ab57 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_txpool::{ }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::RecoveredTx; use reth_rpc_api::TxPoolApiServer; use reth_rpc_types_compat::{transaction::from_recovered, TransactionCompat}; use reth_transaction_pool::{AllPoolTransactions, PoolTransaction, TransactionPool}; @@ -44,7 +44,7 @@ where resp_builder: &RpcTxB, ) -> Result<(), RpcTxB::Error> where - Tx: PoolTransaction>, + Tx: PoolTransaction>, RpcTxB: TransactionCompat, { content.entry(tx.sender()).or_default().insert( @@ -96,12 +96,12 @@ where trace!(target: "rpc::eth", "Serving txpool_inspect"); #[inline] - fn insert>>( + fn insert>>( tx: &T, inspect: &mut BTreeMap>, ) { let entry = inspect.entry(tx.sender()).or_default(); - let tx: TransactionSignedEcRecovered = tx.clone_into_consensus().into(); + let tx: RecoveredTx = tx.clone_into_consensus().into(); entry.insert( tx.nonce().to_string(), TxpoolInspectSummary { diff --git 
a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 1a5fd839926..7e28b6e2685 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -19,8 +19,7 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives::{ - PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, - TransactionSignedEcRecovered, + PooledTransactionsElementEcRecovered, RecoveredTx, SealedHeader, TransactionSigned, }; use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; @@ -604,8 +603,7 @@ where let local_transactions = local_transactions .into_iter() .map(|tx| { - let recovered: TransactionSignedEcRecovered = - tx.transaction.clone_into_consensus().into(); + let recovered: RecoveredTx = tx.transaction.clone_into_consensus().into(); recovered.into_signed() }) .collect::>(); diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index ed94bc67623..be49ce0b1fd 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -7,7 +7,7 @@ use crate::{ use alloy_primitives::Address; use core::fmt; use reth_payload_util::PayloadTransactions; -use reth_primitives::{InvalidTransactionError, TransactionSignedEcRecovered}; +use reth_primitives::{InvalidTransactionError, RecoveredTx}; use std::{ collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, sync::Arc, @@ -226,7 +226,7 @@ impl Iterator for BestTransactions { #[derive(Debug)] pub struct BestPayloadTransactions where - T: PoolTransaction>, + T: PoolTransaction>, I: Iterator>>, { invalid: HashSet
, @@ -235,7 +235,7 @@ where impl BestPayloadTransactions where - T: PoolTransaction>, + T: PoolTransaction>, I: Iterator>>, { /// Create a new `BestPayloadTransactions` with the given iterator. @@ -246,10 +246,10 @@ where impl PayloadTransactions for BestPayloadTransactions where - T: PoolTransaction>, + T: PoolTransaction>, I: Iterator>>, { - fn next(&mut self, _ctx: ()) -> Option { + fn next(&mut self, _ctx: ()) -> Option { loop { let tx = self.best.next()?; if self.invalid.contains(&tx.sender()) { diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 78982cb4657..05551151d78 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -26,7 +26,7 @@ use rand::{ }; use reth_primitives::{ transaction::TryFromRecoveredTransactionError, PooledTransactionsElementEcRecovered, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, + RecoveredTx, Transaction, TransactionSigned, TxType, }; use reth_primitives_traits::InMemorySize; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -592,7 +592,7 @@ impl MockTransaction { impl PoolTransaction for MockTransaction { type TryFromConsensusError = TryFromRecoveredTransactionError; - type Consensus = TransactionSignedEcRecovered; + type Consensus = RecoveredTx; type Pooled = PooledTransactionsElementEcRecovered; @@ -804,10 +804,10 @@ impl EthPoolTransaction for MockTransaction { } } -impl TryFrom for MockTransaction { +impl TryFrom for MockTransaction { type Error = TryFromRecoveredTransactionError; - fn try_from(tx: TransactionSignedEcRecovered) -> Result { + fn try_from(tx: RecoveredTx) -> Result { let sender = tx.signer(); let transaction = tx.into_signed(); let hash = transaction.hash(); @@ -926,7 +926,7 @@ impl From for MockTransaction { } } -impl From for TransactionSignedEcRecovered { +impl From for RecoveredTx { fn from(tx: MockTransaction) -> Self { let signed_tx = 
TransactionSigned::new(tx.clone().into(), Signature::test_signature(), *tx.hash()); @@ -1029,11 +1029,9 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { arb::<(TransactionSigned, Address)>() .prop_map(|(signed_transaction, signer)| { - TransactionSignedEcRecovered::from_signed_transaction(signed_transaction, signer) + RecoveredTx::from_signed_transaction(signed_transaction, signer) .try_into() - .expect( - "Failed to create an Arbitrary MockTransaction via TransactionSignedEcRecovered", - ) + .expect("Failed to create an Arbitrary MockTransaction via RecoveredTx") }) .boxed() } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 11c8db225b0..a5c85ce125b 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -20,8 +20,7 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSigned, - TransactionSignedEcRecovered, + PooledTransactionsElementEcRecovered, RecoveredTx, SealedBlock, Transaction, TransactionSigned, }; use reth_primitives_traits::SignedTransaction; #[cfg(feature = "serde")] @@ -577,12 +576,12 @@ pub struct AllPoolTransactions { // === impl AllPoolTransactions === impl AllPoolTransactions { - /// Returns an iterator over all pending [`TransactionSignedEcRecovered`] transactions. + /// Returns an iterator over all pending [`RecoveredTx`] transactions. pub fn pending_recovered(&self) -> impl Iterator + '_ { self.pending.iter().map(|tx| tx.transaction.clone().into()) } - /// Returns an iterator over all queued [`TransactionSignedEcRecovered`] transactions. + /// Returns an iterator over all queued [`RecoveredTx`] transactions. 
pub fn queued_recovered(&self) -> impl Iterator + '_ { self.queued.iter().map(|tx| tx.transaction.clone().into()) } @@ -1132,9 +1131,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Ethereum pool. pub trait EthPoolTransaction: PoolTransaction< - Consensus: From - + Into - + Into, + Consensus: From + Into + Into, Pooled: From + Into + Into, @@ -1166,10 +1163,10 @@ pub trait EthPoolTransaction: /// The default [`PoolTransaction`] for the [Pool](crate::Pool) for Ethereum. /// -/// This type is essentially a wrapper around [`TransactionSignedEcRecovered`] with additional +/// This type is essentially a wrapper around [`RecoveredTx`] with additional /// fields derived from the transaction that are frequently used by the pools for ordering. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct EthPooledTransaction { +pub struct EthPooledTransaction { /// `EcRecovered` transaction, the consensus format. pub(crate) transaction: T, @@ -1192,7 +1189,7 @@ impl EthPooledTransaction { /// /// Caution: In case of blob transactions, this does marks the blob sidecar as /// [`EthBlobTransactionSidecar::Missing`] - pub fn new(transaction: TransactionSignedEcRecovered, encoded_length: usize) -> Self { + pub fn new(transaction: RecoveredTx, encoded_length: usize) -> Self { let mut blob_sidecar = EthBlobTransactionSidecar::None; let gas_cost = U256::from(transaction.transaction.max_fee_per_gas()) @@ -1215,7 +1212,7 @@ impl EthPooledTransaction { } /// Return the reference to the underlying transaction. 
- pub const fn transaction(&self) -> &TransactionSignedEcRecovered { + pub const fn transaction(&self) -> &RecoveredTx { &self.transaction } } @@ -1224,12 +1221,12 @@ impl EthPooledTransaction { impl From for EthPooledTransaction { fn from(tx: PooledTransactionsElementEcRecovered) -> Self { let encoded_length = tx.encode_2718_len(); - let (tx, signer) = tx.into_components(); + let (tx, signer) = tx.to_components(); match tx { PooledTransactionsElement::BlobTransaction(tx) => { // include the blob sidecar let (tx, blob) = tx.into_parts(); - let tx = TransactionSignedEcRecovered::from_signed_transaction(tx, signer); + let tx = RecoveredTx::from_signed_transaction(tx, signer); let mut pooled = Self::new(tx, encoded_length); pooled.blob_sidecar = EthBlobTransactionSidecar::Present(blob); pooled @@ -1245,7 +1242,7 @@ impl From for EthPooledTransaction { impl PoolTransaction for EthPooledTransaction { type TryFromConsensusError = TryFromRecoveredTransactionError; - type Consensus = TransactionSignedEcRecovered; + type Consensus = RecoveredTx; type Pooled = PooledTransactionsElementEcRecovered; @@ -1406,10 +1403,10 @@ impl EthPoolTransaction for EthPooledTransaction { } } -impl TryFrom for EthPooledTransaction { +impl TryFrom for EthPooledTransaction { type Error = TryFromRecoveredTransactionError; - fn try_from(tx: TransactionSignedEcRecovered) -> Result { + fn try_from(tx: RecoveredTx) -> Result { // ensure we can handle the transaction type and its format match tx.tx_type() as u8 { 0..=EIP1559_TX_TYPE_ID | EIP7702_TX_TYPE_ID => { @@ -1433,7 +1430,7 @@ impl TryFrom for EthPooledTransaction { } } -impl From for TransactionSignedEcRecovered { +impl From for RecoveredTx { fn from(tx: EthPooledTransaction) -> Self { tx.transaction } @@ -1645,8 +1642,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, 
Default::default()); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly @@ -1667,8 +1663,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly @@ -1689,8 +1684,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly @@ -1713,8 +1707,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 300); // Check that the pooled transaction is created correctly @@ -1737,8 +1730,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let 
pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index a93825212f8..d333be87963 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -9,7 +9,7 @@ use crate::{ use alloy_eips::eip4844::BlobTransactionSidecar; use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; -use reth_primitives::{SealedBlock, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, SealedBlock}; use std::{fmt, future::Future, time::Instant}; mod constants; @@ -435,11 +435,11 @@ impl ValidPoolTransaction { } } -impl>> ValidPoolTransaction { - /// Converts to this type into a [`TransactionSignedEcRecovered`]. +impl>> ValidPoolTransaction { + /// Converts to this type into a [`RecoveredTx`]. /// /// Note: this takes `&self` since indented usage is via `Arc`. 
- pub fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { + pub fn to_recovered_transaction(&self) -> RecoveredTx { self.to_consensus().into() } } From ea82cbdc607f98ef5454c4a10912fa197810cec9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 3 Dec 2024 00:47:46 -0600 Subject: [PATCH 836/970] chore(sdk): define `OpTransactionSigned` (#11433) --- Cargo.lock | 6 + crates/optimism/bin/Cargo.toml | 3 +- crates/optimism/cli/Cargo.toml | 1 + crates/optimism/evm/Cargo.toml | 4 +- crates/optimism/node/Cargo.toml | 5 +- crates/optimism/primitives/Cargo.toml | 42 +- crates/optimism/primitives/src/lib.rs | 9 +- .../primitives/src/transaction/mod.rs | 30 +- .../primitives/src/transaction/signed.rs | 479 ++++++++++++++++++ crates/optimism/rpc/Cargo.toml | 3 +- crates/primitives/src/transaction/mod.rs | 13 +- crates/primitives/src/transaction/util.rs | 5 +- crates/storage/provider/Cargo.toml | 1 + 13 files changed, 572 insertions(+), 29 deletions(-) create mode 100644 crates/optimism/primitives/src/transaction/signed.rs diff --git a/Cargo.lock b/Cargo.lock index 8142bbc8387..b799d0e0811 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8464,14 +8464,20 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", + "alloy-rlp", "arbitrary", "bytes", "derive_more 1.0.0", "op-alloy-consensus", + "proptest", + "proptest-arbitrary-interop", + "rand 0.8.5", "reth-codecs", "reth-primitives", "reth-primitives-traits", + "revm-primitives", "rstest", + "secp256k1", "serde", ] diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 9007d084891..60fde90f191 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -44,7 +44,8 @@ optimism = [ "reth-optimism-evm/optimism", "reth-optimism-payload-builder/optimism", "reth-optimism-rpc/optimism", - "reth-provider/optimism" + "reth-provider/optimism", + "reth-optimism-primitives/op", ] dev = [ diff --git a/crates/optimism/cli/Cargo.toml 
b/crates/optimism/cli/Cargo.toml index b61a4628f4d..48ea2d07dec 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -95,6 +95,7 @@ optimism = [ "reth-execution-types/optimism", "reth-db/optimism", "reth-db-api/optimism", + "reth-optimism-primitives/op", "reth-downloaders/optimism" ] asm-keccak = [ diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 95657e0ff20..309ddc1cb4e 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -64,6 +64,7 @@ std = [ "alloy-primitives/std", "revm-primitives/std", "revm/std", + "reth-optimism-primitives/std", "reth-ethereum-forks/std", "derive_more/std", "reth-optimism-forks/std" @@ -73,5 +74,6 @@ optimism = [ "reth-execution-types/optimism", "reth-optimism-consensus/optimism", "revm/optimism", - "revm-primitives/optimism" + "revm-primitives/optimism", + "reth-optimism-primitives/op", ] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 79e0c451b79..b0b7065f336 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -41,7 +41,7 @@ reth-optimism-rpc.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-forks.workspace = true -reth-optimism-primitives.workspace = true +reth-optimism-primitives = { workspace = true, features = ["serde"] } # revm with required optimism features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -96,7 +96,8 @@ optimism = [ "reth-optimism-consensus/optimism", "reth-db/optimism", "reth-optimism-node/optimism", - "reth-node-core/optimism" + "reth-node-core/optimism", + "reth-optimism-primitives/op", ] asm-keccak = [ "reth-primitives/asm-keccak", diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index bdc423e1498..38f76aa6256 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -20,45 +20,60 
@@ reth-codecs = { workspace = true, optional = true, features = ["op"] } # ethereum alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-rlp.workspace = true alloy-eips.workspace = true +revm-primitives.workspace = true +secp256k1 = { workspace = true, optional = true } # op op-alloy-consensus.workspace = true # codec -bytes.workspace = true +bytes = { workspace = true, optional = true } serde = { workspace = true, optional = true } # misc -derive_more.workspace = true +derive_more = { workspace = true, features = ["deref", "from", "into", "constructor"] } +rand = { workspace = true, optional = true } # test arbitrary = { workspace = true, features = ["derive"], optional = true } +proptest = { workspace = true, optional = true } [dev-dependencies] +proptest-arbitrary-interop.workspace = true reth-codecs = { workspace = true, features = ["test-utils", "op"] } rstest.workspace = true arbitrary.workspace = true +proptest.workspace = true [features] -default = ["std", "reth-codec"] +default = ["std"] std = [ "reth-primitives-traits/std", "reth-primitives/std", - "reth-codecs/std", + "reth-codecs?/std", "alloy-consensus/std", "alloy-eips/std", "alloy-primitives/std", - "serde/std", - "bytes/std", - "derive_more/std" + "serde?/std", + "bytes?/std", + "derive_more/std", + "revm-primitives/std", + "secp256k1?/std", + "alloy-rlp/std", ] reth-codec = [ "dep:reth-codecs", + "std", + "rand", + "dep:proptest", + "dep:arbitrary", "reth-primitives/reth-codec", "reth-primitives-traits/reth-codec", "reth-codecs?/op", - "reth-primitives/reth-codec" + "reth-primitives/reth-codec", + "dep:bytes", ] serde = [ "dep:serde", @@ -66,12 +81,16 @@ serde = [ "alloy-primitives/serde", "alloy-consensus/serde", "alloy-eips/serde", - "bytes/serde", + "bytes?/serde", "reth-codecs?/serde", "op-alloy-consensus/serde", + "rand?/serde", + "revm-primitives/serde", + "secp256k1?/serde", ] arbitrary = [ "dep:arbitrary", + "dep:secp256k1", "reth-primitives-traits/arbitrary", 
"reth-primitives/arbitrary", "reth-codecs?/arbitrary", @@ -79,4 +98,9 @@ arbitrary = [ "alloy-consensus/arbitrary", "alloy-eips/arbitrary", "alloy-primitives/arbitrary", + "revm-primitives/arbitrary", + "rand", +] +op = [ + "revm-primitives/optimism", ] diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 796f5cb0613..df504211021 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -6,17 +6,20 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "op")] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + pub mod bedrock; pub mod transaction; -use reth_primitives::EthPrimitives; -pub use transaction::{tx_type::OpTxType, OpTransaction}; +pub use transaction::{signed::OpTransactionSigned, tx_type::OpTxType, OpTransaction}; /// Optimism primitive types. -pub type OpPrimitives = EthPrimitives; +pub type OpPrimitives = reth_primitives::EthPrimitives; // TODO: once we are ready for separating primitive types, introduce a separate `NodePrimitives` // implementation used exclusively by legacy engine. diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs index 5861a3229fe..86ac822c744 100644 --- a/crates/optimism/primitives/src/transaction/mod.rs +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -1,14 +1,21 @@ //! Wrapper of [`OpTypedTransaction`], that implements reth database encoding [`Compact`]. 
+pub mod signed; pub mod tx_type; use alloy_primitives::{bytes, Bytes, TxKind, Uint, B256}; -use alloy_consensus::{constants::EIP7702_TX_TYPE_ID, TxLegacy}; +#[cfg(any(test, feature = "reth-codec"))] +use alloy_consensus::constants::EIP7702_TX_TYPE_ID; +use alloy_consensus::{SignableTransaction, TxLegacy}; use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; -use derive_more::{Deref, From}; -use op_alloy_consensus::{OpTypedTransaction, DEPOSIT_TX_TYPE_ID}; +use derive_more::{Constructor, Deref, From}; +use op_alloy_consensus::OpTypedTransaction; +#[cfg(any(test, feature = "reth-codec"))] +use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; +#[cfg(any(test, feature = "reth-codec"))] use reth_codecs::Compact; +#[cfg(any(test, feature = "reth-codec"))] use reth_primitives::transaction::{ COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY, @@ -17,16 +24,31 @@ use reth_primitives_traits::InMemorySize; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[derive(Debug, Clone, PartialEq, Eq, Deref, Hash, From)] +#[derive(Debug, Clone, PartialEq, Eq, Deref, Hash, From, Constructor)] /// Optimistic transaction. pub struct OpTransaction(OpTypedTransaction); +impl OpTransaction { + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. 
+ pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + match self.deref() { + OpTypedTransaction::Legacy(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Eip2930(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Eip1559(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Eip7702(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Deposit(_) => {} + } + } +} + impl Default for OpTransaction { fn default() -> Self { Self(OpTypedTransaction::Legacy(TxLegacy::default())) } } +#[cfg(any(test, feature = "reth-codec"))] impl Compact for OpTransaction { fn to_compact(&self, out: &mut B) -> usize where diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs new file mode 100644 index 00000000000..2dc72026e7c --- /dev/null +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -0,0 +1,479 @@ +//! A signed Optimism transaction. + +use alloc::vec::Vec; +use core::{ + hash::{Hash, Hasher}, + mem, +}; +#[cfg(feature = "std")] +use std::sync::OnceLock; + +use alloy_consensus::{ + transaction::RlpEcdsaTx, SignableTransaction, Transaction, TxEip1559, TxEip2930, TxEip7702, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip2930::AccessList, + eip7702::SignedAuthorization, +}; +use alloy_primitives::{ + keccak256, Address, Bytes, PrimitiveSignature as Signature, TxHash, TxKind, Uint, B256, U256, +}; +use alloy_rlp::Header; +use derive_more::{AsRef, Deref}; +#[cfg(not(feature = "std"))] +use once_cell::sync::OnceCell as OnceLock; +use op_alloy_consensus::{OpTypedTransaction, TxDeposit}; +#[cfg(any(test, feature = "reth-codec"))] +use proptest as _; +use reth_primitives::{ + transaction::{recover_signer, recover_signer_unchecked}, + TransactionSigned, +}; +use reth_primitives_traits::{FillTxEnv, InMemorySize, SignedTransaction}; +use revm_primitives::{AuthorizationList, OptimismFields, TxEnv}; + +use crate::{OpTransaction, 
OpTxType}; + +/// Signed transaction. +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, Eq, AsRef, Deref)] +pub struct OpTransactionSigned { + /// Transaction hash + #[serde(skip)] + pub hash: OnceLock, + /// The transaction signature values + pub signature: Signature, + /// Raw transaction info + #[deref] + #[as_ref] + pub transaction: OpTransaction, +} + +impl OpTransactionSigned { + /// Calculates hash of given transaction and signature and returns new instance. + pub fn new(transaction: OpTypedTransaction, signature: Signature) -> Self { + let signed_tx = Self::new_unhashed(transaction, signature); + if !matches!(signed_tx.tx_type(), OpTxType::Deposit) { + signed_tx.hash.get_or_init(|| signed_tx.recalculate_hash()); + } + + signed_tx + } + + /// Creates a new signed transaction from the given transaction and signature without the hash. + /// + /// Note: this only calculates the hash on the first [`TransactionSigned::hash`] call. + pub fn new_unhashed(transaction: OpTypedTransaction, signature: Signature) -> Self { + Self { hash: Default::default(), signature, transaction: OpTransaction::new(transaction) } + } +} + +impl SignedTransaction for OpTransactionSigned { + type Type = OpTxType; + + fn tx_hash(&self) -> &TxHash { + self.hash.get_or_init(|| self.recalculate_hash()) + } + + fn signature(&self) -> &Signature { + &self.signature + } + + fn recover_signer(&self) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = *self.transaction { + return Some(from) + } + + let Self { transaction, signature, .. } = self; + let signature_hash = signature_hash(transaction); + recover_signer(signature, signature_hash) + } + + fn recover_signer_unchecked(&self) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = *self.transaction { + return Some(from) + } + + let Self { transaction, signature, .. } = self; + let signature_hash = signature_hash(transaction); + recover_signer_unchecked(signature, signature_hash) + } + + fn recalculate_hash(&self) -> B256 { + keccak256(self.encoded_2718()) + } + + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = *self.transaction { + return Some(from) + } + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); + recover_signer_unchecked(&self.signature, signature_hash) + } +} + +impl FillTxEnv for OpTransactionSigned { + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { + let envelope = self.encoded_2718(); + + tx_env.caller = sender; + match self.transaction.deref() { + OpTypedTransaction::Legacy(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = tx.chain_id; + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clear(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + OpTypedTransaction::Eip2930(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + OpTypedTransaction::Eip1559(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list 
= None; + } + OpTypedTransaction::Eip7702(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to.into(); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = + Some(AuthorizationList::Signed(tx.authorization_list.clone())); + } + OpTypedTransaction::Deposit(tx) => { + tx_env.access_list.clear(); + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::ZERO; + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = None; + tx_env.nonce = None; + tx_env.authorization_list = None; + + tx_env.optimism = OptimismFields { + source_hash: Some(tx.source_hash), + mint: tx.mint, + is_system_transaction: Some(tx.is_system_transaction), + enveloped_tx: Some(envelope.into()), + }; + return + } + } + + tx_env.optimism = OptimismFields { + source_hash: None, + mint: None, + is_system_transaction: Some(false), + enveloped_tx: Some(envelope.into()), + } + } +} + +impl InMemorySize for OpTransactionSigned { + #[inline] + fn size(&self) -> usize { + mem::size_of::() + self.transaction.size() + mem::size_of::() + } +} + +impl alloy_rlp::Encodable for OpTransactionSigned { + /// See [`alloy_rlp::Encodable`] impl for [`TransactionSigned`]. 
+ fn encode(&self, out: &mut dyn alloy_rlp::bytes::BufMut) { + self.network_encode(out); + } + + fn length(&self) -> usize { + let mut payload_length = self.encode_2718_len(); + if !self.is_legacy() { + payload_length += Header { list: false, payload_length }.length(); + } + + payload_length + } +} + +impl alloy_rlp::Decodable for OpTransactionSigned { + /// See [`alloy_rlp::Decodable`] impl for [`TransactionSigned`]. + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + Self::network_decode(buf).map_err(Into::into) + } +} + +impl Encodable2718 for OpTransactionSigned { + fn type_flag(&self) -> Option { + match self.tx_type() { + op_alloy_consensus::OpTxType::Legacy => None, + tx_type => Some(tx_type as u8), + } + } + + fn encode_2718_len(&self) -> usize { + match self.transaction.deref() { + OpTypedTransaction::Legacy(legacy_tx) => { + legacy_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip2930(access_list_tx) => { + access_list_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip7702(set_code_tx) => { + set_code_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), + } + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + let Self { transaction, signature, .. 
} = self; + + match transaction.deref() { + OpTypedTransaction::Legacy(legacy_tx) => { + // do nothing w/ with_header + legacy_tx.eip2718_encode(signature, out) + } + OpTypedTransaction::Eip2930(access_list_tx) => { + access_list_tx.eip2718_encode(signature, out) + } + OpTypedTransaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.eip2718_encode(signature, out) + } + OpTypedTransaction::Eip7702(set_code_tx) => set_code_tx.eip2718_encode(signature, out), + OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.encode_2718(out), + } + } +} + +impl Decodable2718 for OpTransactionSigned { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { + match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? { + op_alloy_consensus::OpTxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), + op_alloy_consensus::OpTxType::Eip2930 => { + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); + let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip2930(tx), signature); + signed_tx.hash.get_or_init(|| hash); + Ok(signed_tx) + } + op_alloy_consensus::OpTxType::Eip1559 => { + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); + let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip1559(tx), signature); + signed_tx.hash.get_or_init(|| hash); + Ok(signed_tx) + } + op_alloy_consensus::OpTxType::Eip7702 => { + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); + let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + signed_tx.hash.get_or_init(|| hash); + Ok(signed_tx) + } + op_alloy_consensus::OpTxType::Deposit => Ok(Self::new_unhashed( + OpTypedTransaction::Deposit(TxDeposit::rlp_decode(buf)?), + TxDeposit::signature(), + )), + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { + let (transaction, hash, signature) = + TransactionSigned::decode_rlp_legacy_transaction_tuple(buf)?; + let signed_tx = 
Self::new_unhashed(OpTypedTransaction::Legacy(transaction), signature); + signed_tx.hash.get_or_init(|| hash); + + Ok(signed_tx) + } +} + +impl Transaction for OpTransactionSigned { + fn chain_id(&self) -> Option { + self.deref().chain_id() + } + + fn nonce(&self) -> u64 { + self.deref().nonce() + } + + fn gas_limit(&self) -> u64 { + self.deref().gas_limit() + } + + fn gas_price(&self) -> Option { + self.deref().gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.deref().max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.deref().max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.deref().max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.deref().priority_fee_or_price() + } + + fn kind(&self) -> TxKind { + self.deref().kind() + } + + fn is_create(&self) -> bool { + self.deref().is_create() + } + + fn value(&self) -> Uint<256, 4> { + self.deref().value() + } + + fn input(&self) -> &Bytes { + self.deref().input() + } + + fn ty(&self) -> u8 { + self.deref().ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.deref().access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.deref().blob_versioned_hashes() + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.deref().authorization_list() + } + + fn is_dynamic_fee(&self) -> bool { + self.deref().is_dynamic_fee() + } + + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.deref().effective_gas_price(base_fee) + } + + fn effective_tip_per_gas(&self, base_fee: u64) -> Option { + self.deref().effective_tip_per_gas(base_fee) + } +} + +impl Default for OpTransactionSigned { + fn default() -> Self { + Self { + hash: Default::default(), + signature: Signature::test_signature(), + transaction: OpTransaction::new(OpTypedTransaction::Legacy(Default::default())), + } + } +} + +impl PartialEq for OpTransactionSigned { + fn eq(&self, other: &Self) -> bool { + 
self.signature == other.signature && + self.transaction == other.transaction && + self.tx_hash() == other.tx_hash() + } +} + +impl Hash for OpTransactionSigned { + fn hash(&self, state: &mut H) { + self.signature.hash(state); + self.transaction.hash(state); + } +} + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> arbitrary::Arbitrary<'a> for OpTransactionSigned { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + #[allow(unused_mut)] + let mut transaction = OpTypedTransaction::arbitrary(u)?; + + let secp = secp256k1::Secp256k1::new(); + let key_pair = secp256k1::Keypair::new(&secp, &mut rand::thread_rng()); + let signature = reth_primitives::transaction::util::secp256k1::sign_message( + B256::from_slice(&key_pair.secret_bytes()[..]), + signature_hash(&transaction), + ) + .unwrap(); + + // Both `Some(0)` and `None` values are encoded as empty string byte. This introduces + // ambiguity in roundtrip tests. Patch the mint value of deposit transaction here, so that + // it's `None` if zero. + if let OpTypedTransaction::Deposit(ref mut tx_deposit) = transaction { + if tx_deposit.mint == Some(0) { + tx_deposit.mint = None; + } + } + + let signature = if is_deposit(&transaction) { TxDeposit::signature() } else { signature }; + + Ok(Self::new(transaction, signature)) + } +} + +/// Calculates the signing hash for the transaction. +pub fn signature_hash(tx: &OpTypedTransaction) -> B256 { + match tx { + OpTypedTransaction::Legacy(tx) => tx.signature_hash(), + OpTypedTransaction::Eip2930(tx) => tx.signature_hash(), + OpTypedTransaction::Eip1559(tx) => tx.signature_hash(), + OpTypedTransaction::Eip7702(tx) => tx.signature_hash(), + OpTypedTransaction::Deposit(_) => B256::ZERO, + } +} + +/// Returns `true` if transaction is deposit transaction. 
+pub const fn is_deposit(tx: &OpTypedTransaction) -> bool { + matches!(tx, OpTypedTransaction::Deposit(_)) +} diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 4b25066d675..9894dd8a3db 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -72,5 +72,6 @@ optimism = [ "reth-provider/optimism", "revm/optimism", "reth-optimism-consensus/optimism", - "reth-optimism-payload-builder/optimism" + "reth-optimism-payload-builder/optimism", + "reth-optimism-primitives/op", ] diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 0eeaf310853..bae221531ca 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -42,6 +42,11 @@ pub use sidecar::BlobTransaction; pub use signature::{recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; +/// Handling transaction signature operations, including signature recovery, +/// applying chain IDs, and EIP-2 validation. +pub mod signature; +pub mod util; + pub(crate) mod access_list; mod compat; mod error; @@ -50,12 +55,6 @@ mod pooled; mod sidecar; mod tx_type; -/// Handling transaction signature operations, including signature recovery, -/// applying chain IDs, and EIP-2 validation. -pub mod signature; - -pub(crate) mod util; - #[cfg(any(test, feature = "reth-codec"))] pub use tx_type::{ COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, @@ -1178,7 +1177,7 @@ impl TransactionSigned { /// /// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact /// format expected. 
- pub(crate) fn decode_rlp_legacy_transaction_tuple( + pub fn decode_rlp_legacy_transaction_tuple( data: &mut &[u8], ) -> alloy_rlp::Result<(TxLegacy, TxHash, Signature)> { // keep this around, so we can use it to calculate the hash diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index 7964cc1c5f0..8eb1a639d96 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -1,7 +1,10 @@ +//! Utility functions for signature. + use alloy_primitives::{Address, PrimitiveSignature as Signature}; +/// Secp256k1 utility functions. #[cfg(feature = "secp256k1")] -pub(crate) mod secp256k1 { +pub mod secp256k1 { pub use super::impl_secp256k1::*; } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 5a9595794d9..f6d577aadbe 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -96,6 +96,7 @@ optimism = [ "reth-db/optimism", "reth-db-api/optimism", "revm/optimism", + "reth-optimism-primitives/op", ] serde = [ "dashmap/serde", From ae8912fa739e50fc4def85241690ded929c9b5b5 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 3 Dec 2024 09:38:17 +0100 Subject: [PATCH 837/970] feat(trie): proof blinded providers (#13085) --- Cargo.lock | 1 + crates/trie/sparse/src/blinded.rs | 8 ++ crates/trie/sparse/src/errors.rs | 3 + crates/trie/trie/Cargo.toml | 1 + crates/trie/trie/src/proof/blinded.rs | 116 ++++++++++++++++++ .../trie/trie/src/{proof.rs => proof/mod.rs} | 3 + 6 files changed, 132 insertions(+) create mode 100644 crates/trie/trie/src/proof/blinded.rs rename crates/trie/trie/src/{proof.rs => proof/mod.rs} (99%) diff --git a/Cargo.lock b/Cargo.lock index b799d0e0811..c72a1c8ddcb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9430,6 +9430,7 @@ dependencies = [ "reth-stages-types", "reth-storage-errors", "reth-trie-common", + "reth-trie-sparse", "revm", "serde_json", "tracing", diff --git 
a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/blinded.rs index f82bdcd95bb..4cd88bd92a2 100644 --- a/crates/trie/sparse/src/blinded.rs +++ b/crates/trie/sparse/src/blinded.rs @@ -55,3 +55,11 @@ impl BlindedProvider for DefaultBlindedProvider { Ok(None) } } + +/// Right pad the path with 0s and return as [`B256`]. +#[inline] +pub fn pad_path_to_key(path: &Nibbles) -> B256 { + let mut padded = path.pack(); + padded.resize(32, 0); + B256::from_slice(&padded) +} diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs index 20545957e1c..46102d2d468 100644 --- a/crates/trie/sparse/src/errors.rs +++ b/crates/trie/sparse/src/errors.rs @@ -55,4 +55,7 @@ pub enum SparseTrieError { /// RLP error. #[error(transparent)] Rlp(#[from] alloy_rlp::Error), + /// Other. + #[error(transparent)] + Other(#[from] Box), } diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index c1c3ae4dd87..011c95e6a92 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -17,6 +17,7 @@ reth-execution-errors.workspace = true reth-primitives.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true +reth-trie-sparse.workspace = true reth-trie-common.workspace = true revm.workspace = true diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/blinded.rs new file mode 100644 index 00000000000..adcaef46b08 --- /dev/null +++ b/crates/trie/trie/src/proof/blinded.rs @@ -0,0 +1,116 @@ +use super::{Proof, StorageProof}; +use crate::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Bytes, B256, +}; +use reth_trie_common::{prefix_set::TriePrefixSetsMut, Nibbles}; +use reth_trie_sparse::{ + blinded::{pad_path_to_key, BlindedProvider, BlindedProviderFactory}, + SparseTrieError, +}; +use std::sync::Arc; + +/// Factory for instantiating providers capable of retrieving blinded trie nodes via proofs. 
+#[derive(Debug)] +pub struct ProofBlindedProviderFactory { + /// The cursor factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors. + hashed_cursor_factory: H, + /// A set of prefix sets that have changes. + prefix_sets: Arc, +} + +impl BlindedProviderFactory for ProofBlindedProviderFactory +where + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, +{ + type AccountNodeProvider = ProofBlindedAccountProvider; + type StorageNodeProvider = ProofBlindedStorageProvider; + + fn account_node_provider(&self) -> Self::AccountNodeProvider { + ProofBlindedAccountProvider { + trie_cursor_factory: self.trie_cursor_factory.clone(), + hashed_cursor_factory: self.hashed_cursor_factory.clone(), + prefix_sets: self.prefix_sets.clone(), + } + } + + fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { + ProofBlindedStorageProvider { + trie_cursor_factory: self.trie_cursor_factory.clone(), + hashed_cursor_factory: self.hashed_cursor_factory.clone(), + prefix_sets: self.prefix_sets.clone(), + account, + } + } +} + +/// Blinded provider for retrieving account trie nodes by path. +#[derive(Debug)] +pub struct ProofBlindedAccountProvider { + /// The cursor factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors. + hashed_cursor_factory: H, + /// A set of prefix sets that have changes. 
+ prefix_sets: Arc, +} + +impl BlindedProvider for ProofBlindedAccountProvider +where + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, +{ + type Error = SparseTrieError; + + fn blinded_node(&mut self, path: Nibbles) -> Result, Self::Error> { + let targets = HashMap::from_iter([(pad_path_to_key(&path), HashSet::default())]); + let proof = + Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) + .with_prefix_sets_mut(self.prefix_sets.as_ref().clone()) + .multiproof(targets) + .map_err(|error| SparseTrieError::Other(Box::new(error)))?; + + Ok(proof.account_subtree.into_inner().remove(&path)) + } +} + +/// Blinded provider for retrieving storage trie nodes by path. +#[derive(Debug)] +pub struct ProofBlindedStorageProvider { + /// The cursor factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors. + hashed_cursor_factory: H, + /// A set of prefix sets that have changes. + prefix_sets: Arc, + /// Target account. 
+ account: B256, +} + +impl BlindedProvider for ProofBlindedStorageProvider +where + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, +{ + type Error = SparseTrieError; + + fn blinded_node(&mut self, path: Nibbles) -> Result, Self::Error> { + let targets = HashSet::from_iter([pad_path_to_key(&path)]); + let storage_prefix_set = + self.prefix_sets.storage_prefix_sets.get(&self.account).cloned().unwrap_or_default(); + let proof = StorageProof::new_hashed( + self.trie_cursor_factory.clone(), + self.hashed_cursor_factory.clone(), + self.account, + ) + .with_prefix_set_mut(storage_prefix_set) + .storage_multiproof(targets) + .map_err(|error| SparseTrieError::Other(Box::new(error)))?; + + Ok(proof.subtree.into_inner().remove(&path)) + } +} diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof/mod.rs similarity index 99% rename from crates/trie/trie/src/proof.rs rename to crates/trie/trie/src/proof/mod.rs index 3cb0ff6f2f7..c344ec76239 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -17,6 +17,9 @@ use reth_trie_common::{ proof::ProofRetainer, AccountProof, MultiProof, StorageMultiProof, TrieAccount, }; +mod blinded; +pub use blinded::*; + /// A struct for generating merkle proofs. 
/// /// Proof generator adds the target address and slots to the prefix set, enables the proof retainer From 5724114947e74d4b8c5977c1d3e6828efb408fc6 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 3 Dec 2024 12:46:37 +0400 Subject: [PATCH 838/970] feat: use primitive transaction as `PoolTransaction::Consensus` (#13086) --- crates/engine/util/src/reorg.rs | 5 ++- crates/ethereum/node/src/node.rs | 16 +++++--- crates/ethereum/payload/src/lib.rs | 9 +++-- crates/exex/test-utils/src/lib.rs | 7 ++-- crates/net/network/src/test_utils/testnet.rs | 15 ++++++-- crates/net/network/src/transactions/mod.rs | 21 +++++------ crates/node/api/src/node.rs | 6 +-- crates/node/builder/src/builder/mod.rs | 12 ++++-- crates/node/builder/src/components/builder.rs | 8 ++-- crates/node/builder/src/components/mod.rs | 10 +++-- crates/node/builder/src/components/pool.rs | 11 ++++-- crates/node/builder/src/rpc.rs | 3 ++ crates/optimism/node/src/node.rs | 16 +++++--- crates/optimism/node/src/txpool.rs | 6 +-- crates/optimism/node/tests/it/priority.rs | 8 ++-- crates/optimism/payload/src/builder.rs | 26 +++++++------ crates/optimism/rpc/src/eth/pending_block.rs | 7 ++-- crates/payload/util/src/traits.rs | 5 ++- crates/payload/util/src/transaction.rs | 30 ++++++++------- crates/primitives/src/transaction/mod.rs | 34 ++++++++--------- crates/rpc/rpc-builder/src/lib.rs | 18 ++++++--- .../rpc-eth-api/src/helpers/pending_block.rs | 10 +++-- .../rpc-eth-api/src/helpers/transaction.rs | 4 +- crates/rpc/rpc-eth-api/src/types.rs | 9 ++++- crates/rpc/rpc/src/eth/filter.rs | 19 ++++------ .../rpc/rpc/src/eth/helpers/pending_block.rs | 11 ++++-- crates/rpc/rpc/src/eth/pubsub.rs | 8 ++-- crates/rpc/rpc/src/txpool.rs | 25 +++++++------ crates/transaction-pool/src/maintain.rs | 26 ++++++------- crates/transaction-pool/src/pool/best.rs | 12 +++--- .../transaction-pool/src/test_utils/mock.rs | 10 +++-- crates/transaction-pool/src/traits.rs | 37 ++++++++++++------- 
crates/transaction-pool/src/validate/mod.rs | 11 +----- examples/custom-engine-types/src/main.rs | 10 +++-- examples/custom-evm/src/main.rs | 6 ++- examples/custom-inspector/src/main.rs | 3 +- examples/custom-payload-builder/src/main.rs | 8 ++-- examples/txpool-tracing/src/main.rs | 3 +- 38 files changed, 283 insertions(+), 202 deletions(-) diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 8e9a195a181..18a8c4737b5 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -18,7 +18,10 @@ use reth_evm::{ ConfigureEvm, }; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{proofs, Block, BlockBody, BlockExt, Receipt, Receipts}; +use reth_primitives::{ + proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, BlockExt, Receipt, + Receipts, +}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index ad673588bf9..58b6aeaf644 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -13,7 +13,7 @@ use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_network::{NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodeTypesWithDB, + AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodeTypesWithDB, TxTy, }; use reth_node_builder::{ components::{ @@ -30,7 +30,7 @@ use reth_provider::{CanonStateSubscriptions, EthStorage}; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, EthTransactionPool, TransactionPool, + blobstore::DiskFileBlobStore, EthTransactionPool, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; use 
reth_trie_db::MerklePatriciaTrie; @@ -243,7 +243,9 @@ impl EthereumPayloadBuilder { Types: NodeTypesWithEngine, Node: FullNodeTypes, Evm: ConfigureEvm
, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, Types::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -280,7 +282,9 @@ impl PayloadServiceBuilder for EthereumPayloadBui where Types: NodeTypesWithEngine, Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, Types::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -305,7 +309,9 @@ pub struct EthereumNetworkBuilder { impl NetworkBuilder for EthereumNetworkBuilder where Node: FullNodeTypes>, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, { async fn build_network( self, diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index a5c6434310e..df900f1f36b 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -31,12 +31,13 @@ use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ proofs::{self}, Block, BlockBody, BlockExt, EthereumHardforks, InvalidTransactionError, Receipt, + TransactionSigned, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ error::InvalidPoolTransactionError, noop::NoopTransactionPool, BestTransactions, - BestTransactionsAttributes, TransactionPool, ValidPoolTransaction, + BestTransactionsAttributes, PoolTransaction, TransactionPool, ValidPoolTransaction, }; use reth_trie::HashedPostState; use revm::{ @@ -93,7 +94,7 @@ impl PayloadBuilder for EthereumPayloadBu where EvmConfig: ConfigureEvm
, Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, { type Attributes = EthPayloadBuilderAttributes; type BuiltPayload = EthBuiltPayload; @@ -157,7 +158,7 @@ pub fn default_ethereum_payload( where EvmConfig: ConfigureEvm
, Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, F: FnOnce(BestTransactionsAttributes) -> BestTransactionsIter, { let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; @@ -244,7 +245,7 @@ where } // convert tx to a signed transaction - let tx = pool_tx.to_recovered_transaction(); + let tx = pool_tx.to_consensus(); // There's only limited amount of blob space available per block, so we need to check if // the EIP-4844 can still fit in the block diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 9acad4d4b65..939bf21c022 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -30,7 +30,8 @@ use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal}; use reth_network::{config::SecretKey, NetworkConfigBuilder, NetworkManager}; use reth_node_api::{ - FullNodeTypes, FullNodeTypesAdapter, NodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine, + FullNodeTypes, FullNodeTypesAdapter, NodePrimitives, NodeTypes, NodeTypesWithDBAdapter, + NodeTypesWithEngine, }; use reth_node_builder::{ components::{ @@ -45,7 +46,7 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{BlockExt, EthPrimitives, Head, SealedBlockWithSenders}; +use reth_primitives::{BlockExt, EthPrimitives, Head, SealedBlockWithSenders, TransactionSigned}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, BlockReader, EthStorage, ProviderFactory, @@ -64,7 +65,7 @@ pub struct TestPoolBuilder; impl PoolBuilder for TestPoolBuilder where - Node: FullNodeTypes, + Node: FullNodeTypes>>, { type Pool = TestPool; diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index bdd02118352..08bf24b8853 100644 --- 
a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -17,6 +17,7 @@ use reth_network_api::{ NetworkEvent, NetworkEventListenerProvider, NetworkInfo, Peers, }; use reth_network_peers::PeerId; +use reth_primitives::TransactionSigned; use reth_provider::{test_utils::NoopProvider, ChainSpecProvider}; use reth_storage_api::{BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory}; use reth_tasks::TokioTaskExecutor; @@ -24,7 +25,7 @@ use reth_tokio_util::EventStream; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, test_utils::{TestPool, TestPoolBuilder}, - EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, + EthTransactionPool, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; use secp256k1::SecretKey; use std::{ @@ -202,7 +203,9 @@ where + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, { /// Spawns the testnet to a separate task pub fn spawn(self) -> TestnetHandle { @@ -267,7 +270,9 @@ where > + HeaderProvider + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, { type Output = (); @@ -468,7 +473,9 @@ where > + HeaderProvider + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, { type Output = (); diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index e17069b67c5..5463b20f7f3 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -702,9 +702,8 @@ where BroadcastedTransaction: SignedTransaction, PooledTransaction: SignedTransaction, >, - <::Transaction as PoolTransaction>::Consensus: - Into, - <::Transaction as PoolTransaction>::Pooled: Into, + Pool::Transaction: + PoolTransaction>, { /// Invoked when transactions in the local mempool are considered __pending__. 
/// @@ -1011,9 +1010,8 @@ where impl TransactionsManager where Pool: TransactionPool + 'static, - <::Transaction as PoolTransaction>::Consensus: Into, - <::Transaction as PoolTransaction>::Pooled: - Into, + Pool::Transaction: + PoolTransaction>, { /// Handles dedicated transaction events related to the `eth` protocol. fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { @@ -1313,9 +1311,8 @@ where impl Future for TransactionsManager where Pool: TransactionPool + Unpin + 'static, - <::Transaction as PoolTransaction>::Consensus: Into, - <::Transaction as PoolTransaction>::Pooled: - Into, + Pool::Transaction: + PoolTransaction>, { type Output = (); @@ -1503,11 +1500,11 @@ impl PropagateTransaction { /// Create a new instance from a pooled transaction fn new

(tx: Arc>) -> Self where - P: PoolTransaction>, + P: PoolTransaction, { let size = tx.encoded_length(); - let transaction = tx.transaction.clone_into_consensus().into(); - let transaction = Arc::new(transaction); + let transaction = tx.transaction.clone_into_consensus(); + let transaction = Arc::new(transaction.into_signed()); Self { size, transaction } } diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 7778fea7b5e..83947208ca8 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -8,11 +8,11 @@ use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; -use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, TxTy}; use reth_payload_builder_primitives::PayloadBuilder; use reth_provider::FullProvider; use reth_tasks::TaskExecutor; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{future::Future, marker::PhantomData}; /// A helper trait that is downstream of the [`NodeTypesWithEngine`] trait and adds stateful @@ -47,7 +47,7 @@ where /// Encapsulates all types and components of the node. pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// The transaction pool of the node. - type Pool: TransactionPool + Unpin; + type Pool: TransactionPool>> + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. type Evm: ConfigureEvm

; diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index b311cc4e2a0..3cab01aa71b 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -37,7 +37,7 @@ use reth_provider::{ BlockReader, ChainSpecProvider, FullProvider, }; use reth_tasks::TaskExecutor; -use reth_transaction_pool::{PoolConfig, TransactionPool}; +use reth_transaction_pool::{PoolConfig, PoolTransaction, TransactionPool}; use revm_primitives::EnvKzgSettings; use secp256k1::SecretKey; use std::sync::Arc; @@ -650,7 +650,10 @@ impl BuilderContext { /// connected to that network. pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle where - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool< + Transaction: PoolTransaction, + > + Unpin + + 'static, Node::Provider: BlockReader< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, @@ -673,7 +676,10 @@ impl BuilderContext { tx_config: TransactionsManagerConfig, ) -> NetworkHandle where - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool< + Transaction: PoolTransaction, + > + Unpin + + 'static, Node::Provider: BlockReader< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index b265dc927e7..4c04c9200d2 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -10,9 +10,9 @@ use crate::{ use alloy_consensus::Header; use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::{NodeTypes, NodeTypesWithEngine}; +use reth_node_api::{NodeTypes, NodeTypesWithEngine, TxTy}; use reth_payload_builder::PayloadBuilderHandle; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{future::Future, marker::PhantomData}; /// A 
generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. @@ -375,7 +375,9 @@ where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future>> + Send, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, EVM: ConfigureEvm
, Executor: BlockExecutorProvider::Primitives>, Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index a7d15dd29df..22a47e3daa4 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -27,9 +27,9 @@ use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; -use reth_node_api::{NodeTypes, NodeTypesWithEngine}; +use reth_node_api::{NodeTypes, NodeTypesWithEngine, TxTy}; use reth_payload_builder::PayloadBuilderHandle; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; /// An abstraction over the components of a node, consisting of: /// - evm and executor @@ -38,7 +38,7 @@ use reth_transaction_pool::TransactionPool; /// - payload builder. pub trait NodeComponents: Clone + Unpin + Send + Sync + 'static { /// The transaction pool of the node. - type Pool: TransactionPool + Unpin; + type Pool: TransactionPool>> + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. type Evm: ConfigureEvm
; @@ -97,7 +97,9 @@ impl NodeComponents for Components where Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, EVM: ConfigureEvm
, Executor: BlockExecutorProvider::Primitives>, Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index 436a80c52e0..5b08e0a7739 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -1,7 +1,8 @@ //! Pool component for the node builder. use alloy_primitives::Address; -use reth_transaction_pool::{PoolConfig, SubPoolLimit, TransactionPool}; +use reth_node_api::TxTy; +use reth_transaction_pool::{PoolConfig, PoolTransaction, SubPoolLimit, TransactionPool}; use std::{collections::HashSet, future::Future}; use crate::{BuilderContext, FullNodeTypes}; @@ -9,7 +10,9 @@ use crate::{BuilderContext, FullNodeTypes}; /// A type that knows how to build the transaction pool. pub trait PoolBuilder: Send { /// The transaction pool to build. - type Pool: TransactionPool + Unpin + 'static; + type Pool: TransactionPool>> + + Unpin + + 'static; /// Creates the transaction pool. 
fn build_pool( @@ -21,7 +24,9 @@ pub trait PoolBuilder: Send { impl PoolBuilder for F where Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future> + Send, { diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 55313f3e989..24b7db77d88 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -33,6 +33,7 @@ use reth_rpc_builder::{ use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; +use reth_transaction_pool::TransactionPool; use crate::EthApiBuilderCtx; @@ -405,6 +406,7 @@ where N: FullNodeComponents< Types: ProviderNodeTypes, PayloadBuilder: PayloadBuilder::Engine>, + Pool: TransactionPool::Transaction>, >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, @@ -527,6 +529,7 @@ where N: FullNodeComponents< Types: ProviderNodeTypes, PayloadBuilder: PayloadBuilder::Engine>, + Pool: TransactionPool::Transaction>, >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 73adcb43f18..a13ab9dcec1 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -13,7 +13,7 @@ use reth_db::transaction::{DbTx, DbTxMut}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, PayloadBuilder, + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, PayloadBuilder, TxTy, }; use reth_node_builder::{ components::{ @@ -42,7 +42,7 @@ use reth_provider::{ use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; 
use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, CoinbaseTipOrdering, TransactionPool, + blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; @@ -465,7 +465,9 @@ where Primitives = OpPrimitives, >, >, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, Evm: ConfigureEvm
, { let payload_builder = reth_optimism_payload_builder::OpPayloadBuilder::new(evm_config) @@ -505,7 +507,9 @@ where Primitives = OpPrimitives, >, >, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, Txs: OpPayloadTransactions, { async fn spawn_payload_service( @@ -577,7 +581,9 @@ impl OpNetworkBuilder { impl NetworkBuilder for OpNetworkBuilder where Node: FullNodeTypes>, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, { async fn build_network( self, diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index d8246aeb7db..a3e474a6076 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -70,7 +70,7 @@ impl OpTransactionValidator { impl OpTransactionValidator where Client: StateProviderFactory + BlockReaderIdExt, - Tx: EthPoolTransaction, + Tx: EthPoolTransaction, { /// Create a new [`OpTransactionValidator`]. pub fn new(inner: EthTransactionValidator) -> Self { @@ -142,7 +142,7 @@ where let l1_block_info = self.block_info.l1_block_info.read().clone(); let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); - let tx: TransactionSigned = valid_tx.transaction().clone_into_consensus().into(); + let tx = valid_tx.transaction().clone_into_consensus(); tx.encode_2718(&mut encoded); let cost_addition = match l1_block_info.l1_tx_data_fee( @@ -196,7 +196,7 @@ where impl TransactionValidator for OpTransactionValidator where Client: StateProviderFactory + BlockReaderIdExt, - Tx: EthPoolTransaction, + Tx: EthPoolTransaction, { type Transaction = Tx; diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index b5487987f6a..1b49ed684bf 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -30,7 +30,7 @@ use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTr use 
reth_primitives::{RecoveredTx, SealedBlock, Transaction, TransactionSigned}; use reth_provider::providers::BlockchainProvider2; use reth_tasks::TaskManager; -use reth_transaction_pool::pool::BestPayloadTransactions; +use reth_transaction_pool::{pool::BestPayloadTransactions, PoolTransaction}; use std::sync::Arc; use tokio::sync::Mutex; @@ -44,9 +44,11 @@ impl OpPayloadTransactions for CustomTxPriority { &self, pool: Pool, attr: reth_transaction_pool::BestTransactionsAttributes, - ) -> impl PayloadTransactions + ) -> impl PayloadTransactions where - Pool: reth_transaction_pool::TransactionPool, + Pool: reth_transaction_pool::TransactionPool< + Transaction: PoolTransaction, + >, { // Block composition: // 1. Best transactions from the pool (up to 250k gas) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 6ae52188d18..d385ca79546 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -19,12 +19,13 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_payload_util::PayloadTransactions; use reth_primitives::{ - proofs, Block, BlockBody, BlockExt, Receipt, SealedHeader, TransactionSigned, TxType, + proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, BlockExt, Receipt, + SealedHeader, TransactionSigned, TxType, }; use reth_provider::{ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, + noop::NoopTransactionPool, BestTransactionsAttributes, PoolTransaction, TransactionPool, }; use reth_trie::HashedPostState; use revm::{ @@ -112,7 +113,7 @@ where ) -> Result, PayloadBuilderError> where Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, { let (initialized_cfg, 
initialized_block_env) = self .cfg_and_block_env(&args.config.attributes, &args.config.parent_header) @@ -213,7 +214,7 @@ where impl PayloadBuilder for OpPayloadBuilder where Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, EvmConfig: ConfigureEvm
, Txs: OpPayloadTransactions, { @@ -281,7 +282,7 @@ pub struct OpBuilder { impl OpBuilder where - Pool: TransactionPool, + Pool: TransactionPool>, Txs: OpPayloadTransactions, { /// Executes the payload and returns the outcome. @@ -479,19 +480,23 @@ where pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { /// Returns an iterator that yields the transaction in the order they should get included in the /// new payload. - fn best_transactions( + fn best_transactions< + Pool: TransactionPool>, + >( &self, pool: Pool, attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions; + ) -> impl PayloadTransactions; } impl OpPayloadTransactions for () { - fn best_transactions( + fn best_transactions< + Pool: TransactionPool>, + >( &self, pool: Pool, attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions { + ) -> impl PayloadTransactions { BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)) } } @@ -830,11 +835,10 @@ where &self, info: &mut ExecutionInfo, db: &mut State, - mut best_txs: impl PayloadTransactions, + mut best_txs: impl PayloadTransactions, ) -> Result, PayloadBuilderError> where DB: Database, - Pool: TransactionPool, { let block_gas_limit = self.block_gas_limit(); let base_fee = self.base_fee(); diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index fec610bb1e9..9a8d169e527 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -9,7 +9,7 @@ use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; use reth_primitives::{Receipt, SealedBlockWithSenders}; use reth_provider::{ - BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, ProviderTx, ReceiptProvider, StateProviderFactory, }; use reth_rpc_eth_api::{ @@ -17,7 +17,7 @@ use 
reth_rpc_eth_api::{ FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use revm::primitives::BlockEnv; impl LoadPendingBlock for OpEthApi @@ -25,13 +25,14 @@ where Self: SpawnBlocking, N: RpcNodeCore< Provider: BlockReaderIdExt< + Transaction = reth_primitives::TransactionSigned, Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, Header = reth_primitives::Header, > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, - Pool: TransactionPool, + Pool: TransactionPool>>, Evm: ConfigureEvm
, >, { diff --git a/crates/payload/util/src/traits.rs b/crates/payload/util/src/traits.rs index 5c1eb38bea3..e9bb7e03704 100644 --- a/crates/payload/util/src/traits.rs +++ b/crates/payload/util/src/traits.rs @@ -7,12 +7,15 @@ use reth_primitives::RecoveredTx; /// Can include transactions from the pool and other sources (alternative pools, /// sequencer-originated transactions, etc.). pub trait PayloadTransactions { + /// The transaction type this iterator yields. + type Transaction; + /// Returns the next transaction to include in the block. fn next( &mut self, // In the future, `ctx` can include access to state for block building purposes. ctx: (), - ) -> Option; + ) -> Option>; /// Exclude descendants of the transaction with given sender and nonce from the iterator, /// because this transaction won't be included in the block. diff --git a/crates/payload/util/src/transaction.rs b/crates/payload/util/src/transaction.rs index ebd3b079626..71387946aef 100644 --- a/crates/payload/util/src/transaction.rs +++ b/crates/payload/util/src/transaction.rs @@ -26,8 +26,10 @@ impl PayloadTransactionsFixed { } } -impl PayloadTransactions for PayloadTransactionsFixed { - fn next(&mut self, _ctx: ()) -> Option { +impl PayloadTransactions for PayloadTransactionsFixed> { + type Transaction = T; + + fn next(&mut self, _ctx: ()) -> Option> { (self.index < self.transactions.len()).then(|| { let tx = self.transactions[self.index].clone(); self.index += 1; @@ -87,20 +89,22 @@ impl PayloadTransactionsChain PayloadTransactions for PayloadTransactionsChain +impl PayloadTransactions for PayloadTransactionsChain where - B: PayloadTransactions, - A: PayloadTransactions, + A: PayloadTransactions, + B: PayloadTransactions, { - fn next(&mut self, ctx: ()) -> Option { + type Transaction = A::Transaction; + + fn next(&mut self, ctx: ()) -> Option> { while let Some(tx) = self.before.next(ctx) { if let Some(before_max_gas) = self.before_max_gas { - if self.before_gas + tx.transaction.gas_limit() <= 
before_max_gas { - self.before_gas += tx.transaction.gas_limit(); + if self.before_gas + tx.as_signed().gas_limit() <= before_max_gas { + self.before_gas += tx.as_signed().gas_limit(); return Some(tx); } - self.before.mark_invalid(tx.signer(), tx.transaction.nonce()); - self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + self.before.mark_invalid(tx.signer(), tx.as_signed().nonce()); + self.after.mark_invalid(tx.signer(), tx.as_signed().nonce()); } else { return Some(tx); } @@ -108,11 +112,11 @@ where while let Some(tx) = self.after.next(ctx) { if let Some(after_max_gas) = self.after_max_gas { - if self.after_gas + tx.transaction.gas_limit() <= after_max_gas { - self.after_gas += tx.transaction.gas_limit(); + if self.after_gas + tx.as_signed().gas_limit() <= after_max_gas { + self.after_gas += tx.as_signed().gas_limit(); return Some(tx); } - self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + self.after.mark_invalid(tx.signer(), tx.as_signed().nonce()); } else { return Some(tx); } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index bae221531ca..6fd8fb55a6d 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1130,23 +1130,6 @@ impl TransactionSigned { Some(RecoveredTx { signed_transaction: self, signer }) } - /// Tries to recover signer and return [`RecoveredTx`] by cloning the type. - pub fn try_ecrecovered(&self) -> Option { - let signer = self.recover_signer()?; - Some(RecoveredTx { signed_transaction: self.clone(), signer }) - } - - /// Tries to recover signer and return [`RecoveredTx`]. - /// - /// Returns `Err(Self)` if the transaction's signature is invalid, see also - /// [`Self::recover_signer`]. - pub fn try_into_ecrecovered(self) -> Result { - match self.recover_signer() { - None => Err(self), - Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), - } - } - /// Tries to recover signer and return [`RecoveredTx`]. 
_without ensuring that /// the signature has a low `s` value_ (EIP-2). /// @@ -1707,6 +1690,23 @@ impl Encodable2718 for RecoveredTx { /// Extension trait for [`SignedTransaction`] to convert it into [`RecoveredTx`]. pub trait SignedTransactionIntoRecoveredExt: SignedTransaction { + /// Tries to recover signer and return [`RecoveredTx`] by cloning the type. + fn try_ecrecovered(&self) -> Option> { + let signer = self.recover_signer()?; + Some(RecoveredTx { signed_transaction: self.clone(), signer }) + } + + /// Tries to recover signer and return [`RecoveredTx`]. + /// + /// Returns `Err(Self)` if the transaction's signature is invalid, see also + /// [`SignedTransaction::recover_signer`]. + fn try_into_ecrecovered(self) -> Result, Self> { + match self.recover_signer() { + None => Err(self), + Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), + } + } + /// Consumes the type, recover signer and return [`RecoveredTx`] _without /// ensuring that the signature has a low `s` value_ (EIP-2). /// diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index e2141dcf1ce..99912eddf97 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -26,7 +26,7 @@ //! RethRpcModule, RpcModuleBuilder, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig, //! }; //! use reth_tasks::TokioTaskExecutor; -//! use reth_transaction_pool::TransactionPool; +//! use reth_transaction_pool::{PoolTransaction, TransactionPool}; //! //! pub async fn launch( //! provider: Provider, @@ -44,7 +44,9 @@ //! Header = reth_primitives::Header, //! > + AccountReader //! + ChangeSetReader, -//! Pool: TransactionPool + Unpin + 'static, +//! Pool: TransactionPool> +//! + Unpin +//! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: //! CanonStateSubscriptions + Clone + 'static, @@ -95,7 +97,7 @@ //! }; //! use reth_rpc_layer::JwtSecret; //! use reth_tasks::TokioTaskExecutor; -//! 
use reth_transaction_pool::TransactionPool; +//! use reth_transaction_pool::{PoolTransaction, TransactionPool}; //! use tokio::try_join; //! //! pub async fn launch< @@ -125,7 +127,9 @@ //! Header = reth_primitives::Header, //! > + AccountReader //! + ChangeSetReader, -//! Pool: TransactionPool + Unpin + 'static, +//! Pool: TransactionPool> +//! + Unpin +//! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: //! CanonStateSubscriptions + Clone + 'static, @@ -277,7 +281,7 @@ where Header = reth_primitives::Header, > + AccountReader + ChangeSetReader, - Pool: TransactionPool + 'static, + Pool: TransactionPool::Transaction> + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, @@ -674,6 +678,7 @@ where Receipt = ::Receipt, Header = ::Header, >, + Pool: TransactionPool::Transaction>, { let Self { provider, @@ -793,6 +798,7 @@ where Receipt = ::Receipt, Header = ::Header, >, + Pool: TransactionPool::Transaction>, { let mut modules = TransportRpcModules::default(); @@ -1328,7 +1334,7 @@ where Header = ::Header, > + AccountReader + ChangeSetReader, - Pool: TransactionPool + 'static, + Pool: TransactionPool::Transaction> + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 72d53a22a23..782c1a2a8f6 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -22,7 +22,7 @@ use reth_primitives::{ }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, - ProviderReceipt, ReceiptProvider, StateProviderFactory, + ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderFactory, }; use reth_revm::{ database::StateProviderDatabase, @@ 
-33,7 +33,8 @@ use reth_revm::{ }; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; use reth_transaction_pool::{ - error::InvalidPoolTransactionError, BestTransactionsAttributes, TransactionPool, + error::InvalidPoolTransactionError, BestTransactionsAttributes, PoolTransaction, + TransactionPool, }; use reth_trie::HashedPostState; use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; @@ -48,13 +49,14 @@ pub trait LoadPendingBlock: EthApiTypes + RpcNodeCore< Provider: BlockReaderIdExt< + Transaction = reth_primitives::TransactionSigned, Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, Header = reth_primitives::Header, > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, - Pool: TransactionPool, + Pool: TransactionPool>>, Evm: ConfigureEvm
, > { @@ -319,7 +321,7 @@ pub trait LoadPendingBlock: } // convert tx to a signed transaction - let tx = pool_tx.to_recovered_transaction(); + let tx = pool_tx.to_consensus(); // There's only limited amount of blob space available per block, so we need to check if // the EIP-4844 can still fit in the block diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 43ac03adaf7..9d77e01193b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -240,7 +240,7 @@ pub trait EthTransactions: LoadTransaction { RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { let transaction = tx.transaction.clone_into_consensus(); - return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder())?)); + return Ok(Some(from_recovered(transaction, self.tx_resp_builder())?)); } } @@ -385,7 +385,7 @@ pub trait EthTransactions: LoadTransaction { let pool_transaction = <::Pool as TransactionPool>::Transaction::try_from_consensus( - transaction.into(), + transaction, ) .map_err(|_| EthApiError::TransactionConversionError)?; diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 2bac068483c..62af1432b11 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -8,8 +8,9 @@ use std::{ use alloy_network::Network; use alloy_rpc_types_eth::Block; use reth_primitives::TransactionSigned; -use reth_provider::{ReceiptProvider, TransactionsProvider}; +use reth_provider::{ProviderTx, ReceiptProvider, TransactionsProvider}; use reth_rpc_types_compat::TransactionCompat; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use crate::{AsEthApiError, FromEthApiError, FromEvmError, RpcNodeCore}; @@ -50,6 +51,9 @@ where Self: RpcNodeCore< Provider: TransactionsProvider + ReceiptProvider, + Pool: TransactionPool< + Transaction: PoolTransaction>, + >, > + EthApiTypes< 
TransactionCompat: TransactionCompat< ::Transaction, @@ -64,6 +68,9 @@ impl FullEthApiTypes for T where T: RpcNodeCore< Provider: TransactionsProvider + ReceiptProvider, + Pool: TransactionPool< + Transaction: PoolTransaction>, + >, > + EthApiTypes< TransactionCompat: TransactionCompat< ::Transaction, diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index c1ef67d9b59..6ed72b6ca74 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -9,7 +9,7 @@ use alloy_rpc_types_eth::{ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; -use reth_primitives::{Receipt, RecoveredTx, SealedBlockWithSenders}; +use reth_primitives::{Receipt, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, ProviderError}; use reth_rpc_eth_api::{ EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat, @@ -145,7 +145,7 @@ where impl EthFilter where Provider: BlockReader + BlockIdReader + 'static, - Pool: TransactionPool + 'static, + Pool: TransactionPool::Transaction> + 'static, Eth: FullEthApiTypes, { /// Returns all the filter changes for the given id, if any @@ -245,7 +245,7 @@ impl EthFilterApiServer> for EthFilter where Provider: BlockReader + BlockIdReader + 'static, - Pool: TransactionPool + 'static, + Pool: TransactionPool::Transaction> + 'static, Eth: FullEthApiTypes + 'static, { /// Handler for `eth_newFilter` @@ -611,7 +611,7 @@ struct FullTransactionsReceiver { impl FullTransactionsReceiver where T: PoolTransaction + 'static, - TxCompat: TransactionCompat, + TxCompat: TransactionCompat, { /// Creates a new `FullTransactionsReceiver` encapsulating the provided transaction stream. fn new(stream: NewSubpoolTransactionStream, tx_resp_builder: TxCompat) -> Self { @@ -619,15 +619,12 @@ where } /// Returns all new pending transactions received since the last poll. 
- async fn drain(&self) -> FilterChanges - where - T: PoolTransaction>, - { + async fn drain(&self) -> FilterChanges { let mut pending_txs = Vec::new(); let mut prepared_stream = self.txs_stream.lock().await; while let Ok(tx) = prepared_stream.try_recv() { - match from_recovered(tx.transaction.to_recovered_transaction(), &self.tx_resp_builder) { + match from_recovered(tx.transaction.to_consensus(), &self.tx_resp_builder) { Ok(tx) => pending_txs.push(tx), Err(err) => { error!(target: "rpc", @@ -651,8 +648,8 @@ trait FullTransactionsFilter: fmt::Debug + Send + Sync + Unpin + 'static { impl FullTransactionsFilter for FullTransactionsReceiver where - T: PoolTransaction> + 'static, - TxCompat: TransactionCompat + 'static, + T: PoolTransaction + 'static, + TxCompat: TransactionCompat + 'static, { async fn drain(&self) -> FilterChanges { Self::drain(self).await diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 794b9dde82f..34c0ae96261 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -3,13 +3,15 @@ use alloy_consensus::Header; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_provider::{ + BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderTx, StateProviderFactory, +}; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, RpcNodeCore, }; use reth_rpc_eth_types::PendingBlock; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use crate::EthApi; @@ -19,13 +21,16 @@ where Self: SpawnBlocking + RpcNodeCore< Provider: BlockReaderIdExt< + Transaction = reth_primitives::TransactionSigned, Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, Header = reth_primitives::Header, > + EvmEnvProvider + ChainSpecProvider + 
StateProviderFactory, - Pool: TransactionPool, + Pool: TransactionPool< + Transaction: PoolTransaction>, + >, Evm: ConfigureEvm
, >, { diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 8ad809b8b18..58c62133730 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -23,7 +23,7 @@ use reth_rpc_eth_types::logs_utils; use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_rpc_types_compat::transaction::from_recovered; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; -use reth_transaction_pool::{NewTransactionEvent, TransactionPool}; +use reth_transaction_pool::{NewTransactionEvent, PoolConsensusTx, TransactionPool}; use serde::Serialize; use tokio_stream::{ wrappers::{BroadcastStream, ReceiverStream}, @@ -95,7 +95,7 @@ where > + Clone + 'static, Network: NetworkInfo + Clone + 'static, - Eth: TransactionCompat + 'static, + Eth: TransactionCompat> + 'static, { /// Handler for `eth_subscribe` async fn subscribe( @@ -135,7 +135,7 @@ where > + Clone + 'static, Network: NetworkInfo + Clone + 'static, - Eth: TransactionCompat, + Eth: TransactionCompat>, { match kind { SubscriptionKind::NewHeads => { @@ -165,7 +165,7 @@ where // full transaction objects requested let stream = pubsub.full_pending_transaction_stream().filter_map(|tx| { let tx_value = match from_recovered( - tx.transaction.to_recovered_transaction(), + tx.transaction.to_consensus(), &tx_resp_builder, ) { Ok(tx) => { diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index b12e8e7ab57..4709c9878fa 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -8,10 +8,11 @@ use alloy_rpc_types_txpool::{ }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use reth_primitives::RecoveredTx; use reth_rpc_api::TxPoolApiServer; use reth_rpc_types_compat::{transaction::from_recovered, TransactionCompat}; -use reth_transaction_pool::{AllPoolTransactions, PoolTransaction, TransactionPool}; +use reth_transaction_pool::{ + AllPoolTransactions, PoolConsensusTx, PoolTransaction, TransactionPool, 
+}; use tracing::trace; /// `txpool` API implementation. @@ -33,8 +34,8 @@ impl TxPoolApi { impl TxPoolApi where - Pool: TransactionPool + 'static, - Eth: TransactionCompat, + Pool: TransactionPool> + 'static, + Eth: TransactionCompat>, { fn content(&self) -> Result, Eth::Error> { #[inline] @@ -44,12 +45,12 @@ where resp_builder: &RpcTxB, ) -> Result<(), RpcTxB::Error> where - Tx: PoolTransaction>, - RpcTxB: TransactionCompat, + Tx: PoolTransaction, + RpcTxB: TransactionCompat, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), - from_recovered(tx.clone_into_consensus().into(), resp_builder)?, + from_recovered(tx.clone_into_consensus(), resp_builder)?, ); Ok(()) @@ -72,8 +73,8 @@ where #[async_trait] impl TxPoolApiServer for TxPoolApi where - Pool: TransactionPool + 'static, - Eth: TransactionCompat + 'static, + Pool: TransactionPool> + 'static, + Eth: TransactionCompat> + 'static, { /// Returns the number of transactions currently pending for inclusion in the next block(s), as /// well as the ones that are being scheduled for future execution only. 
@@ -96,19 +97,19 @@ where trace!(target: "rpc::eth", "Serving txpool_inspect"); #[inline] - fn insert>>( + fn insert>( tx: &T, inspect: &mut BTreeMap>, ) { let entry = inspect.entry(tx.sender()).or_default(); - let tx: RecoveredTx = tx.clone_into_consensus().into(); + let tx = tx.clone_into_consensus(); entry.insert( tx.nonce().to_string(), TxpoolInspectSummary { to: tx.to(), value: tx.value(), gas: tx.gas_limit() as u128, - gas_price: tx.transaction.max_fee_per_gas(), + gas_price: tx.max_fee_per_gas(), }, ); } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 7e28b6e2685..fa7b75e34ad 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -10,6 +10,7 @@ use crate::{ use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{Address, BlockHash, BlockNumber}; +use alloy_rlp::Encodable; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -19,7 +20,8 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives::{ - PooledTransactionsElementEcRecovered, RecoveredTx, SealedHeader, TransactionSigned, + transaction::SignedTransactionIntoRecoveredExt, PooledTransactionsElementEcRecovered, + SealedHeader, TransactionSigned, }; use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; @@ -78,7 +80,7 @@ pub fn maintain_transaction_pool_future( ) -> BoxFuture<'static, ()> where Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + Send + 'static, - P: TransactionPoolExt + 'static, + P: TransactionPoolExt> + 'static, St: Stream + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, { @@ -99,7 +101,7 @@ pub async fn maintain_transaction_pool( config: MaintainPoolConfig, ) where Client: StateProviderFactory + 
BlockReaderIdExt + ChainSpecProvider + Clone + Send + 'static, - P: TransactionPoolExt + 'static, + P: TransactionPoolExt> + 'static, St: Stream + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, { @@ -342,7 +344,7 @@ pub async fn maintain_transaction_pool(

::Transaction::from_pooled(tx.into()) }) } else { -

::Transaction::try_from_consensus(tx.into()).ok() +

::Transaction::try_from_consensus(tx).ok() } }) .collect::>(); @@ -559,7 +561,7 @@ async fn load_and_reinsert_transactions

( file_path: &Path, ) -> Result<(), TransactionsBackupError> where - P: TransactionPool, + P: TransactionPool>, { if !file_path.exists() { return Ok(()) @@ -572,14 +574,15 @@ where return Ok(()) } - let txs_signed: Vec = alloy_rlp::Decodable::decode(&mut data.as_slice())?; + let txs_signed: Vec<::Consensus> = + alloy_rlp::Decodable::decode(&mut data.as_slice())?; let pool_transactions = txs_signed .into_iter() .filter_map(|tx| tx.try_ecrecovered()) .filter_map(|tx| { // Filter out errors - ::try_from_consensus(tx.into()).ok() + ::try_from_consensus(tx).ok() }) .collect(); @@ -592,7 +595,7 @@ where fn save_local_txs_backup

(pool: P, file_path: &Path) where - P: TransactionPool, + P: TransactionPool>, { let local_transactions = pool.get_local_transactions(); if local_transactions.is_empty() { @@ -602,10 +605,7 @@ where let local_transactions = local_transactions .into_iter() - .map(|tx| { - let recovered: RecoveredTx = tx.transaction.clone_into_consensus().into(); - recovered.into_signed() - }) + .map(|tx| tx.transaction.clone_into_consensus().into_signed()) .collect::>(); let num_txs = local_transactions.len(); @@ -645,7 +645,7 @@ pub async fn backup_local_transactions_task

( pool: P, config: LocalTransactionBackupConfig, ) where - P: TransactionPool + Clone, + P: TransactionPool> + Clone, { let Some(transactions_path) = config.transactions_path else { // nothing to do diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index be49ce0b1fd..b770e3da4b0 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -226,7 +226,7 @@ impl Iterator for BestTransactions { #[derive(Debug)] pub struct BestPayloadTransactions where - T: PoolTransaction>, + T: PoolTransaction, I: Iterator>>, { invalid: HashSet

, @@ -235,7 +235,7 @@ where impl BestPayloadTransactions where - T: PoolTransaction>, + T: PoolTransaction, I: Iterator>>, { /// Create a new `BestPayloadTransactions` with the given iterator. @@ -246,16 +246,18 @@ where impl PayloadTransactions for BestPayloadTransactions where - T: PoolTransaction>, + T: PoolTransaction, I: Iterator>>, { - fn next(&mut self, _ctx: ()) -> Option { + type Transaction = T::Consensus; + + fn next(&mut self, _ctx: ()) -> Option> { loop { let tx = self.best.next()?; if self.invalid.contains(&tx.sender()) { continue } - return Some(tx.to_recovered_transaction()) + return Some(tx.to_consensus()) } } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 05551151d78..471956fdca5 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -592,15 +592,17 @@ impl MockTransaction { impl PoolTransaction for MockTransaction { type TryFromConsensusError = TryFromRecoveredTransactionError; - type Consensus = RecoveredTx; + type Consensus = TransactionSigned; type Pooled = PooledTransactionsElementEcRecovered; - fn try_from_consensus(tx: Self::Consensus) -> Result { + fn try_from_consensus( + tx: RecoveredTx, + ) -> Result { tx.try_into() } - fn into_consensus(self) -> Self::Consensus { + fn into_consensus(self) -> RecoveredTx { self.into() } @@ -609,7 +611,7 @@ impl PoolTransaction for MockTransaction { } fn try_consensus_into_pooled( - tx: Self::Consensus, + tx: RecoveredTx, ) -> Result { Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index a5c85ce125b..6d4d562b3bd 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -38,6 +38,9 @@ use tokio::sync::mpsc::Receiver; /// The `PeerId` type. 
pub type PeerId = alloy_primitives::B512; +/// Helper type alias to access [`PoolTransaction::Consensus`] for a given [`TransactionPool`]. +pub type PoolConsensusTx

= <

::Transaction as PoolTransaction>::Consensus; + /// General purpose abstraction of a transaction-pool. /// /// This is intended to be used by API-consumers such as RPC that need inject new incoming, @@ -577,17 +580,17 @@ pub struct AllPoolTransactions { impl AllPoolTransactions { /// Returns an iterator over all pending [`RecoveredTx`] transactions. - pub fn pending_recovered(&self) -> impl Iterator + '_ { + pub fn pending_recovered(&self) -> impl Iterator> + '_ { self.pending.iter().map(|tx| tx.transaction.clone().into()) } /// Returns an iterator over all queued [`RecoveredTx`] transactions. - pub fn queued_recovered(&self) -> impl Iterator + '_ { + pub fn queued_recovered(&self) -> impl Iterator> + '_ { self.queued.iter().map(|tx| tx.transaction.clone().into()) } /// Returns an iterator over all transactions, both pending and queued. - pub fn all(&self) -> impl Iterator + '_ { + pub fn all(&self) -> impl Iterator> + '_ { self.pending.iter().chain(self.queued.iter()).map(|tx| tx.transaction.clone().into()) } } @@ -963,30 +966,39 @@ impl BestTransactionsAttributes { /// This distinction is necessary for the EIP-4844 blob transactions, which require an additional /// sidecar when they are gossiped around the network. It is expected that the `Consensus` format is /// a subset of the `Pooled` format. -pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { +pub trait PoolTransaction: + fmt::Debug + + Send + + Sync + + Clone + + TryFrom, Error = Self::TryFromConsensusError> + + Into> +{ /// Associated error type for the `try_from_consensus` method. type TryFromConsensusError: fmt::Display; /// Associated type representing the raw consensus variant of the transaction. - type Consensus: From + TryInto; + type Consensus; /// Associated type representing the recovered pooled variant of the transaction. 
type Pooled: Encodable2718 + Into; /// Define a method to convert from the `Consensus` type to `Self` - fn try_from_consensus(tx: Self::Consensus) -> Result { + fn try_from_consensus( + tx: RecoveredTx, + ) -> Result { tx.try_into() } /// Clone the transaction into a consensus variant. /// /// This method is preferred when the [`PoolTransaction`] already wraps the consensus variant. - fn clone_into_consensus(&self) -> Self::Consensus { + fn clone_into_consensus(&self) -> RecoveredTx { self.clone().into_consensus() } /// Define a method to convert from the `Self` type to `Consensus` - fn into_consensus(self) -> Self::Consensus { + fn into_consensus(self) -> RecoveredTx { self.into() } @@ -1002,7 +1014,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Tries to convert the `Consensus` type into the `Pooled` type. fn try_consensus_into_pooled( - tx: Self::Consensus, + tx: RecoveredTx, ) -> Result; /// Hash of the transaction. @@ -1131,7 +1143,6 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Ethereum pool. 
pub trait EthPoolTransaction: PoolTransaction< - Consensus: From + Into + Into, Pooled: From + Into + Into, @@ -1242,16 +1253,16 @@ impl From for EthPooledTransaction { impl PoolTransaction for EthPooledTransaction { type TryFromConsensusError = TryFromRecoveredTransactionError; - type Consensus = RecoveredTx; + type Consensus = TransactionSigned; type Pooled = PooledTransactionsElementEcRecovered; - fn clone_into_consensus(&self) -> Self::Consensus { + fn clone_into_consensus(&self) -> RecoveredTx { self.transaction().clone() } fn try_consensus_into_pooled( - tx: Self::Consensus, + tx: RecoveredTx, ) -> Result { Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) } diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index d333be87963..6c625373401 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -378,7 +378,7 @@ impl ValidPoolTransaction { /// Converts to this type into the consensus transaction of the pooled transaction. /// /// Note: this takes `&self` since indented usage is via `Arc`. - pub fn to_consensus(&self) -> T::Consensus { + pub fn to_consensus(&self) -> RecoveredTx { self.transaction.clone_into_consensus() } @@ -435,15 +435,6 @@ impl ValidPoolTransaction { } } -impl>> ValidPoolTransaction { - /// Converts to this type into a [`RecoveredTx`]. - /// - /// Note: this takes `&self` since indented usage is via `Arc`. 
- pub fn to_recovered_transaction(&self) -> RecoveredTx { - self.to_consensus().into() - } -} - #[cfg(test)] impl Clone for ValidPoolTransaction { fn clone(&self) -> Self { diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index f9ac5c23865..1034effebf8 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -38,14 +38,14 @@ use reth::{ }, network::NetworkHandle, payload::ExecutionPayloadValidator, - primitives::{Block, EthPrimitives, SealedBlockFor}, + primitives::{Block, EthPrimitives, SealedBlockFor, TransactionSigned}, providers::{CanonStateSubscriptions, EthStorage, StateProviderFactory}, rpc::{ eth::EthApi, types::engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}, }, tasks::TaskManager, - transaction_pool::TransactionPool, + transaction_pool::{PoolTransaction, TransactionPool}, }; use reth_basic_payload_builder::{ BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig, BuildArguments, BuildOutcome, @@ -340,7 +340,9 @@ where Primitives = EthPrimitives, >, >, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, { async fn spawn_payload_service( self, @@ -380,7 +382,7 @@ pub struct CustomPayloadBuilder; impl PayloadBuilder for CustomPayloadBuilder where Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, { type Attributes = CustomPayloadBuilderAttributes; type BuiltPayload = EthBuiltPayload; diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index e7999818ae1..d9e341c02cc 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -20,7 +20,7 @@ use reth::{ }, rpc::types::engine::PayloadAttributes, tasks::TaskManager, - transaction_pool::TransactionPool, + transaction_pool::{PoolTransaction, TransactionPool}, }; use reth_chainspec::{Chain, ChainSpec}; use reth_evm_ethereum::EthEvmConfig; @@ -183,7 +183,9 @@ impl 
PayloadServiceBuilder for MyPayloadBuilder where Types: NodeTypesWithEngine, Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, Types::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = PayloadAttributes, diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index 67863d00e1e..6b25c46b76c 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -54,8 +54,7 @@ fn main() { if let Some(recipient) = tx.to() { if args.is_match(&recipient) { // convert the pool transaction - let call_request = - transaction_to_call_request(tx.to_recovered_transaction()); + let call_request = transaction_to_call_request(tx.to_consensus()); let result = eth_api .spawn_with_call_at( diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index 6047da0dd1b..d7c42e341b5 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -17,14 +17,14 @@ use reth::{ cli::{config::PayloadBuilderConfig, Cli}, payload::PayloadBuilderHandle, providers::CanonStateSubscriptions, - transaction_pool::TransactionPool, + transaction_pool::{PoolTransaction, TransactionPool}, }; use reth_basic_payload_builder::BasicPayloadJobGeneratorConfig; use reth_chainspec::ChainSpec; use reth_node_api::NodeTypesWithEngine; use reth_node_ethereum::{node::EthereumAddOns, EthEngineTypes, EthEvmConfig, EthereumNode}; use reth_payload_builder::PayloadBuilderService; -use reth_primitives::EthPrimitives; +use reth_primitives::{EthPrimitives, TransactionSigned}; pub mod generator; pub mod job; @@ -42,7 +42,9 @@ where Primitives = EthPrimitives, >, >, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, { async fn spawn_payload_service( self, diff --git a/examples/txpool-tracing/src/main.rs b/examples/txpool-tracing/src/main.rs index 
94f800987a9..76abd65f4af 100644 --- a/examples/txpool-tracing/src/main.rs +++ b/examples/txpool-tracing/src/main.rs @@ -44,8 +44,7 @@ fn main() { if let Some(recipient) = tx.to() { if args.is_match(&recipient) { // trace the transaction with `trace_call` - let callrequest = - transaction_to_call_request(tx.to_recovered_transaction()); + let callrequest = transaction_to_call_request(tx.to_consensus()); let tracerequest = TraceCallRequest::new(callrequest) .with_trace_type(TraceType::Trace); if let Ok(trace_result) = traceapi.trace_call(tracerequest).await { From 74017bac0aebf4143d34005301f53087fe76c00f Mon Sep 17 00:00:00 2001 From: morito Date: Tue, 3 Dec 2024 17:48:21 +0900 Subject: [PATCH 839/970] feat: Use `PrimitiveSignature` instead of `Signature` (#13087) --- crates/net/network/src/transactions/validation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index 1575d9f3374..1018cde6b55 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -3,7 +3,7 @@ //! announcements. Validation and filtering of announcements is network dependent. 
use crate::metrics::{AnnouncedTxTypesMetrics, TxTypesCounter}; -use alloy_primitives::{Signature, TxHash}; +use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; use derive_more::{Deref, DerefMut}; use reth_eth_wire::{ DedupPayload, Eth68TxMetadata, HandleMempoolData, PartiallyValidData, ValidAnnouncementData, From 383b8c242e0e93f7683a807c881c3111be0dad70 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 3 Dec 2024 10:01:02 +0100 Subject: [PATCH 840/970] fix(engine): get_proof_targets only add fetched accounts if they have new storage (#13015) Co-authored-by: Roman Krasiuk --- crates/engine/tree/src/tree/root.rs | 210 +++++++++++++++++++++++++--- 1 file changed, 189 insertions(+), 21 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 31e79ca04b5..70f54ce4d48 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -464,31 +464,37 @@ where } } +/// Returns accounts only with those storages that were not already fetched, and +/// if there are no such storages and the account itself was already fetched, the +/// account shouldn't be included. 
fn get_proof_targets( state_update: &HashedPostState, fetched_proof_targets: &HashMap>, ) -> HashMap> { - state_update - .accounts - .keys() - .filter(|hashed_address| !fetched_proof_targets.contains_key(*hashed_address)) - .map(|hashed_address| (*hashed_address, HashSet::default())) - .chain(state_update.storages.iter().map(|(hashed_address, storage)| { - let fetched_storage_proof_targets = fetched_proof_targets.get(hashed_address); - ( - *hashed_address, - storage - .storage - .keys() - .filter(|slot| { - !fetched_storage_proof_targets - .is_some_and(|targets| targets.contains(*slot)) - }) - .copied() - .collect(), - ) - })) - .collect() + let mut targets = HashMap::default(); + + // first collect all new accounts (not previously fetched) + for &hashed_address in state_update.accounts.keys() { + if !fetched_proof_targets.contains_key(&hashed_address) { + targets.insert(hashed_address, HashSet::default()); + } + } + + // then process storage slots for all accounts in the state update + for (hashed_address, storage) in &state_update.storages { + let fetched = fetched_proof_targets.get(hashed_address); + let mut changed_slots = storage + .storage + .keys() + .filter(|slot| !fetched.is_some_and(|f| f.contains(*slot))) + .peekable(); + + if changed_slots.peek().is_some() { + targets.entry(*hashed_address).or_default().extend(changed_slots); + } + } + + targets } /// Updates the sparse trie with the given proofs and state, and returns the updated trie and the @@ -793,4 +799,166 @@ mod tests { assert_eq!(ready.len(), 5); assert!(!sequencer.has_pending()); } + + fn create_get_proof_targets_state() -> HashedPostState { + let mut state = HashedPostState::default(); + + let addr1 = B256::random(); + let addr2 = B256::random(); + state.accounts.insert(addr1, Some(Default::default())); + state.accounts.insert(addr2, Some(Default::default())); + + let mut storage = HashedStorage::default(); + let slot1 = B256::random(); + let slot2 = B256::random(); + 
storage.storage.insert(slot1, U256::ZERO); + storage.storage.insert(slot2, U256::from(1)); + state.storages.insert(addr1, storage); + + state + } + + #[test] + fn test_get_proof_targets_new_account_targets() { + let state = create_get_proof_targets_state(); + let fetched = HashMap::default(); + + let targets = get_proof_targets(&state, &fetched); + + // should return all accounts as targets since nothing was fetched before + assert_eq!(targets.len(), state.accounts.len()); + for addr in state.accounts.keys() { + assert!(targets.contains_key(addr)); + } + } + + #[test] + fn test_get_proof_targets_new_storage_targets() { + let state = create_get_proof_targets_state(); + let fetched = HashMap::default(); + + let targets = get_proof_targets(&state, &fetched); + + // verify storage slots are included for accounts with storage + for (addr, storage) in &state.storages { + assert!(targets.contains_key(addr)); + let target_slots = &targets[addr]; + assert_eq!(target_slots.len(), storage.storage.len()); + for slot in storage.storage.keys() { + assert!(target_slots.contains(slot)); + } + } + } + + #[test] + fn test_get_proof_targets_filter_already_fetched_accounts() { + let state = create_get_proof_targets_state(); + let mut fetched = HashMap::default(); + + // select an account that has no storage updates + let fetched_addr = state + .accounts + .keys() + .find(|&&addr| !state.storages.contains_key(&addr)) + .expect("Should have an account without storage"); + + // mark the account as already fetched + fetched.insert(*fetched_addr, HashSet::default()); + + let targets = get_proof_targets(&state, &fetched); + + // should not include the already fetched account since it has no storage updates + assert!(!targets.contains_key(fetched_addr)); + // other accounts should still be included + assert_eq!(targets.len(), state.accounts.len() - 1); + } + + #[test] + fn test_get_proof_targets_filter_already_fetched_storage() { + let state = create_get_proof_targets_state(); + let mut 
fetched = HashMap::default(); + + // mark one storage slot as already fetched + let (addr, storage) = state.storages.iter().next().unwrap(); + let mut fetched_slots = HashSet::default(); + let fetched_slot = *storage.storage.keys().next().unwrap(); + fetched_slots.insert(fetched_slot); + fetched.insert(*addr, fetched_slots); + + let targets = get_proof_targets(&state, &fetched); + + // should not include the already fetched storage slot + let target_slots = &targets[addr]; + assert!(!target_slots.contains(&fetched_slot)); + assert_eq!(target_slots.len(), storage.storage.len() - 1); + } + + #[test] + fn test_get_proof_targets_empty_state() { + let state = HashedPostState::default(); + let fetched = HashMap::default(); + + let targets = get_proof_targets(&state, &fetched); + + assert!(targets.is_empty()); + } + + #[test] + fn test_get_proof_targets_mixed_fetched_state() { + let mut state = HashedPostState::default(); + let mut fetched = HashMap::default(); + + let addr1 = B256::random(); + let addr2 = B256::random(); + let slot1 = B256::random(); + let slot2 = B256::random(); + + state.accounts.insert(addr1, Some(Default::default())); + state.accounts.insert(addr2, Some(Default::default())); + + let mut storage = HashedStorage::default(); + storage.storage.insert(slot1, U256::ZERO); + storage.storage.insert(slot2, U256::from(1)); + state.storages.insert(addr1, storage); + + let mut fetched_slots = HashSet::default(); + fetched_slots.insert(slot1); + fetched.insert(addr1, fetched_slots); + + let targets = get_proof_targets(&state, &fetched); + + assert!(targets.contains_key(&addr2)); + assert!(!targets[&addr1].contains(&slot1)); + assert!(targets[&addr1].contains(&slot2)); + } + + #[test] + fn test_get_proof_targets_unmodified_account_with_storage() { + let mut state = HashedPostState::default(); + let fetched = HashMap::default(); + + let addr = B256::random(); + let slot1 = B256::random(); + let slot2 = B256::random(); + + // don't add the account to state.accounts 
(simulating unmodified account) + // but add storage updates for this account + let mut storage = HashedStorage::default(); + storage.storage.insert(slot1, U256::from(1)); + storage.storage.insert(slot2, U256::from(2)); + state.storages.insert(addr, storage); + + assert!(!state.accounts.contains_key(&addr)); + assert!(!fetched.contains_key(&addr)); + + let targets = get_proof_targets(&state, &fetched); + + // verify that we still get the storage slots for the unmodified account + assert!(targets.contains_key(&addr)); + + let target_slots = &targets[&addr]; + assert_eq!(target_slots.len(), 2); + assert!(target_slots.contains(&slot1)); + assert!(target_slots.contains(&slot2)); + } } From 82b97a8dd383d596b1a3978073bebacb2810c993 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 3 Dec 2024 10:46:41 +0100 Subject: [PATCH 841/970] chore: fix unused warning (#13090) --- crates/trie/common/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 9f81d020eb3..73fce5f8e7b 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -67,7 +67,7 @@ serde = [ "alloy-primitives/serde", "alloy-consensus/serde", "alloy-trie/serde", - "alloy-rpc-types-eth/serde", + "alloy-rpc-types-eth?/serde", "revm-primitives/serde", "reth-primitives-traits/serde", "reth-codecs/serde" From 84e1fb92e0a17025c01ea46a86864303cff71983 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 3 Dec 2024 11:58:33 +0100 Subject: [PATCH 842/970] feat: impl more noop functions (#13029) --- crates/net/network/src/config.rs | 6 +- crates/storage/storage-api/src/noop.rs | 530 ++++++++++++++++++++++++- 2 files changed, 520 insertions(+), 16 deletions(-) diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index bde2cf78d97..44f34c3a4b0 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -16,7 +16,7 @@ use reth_eth_wire::{ use 
reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::{mainnet_nodes, pk2id, sepolia_nodes, PeerId, TrustedPeer}; use reth_network_types::{PeersConfig, SessionsConfig}; -use reth_storage_api::{noop::NoopBlockReader, BlockNumReader, BlockReader, HeaderProvider}; +use reth_storage_api::{noop::NoopProvider, BlockNumReader, BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; use std::{collections::HashSet, net::SocketAddr, sync::Arc}; @@ -498,11 +498,11 @@ impl NetworkConfigBuilder { pub fn build_with_noop_provider( self, chain_spec: Arc, - ) -> NetworkConfig, N> + ) -> NetworkConfig, N> where ChainSpec: EthChainSpec + Hardforks + 'static, { - self.build(NoopBlockReader::new(chain_spec)) + self.build(NoopProvider::eth(chain_spec)) } /// Sets the NAT resolver for external IP. diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 7325e2b7436..9c971e7b293 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -1,28 +1,81 @@ //! Various noop implementations for traits. 
-use std::sync::Arc; - -use crate::{BlockHashReader, BlockNumReader}; -use alloy_primitives::{BlockNumber, B256}; -use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; +use crate::{ + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + BlockSource, ChangeSetReader, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, + ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, + StateProvider, StateRootProvider, StorageRootProvider, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, +}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, +}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, +}; +use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET}; +use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; +use reth_primitives::{ + BlockWithSenders, EthPrimitives, SealedBlockFor, SealedBlockWithSenders, TransactionMeta, +}; +use reth_primitives_traits::{Account, Bytecode, NodePrimitives, SealedHeader}; +use reth_prune_types::{PruneCheckpoint, PruneSegment}; +use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::ProviderResult; +use reth_trie::{ + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, +}; +use std::{ + marker::PhantomData, + ops::{RangeBounds, RangeInclusive}, + sync::Arc, +}; /// Supports various api interfaces for testing purposes. -#[derive(Debug, Clone)] +#[derive(Debug)] #[non_exhaustive] -pub struct NoopBlockReader { +pub struct NoopProvider { chain_spec: Arc, + _phantom: PhantomData, +} + +impl NoopProvider { + /// Create a new instance for specific primitive types. 
+ pub fn new(chain_spec: Arc) -> Self { + Self { chain_spec, _phantom: Default::default() } + } } -impl NoopBlockReader { +impl NoopProvider { /// Create a new instance of the `NoopBlockReader`. - pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + pub fn eth(chain_spec: Arc) -> Self { + Self { chain_spec, _phantom: Default::default() } + } +} + +impl NoopProvider { + /// Create a new instance of the [`NoopProvider`] with the mainnet chain spec. + pub fn mainnet() -> Self { + Self::eth(MAINNET.clone()) + } +} + +impl Default for NoopProvider { + fn default() -> Self { + Self::mainnet() + } +} + +impl Clone for NoopProvider { + fn clone(&self) -> Self { + Self { chain_spec: Arc::clone(&self.chain_spec), _phantom: Default::default() } } } /// Noop implementation for testing purposes -impl BlockHashReader for NoopBlockReader { +impl BlockHashReader for NoopProvider { fn block_hash(&self, _number: u64) -> ProviderResult> { Ok(None) } @@ -36,7 +89,7 @@ impl BlockHashReader for NoopBlockReader { } } -impl BlockNumReader for NoopBlockReader { +impl BlockNumReader for NoopProvider { fn chain_info(&self) -> ProviderResult { Ok(ChainInfo::default()) } @@ -54,10 +107,461 @@ impl BlockNumReader for NoopBlockReader { } } -impl ChainSpecProvider for NoopBlockReader { +impl ChainSpecProvider + for NoopProvider +{ type ChainSpec = ChainSpec; fn chain_spec(&self) -> Arc { self.chain_spec.clone() } } + +impl BlockIdReader for NoopProvider { + fn pending_block_num_hash(&self) -> ProviderResult> { + Ok(None) + } + + fn safe_block_num_hash(&self) -> ProviderResult> { + Ok(None) + } + + fn finalized_block_num_hash(&self) -> ProviderResult> { + Ok(None) + } +} + +impl BlockReaderIdExt for NoopProvider { + fn block_by_id(&self, _id: BlockId) -> ProviderResult> { + Ok(None) + } + + fn sealed_header_by_id( + &self, + _id: BlockId, + ) -> ProviderResult>> { + Ok(None) + } + + fn header_by_id(&self, _id: BlockId) -> ProviderResult> { + Ok(None) + } + + fn ommers_by_id(&self, 
_id: BlockId) -> ProviderResult>> { + Ok(None) + } +} + +impl BlockReader for NoopProvider { + type Block = N::Block; + + fn find_block_by_hash( + &self, + _hash: B256, + _source: BlockSource, + ) -> ProviderResult> { + Ok(None) + } + + fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { + Ok(None) + } + + fn pending_block(&self) -> ProviderResult>> { + Ok(None) + } + + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + Ok(None) + } + + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + Ok(None) + } + + fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { + Ok(None) + } + + fn block_body_indices(&self, _num: u64) -> ProviderResult> { + Ok(None) + } + + fn block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + Ok(None) + } + + fn sealed_block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + Ok(None) + } + + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { + Ok(vec![]) + } + + fn block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult>> { + Ok(vec![]) + } + + fn sealed_block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult>> { + Ok(vec![]) + } +} + +impl TransactionsProvider for NoopProvider { + type Transaction = N::SignedTx; + + fn transaction_id(&self, _tx_hash: TxHash) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_id_unhashed( + &self, + _id: TxNumber, + ) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_hash_with_meta( + &self, + _hash: TxHash, + ) -> ProviderResult> { + Ok(None) + } + + fn transaction_block(&self, _id: TxNumber) -> ProviderResult> { + todo!() + } + + fn transactions_by_block( + &self, + 
_block_id: BlockHashOrNumber, + ) -> ProviderResult>> { + Ok(None) + } + + fn transactions_by_block_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult>> { + Ok(Vec::default()) + } + + fn transactions_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn senders_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn transaction_sender(&self, _id: TxNumber) -> ProviderResult> { + Ok(None) + } +} + +impl ReceiptProvider for NoopProvider { + type Receipt = N::Receipt; + + fn receipt(&self, _id: TxNumber) -> ProviderResult> { + Ok(None) + } + + fn receipt_by_hash(&self, _hash: TxHash) -> ProviderResult> { + Ok(None) + } + + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { + Ok(None) + } + + fn receipts_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(vec![]) + } +} + +impl ReceiptProviderIdExt for NoopProvider {} + +impl HeaderProvider for NoopProvider { + type Header = N::BlockHeader; + + fn header(&self, _block_hash: &BlockHash) -> ProviderResult> { + Ok(None) + } + + fn header_by_number(&self, _num: u64) -> ProviderResult> { + Ok(None) + } + + fn header_td(&self, _hash: &BlockHash) -> ProviderResult> { + Ok(None) + } + + fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult> { + Ok(None) + } + + fn headers_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(vec![]) + } + + fn sealed_header( + &self, + _number: BlockNumber, + ) -> ProviderResult>> { + Ok(None) + } + + fn sealed_headers_while( + &self, + _range: impl RangeBounds, + _predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { + Ok(vec![]) + } +} + +impl AccountReader for NoopProvider { + fn basic_account(&self, _address: Address) -> ProviderResult> { + Ok(None) + } +} + +impl ChangeSetReader for NoopProvider { + fn account_block_changeset( + &self, + _block_number: 
BlockNumber, + ) -> ProviderResult> { + Ok(Vec::default()) + } +} + +impl StateRootProvider for NoopProvider { + fn state_root(&self, _state: HashedPostState) -> ProviderResult { + Ok(B256::default()) + } + + fn state_root_from_nodes(&self, _input: TrieInput) -> ProviderResult { + Ok(B256::default()) + } + + fn state_root_with_updates( + &self, + _state: HashedPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + Ok((B256::default(), TrieUpdates::default())) + } + + fn state_root_from_nodes_with_updates( + &self, + _input: TrieInput, + ) -> ProviderResult<(B256, TrieUpdates)> { + Ok((B256::default(), TrieUpdates::default())) + } +} + +impl StorageRootProvider for NoopProvider { + fn storage_root( + &self, + _address: Address, + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(B256::default()) + } + + fn storage_proof( + &self, + _address: Address, + slot: B256, + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageProof::new(slot)) + } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageMultiProof::empty()) + } +} + +impl StateProofProvider for NoopProvider { + fn proof( + &self, + _input: TrieInput, + address: Address, + _slots: &[B256], + ) -> ProviderResult { + Ok(AccountProof::new(address)) + } + + fn multiproof( + &self, + _input: TrieInput, + _targets: HashMap>, + ) -> ProviderResult { + Ok(MultiProof::default()) + } + + fn witness( + &self, + _input: TrieInput, + _target: HashedPostState, + ) -> ProviderResult> { + Ok(HashMap::default()) + } +} + +impl StateProvider for NoopProvider { + fn storage( + &self, + _account: Address, + _storage_key: StorageKey, + ) -> ProviderResult> { + Ok(None) + } + + fn bytecode_by_hash(&self, _code_hash: B256) -> ProviderResult> { + Ok(None) + } +} + +// impl EvmEnvProvider for NoopProvider { +// fn fill_env_at( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _block_env: &mut 
BlockEnv, +// _at: BlockHashOrNumber, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv

, +// { +// Ok(()) +// } +// +// fn fill_env_with_header( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _block_env: &mut BlockEnv, +// _header: &Header, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv
, +// { +// Ok(()) +// } +// +// fn fill_cfg_env_at( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _at: BlockHashOrNumber, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv
, +// { +// Ok(()) +// } +// +// fn fill_cfg_env_with_header( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _header: &Header, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv
, +// { +// Ok(()) +// } +// } + +impl StageCheckpointReader for NoopProvider { + fn get_stage_checkpoint(&self, _id: StageId) -> ProviderResult> { + Ok(None) + } + + fn get_stage_checkpoint_progress(&self, _id: StageId) -> ProviderResult>> { + Ok(None) + } + + fn get_all_checkpoints(&self) -> ProviderResult> { + Ok(Vec::new()) + } +} + +impl WithdrawalsProvider for NoopProvider { + fn withdrawals_by_block( + &self, + _id: BlockHashOrNumber, + _timestamp: u64, + ) -> ProviderResult> { + Ok(None) + } + fn latest_withdrawal(&self) -> ProviderResult> { + Ok(None) + } +} + +impl PruneCheckpointReader for NoopProvider { + fn get_prune_checkpoint( + &self, + _segment: PruneSegment, + ) -> ProviderResult> { + Ok(None) + } + + fn get_prune_checkpoints(&self) -> ProviderResult> { + Ok(Vec::new()) + } +} + +impl NodePrimitivesProvider for NoopProvider { + type Primitives = N; +} From bfcd98326cde86382b1b0ffa8c7b2089fc6fa311 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 3 Dec 2024 13:38:00 +0100 Subject: [PATCH 843/970] feat: add Op DA config (#13095) --- crates/optimism/payload/src/config.rs | 87 +++++++++++++++++++++++++++ crates/optimism/payload/src/lib.rs | 2 + 2 files changed, 89 insertions(+) create mode 100644 crates/optimism/payload/src/config.rs diff --git a/crates/optimism/payload/src/config.rs b/crates/optimism/payload/src/config.rs new file mode 100644 index 00000000000..5055c05c42e --- /dev/null +++ b/crates/optimism/payload/src/config.rs @@ -0,0 +1,87 @@ +//! Additional configuration for the OP builder + +use std::sync::{atomic::AtomicU64, Arc}; + +/// Contains the Data Availability configuration for the OP builder. +#[derive(Debug, Clone, Default)] +pub struct OpDAConfig { + inner: Arc, +} + +impl OpDAConfig { + /// Creates a new Data Availability configuration with the given maximum sizes. 
+ pub fn new(max_da_tx_size: u64, max_da_block_size: u64) -> Self { + let this = Self::default(); + this.set_max_da_size(max_da_tx_size, max_da_block_size); + this + } + + /// Returns the max allowed data availability size per transactions, if any. + pub fn max_da_tx_size(&self) -> Option { + let val = self.inner.max_da_tx_size.load(std::sync::atomic::Ordering::Relaxed); + if val == 0 { + None + } else { + Some(val) + } + } + + /// Returns the max allowed data availability size per block, if any. + pub fn max_da_block_size(&self) -> Option { + let val = self.inner.max_da_block_size.load(std::sync::atomic::Ordering::Relaxed); + if val == 0 { + None + } else { + Some(val) + } + } + + /// Sets the maximum data availability size currently allowed for inclusion. 0 means no maximum. + pub fn set_max_da_size(&self, max_da_tx_size: u64, max_da_block_size: u64) { + self.set_max_tx_size(max_da_tx_size); + self.set_max_block_size(max_da_block_size); + } + + /// Sets the maximum data availability size per transaction currently allowed for inclusion. 0 + /// means no maximum. + pub fn set_max_tx_size(&self, max_da_tx_size: u64) { + self.inner.max_da_tx_size.store(max_da_tx_size, std::sync::atomic::Ordering::Relaxed); + } + + /// Sets the maximum data availability size per block currently allowed for inclusion. 0 means + /// no maximum. + pub fn set_max_block_size(&self, max_da_block_size: u64) { + self.inner.max_da_block_size.store(max_da_block_size, std::sync::atomic::Ordering::Relaxed); + } +} + +#[derive(Debug, Default)] +struct OpDAConfigInner { + /// Don't include any transactions with data availability size larger than this in any built + /// block + /// + /// 0 means no limit. + max_da_tx_size: AtomicU64, + /// Maximum total data availability size for a block + /// + /// 0 means no limit. 
+ max_da_block_size: AtomicU64, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_da() { + let da = OpDAConfig::default(); + assert_eq!(da.max_da_tx_size(), None); + assert_eq!(da.max_da_block_size(), None); + da.set_max_da_size(100, 200); + assert_eq!(da.max_da_tx_size(), Some(100)); + assert_eq!(da.max_da_block_size(), Some(200)); + da.set_max_da_size(0, 0); + assert_eq!(da.max_da_tx_size(), None); + assert_eq!(da.max_da_block_size(), None); + } +} diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index 8447026d783..53fad1118fd 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -16,3 +16,5 @@ pub use builder::OpPayloadBuilder; pub mod error; pub mod payload; pub use payload::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes}; + +pub mod config; From bedc68e8f4bd26927c1285e64e1cc652a0d839ff Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 3 Dec 2024 13:39:29 +0100 Subject: [PATCH 844/970] chore(evm): migrate execution errors back to `thiserror` (#13097) --- Cargo.lock | 2 +- crates/evm/execution-errors/Cargo.toml | 4 +- crates/evm/execution-errors/src/lib.rs | 96 ++++++++----------------- crates/evm/execution-errors/src/trie.rs | 75 ++++++------------- 4 files changed, 56 insertions(+), 121 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c72a1c8ddcb..8c290d0ea9f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7601,12 +7601,12 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "derive_more 1.0.0", "nybbles", "reth-consensus", "reth-prune-types", "reth-storage-errors", "revm-primitives", + "thiserror 2.0.3", ] [[package]] diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index 5e1755c0c55..b4b9992a979 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -22,7 +22,7 @@ alloy-eips.workspace = true revm-primitives.workspace = true 
nybbles.workspace = true -derive_more.workspace = true +thiserror.workspace = true [features] default = ["std"] @@ -32,6 +32,6 @@ std = [ "alloy-primitives/std", "revm-primitives/std", "alloy-rlp/std", - "derive_more/std", + "thiserror/std", "nybbles/std" ] diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index 4dbbfb7abdc..db7887d1b8d 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -14,20 +14,20 @@ extern crate alloc; use alloc::{boxed::Box, string::String}; use alloy_eips::BlockNumHash; use alloy_primitives::B256; -use derive_more::{Display, From}; use reth_consensus::ConsensusError; use reth_prune_types::PruneSegmentError; use reth_storage_errors::provider::ProviderError; use revm_primitives::EVMError; +use thiserror::Error; pub mod trie; pub use trie::*; /// Transaction validation errors -#[derive(Clone, Debug, Display, Eq, PartialEq)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum BlockValidationError { /// EVM error with transaction hash and message - #[display("EVM reported invalid transaction ({hash}): {error}")] + #[error("EVM reported invalid transaction ({hash}): {error}")] EVM { /// The hash of the transaction hash: B256, @@ -35,16 +35,16 @@ pub enum BlockValidationError { error: Box>, }, /// Error when recovering the sender for a transaction - #[display("failed to recover sender for transaction")] + #[error("failed to recover sender for transaction")] SenderRecoveryError, /// Error when incrementing balance in post execution - #[display("incrementing balance in post execution failed")] + #[error("incrementing balance in post execution failed")] IncrementBalanceFailed, /// Error when the state root does not match the expected value. 
- // #[from(ignore)] - StateRoot(StateRootError), + #[error(transparent)] + StateRoot(#[from] StateRootError), /// Error when transaction gas limit exceeds available block gas - #[display( + #[error( "transaction gas limit {transaction_gas_limit} is more than blocks available gas {block_available_gas}" )] TransactionGasLimitMoreThanAvailableBlockGas { @@ -54,22 +54,22 @@ pub enum BlockValidationError { block_available_gas: u64, }, /// Error for pre-merge block - #[display("block {hash} is pre merge")] + #[error("block {hash} is pre merge")] BlockPreMerge { /// The hash of the block hash: B256, }, /// Error for missing total difficulty - #[display("missing total difficulty for block {hash}")] + #[error("missing total difficulty for block {hash}")] MissingTotalDifficulty { /// The hash of the block hash: B256, }, /// Error for EIP-4788 when parent beacon block root is missing - #[display("EIP-4788 parent beacon block root missing for active Cancun block")] + #[error("EIP-4788 parent beacon block root missing for active Cancun block")] MissingParentBeaconBlockRoot, /// Error for Cancun genesis block when parent beacon block root is not zero - #[display( + #[error( "the parent beacon block root is not zero for Cancun genesis block: {parent_beacon_block_root}" )] CancunGenesisParentBeaconBlockRootNotZero { @@ -79,9 +79,7 @@ pub enum BlockValidationError { /// EVM error during [EIP-4788] beacon root contract call. /// /// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 - #[display( - "failed to apply beacon root contract call at {parent_beacon_block_root}: {message}" - )] + #[error("failed to apply beacon root contract call at {parent_beacon_block_root}: {message}")] BeaconRootContractCall { /// The beacon block root parent_beacon_block_root: Box, @@ -91,7 +89,7 @@ pub enum BlockValidationError { /// EVM error during [EIP-2935] blockhash contract call. 
/// /// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 - #[display("failed to apply blockhash contract call: {message}")] + #[error("failed to apply blockhash contract call: {message}")] BlockHashContractCall { /// The error message. message: String, @@ -99,7 +97,7 @@ pub enum BlockValidationError { /// EVM error during withdrawal requests contract call [EIP-7002] /// /// [EIP-7002]: https://eips.ethereum.org/EIPS/eip-7002 - #[display("failed to apply withdrawal requests contract call: {message}")] + #[error("failed to apply withdrawal requests contract call: {message}")] WithdrawalRequestsContractCall { /// The error message. message: String, @@ -107,7 +105,7 @@ pub enum BlockValidationError { /// EVM error during consolidation requests contract call [EIP-7251] /// /// [EIP-7251]: https://eips.ethereum.org/EIPS/eip-7251 - #[display("failed to apply consolidation requests contract call: {message}")] + #[error("failed to apply consolidation requests contract call: {message}")] ConsolidationRequestsContractCall { /// The error message. message: String, @@ -115,35 +113,22 @@ pub enum BlockValidationError { /// Error when decoding deposit requests from receipts [EIP-6110] /// /// [EIP-6110]: https://eips.ethereum.org/EIPS/eip-6110 - #[display("failed to decode deposit requests from receipts: {_0}")] + #[error("failed to decode deposit requests from receipts: {_0}")] DepositRequestDecode(String), } -impl From for BlockValidationError { - fn from(error: StateRootError) -> Self { - Self::StateRoot(error) - } -} - -impl core::error::Error for BlockValidationError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::EVM { error, .. 
} => core::error::Error::source(error), - Self::StateRoot(source) => core::error::Error::source(source), - _ => Option::None, - } - } -} - /// `BlockExecutor` Errors -#[derive(Debug, From, Display)] +#[derive(Error, Debug)] pub enum BlockExecutionError { /// Validation error, transparently wrapping [`BlockValidationError`] - Validation(BlockValidationError), + #[error(transparent)] + Validation(#[from] BlockValidationError), /// Consensus error, transparently wrapping [`ConsensusError`] - Consensus(ConsensusError), + #[error(transparent)] + Consensus(#[from] ConsensusError), /// Internal, i.e. non consensus or validation related Block Executor Errors - Internal(InternalBlockExecutionError), + #[error(transparent)] + Internal(#[from] InternalBlockExecutionError), } impl BlockExecutionError { @@ -184,24 +169,14 @@ impl From for BlockExecutionError { } } -impl core::error::Error for BlockExecutionError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Validation(source) => core::error::Error::source(source), - Self::Consensus(source) => core::error::Error::source(source), - Self::Internal(source) => core::error::Error::source(source), - } - } -} - /// Internal (i.e., not validation or consensus related) `BlockExecutor` Errors -#[derive(Display, Debug, From)] +#[derive(Error, Debug)] pub enum InternalBlockExecutionError { /// Pruning error, transparently wrapping [`PruneSegmentError`] - #[from] - Pruning(PruneSegmentError), + #[error(transparent)] + Pruning(#[from] PruneSegmentError), /// Error when appending chain on fork is not possible - #[display( + #[error( "appending chain on fork (other_chain_fork:?) is not possible as the tip is {chain_tip:?}" )] AppendChainDoesntConnect { @@ -211,9 +186,10 @@ pub enum InternalBlockExecutionError { other_chain_fork: Box, }, /// Error when fetching latest block state. 
- #[from] - LatestBlock(ProviderError), + #[error(transparent)] + LatestBlock(#[from] ProviderError), /// Arbitrary Block Executor Errors + #[error(transparent)] Other(Box), } @@ -233,13 +209,3 @@ impl InternalBlockExecutionError { Self::Other(msg.to_string().into()) } } - -impl core::error::Error for InternalBlockExecutionError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Pruning(source) => core::error::Error::source(source), - Self::LatestBlock(source) => core::error::Error::source(source), - _ => Option::None, - } - } -} diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index 9e4b16d8d0c..4d3398e4161 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -2,26 +2,19 @@ use alloc::string::ToString; use alloy_primitives::B256; -use derive_more::{Display, From}; use nybbles::Nibbles; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; +use thiserror::Error; /// State root errors. -#[derive(Display, Debug, From, PartialEq, Eq, Clone)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum StateRootError { /// Internal database error. - Database(DatabaseError), + #[error(transparent)] + Database(#[from] DatabaseError), /// Storage root error. - StorageRootError(StorageRootError), -} - -impl core::error::Error for StateRootError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Database(source) => core::error::Error::source(source), - Self::StorageRootError(source) => core::error::Error::source(source), - } - } + #[error(transparent)] + StorageRootError(#[from] StorageRootError), } impl From for DatabaseError { @@ -34,10 +27,11 @@ impl From for DatabaseError { } /// Storage root error. -#[derive(Display, From, PartialEq, Eq, Clone, Debug)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum StorageRootError { /// Internal database error. 
- Database(DatabaseError), + #[error(transparent)] + Database(#[from] DatabaseError), } impl From for DatabaseError { @@ -48,21 +42,15 @@ impl From for DatabaseError { } } -impl core::error::Error for StorageRootError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Database(source) => core::error::Error::source(source), - } - } -} - /// State proof errors. -#[derive(Display, From, Debug, PartialEq, Eq, Clone)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum StateProofError { /// Internal database error. - Database(DatabaseError), + #[error(transparent)] + Database(#[from] DatabaseError), /// RLP decoding error. - Rlp(alloy_rlp::Error), + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), } impl From for ProviderError { @@ -74,32 +62,23 @@ impl From for ProviderError { } } -impl core::error::Error for StateProofError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Database(source) => core::error::Error::source(source), - Self::Rlp(source) => core::error::Error::source(source), - } - } -} - /// Trie witness errors. -#[derive(Display, From, Debug, PartialEq, Eq, Clone)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum TrieWitnessError { /// Error gather proofs. - #[from] - Proof(StateProofError), + #[error(transparent)] + Proof(#[from] StateProofError), /// RLP decoding error. - #[from] - Rlp(alloy_rlp::Error), + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), /// Missing account. - #[display("missing account {_0}")] + #[error("missing account {_0}")] MissingAccount(B256), /// Missing target node. - #[display("target node missing from proof {_0:?}")] + #[error("target node missing from proof {_0:?}")] MissingTargetNode(Nibbles), /// Unexpected empty root. 
- #[display("unexpected empty root: {_0:?}")] + #[error("unexpected empty root: {_0:?}")] UnexpectedEmptyRoot(Nibbles), } @@ -108,13 +87,3 @@ impl From for ProviderError { Self::TrieWitnessError(error.to_string()) } } - -impl core::error::Error for TrieWitnessError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Proof(source) => core::error::Error::source(source), - Self::Rlp(source) => core::error::Error::source(source), - _ => Option::None, - } - } -} From e4c0f192ee971c3eee5204557b8ff1acbff44b83 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 3 Dec 2024 14:08:54 +0100 Subject: [PATCH 845/970] chore(trie): exclude blinded providers from `Debug` impl (#13098) --- crates/trie/sparse/src/state.rs | 16 ++++++++++++++-- crates/trie/sparse/src/trie.rs | 13 +++++++++++-- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index cf0bc20abe4..58bb484e7f4 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -3,6 +3,7 @@ use crate::{ RevealedSparseTrie, SparseStateTrieError, SparseStateTrieResult, SparseTrie, SparseTrieError, }; use alloy_primitives::{ + hex, map::{HashMap, HashSet}, Bytes, B256, }; @@ -13,10 +14,9 @@ use reth_trie_common::{ updates::{StorageTrieUpdates, TrieUpdates}, MultiProof, Nibbles, TrieAccount, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use std::iter::Peekable; +use std::{fmt, iter::Peekable}; /// Sparse state trie representing lazy-loaded Ethereum state trie. -#[derive(Debug)] pub struct SparseStateTrie { /// Blinded node provider factory. 
provider_factory: F, @@ -45,6 +45,18 @@ impl Default for SparseStateTrie { } } +impl fmt::Debug for SparseStateTrie { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SparseStateTrie") + .field("state", &self.state) + .field("storages", &self.storages) + .field("revealed", &self.revealed) + .field("retain_updates", &self.retain_updates) + .field("account_rlp_buf", &hex::encode(&self.account_rlp_buf)) + .finish_non_exhaustive() + } +} + impl SparseStateTrie { /// Create state trie from state trie. pub fn from_state(state: SparseTrie) -> Self { diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index dd609a77c99..cc6f110e021 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -19,7 +19,7 @@ use std::{borrow::Cow, fmt}; /// Inner representation of the sparse trie. /// Sparse trie is blind by default until nodes are revealed. -#[derive(PartialEq, Eq, Debug)] +#[derive(PartialEq, Eq)] pub enum SparseTrie

{ /// None of the trie nodes are known. Blind, @@ -27,6 +27,15 @@ pub enum SparseTrie

{ Revealed(Box>), } +impl

fmt::Debug for SparseTrie

{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Blind => write!(f, "Blind"), + Self::Revealed(revealed) => write!(f, "Revealed({revealed:?})"), + } + } +} + impl

Default for SparseTrie

{ fn default() -> Self { Self::Blind @@ -164,7 +173,7 @@ impl

fmt::Debug for RevealedSparseTrie

{ .field("prefix_set", &self.prefix_set) .field("updates", &self.updates) .field("rlp_buf", &hex::encode(&self.rlp_buf)) - .finish() + .finish_non_exhaustive() } } From 1404073e053fa2831ddd1a428db932f7e4b152c9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 3 Dec 2024 14:12:13 +0100 Subject: [PATCH 846/970] feat: add miner rpc bindings (#13099) --- crates/rpc/rpc-api/src/lib.rs | 3 +++ crates/rpc/rpc-api/src/miner.rs | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 crates/rpc/rpc-api/src/miner.rs diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index ac39b4802a8..098214f103f 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -21,6 +21,7 @@ mod engine; mod ganache; mod hardhat; mod mev; +mod miner; mod net; mod otterscan; mod reth; @@ -40,6 +41,7 @@ pub mod servers { debug::{DebugApiServer, DebugExecutionWitnessApiServer}, engine::{EngineApiServer, EngineEthApiServer}, mev::{MevFullApiServer, MevSimApiServer}, + miner::MinerApiServer, net::NetApiServer, otterscan::OtterscanServer, reth::RethApiServer, @@ -70,6 +72,7 @@ pub mod clients { ganache::GanacheApiClient, hardhat::HardhatApiClient, mev::{MevFullApiClient, MevSimApiClient}, + miner::MinerApiClient, net::NetApiClient, otterscan::OtterscanClient, reth::RethApiClient, diff --git a/crates/rpc/rpc-api/src/miner.rs b/crates/rpc/rpc-api/src/miner.rs new file mode 100644 index 00000000000..3673b51c6eb --- /dev/null +++ b/crates/rpc/rpc-api/src/miner.rs @@ -0,0 +1,21 @@ +use alloy_primitives::{Bytes, U128}; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; + +/// Miner namespace rpc interface that can control miner/builder settings +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "miner"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "miner"))] +pub trait MinerApi { + /// Sets the extra data string that is included when this miner mines a block. 
+ /// + /// Returns an error if the extra data is too long. + #[method(name = "setExtra")] + fn set_extra(&self, record: Bytes) -> RpcResult; + + /// Sets the minimum accepted gas price for the miner. + #[method(name = "setGasPrice")] + fn set_gas_price(&self, gas_price: U128) -> RpcResult; + + /// Sets the gaslimit to target towards during mining. + #[method(name = "setGasLimit")] + fn set_gas_limit(&self, gas_price: U128) -> RpcResult; +} From a8feec839f0a52ab489999a7fbcf5c161aedb811 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 3 Dec 2024 13:22:25 +0000 Subject: [PATCH 847/970] perf(engine): do not clone proof targets (#13061) --- crates/engine/tree/src/tree/root.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 70f54ce4d48..d65d1f89000 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -216,10 +216,10 @@ where view: ConsistentDbView, input: Arc, update: EvmState, - fetched_proof_targets: &HashMap>, + fetched_proof_targets: &mut HashMap>, proof_sequence_number: u64, state_root_message_sender: Sender, - ) -> HashMap> { + ) { let mut hashed_state_update = HashedPostState::default(); for (address, account) in update { if account.is_touched() { @@ -249,9 +249,11 @@ where } let proof_targets = get_proof_targets(&hashed_state_update, fetched_proof_targets); + for (address, slots) in &proof_targets { + fetched_proof_targets.entry(*address).or_default().extend(slots) + } // Dispatch proof gathering for this state update - let targets = proof_targets.clone(); rayon::spawn(move || { let provider = match view.provider_ro() { Ok(provider) => provider, @@ -266,7 +268,7 @@ where provider.tx_ref(), // TODO(alexey): this clone can be expensive, we should avoid it input.as_ref().clone(), - targets, + proof_targets, ); match result { Ok(proof) => { @@ -281,8 +283,6 @@ where } } }); - - proof_targets } /// 
Handler for new proof calculated, aggregates all the existing sequential proofs. @@ -357,17 +357,14 @@ where total_updates = updates_received, "Received new state update" ); - let targets = Self::on_state_update( + Self::on_state_update( self.config.consistent_view.clone(), self.config.input.clone(), update, - &self.fetched_proof_targets, + &mut self.fetched_proof_targets, self.proof_sequencer.next_sequence(), self.tx.clone(), ); - for (address, slots) in targets { - self.fetched_proof_targets.entry(address).or_default().extend(slots) - } } StateRootMessage::ProofCalculated { proof, state_update, sequence_number } => { proofs_processed += 1; From 0aa4701d30f5c195d6f4958cdfb655df697366c8 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 3 Dec 2024 14:40:29 +0100 Subject: [PATCH 848/970] fix(trie): short circuit leaf removal if missing (#12988) Co-authored-by: Alexey Shekhirin --- crates/trie/sparse/src/trie.rs | 40 +++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index cc6f110e021..b5064fa2c47 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -823,8 +823,16 @@ where { /// Remove leaf node from the trie. pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + if self.values.remove(path).is_none() { + if let Some(SparseNode::Hash(hash)) = self.nodes.get(path) { + // Leaf is present in the trie, but it's blinded. + return Err(SparseTrieError::BlindedNode { path: path.clone(), hash: *hash }) + } + + // Leaf is not present in the trie. + return Ok(()) + } self.prefix_set.insert(path.clone()); - self.values.remove(path); // If the path wasn't present in `values`, we still need to walk the trie and ensure that // there is no node at the path. 
When a leaf node is a blinded `Hash`, it will have an entry @@ -1731,6 +1739,36 @@ mod tests { ); } + #[test] + fn sparse_trie_remove_leaf_non_existent() { + let leaf = LeafNode::new( + Nibbles::default(), + alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), + ); + let branch = TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )); + + let mut sparse = RevealedSparseTrie::from_root(branch.clone(), false).unwrap(); + + // Reveal a branch node and one of its children + // + // Branch (Mask = 11) + // ├── 0 -> Hash (Path = 0) + // └── 1 -> Leaf (Path = 1) + sparse.reveal_node(Nibbles::default(), branch).unwrap(); + sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf)).unwrap(); + + // Removing a non-existent leaf should be a noop + let sparse_old = sparse.clone(); + assert_matches!(sparse.remove_leaf(&Nibbles::from_nibbles([0x2])), Ok(())); + assert_eq!(sparse, sparse_old); + } + #[allow(clippy::type_complexity)] #[test] fn sparse_trie_fuzz() { From ca3d9895e2120bb959bc41f2197bf9db74ceee8f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 3 Dec 2024 14:50:59 +0100 Subject: [PATCH 849/970] feat: impl noop miner api endpoint (#13102) --- book/cli/reth/node.md | 4 ++-- crates/rpc/rpc-builder/src/lib.rs | 5 +++-- crates/rpc/rpc-server-types/src/module.rs | 2 ++ crates/rpc/rpc/src/lib.rs | 2 ++ crates/rpc/rpc/src/miner.rs | 25 +++++++++++++++++++++++ 5 files changed, 34 insertions(+), 4 deletions(-) create mode 100644 crates/rpc/rpc/src/miner.rs diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index d8b66588056..cf05ae66e28 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -245,7 +245,7 @@ RPC: --http.api Rpc Modules to be configured for the HTTP server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots] + [possible values: admin, debug, eth, net, 
trace, txpool, web3, rpc, reth, ots, flashbots, miner] --http.corsdomain Http Corsdomain to allow request from @@ -269,7 +269,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner] --ipcdisable Disable the IPC-RPC server diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 99912eddf97..0b9a84a5b98 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -210,8 +210,8 @@ use reth_provider::{ EvmEnvProvider, FullRpcProvider, HeaderProvider, ReceiptProvider, StateProviderFactory, }; use reth_rpc::{ - AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, - TxPoolApi, ValidationApi, ValidationApiConfig, Web3Api, + AdminApi, DebugApi, EngineEthApi, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, + TraceApi, TxPoolApi, ValidationApi, ValidationApiConfig, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ @@ -1499,6 +1499,7 @@ where ) .into_rpc() .into(), + RethRpcModule::Miner => MinerApi::default().into_rpc().into(), }) .clone() }) diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index 43e4a937436..3eb34b34a7f 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -269,6 +269,8 @@ pub enum RethRpcModule { Ots, /// `flashbots_` module Flashbots, + /// `miner_` module + Miner, } // === impl RethRpcModule === diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index 76fb96f9162..d957913dffb 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -36,6 +36,7 @@ mod admin; mod debug; mod engine; pub mod eth; +mod miner; mod net; mod otterscan; mod reth; @@ -49,6 +50,7 @@ pub use admin::AdminApi; pub use 
debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; pub use eth::{EthApi, EthBundle, EthFilter, EthPubSub}; +pub use miner::MinerApi; pub use net::NetApi; pub use otterscan::OtterscanApi; pub use reth::RethApi; diff --git a/crates/rpc/rpc/src/miner.rs b/crates/rpc/rpc/src/miner.rs new file mode 100644 index 00000000000..ab8fa5e0cd2 --- /dev/null +++ b/crates/rpc/rpc/src/miner.rs @@ -0,0 +1,25 @@ +use alloy_primitives::{Bytes, U128}; +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use reth_rpc_api::MinerApiServer; + +/// `miner` API implementation. +/// +/// This type provides the functionality for handling `miner` related requests. +#[derive(Clone, Debug, Default)] +pub struct MinerApi {} + +#[async_trait] +impl MinerApiServer for MinerApi { + fn set_extra(&self, _record: Bytes) -> RpcResult { + Ok(false) + } + + fn set_gas_price(&self, _gas_price: U128) -> RpcResult { + Ok(false) + } + + fn set_gas_limit(&self, _gas_price: U128) -> RpcResult { + Ok(false) + } +} From 61cb3dedcaf236d9bfa1a7e9d01625335536fe6c Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 3 Dec 2024 13:51:04 +0000 Subject: [PATCH 850/970] fix(trie): do not persist root branch nodes in sparse trie (#13071) Co-authored-by: Roman Krasiuk --- crates/trie/sparse/src/state.rs | 57 ++-------- crates/trie/sparse/src/trie.rs | 190 ++++++++++++++++++-------------- 2 files changed, 119 insertions(+), 128 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 58bb484e7f4..c73eaa1736b 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -392,10 +392,7 @@ mod tests { use assert_matches::assert_matches; use rand::{rngs::StdRng, Rng, SeedableRng}; use reth_primitives_traits::Account; - use reth_trie::{ - updates::StorageTrieUpdates, BranchNodeCompact, HashBuilder, TrieAccount, TrieMask, - EMPTY_ROOT_HASH, - }; + use reth_trie::{updates::StorageTrieUpdates, HashBuilder, TrieAccount, EMPTY_ROOT_HASH}; use 
reth_trie_common::proof::ProofRetainer; #[test] @@ -541,49 +538,15 @@ mod tests { pretty_assertions::assert_eq!( sparse_updates, TrieUpdates { - account_nodes: HashMap::from_iter([ - ( - Nibbles::default(), - BranchNodeCompact { - state_mask: TrieMask::new(0b110), - tree_mask: TrieMask::new(0b000), - hash_mask: TrieMask::new(0b010), - hashes: vec![b256!( - "4c4ffbda3569fcf2c24ea2000b4cec86ef8b92cbf9ff415db43184c0f75a212e" - )], - root_hash: Some(b256!( - "60944bd29458529c3065d19f63c6e3d5269596fd3b04ca2e7b318912dc89ca4c" - )) - }, - ), - ]), - storage_tries: HashMap::from_iter([ - ( - b256!("1000000000000000000000000000000000000000000000000000000000000000"), - StorageTrieUpdates { - is_deleted: false, - storage_nodes: HashMap::from_iter([( - Nibbles::default(), - BranchNodeCompact { - state_mask: TrieMask::new(0b110), - tree_mask: TrieMask::new(0b000), - hash_mask: TrieMask::new(0b010), - hashes: vec![b256!("5bc8b4fdf51839c1e18b8d6a4bd3e2e52c9f641860f0e4d197b68c2679b0e436")], - root_hash: Some(b256!("c44abf1a9e1a92736ac479b20328e8d7998aa8838b6ef52620324c9ce85e3201")) - } - )]), - removed_nodes: HashSet::default() - } - ), - ( - b256!("1100000000000000000000000000000000000000000000000000000000000000"), - StorageTrieUpdates { - is_deleted: true, - storage_nodes: HashMap::default(), - removed_nodes: HashSet::default() - } - ) - ]), + account_nodes: HashMap::default(), + storage_tries: HashMap::from_iter([( + b256!("1100000000000000000000000000000000000000000000000000000000000000"), + StorageTrieUpdates { + is_deleted: true, + storage_nodes: HashMap::default(), + removed_nodes: HashSet::default() + } + )]), removed_nodes: HashSet::default() } ); diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index b5064fa2c47..1b4c019b1e9 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -764,7 +764,11 @@ impl

RevealedSparseTrie

{ let rlp_node = branch_node_ref.rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); - let store_in_db_trie_value = if let Some(updates) = self.updates.as_mut() { + // Save a branch node update only if it's not a root node, and we need to + // persist updates. + let store_in_db_trie_value = if let Some(updates) = + self.updates.as_mut().filter(|_| !path.is_empty()) + { let mut tree_mask_values = tree_mask_values.into_iter().rev(); let mut hash_mask_values = hash_mask_values.into_iter().rev(); let mut tree_mask = TrieMask::default(); @@ -1181,6 +1185,7 @@ mod tests { hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor}, node_iter::{TrieElement, TrieNodeIter}, trie_cursor::noop::NoopAccountTrieCursor, + updates::TrieUpdates, walker::TrieWalker, BranchNode, ExtensionNode, HashedPostState, LeafNode, TrieAccount, }; @@ -1210,8 +1215,9 @@ mod tests { /// Returns the state root and the retained proof nodes. fn run_hash_builder( state: impl IntoIterator + Clone, + destroyed_accounts: HashSet, proof_targets: impl IntoIterator, - ) -> HashBuilder { + ) -> (B256, TrieUpdates, ProofNodes) { let mut account_rlp = Vec::new(); let mut hash_builder = HashBuilder::default() @@ -1249,9 +1255,14 @@ mod tests { } } } - hash_builder.root(); + let root = hash_builder.root(); + let proof_nodes = hash_builder.take_proof_nodes(); + + let mut trie_updates = TrieUpdates::default(); + let removed_keys = node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, destroyed_accounts); - hash_builder + (root, trie_updates, proof_nodes) } /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal. 
@@ -1313,16 +1324,17 @@ mod tests { account_rlp }; - let mut hash_builder = run_hash_builder([(key.clone(), value())], [key.clone()]); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = + run_hash_builder([(key.clone(), value())], Default::default(), [key.clone()]); let mut sparse = RevealedSparseTrie::default().with_updates(true); sparse.update_leaf(key, value_encoded()).unwrap(); let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -1337,8 +1349,9 @@ mod tests { account_rlp }; - let mut hash_builder = run_hash_builder( + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(value)), + Default::default(), paths.clone(), ); @@ -1349,9 +1362,9 @@ mod tests { let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -1364,8 +1377,9 @@ mod tests { account_rlp }; - let mut hash_builder = run_hash_builder( + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(value)), + 
Default::default(), paths.clone(), ); @@ -1376,9 +1390,9 @@ mod tests { let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -1399,8 +1413,9 @@ mod tests { account_rlp }; - let mut hash_builder = run_hash_builder( + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(value)), + Default::default(), paths.clone(), ); @@ -1411,12 +1426,12 @@ mod tests { let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_root, hash_builder_root); pretty_assertions::assert_eq!( BTreeMap::from_iter(sparse_updates.updated_nodes), - BTreeMap::from_iter(hash_builder.updated_branch_nodes.take().unwrap()) + BTreeMap::from_iter(hash_builder_updates.account_nodes) ); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -1435,8 +1450,9 @@ mod tests { account_rlp }; - let mut hash_builder = run_hash_builder( + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(|| old_value)), + Default::default(), paths.clone(), ); @@ -1447,12 +1463,13 @@ mod tests { let sparse_root = sparse.root(); let sparse_updates = sparse.updates_ref(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, 
hash_builder.updated_branch_nodes.take().unwrap()); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); - let mut hash_builder = run_hash_builder( + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(|| new_value)), + Default::default(), paths.clone(), ); @@ -1462,9 +1479,9 @@ mod tests { let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -1799,21 +1816,22 @@ mod tests { // Insert state updates into the hash builder and calculate the root state.extend(update); - let mut hash_builder = - run_hash_builder(state.clone(), state.keys().cloned().collect::>()); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = + run_hash_builder( + state.clone(), + Default::default(), + state.keys().cloned().collect::>(), + ); // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_root, hash_builder_root); // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( sparse_updates.updated_nodes, - hash_builder.updated_branch_nodes.take().unwrap() + hash_builder_updates.account_nodes ); // Assert that the sparse trie nodes match the hash builder proof nodes - 
assert_eq_sparse_trie_proof_nodes( - &updated_sparse, - hash_builder.take_proof_nodes(), - ); + assert_eq_sparse_trie_proof_nodes(&updated_sparse, hash_builder_proof_nodes); // Delete some keys from both the hash builder and the sparse trie and check // that the sparse trie root still matches the hash builder root @@ -1829,21 +1847,22 @@ mod tests { let sparse_root = updated_sparse.root(); let sparse_updates = updated_sparse.take_updates(); - let mut hash_builder = - run_hash_builder(state.clone(), state.keys().cloned().collect::>()); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = + run_hash_builder( + state.clone(), + Default::default(), + state.keys().cloned().collect::>(), + ); // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_root, hash_builder_root); // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( sparse_updates.updated_nodes, - hash_builder.updated_branch_nodes.take().unwrap() + hash_builder_updates.account_nodes ); // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes( - &updated_sparse, - hash_builder.take_proof_nodes(), - ); + assert_eq_sparse_trie_proof_nodes(&updated_sparse, hash_builder_proof_nodes); } } } @@ -1909,19 +1928,21 @@ mod tests { }; // Generate the proof for the root node and initialize the sparse trie with it - let proof_nodes = - run_hash_builder([(key1(), value()), (key3(), value())], [Nibbles::default()]) - .take_proof_nodes(); + let (_, _, hash_builder_proof_nodes) = run_hash_builder( + [(key1(), value()), (key3(), value())], + Default::default(), + [Nibbles::default()], + ); let mut sparse = RevealedSparseTrie::from_root( - TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), false, ) .unwrap(); // Generate the proof 
for the first key and reveal it in the sparse trie - let proof_nodes = - run_hash_builder([(key1(), value()), (key3(), value())], [key1()]).take_proof_nodes(); - for (path, node) in proof_nodes.nodes_sorted() { + let (_, _, hash_builder_proof_nodes) = + run_hash_builder([(key1(), value()), (key3(), value())], Default::default(), [key1()]); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1941,9 +1962,9 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let proof_nodes_3 = - run_hash_builder([(key1(), value()), (key3(), value())], [key3()]).take_proof_nodes(); - for (path, node) in proof_nodes_3.nodes_sorted() { + let (_, _, hash_builder_proof_nodes) = + run_hash_builder([(key1(), value()), (key3(), value())], Default::default(), [key3()]); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1955,13 +1976,13 @@ mod tests { // Generate the nodes for the full trie with all three key using the hash builder, and // compare them to the sparse trie - let proof_nodes = run_hash_builder( + let (_, _, hash_builder_proof_nodes) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), [key1(), key2(), key3()], - ) - .take_proof_nodes(); + ); - assert_eq_sparse_trie_proof_nodes(&sparse, proof_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } /// We have three leaves: 0x0000, 0x0101, and 0x0102. 
Hash builder trie has all nodes, and we @@ -1982,25 +2003,25 @@ mod tests { let value = || Account::default(); // Generate the proof for the root node and initialize the sparse trie with it - let proof_nodes = run_hash_builder( + let (_, _, hash_builder_proof_nodes) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), [Nibbles::default()], - ) - .take_proof_nodes(); + ); let mut sparse = RevealedSparseTrie::from_root( - TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), false, ) .unwrap(); // Generate the proof for the children of the root branch node and reveal it in the sparse // trie - let proof_nodes = run_hash_builder( + let (_, _, hash_builder_proof_nodes) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), [key1(), Nibbles::from_nibbles_unchecked([0x01])], - ) - .take_proof_nodes(); - for (path, node) in proof_nodes.nodes_sorted() { + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -2020,10 +2041,12 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let proof_nodes = - run_hash_builder([(key1(), value()), (key2(), value()), (key3(), value())], [key2()]) - .take_proof_nodes(); - for (path, node) in proof_nodes.nodes_sorted() { + let (_, _, hash_builder_proof_nodes) = run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), + [key2()], + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -2055,11 +2078,13 @@ mod tests { }; // Generate the proof for the root node and initialize the sparse trie with it - let proof_nodes = - run_hash_builder([(key1(), value()), (key2(), value())], 
[Nibbles::default()]) - .take_proof_nodes(); + let (_, _, hash_builder_proof_nodes) = run_hash_builder( + [(key1(), value()), (key2(), value())], + Default::default(), + [Nibbles::default()], + ); let mut sparse = RevealedSparseTrie::from_root( - TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), false, ) .unwrap(); @@ -2080,9 +2105,9 @@ mod tests { ); // Generate the proof for the first key and reveal it in the sparse trie - let proof_nodes = - run_hash_builder([(key1(), value()), (key2(), value())], [key1()]).take_proof_nodes(); - for (path, node) in proof_nodes.nodes_sorted() { + let (_, _, hash_builder_proof_nodes) = + run_hash_builder([(key1(), value()), (key2(), value())], Default::default(), [key1()]); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -2177,16 +2202,19 @@ mod tests { account_rlp }; - let mut hash_builder = - run_hash_builder([(key1(), value()), (key2(), value())], [Nibbles::default()]); + let (hash_builder_root, hash_builder_updates, _) = run_hash_builder( + [(key1(), value()), (key2(), value())], + Default::default(), + [Nibbles::default()], + ); let mut sparse = RevealedSparseTrie::default(); sparse.update_leaf(key1(), value_encoded()).unwrap(); sparse.update_leaf(key2(), value_encoded()).unwrap(); let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); } #[test] From 8f61af0136e1a20119832925081c341ae89b93f0 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Tue, 3 Dec 2024 09:07:43 -0500 Subject: [PATCH 
851/970] introduce tableset and impl for tables enum (#12617) Co-authored-by: dkathiriya Co-authored-by: Matthias Seitz --- crates/storage/db/src/tables/mod.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 9ff21261eee..8a11c4ac055 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -98,6 +98,15 @@ pub trait TableViewer { } } +/// General trait for defining the set of tables +/// Used to initialize database +pub trait TableSet { + /// Returns all the table names in the database. + fn table_names(&self) -> Vec<&'static str>; + /// Returns `true` if the table at the given index is a `DUPSORT` table. + fn is_dupsort(&self, idx: usize) -> bool; +} + /// Defines all the tables in the database. #[macro_export] macro_rules! tables { @@ -243,6 +252,18 @@ macro_rules! tables { } } + impl TableSet for Tables { + fn table_names(&self) -> Vec<&'static str> { + //vec![$(table_names::$name,)*] + Self::ALL.iter().map(|t| t.name()).collect() + } + + fn is_dupsort(&self, idx: usize) -> bool { + let table: Self = self.table_names()[idx].parse().expect("should be valid table name"); + table.is_dupsort() + } + } + // Need constants to match on in the `FromStr` implementation. 
#[allow(non_upper_case_globals)] mod table_names { From 39f936ede20ee8fc08e87528dbd7215856cae726 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 3 Dec 2024 15:20:15 +0100 Subject: [PATCH 852/970] chore: move sparse errors to `reth-execution-errors` (#13101) --- Cargo.lock | 1 + crates/engine/tree/src/tree/root.rs | 5 +- crates/evm/execution-errors/src/trie.rs | 59 +++++++++++++++++++++++- crates/trie/sparse/Cargo.toml | 3 +- crates/trie/sparse/src/blinded.rs | 2 +- crates/trie/sparse/src/errors.rs | 61 ------------------------- crates/trie/sparse/src/lib.rs | 10 ++-- crates/trie/sparse/src/state.rs | 3 +- crates/trie/sparse/src/trie.rs | 6 +-- crates/trie/trie/src/proof/blinded.rs | 6 +-- 10 files changed, 78 insertions(+), 78 deletions(-) delete mode 100644 crates/trie/sparse/src/errors.rs diff --git a/Cargo.lock b/Cargo.lock index 8c290d0ea9f..0f001fea092 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9537,6 +9537,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", + "reth-execution-errors", "reth-primitives-traits", "reth-testing-utils", "reth-tracing", diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index d65d1f89000..78a8332b5eb 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -10,7 +10,10 @@ use reth_trie::{ }; use reth_trie_db::DatabaseProof; use reth_trie_parallel::root::ParallelStateRootError; -use reth_trie_sparse::{SparseStateTrie, SparseStateTrieResult, SparseTrieError}; +use reth_trie_sparse::{ + errors::{SparseStateTrieResult, SparseTrieError}, + SparseStateTrie, +}; use revm_primitives::{keccak256, EvmState, B256}; use std::{ collections::BTreeMap, diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index 4d3398e4161..83210faab52 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -1,7 +1,7 @@ //! Errors when computing the state root. 
-use alloc::string::ToString; -use alloy_primitives::B256; +use alloc::{boxed::Box, string::ToString}; +use alloy_primitives::{Bytes, B256}; use nybbles::Nibbles; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; use thiserror::Error; @@ -62,6 +62,61 @@ impl From for ProviderError { } } +/// Result type with [`SparseStateTrieError`] as error. +pub type SparseStateTrieResult = Result; + +/// Error encountered in `SparseStateTrie`. +#[derive(Error, Debug)] +pub enum SparseStateTrieError { + /// Encountered invalid root node. + #[error("invalid root node at {path:?}: {node:?}")] + InvalidRootNode { + /// Path to first proof node. + path: Nibbles, + /// Encoded first proof node. + node: Bytes, + }, + /// Sparse trie error. + #[error(transparent)] + Sparse(#[from] SparseTrieError), + /// RLP error. + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), +} + +/// Result type with [`SparseTrieError`] as error. +pub type SparseTrieResult = Result; + +/// Error encountered in `SparseTrie`. +#[derive(Error, Debug)] +pub enum SparseTrieError { + /// Sparse trie is still blind. Thrown on attempt to update it. + #[error("sparse trie is blind")] + Blind, + /// Encountered blinded node on update. + #[error("attempted to update blind node at {path:?}: {hash}")] + BlindedNode { + /// Blind node path. + path: Nibbles, + /// Node hash + hash: B256, + }, + /// Encountered unexpected node at path when revealing. + #[error("encountered an invalid node at path {path:?} when revealing: {node:?}")] + Reveal { + /// Path to the node. + path: Nibbles, + /// Node that was at the path when revealing. + node: Box, + }, + /// RLP error. + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), + /// Other. + #[error(transparent)] + Other(#[from] Box), +} + /// Trie witness errors. 
#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum TrieWitnessError { diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index efd68020ccd..09826e41084 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -15,6 +15,7 @@ workspace = true [dependencies] # reth reth-primitives-traits.workspace = true +reth-execution-errors.workspace = true reth-trie-common.workspace = true reth-tracing.workspace = true @@ -28,9 +29,9 @@ thiserror.workspace = true [dev-dependencies] reth-primitives-traits = { workspace = true, features = ["arbitrary"] } -reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-testing-utils.workspace = true arbitrary.workspace = true assert_matches.workspace = true diff --git a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/blinded.rs index 4cd88bd92a2..22471cf99ff 100644 --- a/crates/trie/sparse/src/blinded.rs +++ b/crates/trie/sparse/src/blinded.rs @@ -1,7 +1,7 @@ //! Traits and default implementations related to retrieval of blinded trie nodes. -use crate::SparseTrieError; use alloy_primitives::{Bytes, B256}; +use reth_execution_errors::SparseTrieError; use reth_trie_common::Nibbles; /// Factory for instantiating blinded node providers. diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs deleted file mode 100644 index 46102d2d468..00000000000 --- a/crates/trie/sparse/src/errors.rs +++ /dev/null @@ -1,61 +0,0 @@ -//! Errors for sparse trie. - -use crate::SparseNode; -use alloy_primitives::{Bytes, B256}; -use reth_trie_common::Nibbles; -use thiserror::Error; - -/// Result type with [`SparseStateTrieError`] as error. -pub type SparseStateTrieResult = Result; - -/// Error encountered in [`crate::SparseStateTrie`]. -#[derive(Error, Debug)] -pub enum SparseStateTrieError { - /// Encountered invalid root node. 
- #[error("invalid root node at {path:?}: {node:?}")] - InvalidRootNode { - /// Path to first proof node. - path: Nibbles, - /// Encoded first proof node. - node: Bytes, - }, - /// Sparse trie error. - #[error(transparent)] - Sparse(#[from] SparseTrieError), - /// RLP error. - #[error(transparent)] - Rlp(#[from] alloy_rlp::Error), -} - -/// Result type with [`SparseTrieError`] as error. -pub type SparseTrieResult = Result; - -/// Error encountered in [`crate::SparseTrie`]. -#[derive(Error, Debug)] -pub enum SparseTrieError { - /// Sparse trie is still blind. Thrown on attempt to update it. - #[error("sparse trie is blind")] - Blind, - /// Encountered blinded node on update. - #[error("attempted to update blind node at {path:?}: {hash}")] - BlindedNode { - /// Blind node path. - path: Nibbles, - /// Node hash - hash: B256, - }, - /// Encountered unexpected node at path when revealing. - #[error("encountered an invalid node at path {path:?} when revealing: {node:?}")] - Reveal { - /// Path to the node. - path: Nibbles, - /// Node that was at the path when revealing. - node: Box, - }, - /// RLP error. - #[error(transparent)] - Rlp(#[from] alloy_rlp::Error), - /// Other. - #[error(transparent)] - Other(#[from] Box), -} diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index ec5117fdbc1..1a0f3f73648 100644 --- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -6,7 +6,11 @@ pub use state::*; mod trie; pub use trie::*; -mod errors; -pub use errors::*; - pub mod blinded; + +/// Re-export sparse trie error types. 
+pub mod errors { + pub use reth_execution_errors::{ + SparseStateTrieError, SparseStateTrieResult, SparseTrieError, SparseTrieResult, + }; +} diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index c73eaa1736b..85116868f33 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,6 +1,6 @@ use crate::{ blinded::{BlindedProvider, BlindedProviderFactory, DefaultBlindedProviderFactory}, - RevealedSparseTrie, SparseStateTrieError, SparseStateTrieResult, SparseTrie, SparseTrieError, + RevealedSparseTrie, SparseTrie, }; use alloy_primitives::{ hex, @@ -8,6 +8,7 @@ use alloy_primitives::{ Bytes, B256, }; use alloy_rlp::{Decodable, Encodable}; +use reth_execution_errors::{SparseStateTrieError, SparseStateTrieResult, SparseTrieError}; use reth_primitives_traits::Account; use reth_tracing::tracing::trace; use reth_trie_common::{ diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 1b4c019b1e9..df5dd25486c 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,13 +1,11 @@ -use crate::{ - blinded::{BlindedProvider, DefaultBlindedProvider}, - SparseTrieError, SparseTrieResult, -}; +use crate::blinded::{BlindedProvider, DefaultBlindedProvider}; use alloy_primitives::{ hex, keccak256, map::{HashMap, HashSet}, B256, }; use alloy_rlp::Decodable; +use reth_execution_errors::{SparseTrieError, SparseTrieResult}; use reth_tracing::tracing::trace; use reth_trie_common::{ prefix_set::{PrefixSet, PrefixSetMut}, diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/blinded.rs index adcaef46b08..5fd3ecdc08e 100644 --- a/crates/trie/trie/src/proof/blinded.rs +++ b/crates/trie/trie/src/proof/blinded.rs @@ -4,11 +4,9 @@ use alloy_primitives::{ map::{HashMap, HashSet}, Bytes, B256, }; +use reth_execution_errors::SparseTrieError; use reth_trie_common::{prefix_set::TriePrefixSetsMut, Nibbles}; -use reth_trie_sparse::{ - blinded::{pad_path_to_key, 
BlindedProvider, BlindedProviderFactory}, - SparseTrieError, -}; +use reth_trie_sparse::blinded::{pad_path_to_key, BlindedProvider, BlindedProviderFactory}; use std::sync::Arc; /// Factory for instantiating providers capable of retrieving blinded trie nodes via proofs. From 6baf519c9e0938b945e07ee5d90e844919f8ba25 Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Tue, 3 Dec 2024 15:27:20 +0100 Subject: [PATCH 853/970] feat(custom-tables): don't record metrics for custom tables (#13104) --- crates/storage/db/src/metrics.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/storage/db/src/metrics.rs b/crates/storage/db/src/metrics.rs index 2d908c68156..ed265d6e3aa 100644 --- a/crates/storage/db/src/metrics.rs +++ b/crates/storage/db/src/metrics.rs @@ -104,10 +104,11 @@ impl DatabaseEnvMetrics { value_size: Option, f: impl FnOnce() -> R, ) -> R { - self.operations - .get(&(table, operation)) - .expect("operation & table metric handle not found") - .record(value_size, f) + if let Some(metrics) = self.operations.get(&(table, operation)) { + metrics.record(value_size, f) + } else { + f() + } } /// Record metrics for opening a database transaction. 
From 7008ac22df00f6bf3035d655388b1a32586d9343 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Tue, 3 Dec 2024 21:53:07 +0700 Subject: [PATCH 854/970] perf(validate-tx-pool): fast non-allocating `is_local` (#13096) --- crates/primitives/src/transaction/mod.rs | 5 +++++ crates/transaction-pool/src/config.rs | 20 +++++++++---------- crates/transaction-pool/src/pool/txpool.rs | 2 +- .../transaction-pool/src/test_utils/mock.rs | 4 ++++ crates/transaction-pool/src/traits.rs | 8 ++++++++ crates/transaction-pool/src/validate/eth.rs | 2 +- crates/transaction-pool/src/validate/mod.rs | 5 +++++ 7 files changed, 34 insertions(+), 12 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 6fd8fb55a6d..aaa6b82dc4e 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1624,6 +1624,11 @@ impl RecoveredTx { self.signer } + /// Reference to the signer of transaction recovered from signature + pub const fn signer_ref(&self) -> &Address { + &self.signer + } + /// Returns a reference to [`TransactionSigned`] pub const fn as_signed(&self) -> &T { &self.signed_transaction diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 212df34bd37..a9603215c83 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -196,15 +196,15 @@ impl LocalTransactionConfig { /// Returns whether the local addresses vector contains the given address. #[inline] - pub fn contains_local_address(&self, address: Address) -> bool { - self.local_addresses.contains(&address) + pub fn contains_local_address(&self, address: &Address) -> bool { + self.local_addresses.contains(address) } /// Returns whether the particular transaction should be considered local. /// /// This always returns false if the local exemptions are disabled. 
#[inline] - pub fn is_local(&self, origin: TransactionOrigin, sender: Address) -> bool { + pub fn is_local(&self, origin: TransactionOrigin, sender: &Address) -> bool { if self.no_local_exemptions() { return false } @@ -286,10 +286,10 @@ mod tests { let config = LocalTransactionConfig { local_addresses, ..Default::default() }; // Should contain the inserted address - assert!(config.contains_local_address(address)); + assert!(config.contains_local_address(&address)); // Should not contain another random address - assert!(!config.contains_local_address(Address::new([2; 20]))); + assert!(!config.contains_local_address(&Address::new([2; 20]))); } #[test] @@ -302,7 +302,7 @@ mod tests { }; // Should return false as no exemptions is set to true - assert!(!config.is_local(TransactionOrigin::Local, address)); + assert!(!config.is_local(TransactionOrigin::Local, &address)); } #[test] @@ -315,13 +315,13 @@ mod tests { LocalTransactionConfig { no_exemptions: false, local_addresses, ..Default::default() }; // Should return true as the transaction origin is local - assert!(config.is_local(TransactionOrigin::Local, Address::new([2; 20]))); - assert!(config.is_local(TransactionOrigin::Local, address)); + assert!(config.is_local(TransactionOrigin::Local, &Address::new([2; 20]))); + assert!(config.is_local(TransactionOrigin::Local, &address)); // Should return true as the address is in the local_addresses set - assert!(config.is_local(TransactionOrigin::External, address)); + assert!(config.is_local(TransactionOrigin::External, &address)); // Should return false as the address is not in the local_addresses set - assert!(!config.is_local(TransactionOrigin::External, Address::new([2; 20]))); + assert!(!config.is_local(TransactionOrigin::External, &Address::new([2; 20]))); } #[test] diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 86bf5f741c3..c9d4e0a488e 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ 
b/crates/transaction-pool/src/pool/txpool.rs @@ -1462,7 +1462,7 @@ impl AllTransactions { transaction: ValidPoolTransaction, on_chain_nonce: u64, ) -> Result, InsertErr> { - if !self.local_transactions_config.is_local(transaction.origin, transaction.sender()) { + if !self.local_transactions_config.is_local(transaction.origin, transaction.sender_ref()) { let current_txs = self.tx_counter.get(&transaction.sender_id()).copied().unwrap_or_default(); diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 471956fdca5..0e8b26faf83 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -624,6 +624,10 @@ impl PoolTransaction for MockTransaction { *self.get_sender() } + fn sender_ref(&self) -> &Address { + self.get_sender() + } + fn nonce(&self) -> u64 { *self.get_nonce() } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 6d4d562b3bd..15f824e7d43 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1023,6 +1023,9 @@ pub trait PoolTransaction: /// The Sender of the transaction. fn sender(&self) -> Address; + /// Reference to the Sender of the transaction. + fn sender_ref(&self) -> &Address; + /// Returns the nonce for this transaction. fn nonce(&self) -> u64; @@ -1277,6 +1280,11 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.signer() } + /// Returns a reference to the Sender of the transaction. + fn sender_ref(&self) -> &Address { + self.transaction.signer_ref() + } + /// Returns the nonce for this transaction. 
fn nonce(&self) -> u64 { self.transaction.nonce() diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 5249c1befa2..e3b7af736cd 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -263,7 +263,7 @@ where // Drop non-local transactions with a fee lower than the configured fee for acceptance into // the pool. - if !self.local_transactions_config.is_local(origin, transaction.sender()) && + if !self.local_transactions_config.is_local(origin, transaction.sender_ref()) && transaction.is_eip1559() && transaction.max_priority_fee_per_gas() < self.minimum_priority_fee { diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 6c625373401..84caae6e7ca 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -282,6 +282,11 @@ impl ValidPoolTransaction { self.transaction.sender() } + /// Returns a reference to the address of the sender + pub fn sender_ref(&self) -> &Address { + self.transaction.sender_ref() + } + /// Returns the recipient of the transaction if it is not a CREATE transaction. pub fn to(&self) -> Option

{ self.transaction.to() From 9d5e159968737950473a572cdce3881af033f9fc Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 3 Dec 2024 19:38:10 +0400 Subject: [PATCH 855/970] feat: make InvalidBlockHook generic over NodePrimitives (#13105) --- Cargo.lock | 1 - .../engine/invalid-block-hooks/src/witness.rs | 67 ++++++++++--------- crates/engine/local/src/service.rs | 2 +- .../primitives/src/invalid_block_hook.rs | 25 +++---- crates/engine/service/src/service.rs | 2 +- .../tree/src/tree/invalid_block_hook.rs | 22 +++--- crates/engine/tree/src/tree/mod.rs | 9 ++- crates/ethereum/evm/src/execute.rs | 4 +- crates/evm/src/state_change.rs | 38 +++++++---- crates/evm/src/system_calls/eip2935.rs | 3 +- crates/evm/src/system_calls/eip4788.rs | 3 +- crates/evm/src/system_calls/eip7002.rs | 3 +- crates/evm/src/system_calls/eip7251.rs | 3 +- crates/evm/src/system_calls/mod.rs | 20 +++--- crates/node/api/Cargo.toml | 1 - crates/node/api/src/node.rs | 5 +- crates/node/builder/src/components/builder.rs | 5 +- crates/node/builder/src/components/execute.rs | 7 +- crates/node/builder/src/components/mod.rs | 9 ++- crates/node/builder/src/launch/common.rs | 17 +++-- crates/optimism/evm/src/execute.rs | 2 +- 21 files changed, 133 insertions(+), 115 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f001fea092..91ab9d891f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7998,7 +7998,6 @@ dependencies = [ name = "reth-node-api" version = "1.1.2" dependencies = [ - "alloy-consensus", "alloy-rpc-types-engine", "eyre", "reth-beacon-consensus", diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 98ee8dd2d13..08681a9d17a 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,4 +1,4 @@ -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_primitives::{keccak256, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use 
eyre::OptionExt; @@ -8,8 +8,8 @@ use reth_engine_primitives::InvalidBlockHook; use reth_evm::{ state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, }; -use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; -use reth_primitives_traits::SignedTransaction; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader, TransactionSigned}; +use reth_primitives_traits::{HeaderTy, SignedTransaction}; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, db::states::bundle_state::BundleRetention, @@ -54,15 +54,18 @@ where + Send + Sync + 'static, - EvmConfig: ConfigureEvm
, { - fn on_invalid_block( + fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, - ) -> eyre::Result<()> { + ) -> eyre::Result<()> + where + N: NodePrimitives, + EvmConfig: ConfigureEvm
, + { // TODO(alexey): unify with `DebugApi::debug_execution_witness` // Setup database. @@ -86,7 +89,7 @@ where SystemCaller::new(self.evm_config.clone(), self.provider.chain_spec()); // Apply pre-block system contract calls. - system_caller.apply_pre_execution_changes(&block.clone().unseal(), &mut evm)?; + system_caller.apply_pre_execution_changes(&block.clone().unseal().block, &mut evm)?; // Re-execute all of the transactions in the block to load all touched accounts into // the cache DB. @@ -106,7 +109,7 @@ where // NOTE: This is not mut because we are not doing the DAO irregular state change here let balance_increments = post_block_balance_increments( self.provider.chain_spec().as_ref(), - &block.block.clone().unseal(), + &block.clone().unseal().block, U256::MAX, ); @@ -163,24 +166,24 @@ where keys: state_preimages, }; let re_executed_witness_path = self.save_file( - format!("{}_{}.witness.re_executed.json", block.number, block.hash()), + format!("{}_{}.witness.re_executed.json", block.number(), block.hash()), &response, )?; if let Some(healthy_node_client) = &self.healthy_node_client { // Compare the witness against the healthy node. let healthy_node_witness = futures::executor::block_on(async move { - DebugApiClient::debug_execution_witness(healthy_node_client, block.number.into()) + DebugApiClient::debug_execution_witness(healthy_node_client, block.number().into()) .await })?; let healthy_path = self.save_file( - format!("{}_{}.witness.healthy.json", block.number, block.hash()), + format!("{}_{}.witness.healthy.json", block.number(), block.hash()), &healthy_node_witness, )?; // If the witnesses are different, write the diff to the output directory. 
if response != healthy_node_witness { - let filename = format!("{}_{}.witness.diff", block.number, block.hash()); + let filename = format!("{}_{}.witness.diff", block.number(), block.hash()); let diff_path = self.save_diff(filename, &response, &healthy_node_witness)?; warn!( target: "engine::invalid_block_hooks::witness", @@ -210,15 +213,15 @@ where if bundle_state != output.state { let original_path = self.save_file( - format!("{}_{}.bundle_state.original.json", block.number, block.hash()), + format!("{}_{}.bundle_state.original.json", block.number(), block.hash()), &output.state, )?; let re_executed_path = self.save_file( - format!("{}_{}.bundle_state.re_executed.json", block.number, block.hash()), + format!("{}_{}.bundle_state.re_executed.json", block.number(), block.hash()), &bundle_state, )?; - let filename = format!("{}_{}.bundle_state.diff", block.number, block.hash()); + let filename = format!("{}_{}.bundle_state.diff", block.number(), block.hash()); let diff_path = self.save_diff(filename, &bundle_state, &output.state)?; warn!( @@ -236,26 +239,27 @@ where state_provider.state_root_with_updates(hashed_state)?; if let Some((original_updates, original_root)) = trie_updates { if re_executed_root != original_root { - let filename = format!("{}_{}.state_root.diff", block.number, block.hash()); + let filename = format!("{}_{}.state_root.diff", block.number(), block.hash()); let diff_path = self.save_diff(filename, &re_executed_root, &original_root)?; warn!(target: "engine::invalid_block_hooks::witness", ?original_root, ?re_executed_root, diff_path = %diff_path.display(), "State root mismatch after re-execution"); } // If the re-executed state root does not match the _header_ state root, also log that. 
- if re_executed_root != block.state_root { - let filename = format!("{}_{}.header_state_root.diff", block.number, block.hash()); - let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root)?; - warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root, ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); + if re_executed_root != block.state_root() { + let filename = + format!("{}_{}.header_state_root.diff", block.number(), block.hash()); + let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root())?; + warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root(), ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); } if &trie_output != original_updates { // Trie updates are too big to diff, so we just save the original and re-executed let original_path = self.save_file( - format!("{}_{}.trie_updates.original.json", block.number, block.hash()), + format!("{}_{}.trie_updates.original.json", block.number(), block.hash()), original_updates, )?; let re_executed_path = self.save_file( - format!("{}_{}.trie_updates.re_executed.json", block.number, block.hash()), + format!("{}_{}.trie_updates.re_executed.json", block.number(), block.hash()), &trie_output, )?; warn!( @@ -292,23 +296,24 @@ where } } -impl InvalidBlockHook for InvalidBlockWitnessHook +impl InvalidBlockHook for InvalidBlockWitnessHook where + N: NodePrimitives, P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
>, { fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { - if let Err(err) = self.on_invalid_block(parent_header, block, output, trie_updates) { + if let Err(err) = self.on_invalid_block::(parent_header, block, output, trie_updates) { warn!(target: "engine::invalid_block_hooks::witness", %err, "Failed to invoke hook"); } } diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index b06750d66df..b8cab99970a 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -72,7 +72,7 @@ where payload_builder: PayloadBuilderHandle, payload_validator: V, tree_config: TreeConfig, - invalid_block_hook: Box, + invalid_block_hook: Box>, sync_metrics_tx: MetricEventsSender, to_engine: UnboundedSender>, from_engine: EngineMessageStream, diff --git a/crates/engine/primitives/src/invalid_block_hook.rs b/crates/engine/primitives/src/invalid_block_hook.rs index 13c606511dd..cfd127ae6f4 100644 --- a/crates/engine/primitives/src/invalid_block_hook.rs +++ b/crates/engine/primitives/src/invalid_block_hook.rs @@ -1,35 +1,36 @@ use alloy_primitives::B256; use reth_execution_types::BlockExecutionOutput; -use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_trie::updates::TrieUpdates; /// An invalid block hook. -pub trait InvalidBlockHook: Send + Sync { +pub trait InvalidBlockHook: Send + Sync { /// Invoked when an invalid block is encountered. 
fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ); } -impl InvalidBlockHook for F +impl InvalidBlockHook for F where + N: NodePrimitives, F: Fn( - &SealedHeader, - &SealedBlockWithSenders, - &BlockExecutionOutput, + &SealedHeader, + &SealedBlockWithSenders, + &BlockExecutionOutput, Option<(&TrieUpdates, B256)>, ) + Send + Sync, { fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { self(parent_header, block, output, trie_updates) diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index d3c07c63871..bc3e36beafc 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -78,7 +78,7 @@ where payload_builder: PayloadBuilderHandle, payload_validator: V, tree_config: TreeConfig, - invalid_block_hook: Box, + invalid_block_hook: Box>, sync_metrics_tx: MetricEventsSender, ) -> Self where diff --git a/crates/engine/tree/src/tree/invalid_block_hook.rs b/crates/engine/tree/src/tree/invalid_block_hook.rs index 98244ed1349..7c7b0631dd2 100644 --- a/crates/engine/tree/src/tree/invalid_block_hook.rs +++ b/crates/engine/tree/src/tree/invalid_block_hook.rs @@ -1,6 +1,6 @@ use alloy_primitives::B256; use reth_engine_primitives::InvalidBlockHook; -use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_provider::BlockExecutionOutput; use reth_trie::updates::TrieUpdates; @@ -9,32 +9,32 @@ use reth_trie::updates::TrieUpdates; #[non_exhaustive] pub struct NoopInvalidBlockHook; 
-impl InvalidBlockHook for NoopInvalidBlockHook { +impl InvalidBlockHook for NoopInvalidBlockHook { fn on_invalid_block( &self, - _parent_header: &SealedHeader, - _block: &SealedBlockWithSenders, - _output: &BlockExecutionOutput, + _parent_header: &SealedHeader, + _block: &SealedBlockWithSenders, + _output: &BlockExecutionOutput, _trie_updates: Option<(&TrieUpdates, B256)>, ) { } } /// Multiple [`InvalidBlockHook`]s that are executed in order. -pub struct InvalidBlockHooks(pub Vec>); +pub struct InvalidBlockHooks(pub Vec>>); -impl std::fmt::Debug for InvalidBlockHooks { +impl std::fmt::Debug for InvalidBlockHooks { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("InvalidBlockHooks").field("len", &self.0.len()).finish() } } -impl InvalidBlockHook for InvalidBlockHooks { +impl InvalidBlockHook for InvalidBlockHooks { fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { for hook in &self.0 { diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 16e07e51844..c6503597381 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -469,6 +469,7 @@ pub enum TreeAction { /// emitting events. pub struct EngineApiTreeHandler where + N: NodePrimitives, T: EngineTypes, { provider: P, @@ -507,7 +508,7 @@ where /// Metrics for the engine api. metrics: EngineApiMetrics, /// An invalid block hook. 
- invalid_block_hook: Box, + invalid_block_hook: Box>, /// The engine API variant of this handler engine_kind: EngineApiKind, /// Captures the types the engine operates on @@ -516,6 +517,8 @@ where impl std::fmt::Debug for EngineApiTreeHandler +where + N: NodePrimitives, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EngineApiTreeHandler") @@ -597,7 +600,7 @@ where } /// Sets the invalid block hook. - fn set_invalid_block_hook(&mut self, invalid_block_hook: Box) { + fn set_invalid_block_hook(&mut self, invalid_block_hook: Box>) { self.invalid_block_hook = invalid_block_hook; } @@ -616,7 +619,7 @@ where payload_builder: PayloadBuilderHandle, canonical_in_memory_state: CanonicalInMemoryState, config: TreeConfig, - invalid_block_hook: Box, + invalid_block_hook: Box>, kind: EngineApiKind, ) -> (Sender>>, UnboundedReceiver) { let best_block_number = provider.best_block_number().unwrap_or(0); diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 65fbbdd2568..e21b42433dd 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -152,7 +152,7 @@ where let env = self.evm_env_for_block(&block.header, total_difficulty); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - self.system_caller.apply_pre_execution_changes(block, &mut evm)?; + self.system_caller.apply_pre_execution_changes(&block.block, &mut evm)?; Ok(()) } @@ -247,7 +247,7 @@ where drop(evm); let mut balance_increments = - post_block_balance_increments(&self.chain_spec, block, total_difficulty); + post_block_balance_increments(&self.chain_spec, &block.block, total_difficulty); // Irregular state change at Ethereum DAO hardfork if self.chain_spec.fork(EthereumHardfork::Dao).transitions_at_block(block.number) { diff --git a/crates/evm/src/state_change.rs b/crates/evm/src/state_change.rs index 0e207fc2dbe..5104c466399 100644 --- a/crates/evm/src/state_change.rs +++ 
b/crates/evm/src/state_change.rs @@ -1,43 +1,55 @@ //! State changes that are not related to transactions. +use alloy_consensus::BlockHeader; use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{map::HashMap, Address, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc; -use reth_primitives::Block; +use reth_primitives_traits::BlockBody; /// Collect all balance changes at the end of the block. /// /// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular /// state changes (DAO fork). #[inline] -pub fn post_block_balance_increments( +pub fn post_block_balance_increments( chain_spec: &ChainSpec, block: &Block, total_difficulty: U256, -) -> HashMap { +) -> HashMap +where + ChainSpec: EthereumHardforks, + Block: reth_primitives_traits::Block, +{ let mut balance_increments = HashMap::default(); // Add block rewards if they are enabled. - if let Some(base_block_reward) = - calc::base_block_reward(chain_spec, block.number, block.difficulty, total_difficulty) - { + if let Some(base_block_reward) = calc::base_block_reward( + chain_spec, + block.header().number(), + block.header().difficulty(), + total_difficulty, + ) { // Ommer rewards - for ommer in &block.body.ommers { - *balance_increments.entry(ommer.beneficiary).or_default() += - calc::ommer_reward(base_block_reward, block.number, ommer.number); + if let Some(ommers) = block.body().ommers() { + for ommer in ommers { + *balance_increments.entry(ommer.beneficiary()).or_default() += + calc::ommer_reward(base_block_reward, block.header().number(), ommer.number()); + } } // Full block reward - *balance_increments.entry(block.beneficiary).or_default() += - calc::block_reward(base_block_reward, block.body.ommers.len()); + *balance_increments.entry(block.header().beneficiary()).or_default() += calc::block_reward( + base_block_reward, + block.body().ommers().map(|s| s.len()).unwrap_or(0), + ); } // process withdrawals 
insert_post_block_withdrawals_balance_increments( chain_spec, - block.timestamp, - block.body.withdrawals.as_ref().map(|w| w.as_slice()), + block.header().timestamp(), + block.body().withdrawals().as_ref().map(|w| w.as_slice()), &mut balance_increments, ); diff --git a/crates/evm/src/system_calls/eip2935.rs b/crates/evm/src/system_calls/eip2935.rs index 4848feb7281..0cc2b83a3ca 100644 --- a/crates/evm/src/system_calls/eip2935.rs +++ b/crates/evm/src/system_calls/eip2935.rs @@ -4,7 +4,6 @@ use alloc::{boxed::Box, string::ToString}; use alloy_eips::eip2935::HISTORY_STORAGE_ADDRESS; use crate::ConfigureEvm; -use alloy_consensus::Header; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; @@ -35,7 +34,7 @@ pub(crate) fn transact_blockhashes_contract_call( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, { if !chain_spec.is_prague_active_at_timestamp(block_timestamp) { return Ok(None) diff --git a/crates/evm/src/system_calls/eip4788.rs b/crates/evm/src/system_calls/eip4788.rs index 2ad02c26eb9..bfd5797214e 100644 --- a/crates/evm/src/system_calls/eip4788.rs +++ b/crates/evm/src/system_calls/eip4788.rs @@ -2,7 +2,6 @@ use alloc::{boxed::Box, string::ToString}; use crate::ConfigureEvm; -use alloy_consensus::Header; use alloy_eips::eip4788::BEACON_ROOTS_ADDRESS; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; @@ -31,7 +30,7 @@ pub(crate) fn transact_beacon_root_contract_call( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, Spec: EthereumHardforks, { if !chain_spec.is_cancun_active_at_timestamp(block_timestamp) { diff --git a/crates/evm/src/system_calls/eip7002.rs b/crates/evm/src/system_calls/eip7002.rs index f20b7a54c08..d3c6d84903e 100644 --- a/crates/evm/src/system_calls/eip7002.rs +++ b/crates/evm/src/system_calls/eip7002.rs @@ -1,7 +1,6 @@ //! [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) system call implementation. use crate::ConfigureEvm; use alloc::{boxed::Box, format}; -use alloy_consensus::Header; use alloy_eips::eip7002::WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS; use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; @@ -21,7 +20,7 @@ pub(crate) fn transact_withdrawal_requests_contract_call( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, { // get previous env let previous_env = Box::new(evm.context.env().clone()); diff --git a/crates/evm/src/system_calls/eip7251.rs b/crates/evm/src/system_calls/eip7251.rs index 112f724df76..28ae0160cdf 100644 --- a/crates/evm/src/system_calls/eip7251.rs +++ b/crates/evm/src/system_calls/eip7251.rs @@ -1,7 +1,6 @@ //! [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) system call implementation. use crate::ConfigureEvm; use alloc::{boxed::Box, format}; -use alloy_consensus::Header; use alloy_eips::eip7251::CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS; use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; @@ -22,7 +21,7 @@ pub(crate) fn transact_consolidation_requests_contract_call( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, { // get previous env let previous_env = Box::new(evm.context.env().clone()); diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 2a5b80ad66d..8af72094be4 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -2,7 +2,7 @@ use crate::ConfigureEvm; use alloc::{boxed::Box, sync::Arc}; -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_eips::{ eip7002::WITHDRAWAL_REQUEST_TYPE, eip7251::CONSOLIDATION_REQUEST_TYPE, eip7685::Requests, }; @@ -10,7 +10,6 @@ use alloy_primitives::Bytes; use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_execution_errors::BlockExecutionError; -use reth_primitives::Block; use revm::{Database, DatabaseCommit, Evm}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, B256}; @@ -91,11 +90,11 @@ where impl SystemCaller where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, Chainspec: EthereumHardforks, { /// Apply pre execution changes. - pub fn apply_pre_execution_changes( + pub fn apply_pre_execution_changes( &mut self, block: &Block, evm: &mut Evm<'_, Ext, DB>, @@ -103,17 +102,18 @@ where where DB: Database + DatabaseCommit, DB::Error: Display, + Block: reth_primitives_traits::Block
, { self.apply_blockhashes_contract_call( - block.timestamp, - block.number, - block.parent_hash, + block.header().timestamp(), + block.header().number(), + block.header().parent_hash(), evm, )?; self.apply_beacon_root_contract_call( - block.timestamp, - block.number, - block.parent_beacon_block_root, + block.header().timestamp(), + block.header().number(), + block.header().parent_beacon_block_root(), evm, )?; diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index ab4595d3362..7d209a90fca 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -26,6 +26,5 @@ reth-node-types.workspace = true reth-node-core.workspace = true alloy-rpc-types-engine.workspace = true -alloy-consensus.workspace = true eyre.workspace = true diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 83947208ca8..1b490c4cf41 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,14 +1,13 @@ //! Traits for configuring a node. use crate::ConfigureEvm; -use alloy_consensus::Header; use alloy_rpc_types_engine::JwtSecret; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; -use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, TxTy}; +use reth_node_types::{HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, TxTy}; use reth_payload_builder_primitives::PayloadBuilder; use reth_provider::FullProvider; use reth_tasks::TaskExecutor; @@ -50,7 +49,7 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Pool: TransactionPool>> + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm
; + type Evm: ConfigureEvm
>; /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider::Primitives>; diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 4c04c9200d2..15f6c12319f 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -7,10 +7,9 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; -use alloy_consensus::Header; use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::{NodeTypes, NodeTypesWithEngine, TxTy}; +use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{future::Future, marker::PhantomData}; @@ -378,7 +377,7 @@ where Pool: TransactionPool>> + Unpin + 'static, - EVM: ConfigureEvm
, + EVM: ConfigureEvm
>, Executor: BlockExecutorProvider::Primitives>, Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 0c75ef3016f..5ecc67d8b76 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -1,8 +1,7 @@ //! EVM component for the node builder. use crate::{BuilderContext, FullNodeTypes}; -use alloy_consensus::Header; use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::ConfigureEvm; +use reth_node_api::{ConfigureEvm, HeaderTy}; use std::future::Future; /// A type that knows how to build the executor types. @@ -10,7 +9,7 @@ pub trait ExecutorBuilder: Send { /// The EVM config to use. /// /// This provides the node with the necessary configuration to configure an EVM. - type EVM: ConfigureEvm
; + type EVM: ConfigureEvm
>; /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider< @@ -27,7 +26,7 @@ pub trait ExecutorBuilder: Send { impl ExecutorBuilder for F where Node: FullNodeTypes, - EVM: ConfigureEvm
, + EVM: ConfigureEvm
>, Executor: BlockExecutorProvider::Primitives>, F: FnOnce(&BuilderContext) -> Fut + Send, diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 22a47e3daa4..764277dcbf2 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -22,12 +22,11 @@ pub use payload::*; pub use pool::*; use crate::{ConfigureEvm, FullNodeTypes}; -use alloy_consensus::Header; use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; -use reth_node_api::{NodeTypes, NodeTypesWithEngine, TxTy}; +use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::{PoolTransaction, TransactionPool}; @@ -41,7 +40,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati type Pool: TransactionPool>> + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm
; + type Evm: ConfigureEvm
>; /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider::Primitives>; @@ -100,7 +99,7 @@ where Pool: TransactionPool>> + Unpin + 'static, - EVM: ConfigureEvm
, + EVM: ConfigureEvm
>, Executor: BlockExecutorProvider::Primitives>, Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { @@ -140,7 +139,7 @@ impl Clone for Components, + EVM: ConfigureEvm
>, Executor: BlockExecutorProvider, Cons: Clone, { diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 25c81a8d5cf..f4557bd2272 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -22,7 +22,9 @@ use reth_evm::noop::NoopBlockExecutorProvider; use reth_fs_util as fs; use reth_invalid_block_hooks::InvalidBlockWitnessHook; use reth_network_p2p::headers::client::HeadersClient; -use reth_node_api::{FullNodePrimitives, FullNodeTypes, NodeTypes, NodeTypesWithDB}; +use reth_node_api::{ + FullNodePrimitives, FullNodeTypes, NodePrimitives, NodeTypes, NodeTypesWithDB, +}; use reth_node_core::{ args::InvalidBlockHookType, dirs::{ChainPath, DataDirPath}, @@ -39,7 +41,7 @@ use reth_node_metrics::{ server::{MetricServer, MetricServerConfig}, version::VersionInfo, }; -use reth_primitives::Head; +use reth_primitives::{Head, TransactionSigned}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, BlockHashReader, BlockNumReader, ChainSpecProvider, ProviderError, ProviderFactory, @@ -870,11 +872,16 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes, + T: FullNodeTypes< + Provider: StateProviderFactory + ChainSpecProvider, + Types: ProviderNodeTypes>, + >, CB: NodeComponentsBuilder, { /// Returns the [`InvalidBlockHook`] to use for the node. 
- pub fn invalid_block_hook(&self) -> eyre::Result> { + pub fn invalid_block_hook( + &self, + ) -> eyre::Result::Primitives>>> { let Some(ref hook) = self.node_config().debug.invalid_block_hook else { return Ok(Box::new(NoopInvalidBlockHook::default())) }; @@ -898,7 +905,7 @@ where InvalidBlockHookType::PreState | InvalidBlockHookType::Opcode => { eyre::bail!("invalid block hook {hook:?} is not implemented yet") } - } as Box) + } as Box>) }) .collect::>()?; diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 549f52c89de..3673f73a894 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -258,7 +258,7 @@ where _receipts: &[Receipt], ) -> Result { let balance_increments = - post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); + post_block_balance_increments(&self.chain_spec.clone(), &block.block, total_difficulty); // increment balances self.state .increment_balances(balance_increments.clone()) From 64c6343fd126310efb037494a5fd1420d4f874a6 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:42:54 -0500 Subject: [PATCH 856/970] chore: propagate network primitives to `on_command`, `on_network_event` (#13084) --- crates/net/network/src/transactions/mod.rs | 88 +++++++++++----------- 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 5463b20f7f3..b352dfe3136 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1005,51 +1005,9 @@ where let _ = response.send(Ok(resp)); } } -} - -impl TransactionsManager -where - Pool: TransactionPool + 'static, - Pool::Transaction: - PoolTransaction>, -{ - /// Handles dedicated transaction events related to the `eth` protocol. 
- fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { - match event { - NetworkTransactionEvent::IncomingTransactions { peer_id, msg } => { - // ensure we didn't receive any blob transactions as these are disallowed to be - // broadcasted in full - - let has_blob_txs = msg.has_eip4844(); - - let non_blob_txs = msg - .0 - .into_iter() - .map(PooledTransactionsElement::try_from_broadcast) - .filter_map(Result::ok) - .collect(); - - self.import_transactions(peer_id, non_blob_txs, TransactionSource::Broadcast); - - if has_blob_txs { - debug!(target: "net::tx", ?peer_id, "received bad full blob transaction broadcast"); - self.report_peer_bad_transactions(peer_id); - } - } - NetworkTransactionEvent::IncomingPooledTransactionHashes { peer_id, msg } => { - self.on_new_pooled_transaction_hashes(peer_id, msg) - } - NetworkTransactionEvent::GetPooledTransactions { peer_id, request, response } => { - self.on_get_pooled_transactions(peer_id, request, response) - } - NetworkTransactionEvent::GetTransactionsHandle(response) => { - let _ = response.send(Some(self.handle())); - } - } - } /// Handles a command received from a detached [`TransactionsHandle`] - fn on_command(&mut self, cmd: TransactionsCommand) { + fn on_command(&mut self, cmd: TransactionsCommand) { match cmd { TransactionsCommand::PropagateHash(hash) => { self.on_new_pending_transactions(vec![hash]) @@ -1089,7 +1047,7 @@ where } /// Handles a received event related to common network events. - fn on_network_event(&mut self, event_result: NetworkEvent) { + fn on_network_event(&mut self, event_result: NetworkEvent>) { match event_result { NetworkEvent::SessionClosed { peer_id, .. } => { // remove the peer @@ -1141,6 +1099,48 @@ where _ => {} } } +} + +impl TransactionsManager +where + Pool: TransactionPool + 'static, + Pool::Transaction: + PoolTransaction>, +{ + /// Handles dedicated transaction events related to the `eth` protocol. 
+ fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { + match event { + NetworkTransactionEvent::IncomingTransactions { peer_id, msg } => { + // ensure we didn't receive any blob transactions as these are disallowed to be + // broadcasted in full + + let has_blob_txs = msg.has_eip4844(); + + let non_blob_txs = msg + .0 + .into_iter() + .map(PooledTransactionsElement::try_from_broadcast) + .filter_map(Result::ok) + .collect(); + + self.import_transactions(peer_id, non_blob_txs, TransactionSource::Broadcast); + + if has_blob_txs { + debug!(target: "net::tx", ?peer_id, "received bad full blob transaction broadcast"); + self.report_peer_bad_transactions(peer_id); + } + } + NetworkTransactionEvent::IncomingPooledTransactionHashes { peer_id, msg } => { + self.on_new_pooled_transaction_hashes(peer_id, msg) + } + NetworkTransactionEvent::GetPooledTransactions { peer_id, request, response } => { + self.on_get_pooled_transactions(peer_id, request, response) + } + NetworkTransactionEvent::GetTransactionsHandle(response) => { + let _ = response.send(Some(self.handle())); + } + } + } /// Starts the import process for the given transactions. fn import_transactions( From e4c7fac9dbd175f705035ef72ff164db5250b9b9 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 3 Dec 2024 17:43:35 +0100 Subject: [PATCH 857/970] chore(trie): make Debug impl work for any provider (#13107) --- crates/trie/sparse/src/state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 85116868f33..9b4b3800251 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -46,7 +46,7 @@ impl Default for SparseStateTrie { } } -impl fmt::Debug for SparseStateTrie { +impl fmt::Debug for SparseStateTrie

{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SparseStateTrie") .field("state", &self.state) From 886471d69383a9df371bb32b99791bdab91f648d Mon Sep 17 00:00:00 2001 From: frisitano <35734660+frisitano@users.noreply.github.com> Date: Wed, 4 Dec 2024 00:56:17 +0800 Subject: [PATCH 858/970] Introduce `HashedPostStateProvider` (#12607) Co-authored-by: Matthias Seitz --- Cargo.lock | 5 +--- .../src/commands/debug_cmd/build_block.rs | 5 ++-- .../commands/debug_cmd/in_memory_merkle.rs | 10 ++++--- crates/blockchain-tree/src/blockchain_tree.rs | 13 +++++--- crates/blockchain-tree/src/chain.rs | 11 ++++--- crates/chain-state/Cargo.toml | 6 ++-- crates/chain-state/src/in_memory.rs | 10 +++++-- crates/chain-state/src/memory_overlay.rs | 11 +++++-- .../engine/invalid-block-hooks/src/witness.rs | 4 +-- crates/engine/tree/src/tree/mod.rs | 10 ++++--- crates/engine/tree/src/tree/root.rs | 8 ++++- crates/engine/util/src/reorg.rs | 3 +- crates/ethereum/payload/Cargo.toml | 1 - crates/ethereum/payload/src/lib.rs | 3 +- .../execution-types/src/execution_outcome.rs | 6 ++-- crates/optimism/payload/Cargo.toml | 1 - crates/optimism/payload/src/builder.rs | 27 +++++++++-------- crates/revm/src/test_utils.rs | 14 ++++++--- crates/rpc/rpc-eth-api/Cargo.toml | 1 - .../rpc-eth-api/src/helpers/pending_block.rs | 3 +- crates/rpc/rpc-eth-types/src/cache/db.rs | 11 ++++++- crates/rpc/rpc/Cargo.toml | 1 - crates/rpc/rpc/src/validation.rs | 3 +- .../src/providers/blockchain_provider.rs | 27 ++++++++++++++--- .../src/providers/bundle_state_provider.rs | 22 +++++++++----- .../provider/src/providers/consistent_view.rs | 10 ++++--- .../provider/src/providers/database/mod.rs | 29 ++++++++++++++---- .../src/providers/state/historical.rs | 30 +++++++++++++++++-- .../provider/src/providers/state/latest.rs | 26 ++++++++++++++-- .../provider/src/providers/state/macros.rs | 3 ++ .../storage/provider/src/test_utils/mock.rs | 13 +++++++- .../storage/provider/src/test_utils/noop.rs 
| 10 ++++++- crates/storage/provider/src/writer/mod.rs | 10 ++----- crates/storage/storage-api/Cargo.toml | 1 + crates/storage/storage-api/src/noop.rs | 12 ++++++-- crates/storage/storage-api/src/state.rs | 12 +++++++- crates/trie/db/src/prefix_set.rs | 29 +++++++++++------- crates/trie/db/src/state.rs | 20 +++++++------ crates/trie/parallel/src/proof.rs | 8 ++++- crates/trie/parallel/src/root.rs | 8 ++++- crates/trie/trie/benches/hash_post_state.rs | 4 +-- crates/trie/trie/src/state.rs | 14 +++++---- 42 files changed, 318 insertions(+), 137 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 91ab9d891f5..3448ddbd451 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7528,7 +7528,6 @@ dependencies = [ "reth-provider", "reth-revm", "reth-transaction-pool", - "reth-trie", "revm", "tracing", ] @@ -8449,7 +8448,6 @@ dependencies = [ "reth-revm", "reth-rpc-types-compat", "reth-transaction-pool", - "reth-trie", "revm", "sha2 0.10.8", "thiserror 2.0.3", @@ -8861,7 +8859,6 @@ dependencies = [ "reth-tasks", "reth-testing-utils", "reth-transaction-pool", - "reth-trie", "revm", "revm-inspectors", "revm-primitives", @@ -9041,7 +9038,6 @@ dependencies = [ "reth-rpc-types-compat", "reth-tasks", "reth-transaction-pool", - "reth-trie", "reth-trie-common", "revm", "revm-inspectors", @@ -9292,6 +9288,7 @@ dependencies = [ "reth-storage-errors", "reth-trie", "reth-trie-db", + "revm", ] [[package]] diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index c7e1be893af..0e4d3f7188a 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -263,7 +263,8 @@ impl> Command { let block_with_senders = SealedBlockWithSenders::>::new(block.clone(), senders).unwrap(); - let db = StateProviderDatabase::new(blockchain_db.latest()?); + let state_provider = blockchain_db.latest()?; + let db = StateProviderDatabase::new(&state_provider); let executor = 
EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); @@ -273,7 +274,7 @@ impl> Command { ExecutionOutcome::from((block_execution_output, block.number)); debug!(target: "reth::cli", ?execution_outcome, "Executed block"); - let hashed_post_state = execution_outcome.hash_state_slow(); + let hashed_post_state = state_provider.hashed_post_state(execution_outcome.state()); let (state_root, trie_updates) = StateRoot::overlay_root_with_updates( provider_factory.provider()?.tx_ref(), hashed_post_state.clone(), diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 6fbfa33b891..58b86648b90 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -24,8 +24,9 @@ use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockExt; use reth_provider::{ providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory, - HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, - StageCheckpointReader, StateWriter, StorageLocation, StorageReader, + HashedPostStateProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, StorageLocation, + StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; @@ -143,7 +144,8 @@ impl> Command { ) .await?; - let db = StateProviderDatabase::new(LatestStateProviderRef::new(&provider)); + let state_provider = LatestStateProviderRef::new(&provider); + let db = StateProviderDatabase::new(&state_provider); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); @@ -165,7 +167,7 @@ impl> Command { // Unpacked `BundleState::state_root_slow` function let (in_memory_state_root, in_memory_updates) = StateRoot::overlay_root_with_updates( provider.tx_ref(), - 
execution_outcome.hash_state_slow(), + state_provider.hashed_post_state(execution_outcome.state()), )?; if in_memory_state_root == block.state_root { diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 91ddd75f2a7..e8576de4a71 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -24,8 +24,8 @@ use reth_primitives::{ use reth_provider::{ BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, - ChainSplitTarget, DBProvider, DisplayBlocksChain, HeaderProvider, ProviderError, - StaticFileProviderFactory, StorageLocation, + ChainSplitTarget, DBProvider, DisplayBlocksChain, HashedPostStateProvider, HeaderProvider, + ProviderError, StaticFileProviderFactory, StorageLocation, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; @@ -1215,7 +1215,7 @@ where recorder: &mut MakeCanonicalDurationsRecorder, ) -> Result<(), CanonicalError> { let (blocks, state, chain_trie_updates) = chain.into_inner(); - let hashed_state = state.hash_state_slow(); + let hashed_state = self.externals.provider_factory.hashed_post_state(state.state()); let prefix_sets = hashed_state.construct_prefix_sets().freeze(); let hashed_state_sorted = hashed_state.into_sorted(); @@ -1885,7 +1885,12 @@ mod tests { ); let provider = tree.externals.provider_factory.provider().unwrap(); - let prefix_sets = exec5.hash_state_slow().construct_prefix_sets().freeze(); + let prefix_sets = tree + .externals + .provider_factory + .hashed_post_state(exec5.state()) + .construct_prefix_sets() + .freeze(); let state_root = StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap(); assert_eq!(state_root, block5.state_root); diff --git a/crates/blockchain-tree/src/chain.rs 
b/crates/blockchain-tree/src/chain.rs index a3cbd432de8..4002fae1ac9 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -18,11 +18,11 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView, TreeNodeTypes}, - DBProvider, FullExecutionDataProvider, ProviderError, StateRootProvider, - TryIntoHistoricalStateProvider, + DBProvider, FullExecutionDataProvider, HashedPostStateProvider, ProviderError, + StateRootProvider, TryIntoHistoricalStateProvider, }; use reth_revm::database::StateProviderDatabase; -use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; +use reth_trie::{updates::TrieUpdates, TrieInput}; use reth_trie_parallel::root::ParallelStateRoot; use std::{ collections::BTreeMap, @@ -228,14 +228,13 @@ impl AppendableChain { execution_outcome.extend(initial_execution_outcome.clone()); ParallelStateRoot::new( consistent_view, - TrieInput::from_state(execution_outcome.hash_state_slow()), + TrieInput::from_state(provider.hashed_post_state(execution_outcome.state())), ) .incremental_root_with_updates() .map(|(root, updates)| (root, Some(updates))) .map_err(ProviderError::from)? 
} else { - let hashed_state = - HashedPostState::from_bundle_state(&initial_execution_outcome.state().state); + let hashed_state = provider.hashed_post_state(initial_execution_outcome.state()); let state_root = provider.state_root(hashed_state)?; (state_root, None) }; diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index d2ef5870947..2b06bd93707 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -26,6 +26,7 @@ reth-trie.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +revm.workspace = true # async tokio = { workspace = true, default-features = false, features = ["sync", "macros"] } @@ -44,7 +45,6 @@ pin-project.workspace = true alloy-signer = { workspace = true, optional = true } alloy-signer-local = { workspace = true, optional = true } rand = { workspace = true, optional = true } -revm = { workspace = true, optional = true } [dev-dependencies] reth-testing-utils.workspace = true @@ -52,17 +52,15 @@ alloy-signer.workspace = true alloy-signer-local.workspace = true alloy-consensus.workspace = true rand.workspace = true -revm.workspace = true [features] test-utils = [ "alloy-signer", "alloy-signer-local", "rand", - "revm", "reth-chainspec/test-utils", "reth-primitives/test-utils", "reth-primitives-traits/test-utils", "reth-trie/test-utils", - "revm?/test-utils", + "revm/test-utils", ] diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 3cd6f464562..670c340db4b 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -949,8 +949,8 @@ mod tests { use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode, EthPrimitives, Receipt}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, - StorageRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + 
StateRootProvider, StorageRootProvider, }; use reth_trie::{ AccountProof, HashedStorage, MultiProof, StorageMultiProof, StorageProof, TrieInput, @@ -1047,6 +1047,12 @@ mod tests { } } + impl HashedPostStateProvider for MockStateProvider { + fn hashed_post_state(&self, _bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } + } + impl StorageRootProvider for MockStateProvider { fn storage_root( &self, diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index c84bd8c93f0..21bc30b07cf 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -8,13 +8,14 @@ use alloy_primitives::{ use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode, NodePrimitives}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, - StorageRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateRootProvider, StorageRootProvider, }; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageMultiProof, TrieInput, }; +use revm::db::BundleState; use std::sync::OnceLock; /// A state provider that stores references to in-memory blocks along with their state as well as a @@ -218,6 +219,12 @@ macro_rules! 
impl_state_provider { } } + impl $($tokens)* HashedPostStateProvider for $type { + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { + self.historical.hashed_post_state(bundle_state) + } + } + impl $($tokens)* StateProvider for $type { fn storage( &self, diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 08681a9d17a..cc98837a98c 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -17,7 +17,7 @@ use reth_revm::{ }; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; -use reth_trie::{updates::TrieUpdates, HashedPostState, HashedStorage}; +use reth_trie::{updates::TrieUpdates, HashedStorage}; use serde::Serialize; use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; @@ -129,7 +129,7 @@ where // // Note: We grab *all* accounts in the cache here, as the `BundleState` prunes // referenced accounts + storage slots. 
- let mut hashed_state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut hashed_state = db.database.hashed_post_state(&bundle_state); for (address, account) in db.cache.accounts { let hashed_address = keccak256(address); hashed_state diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index c6503597381..763d5d990c5 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -41,8 +41,8 @@ use reth_primitives::{ }; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, - ProviderError, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - TransactionVariant, + HashedPostStateProvider, ProviderError, StateCommitmentProvider, StateProviderBox, + StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; @@ -552,6 +552,8 @@ where + BlockReader + StateProviderFactory + StateReader + + StateCommitmentProvider + + HashedPostStateProvider + Clone + 'static,

::Provider: BlockReader, @@ -1568,7 +1570,7 @@ where .provider .get_state(block.number())? .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number()))?; - let hashed_state = execution_output.hash_state_slow(); + let hashed_state = self.provider.hashed_post_state(execution_output.state()); Ok(Some(ExecutedBlock { block: Arc::new(block), @@ -2242,7 +2244,7 @@ where return Err(err.into()) } - let hashed_state = HashedPostState::from_bundle_state(&output.state.state); + let hashed_state = self.provider.hashed_post_state(&output.state); trace!(target: "engine::tree", block=?sealed_block.num_hash(), "Calculating block state root"); let root_time = Instant::now(); diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 78a8332b5eb..eea236fdaae 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -3,6 +3,7 @@ use alloy_primitives::map::{HashMap, HashSet}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, + StateCommitmentProvider, }; use reth_trie::{ proof::Proof, updates::TrieUpdates, HashedPostState, HashedStorage, MultiProof, Nibbles, @@ -179,7 +180,12 @@ pub(crate) struct StateRootTask { #[allow(dead_code)] impl StateRootTask where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, + Factory: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + Send + + Sync + + 'static, { /// Creates a new state root task with the unified message channel pub(crate) fn new( diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 18a8c4737b5..4f7e213b058 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -29,7 +29,6 @@ use reth_revm::{ DatabaseCommit, }; use reth_rpc_types_compat::engine::payload::block_to_payload; -use reth_trie::HashedPostState; use revm_primitives::{calc_excess_blob_gas, EVMError, EnvWithHandlerCfg}; use std::{ collections::VecDeque, @@ 
-382,7 +381,7 @@ where reorg_target.number, Default::default(), ); - let hashed_state = HashedPostState::from_bundle_state(&outcome.state().state); + let hashed_state = state_provider.hashed_post_state(outcome.state()); let (blob_gas_used, excess_blob_gas) = if chain_spec.is_cancun_active_at_timestamp(reorg_target.timestamp) { diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index 4e0880d1d15..b01f4c5bc74 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -25,7 +25,6 @@ reth-basic-payload-builder.workspace = true reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-errors.workspace = true -reth-trie.workspace = true reth-chain-state.workspace = true reth-chainspec.workspace = true diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index df900f1f36b..a5e6bcaa5f0 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -39,7 +39,6 @@ use reth_transaction_pool::{ error::InvalidPoolTransactionError, noop::NoopTransactionPool, BestTransactions, BestTransactionsAttributes, PoolTransaction, TransactionPool, ValidPoolTransaction, }; -use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, primitives::{ @@ -413,7 +412,7 @@ where let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + let hashed_state = db.database.db.hashed_post_state(execution_outcome.state()); let (state_root, trie_output) = { db.database.inner().state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index c9e85ae444f..1dca5f2fc9e 100644 --- 
a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -3,7 +3,7 @@ use alloy_eips::eip7685::Requests; use alloy_primitives::{logs_bloom, Address, BlockNumber, Bloom, Log, B256, U256}; use reth_primitives::Receipts; use reth_primitives_traits::{receipt::ReceiptExt, Account, Bytecode, Receipt, StorageEntry}; -use reth_trie::HashedPostState; +use reth_trie::{HashedPostState, KeyHasher}; use revm::{ db::{states::BundleState, BundleAccount}, primitives::AccountInfo, @@ -164,8 +164,8 @@ impl ExecutionOutcome { /// Returns [`HashedPostState`] for this execution outcome. /// See [`HashedPostState::from_bundle_state`] for more info. - pub fn hash_state_slow(&self) -> HashedPostState { - HashedPostState::from_bundle_state(&self.bundle.state) + pub fn hash_state_slow(&self) -> HashedPostState { + HashedPostState::from_bundle_state::(&self.bundle.state) } /// Transform block number to the index of block. diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 7f47da7e236..1c4f855b6aa 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -26,7 +26,6 @@ reth-payload-builder-primitives.workspace = true reth-payload-util.workspace = true reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true -reth-trie.workspace = true reth-chain-state.workspace = true # op-reth diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index d385ca79546..91e70d0c3c8 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -22,12 +22,14 @@ use reth_primitives::{ proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, BlockExt, Receipt, SealedHeader, TransactionSigned, TxType, }; -use reth_provider::{ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider}; +use reth_provider::{ + 
HashedPostStateProvider, ProviderError, StateProofProvider, StateProviderFactory, + StateRootProvider, +}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ noop::NoopTransactionPool, BestTransactionsAttributes, PoolTransaction, TransactionPool, }; -use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, primitives::{ @@ -339,7 +341,7 @@ where where EvmConfig: ConfigureEvm

, DB: Database + AsRef

, - P: StateRootProvider, + P: StateRootProvider + HashedPostStateProvider, { let ExecutedPayload { info, withdrawals_root } = match self.execute(&mut state, &ctx)? { BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, @@ -367,17 +369,16 @@ where execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); // // calculate the state root - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + let state_provider = state.database.as_ref(); + let hashed_state = state_provider.hashed_post_state(execution_outcome.state()); let (state_root, trie_output) = { - state.database.as_ref().state_root_with_updates(hashed_state.clone()).inspect_err( - |err| { - warn!(target: "payload_builder", - parent_header=%ctx.parent().hash(), - %err, - "failed to calculate state root for payload" - ); - }, - )? + state_provider.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + warn!(target: "payload_builder", + parent_header=%ctx.parent().hash(), + %err, + "failed to calculate state root for payload" + ); + })? 
}; // create the block header diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 443d1d5ebcf..9460d3e1c78 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -6,13 +6,13 @@ use alloy_primitives::{ }; use reth_primitives::{Account, Bytecode}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, - StorageRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateRootProvider, StorageRootProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, - StorageMultiProof, StorageProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, KeccakKeyHasher, + MultiProof, StorageMultiProof, StorageProof, TrieInput, }; /// Mock state for testing @@ -150,6 +150,12 @@ impl StateProofProvider for StateProviderTest { } } +impl HashedPostStateProvider for StateProviderTest { + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::(bundle_state.state()) + } +} + impl StateProvider for StateProviderTest { fn storage( &self, diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 859caa821cd..cf701187dad 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -30,7 +30,6 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-api.workspace = true reth-node-api.workspace = true -reth-trie.workspace = true reth-trie-common = { workspace = true, features = ["eip1186"] } # ethereum diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 782c1a2a8f6..4c81626649d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ 
b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -36,7 +36,6 @@ use reth_transaction_pool::{ error::InvalidPoolTransactionError, BestTransactionsAttributes, PoolTransaction, TransactionPool, }; -use reth_trie::HashedPostState; use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; use std::time::{Duration, Instant}; use tokio::sync::Mutex; @@ -427,7 +426,7 @@ pub trait LoadPendingBlock: block_number, Vec::new(), ); - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + let hashed_state = db.database.hashed_post_state(execution_outcome.state()); let receipts_root = self.receipts_root(&block_env, &execution_outcome, block_number); diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 1fbe16a2ed9..ed107f3b0a9 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -8,7 +8,7 @@ use alloy_primitives::{ }; use reth_errors::ProviderResult; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; -use reth_storage_api::StateProvider; +use reth_storage_api::{HashedPostStateProvider, StateProvider}; use reth_trie::HashedStorage; use revm::Database; @@ -139,6 +139,15 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { } } +impl HashedPostStateProvider for StateProviderTraitObjWrapper<'_> { + fn hashed_post_state( + &self, + bundle_state: &revm::db::BundleState, + ) -> reth_trie::HashedPostState { + self.0.hashed_post_state(bundle_state) + } +} + impl StateProvider for StateProviderTraitObjWrapper<'_> { fn storage( &self, diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 834b1a963bf..5efae46f006 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -34,7 +34,6 @@ reth-evm.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-types.workspace = true -reth-trie.workspace = true 
reth-consensus.workspace = true reth-payload-validator.workspace = true diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index d862bc5f30d..b72a5d35769 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -25,7 +25,6 @@ use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; use reth_rpc_api::BlockSubmissionValidationApiServer; use reth_rpc_server_types::result::internal_rpc_err; use reth_tasks::TaskSpawner; -use reth_trie::HashedPostState; use revm_primitives::{Address, B256, U256}; use serde::{Deserialize, Serialize}; use std::{collections::HashSet, sync::Arc}; @@ -186,7 +185,7 @@ where self.ensure_payment(&block, &output, &message)?; let state_root = - state_provider.state_root(HashedPostState::from_bundle_state(&output.state.state))?; + state_provider.state_root(state_provider.hashed_post_state(&output.state))?; if state_root != block.state_root { return Err(ConsensusError::BodyStateRootDiff( diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 1bb65a6e4fb..8330ef3a66e 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -4,8 +4,8 @@ use crate::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, - DatabaseProviderFactory, EvmEnvProvider, FullProvider, HeaderProvider, ProviderError, - ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, + DatabaseProviderFactory, EvmEnvProvider, FullProvider, HashedPostStateProvider, HeaderProvider, + ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, 
StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; @@ -34,9 +34,16 @@ use reth_primitives::{ use reth_primitives_traits::BlockBody as _; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{DBProvider, NodePrimitivesProvider, StorageChangeSetReader}; +use reth_storage_api::{ + DBProvider, NodePrimitivesProvider, StateCommitmentProvider, StorageChangeSetReader, +}; use reth_storage_errors::provider::ProviderResult; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; +use reth_trie::HashedPostState; +use reth_trie_db::StateCommitment; +use revm::{ + db::BundleState, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, +}; use std::{ ops::{Add, RangeBounds, RangeInclusive, Sub}, sync::Arc, @@ -171,6 +178,10 @@ impl DatabaseProviderFactory for BlockchainProvider2 { } } +impl StateCommitmentProvider for BlockchainProvider2 { + type StateCommitment = N::StateCommitment; +} + impl StaticFileProviderFactory for BlockchainProvider2 { fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() @@ -663,6 +674,14 @@ impl StateProviderFactory for BlockchainProvider2 { } } +impl HashedPostStateProvider for BlockchainProvider2 { + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::<::KeyHasher>( + bundle_state.state(), + ) + } +} + impl CanonChainTracker for BlockchainProvider2 { type Header = HeaderTy; diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 652f6fb33fd..619296b57f3 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -6,7 +6,7 @@ use alloy_primitives::{ Address, BlockNumber, Bytes, B256, }; use reth_primitives::{Account, Bytecode}; 
-use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{HashedPostStateProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, @@ -87,7 +87,7 @@ impl StateRootProvider { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut state = self.hashed_post_state(bundle_state); state.extend(hashed_state); self.state_provider.state_root(state) } @@ -101,7 +101,7 @@ impl StateRootProvider hashed_state: HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut state = self.hashed_post_state(bundle_state); state.extend(hashed_state); self.state_provider.state_root_with_updates(state) } @@ -111,7 +111,7 @@ impl StateRootProvider mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.state_root_from_nodes_with_updates(input) } } @@ -162,7 +162,7 @@ impl StateProofProvider slots: &[B256], ) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.proof(input, address, slots) } @@ -172,7 +172,7 @@ impl StateProofProvider targets: HashMap>, ) -> ProviderResult { let bundle_state = 
self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.multiproof(input, targets) } @@ -182,11 +182,19 @@ impl StateProofProvider target: HashedPostState, ) -> ProviderResult> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.witness(input, target) } } +impl HashedPostStateProvider + for BundleStateProvider +{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + self.state_provider.hashed_post_state(bundle_state) + } +} + impl StateProvider for BundleStateProvider { fn storage( &self, diff --git a/crates/storage/provider/src/providers/consistent_view.rs b/crates/storage/provider/src/providers/consistent_view.rs index 4640f460335..479537f120c 100644 --- a/crates/storage/provider/src/providers/consistent_view.rs +++ b/crates/storage/provider/src/providers/consistent_view.rs @@ -2,11 +2,11 @@ use crate::{BlockNumReader, DatabaseProviderFactory, HeaderProvider}; use alloy_primitives::B256; use reth_errors::ProviderError; use reth_primitives::GotExpected; -use reth_storage_api::{BlockReader, DBProvider}; +use reth_storage_api::{BlockReader, DBProvider, StateCommitmentProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::HashedPostState; -use reth_trie_db::DatabaseHashedPostState; +use reth_trie_db::{DatabaseHashedPostState, StateCommitment}; pub use reth_storage_errors::provider::ConsistentViewError; @@ -33,7 +33,7 @@ pub struct ConsistentDbView { impl ConsistentDbView where - Factory: DatabaseProviderFactory, + Factory: DatabaseProviderFactory + StateCommitmentProvider, { /// Creates new consistent database view. 
pub const fn new(factory: Factory, tip: Option) -> Self { @@ -59,7 +59,9 @@ where { Ok(HashedPostState::default()) } else { - Ok(HashedPostState::from_reverts(provider.tx_ref(), block_number + 1)?) + Ok(HashedPostState::from_reverts::< + ::KeyHasher, + >(provider.tx_ref(), block_number + 1)?) } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index d34d67d9dc8..39230e253ed 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -3,9 +3,9 @@ use crate::{ to_range, traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, - EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, ProviderError, - PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + EvmEnvProvider, HashedPostStateProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, + ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, @@ -25,9 +25,16 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{NodePrimitivesProvider, TryIntoHistoricalStateProvider}; +use reth_storage_api::{ + NodePrimitivesProvider, StateCommitmentProvider, TryIntoHistoricalStateProvider, +}; use reth_storage_errors::provider::ProviderResult; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; +use reth_trie::HashedPostState; +use reth_trie_db::StateCommitment; +use revm::{ + db::BundleState, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, +}; use std::{ ops::{RangeBounds, RangeInclusive}, path::Path, @@ 
-219,6 +226,10 @@ impl DatabaseProviderFactory for ProviderFactory { } } +impl StateCommitmentProvider for ProviderFactory { + type StateCommitment = N::StateCommitment; +} + impl StaticFileProviderFactory for ProviderFactory { /// Returns static file provider fn static_file_provider(&self) -> StaticFileProvider { @@ -651,6 +662,14 @@ impl PruneCheckpointReader for ProviderFactory { } } +impl HashedPostStateProvider for ProviderFactory { + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::<::KeyHasher>( + bundle_state.state(), + ) + } +} + impl Clone for ProviderFactory { fn clone(&self) -> Self { Self { diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index ad36a4a5ab3..93752c1e278 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,6 @@ use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - ProviderError, StateProvider, StateRootProvider, + HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{ @@ -28,7 +28,7 @@ use reth_trie::{ }; use reth_trie_db::{ DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, - DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, + DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, StateCommitment, }; use std::fmt::Debug; @@ -136,7 +136,9 @@ impl<'b, Provider: DBProvider + BlockNumReader + StateCommitmentProvider> ); } - Ok(HashedPostState::from_reverts(self.tx(), self.block_number)?) + Ok(HashedPostState::from_reverts::< + ::KeyHasher, + >(self.tx(), self.block_number)?) } /// Retrieve revert hashed storage for this history provider and target address. 
@@ -394,6 +396,16 @@ impl StateProof } } +impl HashedPostStateProvider + for HistoricalStateProviderRef<'_, Provider> +{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::< + ::KeyHasher, + >(bundle_state.state()) + } +} + impl StateProvider for HistoricalStateProviderRef<'_, Provider> { @@ -433,6 +445,12 @@ impl StateCommitmentProvider + for HistoricalStateProviderRef<'_, Provider> +{ + type StateCommitment = Provider::StateCommitment; +} + /// State provider for a given block number. /// For more detailed description, see [`HistoricalStateProviderRef`]. #[derive(Debug)] @@ -482,6 +500,12 @@ impl } } +impl StateCommitmentProvider + for HistoricalStateProvider +{ + type StateCommitment = Provider::StateCommitment; +} + // Delegates all provider impls to [HistoricalStateProviderRef] delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + StateCommitmentProvider]); diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index a2ec4972d10..bdb6de1e569 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,6 +1,6 @@ use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - StateProvider, StateRootProvider, + HashedPostStateProvider, StateProvider, StateRootProvider, }; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -22,7 +22,7 @@ use reth_trie::{ }; use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, - DatabaseTrieWitness, + DatabaseTrieWitness, StateCommitment, }; /// State provider over latest state that takes tx reference. 
@@ -157,6 +157,16 @@ impl StateProofProvider } } +impl HashedPostStateProvider + for LatestStateProviderRef<'_, Provider> +{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::< + ::KeyHasher, + >(bundle_state.state()) + } +} + impl StateProvider for LatestStateProviderRef<'_, Provider> { @@ -181,11 +191,17 @@ impl StateProv } } +impl StateCommitmentProvider + for LatestStateProviderRef<'_, Provider> +{ + type StateCommitment = Provider::StateCommitment; +} + /// State provider for the latest state. #[derive(Debug)] pub struct LatestStateProvider(Provider); -impl LatestStateProvider { +impl LatestStateProvider { /// Create new state provider pub const fn new(db: Provider) -> Self { Self(db) @@ -198,6 +214,10 @@ impl LatestStateProvider { } } +impl StateCommitmentProvider for LatestStateProvider { + type StateCommitment = Provider::StateCommitment; +} + // Delegates all provider impls to [LatestStateProviderRef] delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader + StateCommitmentProvider]); diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index f2648fb15e6..1fa15214e9a 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -57,6 +57,9 @@ macro_rules! delegate_provider_impls { fn multiproof(&self, input: reth_trie::TrieInput, targets: alloy_primitives::map::HashMap>) -> reth_storage_errors::provider::ProviderResult; fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult>; } + HashedPostStateProvider $(where [$($generics)*])? 
{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> reth_trie::HashedPostState; + } ); } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 385e5e8205d..6815bbcb123 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -29,7 +29,8 @@ use reth_primitives::{ use reth_primitives_traits::SignedTransaction; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ - DatabaseProviderFactory, StageCheckpointReader, StateProofProvider, StorageRootProvider, + DatabaseProviderFactory, HashedPostStateProvider, StageCheckpointReader, + StateCommitmentProvider, StateProofProvider, StorageRootProvider, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ @@ -164,6 +165,10 @@ impl NodeTypes for MockNode { type Storage = EthStorage; } +impl StateCommitmentProvider for MockEthProvider { + type StateCommitment = ::StateCommitment; +} + impl DatabaseProviderFactory for MockEthProvider { type DB = DatabaseMock; type Provider = DatabaseProvider; @@ -682,6 +687,12 @@ impl StateProofProvider for MockEthProvider { } } +impl HashedPostStateProvider for MockEthProvider { + fn hashed_post_state(&self, _state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } +} + impl StateProvider for MockEthProvider { fn storage( &self, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 5120afffa85..a33e4159be2 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -27,7 +27,9 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{NodePrimitivesProvider, StateProofProvider, StorageRootProvider}; +use reth_storage_api::{ + 
HashedPostStateProvider, NodePrimitivesProvider, StateProofProvider, StorageRootProvider, +}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, @@ -413,6 +415,12 @@ impl StateProofProvider for NoopProvider { } } +impl HashedPostStateProvider for NoopProvider { + fn hashed_post_state(&self, _bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } +} + impl StateProvider for NoopProvider { fn storage( &self, diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 02e912050d5..dc5af491efc 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -234,7 +234,7 @@ mod tests { }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{Account, Receipt, Receipts, StorageEntry}; - use reth_storage_api::DatabaseProviderFactory; + use reth_storage_api::{DatabaseProviderFactory, HashedPostStateProvider}; use reth_trie::{ test_utils::{state_root, storage_root_prehashed}, HashedPostState, HashedStorage, StateRoot, StorageRoot, @@ -1118,13 +1118,7 @@ mod tests { assert_eq!( StateRoot::overlay_root( tx, - ExecutionOutcome::::new( - state.bundle_state.clone(), - Receipts::default(), - 0, - Vec::new() - ) - .hash_state_slow(), + provider_factory.hashed_post_state(&state.bundle_state) ) .unwrap(), state_root(expected.clone().into_iter().map(|(address, (account, storage))| ( diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index ba2ccf1b157..7ebff976d13 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -25,6 +25,7 @@ reth-storage-errors.workspace = true reth-trie.workspace = true reth-trie-db.workspace = true reth-db.workspace = true +revm.workspace = true # ethereum alloy-eips.workspace = true diff --git a/crates/storage/storage-api/src/noop.rs 
b/crates/storage/storage-api/src/noop.rs index 9c971e7b293..0a6341cc4b4 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -2,9 +2,9 @@ use crate::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockSource, ChangeSetReader, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, - ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, - StateProvider, StateRootProvider, StorageRootProvider, TransactionVariant, + BlockSource, ChangeSetReader, HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, + PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, + StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::{ @@ -456,6 +456,12 @@ impl StateProofProvider for NoopProvider HashedPostStateProvider for NoopProvider { + fn hashed_post_state(&self, _bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } +} + impl StateProvider for NoopProvider { fn storage( &self, diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 0cb26d30743..dc53319f4c5 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -8,7 +8,9 @@ use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue use auto_impl::auto_impl; use reth_primitives::Bytecode; use reth_storage_errors::provider::ProviderResult; +use reth_trie::HashedPostState; use reth_trie_db::StateCommitment; +use revm::db::states::BundleState; /// Type alias of boxed [`StateProvider`]. 
pub type StateProviderBox = Box; @@ -21,6 +23,7 @@ pub trait StateProvider: + StateRootProvider + StorageRootProvider + StateProofProvider + + HashedPostStateProvider + Send + Sync { @@ -83,11 +86,18 @@ pub trait StateProvider: } /// Trait implemented for database providers that can provide the [`StateCommitment`] type. -pub trait StateCommitmentProvider { +pub trait StateCommitmentProvider: Send + Sync { /// The [`StateCommitment`] type that can be used to perform state commitment operations. type StateCommitment: StateCommitment; } +/// Trait that provides the hashed state from various sources. +#[auto_impl(&, Arc, Box)] +pub trait HashedPostStateProvider: Send + Sync { + /// Returns the `HashedPostState` of the provided [`BundleState`]. + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState; +} + /// Trait implemented for database providers that can be converted into a historical state provider. pub trait TryIntoHistoricalStateProvider { /// Returns a historical [`StateProvider`] indexed by the given historic block number. diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index ac8c3b05304..95ff6d91f37 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{keccak256, BlockNumber, B256}; +use alloy_primitives::{BlockNumber, B256}; use derive_more::Deref; use reth_db::tables; use reth_db_api::{ @@ -10,25 +10,34 @@ use reth_db_api::{ use reth_primitives::StorageEntry; use reth_trie::{ prefix_set::{PrefixSetMut, TriePrefixSets}, - Nibbles, + KeyHasher, Nibbles, }; use std::{ collections::{HashMap, HashSet}, + marker::PhantomData, ops::RangeInclusive, }; /// A wrapper around a database transaction that loads prefix sets within a given block range. 
-#[derive(Deref, Debug)] -pub struct PrefixSetLoader<'a, TX>(&'a TX); +#[derive(Debug)] +pub struct PrefixSetLoader<'a, TX, KH>(&'a TX, PhantomData); -impl<'a, TX> PrefixSetLoader<'a, TX> { +impl<'a, TX, KH> PrefixSetLoader<'a, TX, KH> { /// Create a new loader. pub const fn new(tx: &'a TX) -> Self { - Self(tx) + Self(tx, PhantomData) } } -impl PrefixSetLoader<'_, TX> { +impl Deref for PrefixSetLoader<'_, TX, KH> { + type Target = TX; + + fn deref(&self) -> &Self::Target { + self.0 + } +} + +impl PrefixSetLoader<'_, TX, KH> { /// Load all account and storage changes for the given block range. pub fn load(self, range: RangeInclusive) -> Result { // Initialize prefix sets. @@ -41,7 +50,7 @@ impl PrefixSetLoader<'_, TX> { let mut account_hashed_state_cursor = self.cursor_read::()?; for account_entry in account_changeset_cursor.walk_range(range.clone())? { let (_, AccountBeforeTx { address, .. }) = account_entry?; - let hashed_address = keccak256(address); + let hashed_address = KH::hash_key(address); account_prefix_set.insert(Nibbles::unpack(hashed_address)); if account_hashed_state_cursor.seek_exact(hashed_address)?.is_none() { @@ -55,12 +64,12 @@ impl PrefixSetLoader<'_, TX> { let storage_range = BlockNumberAddress::range(range); for storage_entry in storage_cursor.walk_range(storage_range)? { let (BlockNumberAddress((_, address)), StorageEntry { key, .. 
}) = storage_entry?; - let hashed_address = keccak256(address); + let hashed_address = KH::hash_key(address); account_prefix_set.insert(Nibbles::unpack(hashed_address)); storage_prefix_sets .entry(hashed_address) .or_default() - .insert(Nibbles::unpack(keccak256(key))); + .insert(Nibbles::unpack(KH::hash_key(key))); } Ok(TriePrefixSets { diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 6e2cea5051d..5aaf3ebe5b0 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -1,5 +1,5 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory, PrefixSetLoader}; -use alloy_primitives::{keccak256, Address, BlockNumber, B256, U256}; +use alloy_primitives::{Address, BlockNumber, B256, U256}; use reth_db::tables; use reth_db_api::{ cursor::DbCursorRO, @@ -10,7 +10,8 @@ use reth_execution_errors::StateRootError; use reth_storage_errors::db::DatabaseError; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory, - updates::TrieUpdates, HashedPostState, HashedStorage, StateRoot, StateRootProgress, TrieInput, + updates::TrieUpdates, HashedPostState, HashedStorage, KeccakKeyHasher, KeyHasher, StateRoot, + StateRootProgress, TrieInput, }; use std::{collections::HashMap, ops::RangeInclusive}; use tracing::debug; @@ -122,7 +123,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { pub trait DatabaseHashedPostState: Sized { /// Initializes [`HashedPostState`] from reverts. Iterates over state reverts from the specified /// block up to the current tip and aggregates them into hashed state in reverse. 
- fn from_reverts(tx: &TX, from: BlockNumber) -> Result; + fn from_reverts(tx: &TX, from: BlockNumber) -> Result; } impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> @@ -136,7 +137,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> tx: &'a TX, range: RangeInclusive, ) -> Result { - let loaded_prefix_sets = PrefixSetLoader::new(tx).load(range)?; + let loaded_prefix_sets = PrefixSetLoader::<_, KeccakKeyHasher>::new(tx).load(range)?; Ok(Self::from_tx(tx).with_prefix_sets(loaded_prefix_sets)) } @@ -216,7 +217,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } impl DatabaseHashedPostState for HashedPostState { - fn from_reverts(tx: &TX, from: BlockNumber) -> Result { + fn from_reverts(tx: &TX, from: BlockNumber) -> Result { // Iterate over account changesets and record value before first occurring account change. let mut accounts = HashMap::new(); let mut account_changesets_cursor = tx.cursor_read::()?; @@ -237,19 +238,19 @@ impl DatabaseHashedPostState for HashedPostState { } let hashed_accounts = - accounts.into_iter().map(|(address, info)| (keccak256(address), info)).collect(); + accounts.into_iter().map(|(address, info)| (KH::hash_key(address), info)).collect(); let hashed_storages = storages .into_iter() .map(|(address, storage)| { ( - keccak256(address), + KH::hash_key(address), HashedStorage::from_iter( // The `wiped` flag indicates only whether previous storage entries // should be looked up in db or not. For reverts it's a noop since all // wiped changes had been written as storage reverts. 
false, - storage.into_iter().map(|(slot, value)| (keccak256(slot), value)), + storage.into_iter().map(|(slot, value)| (KH::hash_key(slot), value)), ), ) }) @@ -265,6 +266,7 @@ mod tests { use alloy_primitives::{hex, map::HashMap, Address, U256}; use reth_db::test_utils::create_test_rw_db; use reth_db_api::database::Database; + use reth_trie::KeccakKeyHasher; use revm::{db::BundleState, primitives::AccountInfo}; #[test] @@ -285,7 +287,7 @@ mod tests { .build(); assert_eq!(bundle_state.reverts.len(), 1); - let post_state = HashedPostState::from_bundle_state(&bundle_state.state); + let post_state = HashedPostState::from_bundle_state::(&bundle_state.state); assert_eq!(post_state.accounts.len(), 2); assert_eq!(post_state.storages.len(), 2); diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 94138823e00..f90a53fa99a 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -9,6 +9,7 @@ use reth_db::DatabaseError; use reth_execution_errors::StorageRootError; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, + StateCommitmentProvider, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -53,7 +54,12 @@ impl ParallelProof { impl ParallelProof where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, + Factory: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + Send + + Sync + + 'static, { /// Generate a state multiproof according to specified targets. 
pub fn multiproof( diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index 8d2b18f5e11..e66d1f78213 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -8,6 +8,7 @@ use reth_db::DatabaseError; use reth_execution_errors::StorageRootError; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, + StateCommitmentProvider, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -58,7 +59,12 @@ impl ParallelStateRoot { impl ParallelStateRoot where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, + Factory: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + Send + + Sync + + 'static, { /// Calculate incremental state root in parallel. pub fn incremental_root(self) -> Result { diff --git a/crates/trie/trie/benches/hash_post_state.rs b/crates/trie/trie/benches/hash_post_state.rs index 6e913ef78a3..7111a785f46 100644 --- a/crates/trie/trie/benches/hash_post_state.rs +++ b/crates/trie/trie/benches/hash_post_state.rs @@ -2,7 +2,7 @@ use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; -use reth_trie::{HashedPostState, HashedStorage}; +use reth_trie::{HashedPostState, HashedStorage, KeccakKeyHasher}; use revm::db::{states::BundleBuilder, BundleAccount}; pub fn hash_post_state(c: &mut Criterion) { @@ -19,7 +19,7 @@ pub fn hash_post_state(c: &mut Criterion) { // parallel group.bench_function(BenchmarkId::new("parallel hashing", size), |b| { - b.iter(|| HashedPostState::from_bundle_state(&state)) + b.iter(|| HashedPostState::from_bundle_state::(&state)) }); } } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index fdfb86a53dd..3e390bf97bc 100644 --- a/crates/trie/trie/src/state.rs +++ 
b/crates/trie/trie/src/state.rs @@ -10,6 +10,7 @@ use alloy_primitives::{ use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_primitives::Account; +use reth_trie_common::KeyHasher; use revm::db::{states::CacheAccount, AccountStatus, BundleAccount}; use std::borrow::Cow; @@ -26,13 +27,13 @@ impl HashedPostState { /// Initialize [`HashedPostState`] from bundle state. /// Hashes all changed accounts and storage entries that are currently stored in the bundle /// state. - pub fn from_bundle_state<'a>( + pub fn from_bundle_state<'a, KH: KeyHasher>( state: impl IntoParallelIterator, ) -> Self { let hashed = state .into_par_iter() .map(|(address, account)| { - let hashed_address = keccak256(address); + let hashed_address = KH::hash_key(address); let hashed_account = account.info.clone().map(Into::into); let hashed_storage = HashedStorage::from_plain_storage( account.status, @@ -53,13 +54,13 @@ impl HashedPostState { /// Initialize [`HashedPostState`] from cached state. /// Hashes all changed accounts and storage entries that are currently stored in cache. - pub fn from_cache_state<'a>( + pub fn from_cache_state<'a, KH: KeyHasher>( state: impl IntoParallelIterator, ) -> Self { let hashed = state .into_par_iter() .map(|(address, account)| { - let hashed_address = keccak256(address); + let hashed_address = KH::hash_key(address); let hashed_account = account.account.as_ref().map(|a| a.info.clone().into()); let hashed_storage = HashedStorage::from_plain_storage( account.status, @@ -354,6 +355,7 @@ impl HashedStorageSorted { mod tests { use super::*; use alloy_primitives::Bytes; + use reth_trie_common::KeccakKeyHasher; use revm::{ db::{ states::{plain_account::PlainStorage, StorageSlot}, @@ -467,7 +469,7 @@ mod tests { let state = vec![(&address, &account)]; // Convert the bundle state into a hashed post state. 
- let hashed_state = HashedPostState::from_bundle_state(state); + let hashed_state = HashedPostState::from_bundle_state::(state); // Validate the hashed post state. assert_eq!(hashed_state.accounts.len(), 1); @@ -506,7 +508,7 @@ mod tests { let state = vec![(&address, &account)]; // Convert the cache state into a hashed post state. - let hashed_state = HashedPostState::from_cache_state(state); + let hashed_state = HashedPostState::from_cache_state::(state); // Validate the hashed post state. assert_eq!(hashed_state.accounts.len(), 1); From 0fcc6cf19eeb7139ac330d6ef58db41bfb217c83 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 3 Dec 2024 21:09:44 +0400 Subject: [PATCH 859/970] feat: Add `Transaction` AT to `ConfigureEvm` (#13106) --- Cargo.lock | 1 + crates/engine/invalid-block-hooks/src/witness.rs | 12 ++++++------ crates/engine/util/src/reorg.rs | 4 ++-- crates/ethereum/evm/src/execute.rs | 16 +++++++++++++--- crates/ethereum/evm/src/lib.rs | 1 + crates/ethereum/node/Cargo.toml | 2 -- crates/ethereum/node/src/node.rs | 6 +++--- crates/ethereum/payload/src/lib.rs | 4 ++-- crates/evm/src/lib.rs | 12 +++++++----- crates/node/api/src/node.rs | 2 +- crates/node/builder/src/components/builder.rs | 2 +- crates/node/builder/src/components/execute.rs | 6 +++--- crates/node/builder/src/components/mod.rs | 6 +++--- crates/optimism/evm/src/execute.rs | 12 ++++++++---- crates/optimism/evm/src/lib.rs | 1 + crates/optimism/node/src/node.rs | 4 ++-- crates/optimism/payload/src/builder.rs | 14 +++++++------- crates/optimism/rpc/src/eth/pending_block.rs | 4 ++-- crates/optimism/rpc/src/witness.rs | 4 ++-- crates/rpc/rpc-builder/src/lib.rs | 4 ++-- crates/rpc/rpc-eth-api/Cargo.toml | 1 + crates/rpc/rpc-eth-api/src/helpers/call.rs | 7 ++++--- .../rpc/rpc-eth-api/src/helpers/pending_block.rs | 2 +- crates/rpc/rpc/src/eth/helpers/pending_block.rs | 2 +- examples/custom-evm/src/main.rs | 2 ++ examples/stateful-precompile/src/main.rs | 1 + 26 files changed, 77 insertions(+), 
55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3448ddbd451..855c02a22b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9031,6 +9031,7 @@ dependencies = [ "reth-network-api", "reth-node-api", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-eth-types", diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index cc98837a98c..632428d6b64 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -8,8 +8,8 @@ use reth_engine_primitives::InvalidBlockHook; use reth_evm::{ state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, }; -use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader, TransactionSigned}; -use reth_primitives_traits::{HeaderTy, SignedTransaction}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, db::states::bundle_state::BundleRetention, @@ -63,8 +63,8 @@ where trie_updates: Option<(&TrieUpdates, B256)>, ) -> eyre::Result<()> where - N: NodePrimitives, - EvmConfig: ConfigureEvm

, + N: NodePrimitives, + EvmConfig: ConfigureEvm
, { // TODO(alexey): unify with `DebugApi::debug_execution_witness` @@ -298,13 +298,13 @@ where impl InvalidBlockHook for InvalidBlockWitnessHook where - N: NodePrimitives, + N: NodePrimitives, P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static, - EvmConfig: ConfigureEvm
>, + EvmConfig: ConfigureEvm
, { fn on_invalid_block( &self, diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 4f7e213b058..24e14162284 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -110,7 +110,7 @@ where S: Stream>, Engine: EngineTypes, Provider: BlockReader + StateProviderFactory, - Evm: ConfigureEvm
, + Evm: ConfigureEvm
, Spec: EthereumHardforks, { type Item = S::Item; @@ -257,7 +257,7 @@ fn create_reorg_head( ) -> RethResult<(ExecutionPayload, ExecutionPayloadSidecar)> where Provider: BlockReader + StateProviderFactory, - Evm: ConfigureEvm
, + Evm: ConfigureEvm
, Spec: EthereumHardforks, { let chain_spec = payload_validator.chain_spec(); diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index e21b42433dd..6cbbb69c906 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -58,8 +58,15 @@ impl EthExecutionStrategyFactory { impl BlockExecutionStrategyFactory for EthExecutionStrategyFactory where - EvmConfig: - Clone + Unpin + Sync + Send + 'static + ConfigureEvm
, + EvmConfig: Clone + + Unpin + + Sync + + Send + + 'static + + ConfigureEvm< + Header = alloy_consensus::Header, + Transaction = reth_primitives::TransactionSigned, + >, { type Primitives = EthPrimitives; @@ -128,7 +135,10 @@ where impl BlockExecutionStrategy for EthExecutionStrategy where DB: Database + Display>, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm< + Header = alloy_consensus::Header, + Transaction = reth_primitives::TransactionSigned, + >, { type DB = DB; type Error = BlockExecutionError; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 8042562357f..509b61cb2ec 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -62,6 +62,7 @@ impl EthEvmConfig { impl ConfigureEvmEnv for EthEvmConfig { type Header = Header; + type Transaction = TransactionSigned; type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 55421cf8478..f5fe1dac234 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -32,8 +32,6 @@ reth-primitives.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-trie-db.workspace = true -alloy-consensus.workspace = true - # revm with required ethereum features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 58b6aeaf644..dd4f1e5802c 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -2,7 +2,6 @@ use std::sync::Arc; -use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; @@ -13,7 +12,8 @@ use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_network::{NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodeTypesWithDB, TxTy, + AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, HeaderTy, NodeTypesWithDB, + TxTy, }; use reth_node_builder::{ components::{ @@ -242,7 +242,7 @@ impl 
EthereumPayloadBuilder { where Types: NodeTypesWithEngine, Node: FullNodeTypes, - Evm: ConfigureEvm
, + Evm: ConfigureEvm
, Transaction = TxTy>, Pool: TransactionPool>> + Unpin + 'static, diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index a5e6bcaa5f0..f909d3840e2 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -91,7 +91,7 @@ where // Default implementation of [PayloadBuilder] for unit type impl PayloadBuilder for EthereumPayloadBuilder where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool>, { @@ -155,7 +155,7 @@ pub fn default_ethereum_payload( best_txs: F, ) -> Result, PayloadBuilderError> where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool>, F: FnOnce(BestTransactionsAttributes) -> BestTransactionsIter, diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index ae884bdd5f8..29f6d7c6581 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -20,7 +20,6 @@ extern crate alloc; use crate::builder::RethEvmBuilder; use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; -use reth_primitives::TransactionSigned; use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv}; @@ -116,18 +115,21 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { /// The header type used by the EVM. type Header: BlockHeader; + /// The transaction type. + type Transaction; + /// The error type that is returned by [`Self::next_cfg_and_block_env`]. type Error: core::error::Error + Send + Sync; - /// Returns a [`TxEnv`] from a [`TransactionSigned`] and [`Address`]. - fn tx_env(&self, transaction: &TransactionSigned, signer: Address) -> TxEnv { + /// Returns a [`TxEnv`] from a transaction and [`Address`]. + fn tx_env(&self, transaction: &Self::Transaction, signer: Address) -> TxEnv { let mut tx_env = TxEnv::default(); self.fill_tx_env(&mut tx_env, transaction, signer); tx_env } - /// Fill transaction environment from a [`TransactionSigned`] and the given sender address. - fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); + /// Fill transaction environment from a transaction and the given sender address. + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &Self::Transaction, sender: Address); /// Fill transaction environment with a system contract call. 
fn fill_tx_env_system_contract_call( diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 1b490c4cf41..fc6366a2eb5 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -49,7 +49,7 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Pool: TransactionPool>> + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm
>; + type Evm: ConfigureEvm
, Transaction = TxTy>; /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider::Primitives>; diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 15f6c12319f..7e2d0eb43cc 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -377,7 +377,7 @@ where Pool: TransactionPool>> + Unpin + 'static, - EVM: ConfigureEvm
>, + EVM: ConfigureEvm
, Transaction = TxTy>, Executor: BlockExecutorProvider::Primitives>, Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 5ecc67d8b76..e3226fa8e37 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -1,7 +1,7 @@ //! EVM component for the node builder. use crate::{BuilderContext, FullNodeTypes}; use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::{ConfigureEvm, HeaderTy}; +use reth_node_api::{ConfigureEvm, HeaderTy, TxTy}; use std::future::Future; /// A type that knows how to build the executor types. @@ -9,7 +9,7 @@ pub trait ExecutorBuilder: Send { /// The EVM config to use. /// /// This provides the node with the necessary configuration to configure an EVM. - type EVM: ConfigureEvm
>; + type EVM: ConfigureEvm
, Transaction = TxTy>; /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider< @@ -26,7 +26,7 @@ pub trait ExecutorBuilder: Send { impl ExecutorBuilder for F where Node: FullNodeTypes, - EVM: ConfigureEvm
>, + EVM: ConfigureEvm
, Transaction = TxTy>, Executor: BlockExecutorProvider::Primitives>, F: FnOnce(&BuilderContext) -> Fut + Send, diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 764277dcbf2..b643e2aa2a6 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -40,7 +40,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati type Pool: TransactionPool>> + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm
>; + type Evm: ConfigureEvm
, Transaction = TxTy>; /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider::Primitives>; @@ -99,7 +99,7 @@ where Pool: TransactionPool>> + Unpin + 'static, - EVM: ConfigureEvm
>, + EVM: ConfigureEvm
, Transaction = TxTy>, Executor: BlockExecutorProvider::Primitives>, Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { @@ -139,7 +139,7 @@ impl Clone for Components>, + EVM: ConfigureEvm
, Transaction = TxTy>, Executor: BlockExecutorProvider, Cons: Clone, { diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 3673f73a894..205c85160dc 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -22,7 +22,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OpHardfork; use reth_optimism_primitives::OpPrimitives; -use reth_primitives::{BlockWithSenders, Receipt, TxType}; +use reth_primitives::{BlockWithSenders, Receipt, TransactionSigned, TxType}; use reth_revm::{Database, State}; use revm_primitives::{db::DatabaseCommit, EnvWithHandlerCfg, ResultAndState, U256}; use tracing::trace; @@ -52,8 +52,12 @@ impl OpExecutionStrategyFactory { impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory where - EvmConfig: - Clone + Unpin + Sync + Send + 'static + ConfigureEvm
, + EvmConfig: Clone + + Unpin + + Sync + + Send + + 'static + + ConfigureEvm
, { type Primitives = OpPrimitives; type Strategy + Display>> = @@ -115,7 +119,7 @@ where impl BlockExecutionStrategy for OpExecutionStrategy where DB: Database + Display>, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, { type DB = DB; type Primitives = OpPrimitives; diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 176864de6dc..7424379f5ae 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -57,6 +57,7 @@ impl OpEvmConfig { impl ConfigureEvmEnv for OpEvmConfig { type Header = Header; + type Transaction = TransactionSigned; type Error = DecodeError; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index a13ab9dcec1..b2203331ddf 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -34,7 +34,7 @@ use reth_optimism_rpc::{ OpEthApi, SequencerClient, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::BlockBody; +use reth_primitives::{BlockBody, TransactionSigned}; use reth_provider::{ providers::ChainStorage, BlockBodyReader, BlockBodyWriter, CanonStateSubscriptions, ChainSpecProvider, DBProvider, EthStorage, ProviderResult, ReadBodyInput, @@ -468,7 +468,7 @@ where Pool: TransactionPool>> + Unpin + 'static, - Evm: ConfigureEvm
, + Evm: ConfigureEvm
, { let payload_builder = reth_optimism_payload_builder::OpPayloadBuilder::new(evm_config) .with_transactions(self.best_transactions) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 91e70d0c3c8..f370ed496f0 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -98,7 +98,7 @@ impl OpPayloadBuilder { } impl OpPayloadBuilder where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, Txs: OpPayloadTransactions, { /// Constructs an Optimism payload from the transactions sent via the @@ -155,7 +155,7 @@ where impl OpPayloadBuilder where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, { /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload /// (that has the `parent` as its parent). @@ -217,7 +217,7 @@ impl PayloadBuilder for OpPayloadBui where Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool>, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, Txs: OpPayloadTransactions, { type Attributes = OpPayloadBuilderAttributes; @@ -294,7 +294,7 @@ where ctx: &OpPayloadBuilderCtx, ) -> Result, PayloadBuilderError> where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, DB: Database, { let Self { pool, best } = self; @@ -339,7 +339,7 @@ where ctx: OpPayloadBuilderCtx, ) -> Result, PayloadBuilderError> where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, DB: Database + AsRef

, P: StateRootProvider + HashedPostStateProvider, { @@ -465,7 +465,7 @@ where ctx: &OpPayloadBuilderCtx, ) -> Result where - EvmConfig: ConfigureEvm

, + EvmConfig: ConfigureEvm
, DB: Database + AsRef

, P: StateProofProvider, { @@ -700,7 +700,7 @@ impl OpPayloadBuilderCtx { impl OpPayloadBuilderCtx where - EvmConfig: ConfigureEvm

, + EvmConfig: ConfigureEvm
, { /// apply eip-4788 pre block contract call pub fn apply_pre_beacon_root_contract_call( diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 9a8d169e527..eebb61c8cb0 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -7,7 +7,7 @@ use alloy_primitives::{BlockNumber, B256}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_primitives::{Receipt, SealedBlockWithSenders}; +use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSigned}; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, ProviderTx, ReceiptProvider, StateProviderFactory, @@ -33,7 +33,7 @@ where + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool>>, - Evm: ConfigureEvm
, + Evm: ConfigureEvm
, >, { #[inline] diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs index 278c785cbe9..8cc4bd98ef2 100644 --- a/crates/optimism/rpc/src/witness.rs +++ b/crates/optimism/rpc/src/witness.rs @@ -9,7 +9,7 @@ use reth_chainspec::ChainSpecProvider; use reth_evm::ConfigureEvm; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_payload_builder::OpPayloadBuilder; -use reth_primitives::SealedHeader; +use reth_primitives::{SealedHeader, TransactionSigned}; use reth_provider::{BlockReaderIdExt, ProviderError, ProviderResult, StateProviderFactory}; pub use reth_rpc_api::DebugExecutionWitnessApiServer; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; @@ -49,7 +49,7 @@ where + StateProviderFactory + ChainSpecProvider + 'static, - EvmConfig: ConfigureEvm
+ 'static, + EvmConfig: ConfigureEvm
+ 'static, { fn execute_payload( &self, diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 0b9a84a5b98..031f960096f 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -50,7 +50,7 @@ //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: //! CanonStateSubscriptions + Clone + 'static, -//! EvmConfig: ConfigureEvm
, +//! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::FullConsensus + Clone + 'static, //! { @@ -135,7 +135,7 @@ //! CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, //! EngineT: EngineTypes, -//! EvmConfig: ConfigureEvm
, +//! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::FullConsensus + Clone + 'static, //! { diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index cf701187dad..bffd4fa308e 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -16,6 +16,7 @@ workspace = true revm.workspace = true revm-inspectors.workspace = true revm-primitives = { workspace = true, features = ["dev"] } +reth-primitives-traits.workspace = true reth-errors.workspace = true reth-evm.workspace = true reth-primitives.workspace = true diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index f9441f0630a..aaa2ce131c9 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -19,7 +19,7 @@ use futures::Future; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_node_api::BlockBody; -use reth_primitives::TransactionSigned; +use reth_primitives_traits::SignedTransaction; use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_revm::{ database::StateProviderDatabase, @@ -664,14 +664,15 @@ pub trait Call: LoadState> + SpawnBlocking { where DB: Database + DatabaseCommit, EthApiError: From, - I: IntoIterator, + I: IntoIterator::Transaction)>, + ::Transaction: SignedTransaction, { let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); let mut evm = self.evm_config().evm_with_env(db, env); let mut index = 0; for (sender, tx) in transactions { - if tx.hash() == target_tx_hash { + if *tx.tx_hash() == target_tx_hash { // reached the target transaction break } diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 4c81626649d..c5bb0994607 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -56,7 +56,7 
@@ pub trait LoadPendingBlock: + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool>>, - Evm: ConfigureEvm
, + Evm: ConfigureEvm
>, > { /// Returns a handle to the pending block. diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 34c0ae96261..afd69a2f404 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -31,7 +31,7 @@ where Pool: TransactionPool< Transaction: PoolTransaction>, >, - Evm: ConfigureEvm
, + Evm: ConfigureEvm
>, >, { #[inline] diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index d9e341c02cc..8990ba2252e 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -84,6 +84,8 @@ impl MyEvmConfig { impl ConfigureEvmEnv for MyEvmConfig { type Header = Header; + type Transaction = TransactionSigned; + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index 29d5051434b..03ed1fa6943 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -148,6 +148,7 @@ impl StatefulPrecompileMut for WrappedPrecompile { impl ConfigureEvmEnv for MyEvmConfig { type Header = Header; + type Transaction = TransactionSigned; type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { From 35cfd41863e6c865f464353b0fb8b67337be0f86 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 3 Dec 2024 15:55:04 -0500 Subject: [PATCH 860/970] feat: make `import_transactions` use network generics (#13110) --- crates/net/eth-wire-types/src/primitives.rs | 6 ++- crates/net/network/src/transactions/mod.rs | 43 ++++++++++----------- 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index ff7ab1c801b..1b0c16c0622 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -21,6 +21,7 @@ pub trait NetworkPrimitives: + PartialEq + Eq + 'static; + /// The block body type. type BlockBody: Encodable + Decodable @@ -32,6 +33,7 @@ pub trait NetworkPrimitives: + PartialEq + Eq + 'static; + /// Full block type. type Block: Block
+ Encodable @@ -58,8 +60,10 @@ pub trait NetworkPrimitives: + PartialEq + Eq + 'static; + /// The transaction type which peers return in `PooledTransactions` messages. - type PooledTransaction: Encodable + type PooledTransaction: TryFrom + + Encodable + Decodable + Send + Sync diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index b352dfe3136..4a7167a8064 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -48,7 +48,10 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{PooledTransactionsElement, TransactionSigned}; +use reth_primitives::{ + transaction::SignedTransactionIntoRecoveredExt, PooledTransactionsElement, RecoveredTx, + TransactionSigned, +}; use reth_primitives_traits::{SignedTransaction, TxType}; use reth_tokio_util::EventStream; use reth_transaction_pool::{ @@ -272,7 +275,7 @@ pub struct TransactionsManager, /// Incoming events from the [`NetworkManager`](crate::NetworkManager). - transaction_events: UnboundedMeteredReceiver, + transaction_events: UnboundedMeteredReceiver>, /// How the `TransactionsManager` is configured. config: TransactionsManagerConfig, /// `TransactionsManager` metrics @@ -697,13 +700,15 @@ where impl TransactionsManager where - Pool: TransactionPool, + Pool: TransactionPool + 'static, N: NetworkPrimitives< BroadcastedTransaction: SignedTransaction, PooledTransaction: SignedTransaction, >, - Pool::Transaction: - PoolTransaction>, + Pool::Transaction: PoolTransaction< + Consensus = N::BroadcastedTransaction, + Pooled: Into + From>, + >, { /// Invoked when transactions in the local mempool are considered __pending__. /// @@ -1099,16 +1104,9 @@ where _ => {} } } -} -impl TransactionsManager -where - Pool: TransactionPool + 'static, - Pool::Transaction: - PoolTransaction>, -{ /// Handles dedicated transaction events related to the `eth` protocol. 
- fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { + fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { match event { NetworkTransactionEvent::IncomingTransactions { peer_id, msg } => { // ensure we didn't receive any blob transactions as these are disallowed to be @@ -1119,7 +1117,7 @@ where let non_blob_txs = msg .0 .into_iter() - .map(PooledTransactionsElement::try_from_broadcast) + .map(N::PooledTransaction::try_from) .filter_map(Result::ok) .collect(); @@ -1146,7 +1144,7 @@ where fn import_transactions( &mut self, peer_id: PeerId, - transactions: PooledTransactions, + transactions: PooledTransactions, source: TransactionSource, ) { // If the node is pipeline syncing, ignore transactions @@ -1162,7 +1160,7 @@ where // mark the transactions as received self.transaction_fetcher - .remove_hashes_from_transaction_fetcher(transactions.iter().map(|tx| *tx.hash())); + .remove_hashes_from_transaction_fetcher(transactions.iter().map(|tx| *tx.tx_hash())); // track that the peer knows these transaction, but only if this is a new broadcast. // If we received the transactions as the response to our `GetPooledTransactions`` @@ -1170,7 +1168,7 @@ where // recorded the hashes as seen by this peer in `Self::on_new_pooled_transaction_hashes`. 
let mut num_already_seen_by_peer = 0; for tx in &transactions { - if source.is_broadcast() && !peer.seen_transactions.insert(*tx.hash()) { + if source.is_broadcast() && !peer.seen_transactions.insert(*tx.tx_hash()) { num_already_seen_by_peer += 1; } } @@ -1199,7 +1197,7 @@ where Err(badtx) => { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - hash=%badtx.hash(), + hash=%badtx.tx_hash(), client_version=%peer.client_version, "failed ecrecovery for transaction" ); @@ -1208,22 +1206,23 @@ where } }; - match self.transactions_by_peers.entry(*tx.hash()) { + match self.transactions_by_peers.entry(*tx.tx_hash()) { Entry::Occupied(mut entry) => { // transaction was already inserted entry.get_mut().insert(peer_id); } Entry::Vacant(entry) => { - if self.bad_imports.contains(tx.hash()) { + if self.bad_imports.contains(tx.tx_hash()) { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - hash=%tx.hash(), + hash=%tx.tx_hash(), client_version=%peer.client_version, "received a known bad transaction from peer" ); has_bad_transactions = true; } else { // this is a new transaction that should be imported into the pool + let pool_transaction = Pool::Transaction::from_pooled(tx.into()); new_txs.push(pool_transaction); @@ -1285,7 +1284,7 @@ where } /// Processes a [`FetchEvent`]. 
- fn on_fetch_event(&mut self, fetch_event: FetchEvent) { + fn on_fetch_event(&mut self, fetch_event: FetchEvent) { match fetch_event { FetchEvent::TransactionsFetched { peer_id, transactions } => { self.import_transactions(peer_id, transactions, TransactionSource::Response); From e9484b2437fa6d0bb9516d161afa1a4fdfe1422d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 3 Dec 2024 21:55:08 +0100 Subject: [PATCH 861/970] chore: remove redundant enum (#13113) --- crates/rpc/rpc-engine-api/src/lib.rs | 4 ---- crates/rpc/rpc-engine-api/src/message.rs | 14 -------------- 2 files changed, 18 deletions(-) delete mode 100644 crates/rpc/rpc-engine-api/src/message.rs diff --git a/crates/rpc/rpc-engine-api/src/lib.rs b/crates/rpc/rpc-engine-api/src/lib.rs index a2da00eee70..a9305a00820 100644 --- a/crates/rpc/rpc-engine-api/src/lib.rs +++ b/crates/rpc/rpc-engine-api/src/lib.rs @@ -15,9 +15,6 @@ mod engine_api; /// Engine API capabilities. pub mod capabilities; -/// The Engine API message type. -mod message; - /// Engine API error. mod error; @@ -26,7 +23,6 @@ mod metrics; pub use engine_api::{EngineApi, EngineApiSender}; pub use error::*; -pub use message::EngineApiMessageVersion; // re-export server trait for convenience pub use reth_rpc_api::EngineApiServer; diff --git a/crates/rpc/rpc-engine-api/src/message.rs b/crates/rpc/rpc-engine-api/src/message.rs deleted file mode 100644 index c0d6b85d511..00000000000 --- a/crates/rpc/rpc-engine-api/src/message.rs +++ /dev/null @@ -1,14 +0,0 @@ -/// The version of Engine API message. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum EngineApiMessageVersion { - /// Version 1 - V1, - /// Version 2 - /// - /// Added for shanghai hardfork. - V2, - /// Version 3 - /// - /// Added for cancun hardfork. 
- V3, -} From 601e8b9147ac720849dbb2888549c4190762aa9c Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 3 Dec 2024 17:06:29 -0500 Subject: [PATCH 862/970] feat: make TransactionsManager Future impl generic over NetworkPrimitives (#13115) --- crates/net/eth-wire-types/src/primitives.rs | 14 +------ .../net/network/src/transactions/fetcher.rs | 39 ++++++++++--------- crates/net/network/src/transactions/mod.rs | 15 ++++--- 3 files changed, 33 insertions(+), 35 deletions(-) diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index 1b0c16c0622..78083e9e092 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -1,7 +1,7 @@ //! Abstraction over primitive types in network messages. use alloy_rlp::{Decodable, Encodable}; -use reth_primitives_traits::{Block, BlockHeader}; +use reth_primitives_traits::{Block, BlockHeader, SignedTransaction}; use std::fmt::Debug; /// Abstraction over primitive types which might appear in network messages. See @@ -62,17 +62,7 @@ pub trait NetworkPrimitives: + 'static; /// The transaction type which peers return in `PooledTransactions` messages. - type PooledTransaction: TryFrom - + Encodable - + Decodable - + Send - + Sync - + Unpin - + Clone - + Debug - + PartialEq - + Eq - + 'static; + type PooledTransaction: SignedTransaction + TryFrom + 'static; /// The transaction type which peers return in `GetReceipts` messages. 
type Receipt: Encodable diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 180a619fff9..025ae36ea14 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -50,6 +50,7 @@ use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; use reth_network_peers::PeerId; use reth_primitives::PooledTransactionsElement; +use reth_primitives_traits::SignedTransaction; use schnellru::ByLength; #[cfg(debug_assertions)] use smallvec::{smallvec, SmallVec}; @@ -895,16 +896,14 @@ impl TransactionFetcher { approx_capacity_get_pooled_transactions_req_eth66() } } -} -impl TransactionFetcher { /// Processes a resolved [`GetPooledTransactions`] request. Queues the outcome as a /// [`FetchEvent`], which will then be streamed by /// [`TransactionsManager`](super::TransactionsManager). pub fn on_resolved_get_pooled_transactions_request_fut( &mut self, - response: GetPooledTxResponse, - ) -> FetchEvent { + response: GetPooledTxResponse, + ) -> FetchEvent { // update peer activity, requests for buffered hashes can only be made to idle // fallback peers let GetPooledTxResponse { peer_id, mut requested_hashes, result } = response; @@ -1026,8 +1025,8 @@ impl TransactionFetcher { } } -impl Stream for TransactionFetcher { - type Item = FetchEvent; +impl Stream for TransactionFetcher { + type Item = FetchEvent; /// Advances all inflight requests and returns the next event. fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -1176,18 +1175,18 @@ impl Future for GetPooledTxRequestFut { /// Wrapper of unverified [`PooledTransactions`]. #[derive(Debug, Constructor, Deref)] -pub struct UnverifiedPooledTransactions { - txns: PooledTransactions, +pub struct UnverifiedPooledTransactions { + txns: PooledTransactions, } /// [`PooledTransactions`] that have been successfully verified. 
#[derive(Debug, Constructor, Deref)] -pub struct VerifiedPooledTransactions { - txns: PooledTransactions, +pub struct VerifiedPooledTransactions { + txns: PooledTransactions, } -impl DedupPayload for VerifiedPooledTransactions { - type Value = PooledTransactionsElement; +impl DedupPayload for VerifiedPooledTransactions { + type Value = T; fn is_empty(&self) -> bool { self.txns.is_empty() @@ -1199,26 +1198,30 @@ impl DedupPayload for VerifiedPooledTransactions { fn dedup(self) -> PartiallyValidData { PartiallyValidData::from_raw_data( - self.txns.into_iter().map(|tx| (*tx.hash(), tx)).collect(), + self.txns.into_iter().map(|tx| (*tx.tx_hash(), tx)).collect(), None, ) } } trait VerifyPooledTransactionsResponse { + type Transaction: SignedTransaction; + fn verify( self, requested_hashes: &RequestTxHashes, peer_id: &PeerId, - ) -> (VerificationOutcome, VerifiedPooledTransactions); + ) -> (VerificationOutcome, VerifiedPooledTransactions); } -impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { +impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { + type Transaction = T; + fn verify( self, requested_hashes: &RequestTxHashes, _peer_id: &PeerId, - ) -> (VerificationOutcome, VerifiedPooledTransactions) { + ) -> (VerificationOutcome, VerifiedPooledTransactions) { let mut verification_outcome = VerificationOutcome::Ok; let Self { mut txns } = self; @@ -1229,11 +1232,11 @@ impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { let mut tx_hashes_not_requested_count = 0; txns.0.retain(|tx| { - if !requested_hashes.contains(tx.hash()) { + if !requested_hashes.contains(tx.tx_hash()) { verification_outcome = VerificationOutcome::ReportPeer; #[cfg(debug_assertions)] - tx_hashes_not_requested.push(*tx.hash()); + tx_hashes_not_requested.push(*tx.tx_hash()); #[cfg(not(debug_assertions))] { tx_hashes_not_requested_count += 1; diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs 
index 4a7167a8064..a1097dacf55 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -49,8 +49,7 @@ use reth_network_p2p::{ use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; use reth_primitives::{ - transaction::SignedTransactionIntoRecoveredExt, PooledTransactionsElement, RecoveredTx, - TransactionSigned, + transaction::SignedTransactionIntoRecoveredExt, RecoveredTx, TransactionSigned, }; use reth_primitives_traits::{SignedTransaction, TxType}; use reth_tokio_util::EventStream; @@ -1307,11 +1306,17 @@ where // // spawned in `NodeConfig::start_network`(reth_node_core::NodeConfig) and // `NetworkConfig::start_network`(reth_network::NetworkConfig) -impl Future for TransactionsManager +impl Future for TransactionsManager where Pool: TransactionPool + Unpin + 'static, - Pool::Transaction: - PoolTransaction>, + N: NetworkPrimitives< + BroadcastedTransaction: SignedTransaction, + PooledTransaction: SignedTransaction, + >, + Pool::Transaction: PoolTransaction< + Consensus = N::BroadcastedTransaction, + Pooled: Into + From>, + >, { type Output = (); From bdfbcab53138c9fdf64c136aac6dcb2a84f6ff94 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 3 Dec 2024 17:06:36 -0500 Subject: [PATCH 863/970] feat: use generic CapabilityMessage everywhere (#13116) --- crates/net/network/src/manager.rs | 2 +- crates/net/network/src/session/handle.rs | 2 +- crates/net/network/src/session/mod.rs | 2 +- crates/net/network/src/swarm.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index e123377acd2..bad6ecba5fa 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -398,7 +398,7 @@ impl NetworkManager { &mut self, peer_id: PeerId, _capabilities: Arc, - _message: CapabilityMessage, + _message: CapabilityMessage, ) { trace!(target: "net", 
?peer_id, "received unexpected message"); self.swarm diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index d167dc0e6ec..d24d7ec6841 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -264,7 +264,7 @@ pub enum ActiveSessionMessage { /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. - message: CapabilityMessage, + message: CapabilityMessage, }, /// Received a bad message from the peer. BadMessage { diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index a020c540e38..b19281b079a 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -710,7 +710,7 @@ pub enum SessionEvent { /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. - message: CapabilityMessage, + message: CapabilityMessage, }, /// Received a bad message from the peer. BadMessage { diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 47447783f42..c4a2bd14d36 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -350,7 +350,7 @@ pub(crate) enum SwarmEvent { /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. - message: CapabilityMessage, + message: CapabilityMessage, }, /// Received a bad message from the peer. 
BadMessage { From c6add45c0dea83b0ea5ab2fb53030d1d2348bd89 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 3 Dec 2024 17:06:42 -0500 Subject: [PATCH 864/970] chore: propagate network primitives generic to EthStream usages (#13117) --- crates/net/network/src/session/active.rs | 2 +- examples/manual-p2p/src/main.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 76701f7e2ab..af9bb2f0856 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -839,7 +839,7 @@ mod tests { f: F, ) -> Pin + Send>> where - F: FnOnce(EthStream>>) -> O + Send + 'static, + F: FnOnce(EthStream>, N>) -> O + Send + 'static, O: Future + Send + Sync, { let status = self.status; diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs index 79a2ff26a27..15b14d98ea8 100644 --- a/examples/manual-p2p/src/main.rs +++ b/examples/manual-p2p/src/main.rs @@ -16,7 +16,7 @@ use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ EthMessage, EthStream, HelloMessage, P2PStream, Status, UnauthedEthStream, UnauthedP2PStream, }; -use reth_network::config::rng_secret_key; +use reth_network::{config::rng_secret_key, EthNetworkPrimitives}; use reth_network_peers::{mainnet_nodes, pk2id, NodeRecord}; use reth_primitives::{EthereumHardfork, Head}; use secp256k1::{SecretKey, SECP256K1}; @@ -24,7 +24,7 @@ use std::sync::LazyLock; use tokio::net::TcpStream; type AuthedP2PStream = P2PStream>; -type AuthedEthStream = EthStream>>; +type AuthedEthStream = EthStream>, EthNetworkPrimitives>; pub static MAINNET_BOOT_NODES: LazyLock> = LazyLock::new(mainnet_nodes); From 8a00d2d25cb81bd0137d0fcab57133b78d95657b Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 4 Dec 2024 04:13:47 +0400 Subject: [PATCH 865/970] chore: relax rpc bounds (#13100) --- .../rpc-eth-api/src/helpers/transaction.rs | 31 
+++++++++++++------ crates/rpc/rpc-eth-api/src/types.rs | 7 ++--- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 9d77e01193b..3b4ecb9de27 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -8,7 +8,10 @@ use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; -use reth_primitives::{SealedBlockWithSenders, TransactionMeta, TransactionSigned}; +use reth_primitives::{ + transaction::SignedTransactionIntoRecoveredExt, SealedBlockWithSenders, TransactionMeta, + TransactionSigned, +}; use reth_provider::{ BlockNumReader, BlockReaderIdExt, ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider, @@ -120,10 +123,13 @@ pub trait EthTransactions: LoadTransaction { } /// Returns the _historical_ transaction and the block it was mined in + #[expect(clippy::type_complexity)] fn historical_transaction_by_hash_at( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future< + Output = Result>, B256)>, Self::Error>, + > + Send { async move { match self.transaction_by_hash_at(hash).await? { None => Ok(None), @@ -475,11 +481,7 @@ pub trait EthTransactions: LoadTransaction { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` transactions RPC /// methods. -pub trait LoadTransaction: - SpawnBlocking - + FullEthApiTypes - + RpcNodeCoreExt -{ +pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt { /// Returns the transaction by hash. /// /// Checks the pool and state. @@ -539,11 +541,16 @@ pub trait LoadTransaction: /// Returns the transaction by including its corresponding [`BlockId`]. 
/// /// Note: this supports pending transactions + #[expect(clippy::type_complexity)] fn transaction_by_hash_at( &self, transaction_hash: B256, - ) -> impl Future, Self::Error>> + Send - { + ) -> impl Future< + Output = Result< + Option<(TransactionSource>, BlockId)>, + Self::Error, + >, + > + Send { async move { Ok(self.transaction_by_hash(transaction_hash).await?.map(|tx| match tx { tx @ TransactionSource::Pool(_) => (tx, BlockId::pending()), @@ -555,11 +562,15 @@ pub trait LoadTransaction: } /// Fetches the transaction and the transaction's block + #[expect(clippy::type_complexity)] fn transaction_and_block( &self, hash: B256, ) -> impl Future< - Output = Result)>, Self::Error>, + Output = Result< + Option<(TransactionSource>, Arc)>, + Self::Error, + >, > + Send { async move { let (transaction, at) = match self.transaction_by_hash_at(hash).await? { diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 62af1432b11..c97ea5735ee 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -7,7 +7,6 @@ use std::{ use alloy_network::Network; use alloy_rpc_types_eth::Block; -use reth_primitives::TransactionSigned; use reth_provider::{ProviderTx, ReceiptProvider, TransactionsProvider}; use reth_rpc_types_compat::TransactionCompat; use reth_transaction_pool::{PoolTransaction, TransactionPool}; @@ -49,8 +48,7 @@ pub type RpcError = ::Error; pub trait FullEthApiTypes where Self: RpcNodeCore< - Provider: TransactionsProvider - + ReceiptProvider, + Provider: TransactionsProvider + ReceiptProvider, Pool: TransactionPool< Transaction: PoolTransaction>, >, @@ -66,8 +64,7 @@ where impl FullEthApiTypes for T where T: RpcNodeCore< - Provider: TransactionsProvider - + ReceiptProvider, + Provider: TransactionsProvider + ReceiptProvider, Pool: TransactionPool< Transaction: PoolTransaction>, >, From d164e3ec982593f5fc67f0d5d13e102562fc047e Mon Sep 17 00:00:00 2001 From: Dan Cline 
<6798349+Rjected@users.noreply.github.com> Date: Tue, 3 Dec 2024 19:14:25 -0500 Subject: [PATCH 866/970] chore: bound most NetworkBuilder methods by NetworkPrimitives generic (#13119) --- .../net/eth-wire/tests/pooled_transactions.rs | 4 +- crates/net/network/src/builder.rs | 40 ++++++++++--------- crates/net/network/src/config.rs | 2 +- 3 files changed, 24 insertions(+), 22 deletions(-) diff --git a/crates/net/eth-wire/tests/pooled_transactions.rs b/crates/net/eth-wire/tests/pooled_transactions.rs index 3b17d04cba5..93a17f3b05b 100644 --- a/crates/net/eth-wire/tests/pooled_transactions.rs +++ b/crates/net/eth-wire/tests/pooled_transactions.rs @@ -3,7 +3,7 @@ use alloy_eips::eip2718::Decodable2718; use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable}; -use reth_eth_wire::{EthVersion, PooledTransactions, ProtocolMessage}; +use reth_eth_wire::{EthNetworkPrimitives, EthVersion, PooledTransactions, ProtocolMessage}; use reth_primitives::PooledTransactionsElement; use std::{fs, path::PathBuf}; use test_fuzz::test_fuzz; @@ -51,7 +51,7 @@ fn decode_request_pair_pooled_blob_transactions() { .join("testdata/request_pair_pooled_blob_transactions"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs: ProtocolMessage = + let _txs: ProtocolMessage = ProtocolMessage::decode_message(EthVersion::Eth68, &mut &hex_data[..]).unwrap(); } diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index da003a2e290..13c932d4644 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -24,35 +24,50 @@ pub struct NetworkBuilder // === impl NetworkBuilder === -impl NetworkBuilder { +impl NetworkBuilder { /// Consumes the type and returns all fields. 
- pub fn split(self) -> (NetworkManager, Tx, Eth) { + pub fn split(self) -> (NetworkManager, Tx, Eth) { let Self { network, transactions, request_handler } = self; (network, transactions, request_handler) } /// Returns the network manager. - pub const fn network(&self) -> &NetworkManager { + pub const fn network(&self) -> &NetworkManager { &self.network } /// Returns the mutable network manager. - pub fn network_mut(&mut self) -> &mut NetworkManager { + pub fn network_mut(&mut self) -> &mut NetworkManager { &mut self.network } /// Returns the handle to the network. - pub fn handle(&self) -> NetworkHandle { + pub fn handle(&self) -> NetworkHandle { self.network.handle().clone() } /// Consumes the type and returns all fields and also return a [`NetworkHandle`]. - pub fn split_with_handle(self) -> (NetworkHandle, NetworkManager, Tx, Eth) { + pub fn split_with_handle(self) -> (NetworkHandle, NetworkManager, Tx, Eth) { let Self { network, transactions, request_handler } = self; let handle = network.handle().clone(); (handle, network, transactions, request_handler) } + /// Creates a new [`EthRequestHandler`] and wires it to the network. + pub fn request_handler( + self, + client: Client, + ) -> NetworkBuilder, N> { + let Self { mut network, transactions, .. } = self; + let (tx, rx) = mpsc::channel(ETH_REQUEST_CHANNEL_CAPACITY); + network.set_eth_request_handler(tx); + let peers = network.handle().peers_handle().clone(); + let request_handler = EthRequestHandler::new(client, peers, rx); + NetworkBuilder { network, request_handler, transactions } + } +} + +impl NetworkBuilder { /// Creates a new [`TransactionsManager`] and wires it to the network. pub fn transactions( self, @@ -66,17 +81,4 @@ impl NetworkBuilder { let transactions = TransactionsManager::new(handle, pool, rx, transactions_manager_config); NetworkBuilder { network, request_handler, transactions } } - - /// Creates a new [`EthRequestHandler`] and wires it to the network. 
- pub fn request_handler( - self, - client: Client, - ) -> NetworkBuilder> { - let Self { mut network, transactions, .. } = self; - let (tx, rx) = mpsc::channel(ETH_REQUEST_CHANNEL_CAPACITY); - network.set_eth_request_handler(tx); - let peers = network.handle().peers_handle().clone(); - let request_handler = EthRequestHandler::new(client, peers, rx); - NetworkBuilder { network, request_handler, transactions } - } } diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 44f34c3a4b0..a9ce67821b9 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -94,7 +94,7 @@ impl NetworkConfig<(), N> { } /// Convenience method for creating the corresponding builder type with a random secret key. - pub fn builder_with_rng_secret_key() -> NetworkConfigBuilder { + pub fn builder_with_rng_secret_key() -> NetworkConfigBuilder { NetworkConfigBuilder::with_rng_secret_key() } } From 4e73bb04c252023fdce8cac6c3e3de90f9972834 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 4 Dec 2024 04:15:03 +0400 Subject: [PATCH 867/970] refactor: pending block construction (#13109) --- .../rpc-eth-api/src/helpers/pending_block.rs | 145 ++++++++---------- crates/rpc/rpc-eth-types/src/pending_block.rs | 30 ++-- 2 files changed, 72 insertions(+), 103 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index c5bb0994607..8a6e5c84be1 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -8,17 +8,18 @@ use alloy_eips::{ eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, }; use alloy_primitives::{BlockNumber, B256, U256}; -use alloy_rpc_types_eth::BlockNumberOrTag; +use alloy_rpc_types_eth::{BlockNumberOrTag, Withdrawals}; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_errors::RethError; use reth_evm::{ 
state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller, - ConfigureEvm, ConfigureEvmEnv, + ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes, }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ proofs::calculate_transaction_root, Block, BlockBody, BlockExt, InvalidTransactionError, - Receipt, RecoveredTx, SealedBlockWithSenders, SealedHeader, + Receipt, RecoveredTx, SealedBlockWithSenders, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, @@ -27,7 +28,7 @@ use reth_provider::{ use reth_revm::{ database::StateProviderDatabase, primitives::{ - BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, + BlockEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, ResultAndState, SpecId, }, }; @@ -68,55 +69,56 @@ pub trait LoadPendingBlock: /// /// If no pending block is available, this will derive it from the `latest` block fn pending_block_env_and_cfg(&self) -> Result { - let origin: PendingBlockEnvOrigin = if let Some(pending) = + if let Some(block) = self.provider().pending_block_with_senders().map_err(Self::Error::from_eth_err)? { - PendingBlockEnvOrigin::ActualPending(pending) - } else { - // no pending block from the CL yet, so we use the latest block and modify the env - // values that we can - let latest = self + if let Some(receipts) = self .provider() - .latest_header() + .receipts_by_block(block.hash().into()) .map_err(Self::Error::from_eth_err)? 
- .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; - - let (mut latest_header, block_hash) = latest.split(); - // child block - latest_header.number += 1; - // assumed child block is in the next slot: 12s - latest_header.timestamp += 12; - // base fee of the child block - let chain_spec = self.provider().chain_spec(); - - latest_header.base_fee_per_gas = latest_header.next_block_base_fee( - chain_spec.base_fee_params_at_timestamp(latest_header.timestamp()), - ); - - // update excess blob gas consumed above target - latest_header.excess_blob_gas = latest_header.next_block_excess_blob_gas(); - - // we're reusing the same block hash because we need this to lookup the block's state - let latest = SealedHeader::new(latest_header, block_hash); - - PendingBlockEnvOrigin::DerivedFromLatest(latest) - }; + { + // Note: for the PENDING block we assume it is past the known merge block and + // thus this will not fail when looking up the total + // difficulty value for the blockenv. + let (cfg, block_env) = self + .provider() + .env_with_header(block.header(), self.evm_config().clone()) + .map_err(Self::Error::from_eth_err)?; + + return Ok(PendingBlockEnv::new( + cfg, + block_env, + PendingBlockEnvOrigin::ActualPending(block, receipts), + )); + } + } - let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - - let mut block_env = BlockEnv::default(); - // Note: for the PENDING block we assume it is past the known merge block and thus this will - // not fail when looking up the total difficulty value for the blockenv. - self.provider() - .fill_env_with_header( - &mut cfg, - &mut block_env, - origin.header(), - self.evm_config().clone(), + // no pending block from the CL yet, so we use the latest block and modify the env + // values that we can + let latest = self + .provider() + .latest_header() + .map_err(Self::Error::from_eth_err)? 
+ .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; + + let (cfg, block_env) = self + .evm_config() + .next_cfg_and_block_env( + &latest, + NextBlockEnvAttributes { + timestamp: latest.timestamp() + 12, + suggested_fee_recipient: latest.beneficiary(), + prev_randao: B256::random(), + }, ) + .map_err(RethError::other) .map_err(Self::Error::from_eth_err)?; - Ok(PendingBlockEnv::new(cfg, block_env, origin)) + Ok(PendingBlockEnv::new( + cfg, + block_env, + PendingBlockEnvOrigin::DerivedFromLatest(latest.hash()), + )) } /// Returns the locally built pending block @@ -137,18 +139,12 @@ pub trait LoadPendingBlock: { async move { let pending = self.pending_block_env_and_cfg()?; - if pending.origin.is_actual_pending() { - if let Some(block) = pending.origin.clone().into_actual_pending() { - // we have the real pending block, so we should also have its receipts - if let Some(receipts) = self - .provider() - .receipts_by_block(block.hash().into()) - .map_err(Self::Error::from_eth_err)? 
- { - return Ok(Some((block, receipts))) - } + let parent_hash = match pending.origin { + PendingBlockEnvOrigin::ActualPending(block, receipts) => { + return Ok(Some((block, receipts))); } - } + PendingBlockEnvOrigin::DerivedFromLatest(parent_hash) => parent_hash, + }; // we couldn't find the real pending block, so we need to build it ourselves let mut lock = self.pending_block().lock().await; @@ -159,7 +155,7 @@ pub trait LoadPendingBlock: if let Some(pending_block) = lock.as_ref() { // this is guaranteed to be the `latest` header if pending.block_env.number.to::() == pending_block.block.number && - pending.origin.header().hash() == pending_block.block.parent_hash && + parent_hash == pending_block.block.parent_hash && now <= pending_block.expires_at { return Ok(Some((pending_block.block.clone(), pending_block.receipts.clone()))); @@ -170,7 +166,7 @@ pub trait LoadPendingBlock: let (sealed_block, receipts) = match self .spawn_blocking_io(move |this| { // we rebuild the block - this.build_block(pending) + this.build_block(pending.cfg, pending.block_env, parent_hash) }) .await { @@ -229,14 +225,13 @@ pub trait LoadPendingBlock: /// block contract call using the parent beacon block root received from the CL. 
fn build_block( &self, - env: PendingBlockEnv, + cfg: CfgEnvWithHandlerCfg, + block_env: BlockEnv, + parent_hash: B256, ) -> Result<(SealedBlockWithSenders, Vec), Self::Error> where EthApiError: From, { - let PendingBlockEnv { cfg, block_env, origin } = env; - - let parent_hash = origin.build_target_hash(); let state_provider = self .provider() .history_by_block_hash(parent_hash) @@ -258,34 +253,16 @@ pub trait LoadPendingBlock: block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), )); - let (withdrawals, withdrawals_root) = match origin { - PendingBlockEnvOrigin::ActualPending(ref block) => { - (block.body.withdrawals.clone(), block.withdrawals_root) - } - PendingBlockEnvOrigin::DerivedFromLatest(_) => (None, None), - }; + let withdrawals: Option = None; + let withdrawals_root = None; let chain_spec = self.provider().chain_spec(); let mut system_caller = SystemCaller::new(self.evm_config().clone(), chain_spec.clone()); - let parent_beacon_block_root = if origin.is_actual_pending() { - // apply eip-4788 pre block contract call if we got the block from the CL with the real - // parent beacon block root - system_caller - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - origin.header().parent_beacon_block_root, - ) - .map_err(|err| EthApiError::Internal(err.into()))?; - origin.header().parent_beacon_block_root - } else { - None - }; + let parent_beacon_block_root = None; system_caller - .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, origin.header().hash()) + .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, parent_hash) .map_err(|err| EthApiError::Internal(err.into()))?; let mut receipts = Vec::new(); diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 116026c2ddd..bd23e3f42ab 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -8,7 +8,7 @@ use alloy_consensus::BlockHeader; use 
alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; -use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlockWithSenders}; use reth_primitives_traits::Block; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; @@ -25,28 +25,28 @@ pub struct PendingBlockEnv { /// The origin for a configured [`PendingBlockEnv`] #[derive(Clone, Debug)] -pub enum PendingBlockEnvOrigin { +pub enum PendingBlockEnvOrigin { /// The pending block as received from the CL. - ActualPending(SealedBlockWithSenders), + ActualPending(SealedBlockWithSenders, Vec), /// The _modified_ header of the latest block. /// /// This derives the pending state based on the latest header by modifying: /// - the timestamp /// - the block number /// - fees - DerivedFromLatest(SealedHeader), + DerivedFromLatest(B256), } -impl PendingBlockEnvOrigin { +impl PendingBlockEnvOrigin { /// Returns true if the origin is the actual pending block as received from the CL. pub const fn is_actual_pending(&self) -> bool { - matches!(self, Self::ActualPending(_)) + matches!(self, Self::ActualPending(_, _)) } /// Consumes the type and returns the actual pending block. pub fn into_actual_pending(self) -> Option> { match self { - Self::ActualPending(block) => Some(block), + Self::ActualPending(block, _) => Some(block), _ => None, } } @@ -57,8 +57,8 @@ impl PendingBlockEnvOrigin { /// identify the block by its hash (latest block). pub fn state_block_id(&self) -> BlockId { match self { - Self::ActualPending(_) => BlockNumberOrTag::Pending.into(), - Self::DerivedFromLatest(header) => BlockId::Hash(header.hash().into()), + Self::ActualPending(_, _) => BlockNumberOrTag::Pending.into(), + Self::DerivedFromLatest(hash) => BlockId::Hash((*hash).into()), } } @@ -69,16 +69,8 @@ impl PendingBlockEnvOrigin { /// header. 
pub fn build_target_hash(&self) -> B256 { match self { - Self::ActualPending(block) => block.header().parent_hash(), - Self::DerivedFromLatest(header) => header.hash(), - } - } - - /// Returns the header this pending block is based on. - pub fn header(&self) -> &SealedHeader { - match self { - Self::ActualPending(block) => &block.header, - Self::DerivedFromLatest(header) => header, + Self::ActualPending(block, _) => block.header().parent_hash(), + Self::DerivedFromLatest(hash) => *hash, } } } From 5c07669ef6e1db8a1acc487fea92c1b6aa81cc67 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 4 Dec 2024 11:59:01 +0400 Subject: [PATCH 868/970] chore: remove `TransactionSignedNoHash` (#13120) --- .../cli/commands/src/test_vectors/compact.rs | 4 +- .../cli/commands/src/test_vectors/tables.rs | 4 +- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/transaction/mod.rs | 304 +++++------------- crates/primitives/src/transaction/tx_type.rs | 2 +- .../stages/src/stages/hashing_storage.rs | 5 +- .../stages/stages/src/test_utils/test_db.rs | 2 +- crates/storage/db-api/src/models/mod.rs | 3 +- crates/storage/db/src/tables/mod.rs | 4 +- .../src/providers/blockchain_provider.rs | 6 +- .../src/providers/static_file/manager.rs | 4 +- docs/design/database.md | 2 +- 12 files changed, 96 insertions(+), 246 deletions(-) diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index 5490f568d3a..c321e35be73 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -22,7 +22,7 @@ use reth_db::{ }; use reth_fs_util as fs; use reth_primitives::{ - Account, Log, LogData, Receipt, StorageEntry, Transaction, TransactionSignedNoHash, TxType, + Account, Log, LogData, Receipt, StorageEntry, Transaction, TransactionSigned, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneMode}; use reth_stages_types::{ @@ -111,7 +111,7 @@ compact_types!( StoredBlockBodyIndices, 
StoredBlockWithdrawals, // Manual implementations - TransactionSignedNoHash, + TransactionSigned, // Bytecode, // todo revm arbitrary StorageEntry, // MerkleCheckpoint, // todo storedsubnode -> branchnodecompact arbitrary diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index acb811b75df..f845d2a6613 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -11,7 +11,7 @@ use proptest_arbitrary_interop::arb; use reth_db::tables; use reth_db_api::table::{DupSort, Table, TableRow}; use reth_fs_util as fs; -use reth_primitives::TransactionSignedNoHash; +use reth_primitives::TransactionSigned; use std::collections::HashSet; use tracing::error; @@ -74,7 +74,7 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { (BlockBodyIndices, PER_TABLE, TABLE), (BlockOmmers
, 100, TABLE), (TransactionHashNumbers, PER_TABLE, TABLE), - (Transactions, 100, TABLE), + (Transactions, 100, TABLE), (PlainStorageState, PER_TABLE, DUPSORT), (PlainAccountState, PER_TABLE, TABLE) ]); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 97407ba610c..2844c9397b8 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -53,7 +53,7 @@ pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, BlobTransaction, InvalidTransactionError, PooledTransactionsElement, PooledTransactionsElementEcRecovered, RecoveredTx, Transaction, TransactionMeta, - TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, + TransactionSigned, TransactionSignedEcRecovered, TxType, }; // Re-exports diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index aaa6b82dc4e..d0b88c4b179 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -780,230 +780,6 @@ impl From for Transaction { } } -/// Signed transaction without its Hash. Used type for inserting into the DB. -/// -/// This can by converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`]. -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub struct TransactionSignedNoHash { - /// The transaction signature values - pub signature: Signature, - /// Raw transaction info - #[deref] - #[as_ref] - pub transaction: Transaction, -} - -impl TransactionSignedNoHash { - /// Calculates the transaction hash. If used more than once, it's better to convert it to - /// [`TransactionSigned`] first. 
- pub fn hash(&self) -> B256 { - // pre-allocate buffer for the transaction - let mut buf = Vec::with_capacity(128 + self.transaction.input().len()); - self.transaction.eip2718_encode(&self.signature, &mut buf); - keccak256(&buf) - } - - /// Recover signer from signature and hash. - /// - /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`]. - pub fn recover_signer(&self) -> Option
{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - - let signature_hash = self.signature_hash(); - recover_signer(&self.signature, signature_hash) - } - - /// Recover signer from signature and hash _without ensuring that the signature has a low `s` - /// value_. - /// - /// Reuses a given buffer to avoid numerous reallocations when recovering batches. **Clears the - /// buffer before use.** - /// - /// Returns `None` if the transaction's signature is invalid, see also - /// [`recover_signer_unchecked`]. - /// - /// # Optimism - /// - /// For optimism this will return [`Address::ZERO`] if the Signature is empty, this is because pre bedrock (on OP mainnet), relay messages to the L2 Cross Domain Messenger were sent as legacy transactions from the zero address with an empty signature, e.g.: - /// This makes it possible to import pre bedrock transactions via the sender recovery stage. - pub fn encode_and_recover_unchecked(&self, buffer: &mut Vec) -> Option
{ - buffer.clear(); - self.transaction.encode_for_signing(buffer); - - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - { - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - - // pre bedrock system transactions were sent from the zero address as legacy - // transactions with an empty signature - // - // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if self.is_legacy() && self.signature == TxDeposit::signature() { - return Some(Address::ZERO) - } - } - - recover_signer_unchecked(&self.signature, keccak256(buffer)) - } - - /// Converts into a transaction type with its hash: [`TransactionSigned`]. - /// - /// Note: This will recalculate the hash of the transaction. - #[inline] - pub fn with_hash(self) -> TransactionSigned { - let Self { signature, transaction } = self; - TransactionSigned::new_unhashed(transaction, signature) - } - - /// Recovers a list of signers from a transaction list iterator - /// - /// Returns `None`, if some transaction's signature is invalid, see also - /// [`Self::recover_signer`]. 
- pub fn recover_signers<'a, T>(txes: T, num_txes: usize) -> Option> - where - T: IntoParallelIterator + IntoIterator + Send, - { - if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { - txes.into_iter().map(|tx| tx.recover_signer()).collect() - } else { - txes.into_par_iter().map(|tx| tx.recover_signer()).collect() - } - } -} - -impl Default for TransactionSignedNoHash { - fn default() -> Self { - Self { signature: Signature::test_signature(), transaction: Default::default() } - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for TransactionSignedNoHash { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let tx_signed = TransactionSigned::arbitrary(u)?; - - Ok(Self { signature: tx_signed.signature, transaction: tx_signed.transaction }) - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for TransactionSignedNoHash { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - let start = buf.as_mut().len(); - - // Placeholder for bitflags. 
- // The first byte uses 4 bits as flags: IsCompressed[1bit], TxType[2bits], Signature[1bit] - buf.put_u8(0); - - let sig_bit = self.signature.to_compact(buf) as u8; - let zstd_bit = self.transaction.input().len() >= 32; - - let tx_bits = if zstd_bit { - let mut tmp = Vec::with_capacity(256); - if cfg!(feature = "std") { - crate::compression::TRANSACTION_COMPRESSOR.with(|compressor| { - let mut compressor = compressor.borrow_mut(); - let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 - }) - } else { - let mut compressor = crate::compression::create_tx_compressor(); - let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 - } - } else { - self.transaction.to_compact(buf) as u8 - }; - - // Replace bitflags with the actual values - buf.as_mut()[start] = sig_bit | (tx_bits << 1) | ((zstd_bit as u8) << 3); - - buf.as_mut().len() - start - } - - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { - use bytes::Buf; - - // The first byte uses 4 bits as flags: IsCompressed[1], TxType[2], Signature[1] - let bitflags = buf.get_u8() as usize; - - let sig_bit = bitflags & 1; - let (signature, buf) = Signature::from_compact(buf, sig_bit); - - let zstd_bit = bitflags >> 3; - let (transaction, buf) = if zstd_bit != 0 { - if cfg!(feature = "std") { - crate::compression::TRANSACTION_DECOMPRESSOR.with(|decompressor| { - let mut decompressor = decompressor.borrow_mut(); - - // TODO: enforce that zstd is only present at a "top" level type - - let transaction_type = (bitflags & 0b110) >> 1; - let (transaction, _) = - Transaction::from_compact(decompressor.decompress(buf), transaction_type); - - (transaction, buf) - }) - } else { - let mut decompressor = crate::compression::create_tx_decompressor(); - let transaction_type = (bitflags & 0b110) >> 1; - let (transaction, _) = - 
Transaction::from_compact(decompressor.decompress(buf), transaction_type); - - (transaction, buf) - } - } else { - let transaction_type = bitflags >> 1; - Transaction::from_compact(buf, transaction_type) - }; - - (Self { signature, transaction }, buf) - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for TransactionSigned { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - let tx: TransactionSignedNoHash = self.clone().into(); - tx.to_compact(buf) - } - - fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, buf) = TransactionSignedNoHash::from_compact(buf, len); - (tx.into(), buf) - } -} - -impl From for TransactionSigned { - fn from(tx: TransactionSignedNoHash) -> Self { - tx.with_hash() - } -} - -impl From for TransactionSignedNoHash { - fn from(tx: TransactionSigned) -> Self { - Self { signature: tx.signature, transaction: tx.transaction } - } -} - /// Signed transaction. #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] #[derive(Debug, Clone, Eq, AsRef, Deref, Serialize, Deserialize)] @@ -1543,6 +1319,86 @@ impl Decodable2718 for TransactionSigned { } } +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for TransactionSigned { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let start = buf.as_mut().len(); + + // Placeholder for bitflags. 
+ // The first byte uses 4 bits as flags: IsCompressed[1bit], TxType[2bits], Signature[1bit] + buf.put_u8(0); + + let sig_bit = self.signature.to_compact(buf) as u8; + let zstd_bit = self.transaction.input().len() >= 32; + + let tx_bits = if zstd_bit { + let mut tmp = Vec::with_capacity(256); + if cfg!(feature = "std") { + crate::compression::TRANSACTION_COMPRESSOR.with(|compressor| { + let mut compressor = compressor.borrow_mut(); + let tx_bits = self.transaction.to_compact(&mut tmp); + buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); + tx_bits as u8 + }) + } else { + let mut compressor = crate::compression::create_tx_compressor(); + let tx_bits = self.transaction.to_compact(&mut tmp); + buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); + tx_bits as u8 + } + } else { + self.transaction.to_compact(buf) as u8 + }; + + // Replace bitflags with the actual values + buf.as_mut()[start] = sig_bit | (tx_bits << 1) | ((zstd_bit as u8) << 3); + + buf.as_mut().len() - start + } + + fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; + + // The first byte uses 4 bits as flags: IsCompressed[1], TxType[2], Signature[1] + let bitflags = buf.get_u8() as usize; + + let sig_bit = bitflags & 1; + let (signature, buf) = Signature::from_compact(buf, sig_bit); + + let zstd_bit = bitflags >> 3; + let (transaction, buf) = if zstd_bit != 0 { + if cfg!(feature = "std") { + crate::compression::TRANSACTION_DECOMPRESSOR.with(|decompressor| { + let mut decompressor = decompressor.borrow_mut(); + + // TODO: enforce that zstd is only present at a "top" level type + + let transaction_type = (bitflags & 0b110) >> 1; + let (transaction, _) = + Transaction::from_compact(decompressor.decompress(buf), transaction_type); + + (transaction, buf) + }) + } else { + let mut decompressor = crate::compression::create_tx_decompressor(); + let transaction_type = (bitflags & 0b110) >> 1; + let (transaction, _) = + 
Transaction::from_compact(decompressor.decompress(buf), transaction_type); + + (transaction, buf) + } + } else { + let transaction_type = bitflags >> 1; + Transaction::from_compact(buf, transaction_type) + }; + + (Self { signature, transaction, hash: Default::default() }, buf) + } +} + macro_rules! impl_from_signed { ($($tx:ident),*) => { $( diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 784a976ab79..1d709b902b5 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -29,7 +29,7 @@ pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; /// Transaction Type /// /// Currently being used as 2-bit type when encoding it to `reth_codecs::Compact` on -/// [`crate::TransactionSignedNoHash`]. Adding more transaction types will break the codec and +/// [`crate::TransactionSigned`]. Adding more transaction types will break the codec and /// database format. /// /// Other required changes when adding a new type can be seen on [PR#3953](https://github.com/paradigmxyz/reth/pull/3953/files). 
diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index dcabbe83ee6..0be84665bee 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -359,10 +359,7 @@ mod tests { transaction.hash(), next_tx_num, )?; - tx.put::( - next_tx_num, - transaction.clone().into(), - )?; + tx.put::(next_tx_num, transaction.clone())?; let (addr, _) = accounts.get_mut(rng.gen::() % n_accounts as usize).unwrap(); diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 2f9712f8436..5a6c12d8e00 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -267,7 +267,7 @@ impl TestStageDB { if let Some(txs_writer) = &mut txs_writer { txs_writer.append_transaction(next_tx_num, body_tx)?; } else { - tx.put::(next_tx_num, body_tx.clone().into())? + tx.put::(next_tx_num, body_tx.clone())? 
} next_tx_num += 1; Ok::<(), ProviderError>(()) diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 0a008bb88a5..7ded84e1720 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -8,7 +8,7 @@ use alloy_consensus::Header; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::{Receipt, StorageEntry, TransactionSigned, TransactionSignedNoHash, TxType}; +use reth_primitives::{Receipt, StorageEntry, TransactionSigned, TxType}; use reth_primitives_traits::{Account, Bytecode}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; @@ -226,7 +226,6 @@ impl_compression_for_compact!( StoredBlockWithdrawals, Bytecode, AccountBeforeTx, - TransactionSignedNoHash, TransactionSigned, CompactU256, StageCheckpoint, diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 8a11c4ac055..88cfdde44aa 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -31,7 +31,7 @@ use reth_db_api::{ }, table::{Decode, DupSort, Encode, Table}, }; -use reth_primitives::{Receipt, StorageEntry, TransactionSignedNoHash}; +use reth_primitives::{Receipt, StorageEntry, TransactionSigned}; use reth_primitives_traits::{Account, Bytecode}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; @@ -348,7 +348,7 @@ tables! { } /// Canonical only Stores the transaction body for canonical transactions. 
- table Transactions { + table Transactions { type Key = TxNumber; type Value = T; } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 8330ef3a66e..521e1d959b3 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -29,7 +29,7 @@ use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, - TransactionSigned, TransactionSignedNoHash, + TransactionSigned, }; use reth_primitives_traits::BlockBody as _; use reth_prune_types::{PruneCheckpoint, PruneSegment}; @@ -850,9 +850,7 @@ mod tests { use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives::{ - BlockExt, Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, - }; + use reth_primitives::{BlockExt, Receipt, SealedBlock, StaticFileSegment}; use reth_primitives_traits::{BlockBody as _, SignedTransaction}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index eca382af76c..7af071299cd 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -39,7 +39,7 @@ use reth_primitives::{ }, transaction::recover_signers, BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSignedNoHash, + StaticFileSegment, TransactionMeta, TransactionSigned, }; use 
reth_primitives_traits::SignedTransaction; use reth_stages_types::{PipelineTarget, StageId}; @@ -1706,7 +1706,7 @@ impl StatsReader for StaticFileProvider { .get_highest_static_file_tx(StaticFileSegment::Receipts) .map(|receipts| receipts + 1) .unwrap_or_default() as usize), - tables::Transactions::::NAME => Ok(self + tables::Transactions::::NAME => Ok(self .get_highest_static_file_tx(StaticFileSegment::Transactions) .map(|txs| txs + 1) .unwrap_or_default() diff --git a/docs/design/database.md b/docs/design/database.md index cf2a6c8fcc1..48fc8612cba 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -56,7 +56,7 @@ BlockWithdrawals { } Transactions { u64 TxNumber "PK" - TransactionSignedNoHash Data + TransactionSigned Data } TransactionHashNumbers { B256 TxHash "PK" From c060df92af24f691ef3c7baccabdff8f6d40f34e Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 4 Dec 2024 10:52:14 +0100 Subject: [PATCH 869/970] chore(ci): fix hive patch (#13123) --- .github/assets/hive/no_sim_build.diff | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/.github/assets/hive/no_sim_build.diff b/.github/assets/hive/no_sim_build.diff index 0b109efe7cd..6127a4ecb73 100644 --- a/.github/assets/hive/no_sim_build.diff +++ b/.github/assets/hive/no_sim_build.diff @@ -1,24 +1,23 @@ diff --git a/internal/libdocker/builder.go b/internal/libdocker/builder.go -index 4731c9d..d717f52 100644 +index e4bf99b6..2023f7e2 100644 --- a/internal/libdocker/builder.go +++ b/internal/libdocker/builder.go -@@ -7,9 +7,7 @@ import ( - "fmt" +@@ -8,7 +8,6 @@ import ( "io" "io/fs" + "log/slog" - "os" "path/filepath" -- "strings" - - "github.com/ethereum/hive/internal/libhive" - docker "github.com/fsouza/go-dockerclient" -@@ -53,24 +51,8 @@ func (b *Builder) BuildClientImage(ctx context.Context, client libhive.ClientDes + "slices" + "strings" +@@ -49,25 +48,8 @@ func (b *Builder) BuildClientImage(ctx context.Context, client libhive.ClientDes // 
BuildSimulatorImage builds a docker image of a simulator. - func (b *Builder) BuildSimulatorImage(ctx context.Context, name string) (string, error) { + func (b *Builder) BuildSimulatorImage(ctx context.Context, name string, buildArgs map[string]string) (string, error) { - dir := b.config.Inventory.SimulatorDirectory(name) - buildContextPath := dir - buildDockerfile := "Dockerfile" +- - // build context dir of simulator can be overridden with "hive_context.txt" file containing the desired build path - if contextPathBytes, err := os.ReadFile(filepath.Join(filepath.FromSlash(dir), "hive_context.txt")); err == nil { - buildContextPath = filepath.Join(dir, strings.TrimSpace(string(contextPathBytes))) @@ -32,14 +31,14 @@ index 4731c9d..d717f52 100644 - } - } tag := fmt.Sprintf("hive/simulators/%s:latest", name) -- err := b.buildImage(ctx, buildContextPath, buildDockerfile, tag, nil) +- err := b.buildImage(ctx, buildContextPath, buildDockerfile, tag, buildArgs) - return tag, err + return tag, nil } // BuildImage creates a container by archiving the given file system, diff --git a/internal/libdocker/proxy.go b/internal/libdocker/proxy.go -index a53e5af..0bb2ea9 100644 +index d3a14ae6..8779671e 100644 --- a/internal/libdocker/proxy.go +++ b/internal/libdocker/proxy.go @@ -16,7 +16,7 @@ const hiveproxyTag = "hive/hiveproxy" From 025885f2ad3fb881b1d2180e1998797f0b968b88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Wed, 4 Dec 2024 11:50:46 +0100 Subject: [PATCH 870/970] refactor(sdk): complete generic impl for `PersistenceService` over `N::Primitives` (#13044) --- Cargo.lock | 213 +++++++++++++------------- crates/engine/local/src/service.rs | 2 +- crates/engine/service/Cargo.toml | 1 + crates/engine/service/src/service.rs | 3 +- crates/engine/tree/src/persistence.rs | 55 ++++--- 5 files changed, 143 insertions(+), 131 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 855c02a22b9..b10c4d55d1c 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -91,9 +91,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" @@ -161,7 +161,7 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -265,7 +265,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", "tracing", ] @@ -291,7 +291,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -319,7 +319,7 @@ dependencies = [ "rand 0.8.5", "serde_json", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.4", "tracing", "url", ] @@ -341,7 +341,7 @@ dependencies = [ "getrandom 0.2.15", "hashbrown 0.15.2", "hex-literal", - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "k256", "keccak-asm", @@ -390,7 +390,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", "url", @@ -523,7 +523,7 @@ dependencies = [ "alloy-serde", "serde", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -604,7 +604,7 @@ dependencies = [ "alloy-serde", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -642,7 +642,7 @@ dependencies = [ "auto_impl", "elliptic-curve", "k256", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -660,7 +660,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -686,7 +686,7 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.6.0", + "indexmap 2.7.0", "proc-macro-error2", "proc-macro2", "quote", @@ -745,7 +745,7 @@ dependencies = [ 
"futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tower 0.5.1", "tracing", @@ -897,9 +897,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "aquamarine" @@ -1385,7 +1385,7 @@ dependencies = [ "bitflags 2.6.0", "boa_interner", "boa_macros", - "indexmap 2.6.0", + "indexmap 2.7.0", "num-bigint", "rustc-hash 2.1.0", ] @@ -1411,7 +1411,7 @@ dependencies = [ "fast-float", "hashbrown 0.14.5", "icu_normalizer", - "indexmap 2.6.0", + "indexmap 2.7.0", "intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1457,7 +1457,7 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.14.5", - "indexmap 2.6.0", + "indexmap 2.7.0", "once_cell", "phf", "rustc-hash 2.1.0", @@ -1783,9 +1783,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "69371e34337c4c984bbe322360c2547210bf632eb2814bbe78a6e87a2935bd2b" dependencies = [ "clap_builder", "clap_derive", @@ -1793,9 +1793,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "6e24c1b4099818523236a8ca881d2b45db98dadfb4625cf6608c12069fcbbde1" dependencies = [ "anstream", "anstyle", @@ -2655,7 +2655,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", "walkdir", ] @@ -2811,7 +2811,7 @@ dependencies = [ "reth-node-ethereum", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 
2.0.4", ] [[package]] @@ -2899,7 +2899,7 @@ dependencies = [ "reth-tracing", "reth-trie-db", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", ] @@ -3538,7 +3538,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -3699,9 +3699,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -4133,9 +4133,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "arbitrary", "equivalent", @@ -4162,7 +4162,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.6.0", + "indexmap 2.7.0", "is-terminal", "itoa", "log", @@ -4891,7 +4891,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", - "indexmap 2.6.0", + "indexmap 2.7.0", "metrics", "metrics-util", "quanta", @@ -4923,7 +4923,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.2", - "indexmap 2.6.0", + "indexmap 2.7.0", "metrics", "ordered-float", "quanta", @@ -5342,7 +5342,7 @@ dependencies = [ "derive_more 1.0.0", "serde", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -5357,7 +5357,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", - "thiserror 2.0.3", + 
"thiserror 2.0.4", ] [[package]] @@ -5394,7 +5394,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-genesis", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", "tracing", "unsigned-varint", ] @@ -5434,7 +5434,7 @@ dependencies = [ "op-alloy-protocol", "serde", "snap", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -5514,9 +5514,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arbitrary", "arrayvec", @@ -5525,20 +5525,19 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", - "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.90", + "syn 1.0.109", ] [[package]] @@ -6058,7 +6057,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -6077,7 +6076,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.3", + "thiserror 2.0.4", "tinyvec", "tracing", "web-time", @@ -6535,7 +6534,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "schnellru", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -6571,7 +6570,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tower 0.4.13", "tracing", @@ -6625,7 +6624,7 @@ dependencies = [ "reth-execution-errors", "reth-primitives", "reth-storage-errors", - 
"thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -6781,7 +6780,7 @@ dependencies = [ "reth-fs-util", "secp256k1", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", "tikv-jemallocator", "tracy-client", ] @@ -6926,7 +6925,7 @@ dependencies = [ "sysinfo", "tempfile", "test-fuzz", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -6983,7 +6982,7 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", "tracing", ] @@ -7025,7 +7024,7 @@ dependencies = [ "schnellru", "secp256k1", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -7050,7 +7049,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -7076,7 +7075,7 @@ dependencies = [ "secp256k1", "serde", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -7115,7 +7114,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tokio-util", @@ -7192,7 +7191,7 @@ dependencies = [ "secp256k1", "sha2 0.10.8", "sha3", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tokio-util", @@ -7248,7 +7247,7 @@ dependencies = [ "reth-primitives-traits", "reth-trie", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", ] @@ -7275,7 +7274,7 @@ dependencies = [ "reth-prune", "reth-stages-api", "reth-tasks", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", ] @@ -7330,7 +7329,7 @@ dependencies = [ "reth-trie-parallel", "reth-trie-sparse", "revm-primitives", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -7376,7 +7375,7 @@ dependencies = [ "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -7409,7 +7408,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", 
"tokio-stream", "tokio-util", @@ -7438,7 +7437,7 @@ dependencies = [ "reth-primitives", "reth-primitives-traits", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -7504,7 +7503,7 @@ dependencies = [ "proptest-derive", "rustc-hash 2.1.0", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -7605,7 +7604,7 @@ dependencies = [ "reth-prune-types", "reth-storage-errors", "revm-primitives", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -7700,7 +7699,7 @@ dependencies = [ "reth-transaction-pool", "reth-trie-db", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", ] @@ -7727,7 +7726,7 @@ version = "1.1.2" dependencies = [ "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -7770,7 +7769,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tokio-util", @@ -7787,7 +7786,7 @@ dependencies = [ "criterion", "dashmap 6.1.0", "derive_more 1.0.0", - "indexmap 2.6.0", + "indexmap 2.7.0", "parking_lot", "pprof", "rand 0.8.5", @@ -7795,7 +7794,7 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.4", "tracing", ] @@ -7834,7 +7833,7 @@ dependencies = [ "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -7894,7 +7893,7 @@ dependencies = [ "serial_test", "smallvec", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tokio-util", @@ -7919,7 +7918,7 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", ] @@ -7957,7 +7956,7 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "url", ] @@ -7988,7 +7987,7 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.4", "tracing", "zstd", ] @@ -8122,7 +8121,7 @@ 
dependencies = [ "serde", "shellexpand", "strum", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "toml", "tracing", @@ -8450,7 +8449,7 @@ dependencies = [ "reth-transaction-pool", "revm", "sha2 0.10.8", - "thiserror 2.0.3", + "thiserror 2.0.4", "tracing", ] @@ -8517,7 +8516,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -8582,7 +8581,7 @@ dependencies = [ "reth-primitives", "revm-primitives", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", ] @@ -8757,7 +8756,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.1.0", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -8778,7 +8777,7 @@ dependencies = [ "serde", "serde_json", "test-fuzz", - "thiserror 2.0.3", + "thiserror 2.0.4", "toml", ] @@ -8864,7 +8863,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tower 0.4.13", @@ -8958,7 +8957,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-util", "tower 0.4.13", @@ -8999,7 +8998,7 @@ dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -9083,7 +9082,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -9183,7 +9182,7 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -9210,7 +9209,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -9316,7 +9315,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", "tracing-futures", @@ -9400,7 +9399,7 @@ dependencies 
= [ "serde_json", "smallvec", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -9515,7 +9514,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -9541,7 +9540,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "smallvec", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -10180,7 +10179,7 @@ version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "memchr", "ryu", @@ -10240,7 +10239,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_derive", "serde_json", @@ -10484,9 +10483,9 @@ dependencies = [ [[package]] name = "soketto" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" dependencies = [ "base64 0.22.1", "bytes", @@ -10775,11 +10774,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "2f49a1853cf82743e3b7950f77e0f4d622ca36cf4317cba00c767838bac8d490" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.4", ] [[package]] @@ -10795,9 +10794,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "8381894bb3efe0c4acac3ded651301ceee58a15d47c2e34885ed1908ad667061" dependencies = [ "proc-macro2", 
"quote", @@ -10856,9 +10855,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -10880,9 +10879,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -10934,9 +10933,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", @@ -11042,7 +11041,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index b8cab99970a..6ce588a8264 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -88,7 +88,7 @@ where if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; let persistence_handle = - PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); + PersistenceHandle::::spawn_service(provider, 
pruner, sync_metrics_tx); let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index 8854fd18879..326bc06b5e3 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -18,6 +18,7 @@ reth-engine-tree.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true reth-payload-builder.workspace = true +reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-stages-api.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index bc3e36beafc..d839fab2c0e 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -19,6 +19,7 @@ use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::EthBlockClient; use reth_node_types::{BlockTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; +use reth_primitives::EthPrimitives; use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::{MetricEventsSender, Pipeline}; @@ -90,7 +91,7 @@ where let downloader = BasicBlockDownloader::new(client, consensus.clone().as_consensus()); let persistence_handle = - PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); + PersistenceHandle::::spawn_service(provider, pruner, sync_metrics_tx); let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index dcdeee67448..2f0b20f02dc 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -30,11 +30,14 @@ impl PersistenceNodeTypes for T where T: ProviderNodeTypes { +pub struct PersistenceService +where + N: PersistenceNodeTypes, +{ /// The provider factory to 
use provider: ProviderFactory, /// Incoming requests - incoming: Receiver, + incoming: Receiver>, /// The pruner pruner: PrunerWithFactory>, /// metrics @@ -43,11 +46,14 @@ pub struct PersistenceService { sync_metrics_tx: MetricEventsSender, } -impl PersistenceService { +impl PersistenceService +where + N: PersistenceNodeTypes, +{ /// Create a new persistence service pub fn new( provider: ProviderFactory, - incoming: Receiver, + incoming: Receiver>, pruner: PrunerWithFactory>, sync_metrics_tx: MetricEventsSender, ) -> Self { @@ -66,7 +72,10 @@ impl PersistenceService { } } -impl PersistenceService { +impl PersistenceService +where + N: PersistenceNodeTypes, +{ /// This is the main loop, that will listen to database events and perform the requested /// database actions pub fn run(mut self) -> Result<(), PersistenceError> { @@ -135,7 +144,7 @@ impl PersistenceService { fn on_save_blocks( &self, - blocks: Vec, + blocks: Vec>, ) -> Result, PersistenceError> { debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.block.num_hash()), last=?blocks.last().map(|b| b.block.num_hash()), "Saving range of blocks"); let start_time = Instant::now(); @@ -194,27 +203,29 @@ pub enum PersistenceAction { #[derive(Debug, Clone)] pub struct PersistenceHandle { /// The channel used to communicate with the persistence service - sender: Sender, - _marker: std::marker::PhantomData, + sender: Sender>, } impl PersistenceHandle { /// Create a new [`PersistenceHandle`] from a [`Sender`]. - pub const fn new(sender: Sender) -> Self { - Self { sender, _marker: std::marker::PhantomData } + pub const fn new(sender: Sender>) -> Self { + Self { sender } } /// Create a new [`PersistenceHandle`], and spawn the persistence service. 
- pub fn spawn_service( + pub fn spawn_service( provider_factory: ProviderFactory, pruner: PrunerWithFactory>, sync_metrics_tx: MetricEventsSender, - ) -> Self { + ) -> PersistenceHandle + where + N: PersistenceNodeTypes, + { // create the initial channels let (db_service_tx, db_service_rx) = std::sync::mpsc::channel(); // construct persistence handle - let persistence_handle = Self::new(db_service_tx); + let persistence_handle = PersistenceHandle::new(db_service_tx); // spawn the persistence service let db_service = @@ -235,8 +246,8 @@ impl PersistenceHandle { /// for creating any channels for the given action. pub fn send_action( &self, - action: PersistenceAction, - ) -> Result<(), SendError> { + action: PersistenceAction, + ) -> Result<(), SendError>> { self.sender.send(action) } @@ -250,9 +261,9 @@ impl PersistenceHandle { /// If there are no blocks to persist, then `None` is sent in the sender. pub fn save_blocks( &self, - blocks: Vec, + blocks: Vec>, tx: oneshot::Sender>, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.send_action(PersistenceAction::SaveBlocks(blocks, tx)) } @@ -260,7 +271,7 @@ impl PersistenceHandle { pub fn save_finalized_block_number( &self, finalized_block: u64, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.send_action(PersistenceAction::SaveFinalizedBlock(finalized_block)) } @@ -268,7 +279,7 @@ impl PersistenceHandle { pub fn save_safe_block_number( &self, safe_block: u64, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.send_action(PersistenceAction::SaveSafeBlock(safe_block)) } @@ -281,7 +292,7 @@ impl PersistenceHandle { &self, block_num: u64, tx: oneshot::Sender>, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.send_action(PersistenceAction::RemoveBlocksAbove(block_num, tx)) } } @@ -296,7 +307,7 @@ mod tests { use reth_prune::Pruner; use tokio::sync::mpsc::unbounded_channel; - fn default_persistence_handle() -> PersistenceHandle { + fn 
default_persistence_handle() -> PersistenceHandle { let provider = create_test_provider_factory(); let (_finished_exex_height_tx, finished_exex_height_rx) = @@ -306,7 +317,7 @@ mod tests { Pruner::new_with_factory(provider.clone(), vec![], 5, 0, None, finished_exex_height_rx); let (sync_metrics_tx, _sync_metrics_rx) = unbounded_channel(); - PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx) + PersistenceHandle::::spawn_service(provider, pruner, sync_metrics_tx) } #[tokio::test] From 8d1a332119cb4f80bfe24dbda86a458c4a4e6ad8 Mon Sep 17 00:00:00 2001 From: kien-rise <157339831+kien-rise@users.noreply.github.com> Date: Wed, 4 Dec 2024 19:48:15 +0700 Subject: [PATCH 871/970] feat: make `PoolInner` functions `pub` (#13124) --- crates/transaction-pool/src/pool/best.rs | 3 +- crates/transaction-pool/src/pool/mod.rs | 115 +++++++++++------------ 2 files changed, 59 insertions(+), 59 deletions(-) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index b770e3da4b0..a07df7cd509 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -77,7 +77,8 @@ impl Iterator for BestTransactionsWithFees { /// be executed on the current state, but only yields transactions that are ready to be executed /// now. While it contains all gapless transactions of a sender, it _always_ only returns the /// transaction with the current on chain nonce. -pub(crate) struct BestTransactions { +#[derive(Debug)] +pub struct BestTransactions { /// Contains a copy of _all_ transactions of the pending pool at the point in time this /// iterator was created. pub(crate) all: BTreeMap>, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index b5391b6e8d7..fce1b9acef8 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -166,7 +166,7 @@ where S: BlobStore, { /// Create a new transaction pool instance. 
- pub(crate) fn new(validator: V, ordering: T, blob_store: S, config: PoolConfig) -> Self { + pub fn new(validator: V, ordering: T, blob_store: S, config: PoolConfig) -> Self { Self { identifiers: Default::default(), validator, @@ -182,31 +182,31 @@ where } /// Returns the configured blob store. - pub(crate) const fn blob_store(&self) -> &S { + pub const fn blob_store(&self) -> &S { &self.blob_store } /// Returns stats about the size of the pool. - pub(crate) fn size(&self) -> PoolSize { + pub fn size(&self) -> PoolSize { self.get_pool_data().size() } /// Returns the currently tracked block - pub(crate) fn block_info(&self) -> BlockInfo { + pub fn block_info(&self) -> BlockInfo { self.get_pool_data().block_info() } /// Sets the currently tracked block - pub(crate) fn set_block_info(&self, info: BlockInfo) { + pub fn set_block_info(&self, info: BlockInfo) { self.pool.write().set_block_info(info) } /// Returns the internal [`SenderId`] for this address - pub(crate) fn get_sender_id(&self, addr: Address) -> SenderId { + pub fn get_sender_id(&self, addr: Address) -> SenderId { self.identifiers.write().sender_id_or_create(addr) } /// Returns all senders in the pool - pub(crate) fn unique_senders(&self) -> HashSet
{ + pub fn unique_senders(&self) -> HashSet
{ self.get_pool_data().unique_senders() } @@ -266,29 +266,24 @@ where /// If the pool contains the transaction, this adds a new listener that gets notified about /// transaction events. - pub(crate) fn add_transaction_event_listener( - &self, - tx_hash: TxHash, - ) -> Option { + pub fn add_transaction_event_listener(&self, tx_hash: TxHash) -> Option { self.get_pool_data() .contains(&tx_hash) .then(|| self.event_listener.write().subscribe(tx_hash)) } /// Adds a listener for all transaction events. - pub(crate) fn add_all_transactions_event_listener( - &self, - ) -> AllTransactionsEvents { + pub fn add_all_transactions_event_listener(&self) -> AllTransactionsEvents { self.event_listener.write().subscribe_all() } /// Returns a read lock to the pool's data. - pub(crate) fn get_pool_data(&self) -> RwLockReadGuard<'_, TxPool> { + pub fn get_pool_data(&self) -> RwLockReadGuard<'_, TxPool> { self.pool.read() } /// Returns hashes of _all_ transactions in the pool. - pub(crate) fn pooled_transactions_hashes(&self) -> Vec { + pub fn pooled_transactions_hashes(&self) -> Vec { self.get_pool_data() .all() .transactions_iter() @@ -298,12 +293,12 @@ where } /// Returns _all_ transactions in the pool. - pub(crate) fn pooled_transactions(&self) -> Vec>> { + pub fn pooled_transactions(&self) -> Vec>> { self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).collect() } /// Returns only the first `max` transactions in the pool. - pub(crate) fn pooled_transactions_max( + pub fn pooled_transactions_max( &self, max: usize, ) -> Vec>> { @@ -340,7 +335,7 @@ where } /// Returns pooled transactions for the given transaction hashes. - pub(crate) fn get_pooled_transaction_elements( + pub fn get_pooled_transaction_elements( &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, @@ -352,7 +347,7 @@ where } /// Returns pooled transactions for the given transaction hashes as the requested type. - pub(crate) fn get_pooled_transactions_as

( + pub fn get_pooled_transactions_as

( &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, @@ -382,7 +377,7 @@ where } /// Returns converted pooled transaction for the given transaction hash. - pub(crate) fn get_pooled_transaction_element( + pub fn get_pooled_transaction_element( &self, tx_hash: TxHash, ) -> Option<<::Transaction as PoolTransaction>::Pooled> @@ -393,7 +388,7 @@ where } /// Updates the entire pool after a new block was executed. - pub(crate) fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>) { + pub fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>) { trace!(target: "txpool", ?update, "updating pool on canonical state change"); let block_info = update.block_info(); @@ -422,7 +417,7 @@ where /// Performs account updates on the pool. /// /// This will either promote or discard transactions based on the new account state. - pub(crate) fn update_accounts(&self, accounts: Vec) { + pub fn update_accounts(&self, accounts: Vec) { let changed_senders = self.changed_senders(accounts.into_iter()); let UpdateOutcome { promoted, discarded } = self.pool.write().update_accounts(changed_senders); @@ -522,7 +517,8 @@ where } } - pub(crate) fn add_transaction_and_subscribe( + /// Adds a transaction and returns the event stream. + pub fn add_transaction_and_subscribe( &self, origin: TransactionOrigin, tx: TransactionValidationOutcome, @@ -676,13 +672,13 @@ where } /// Returns an iterator that yields transactions that are ready to be included in the block. - pub(crate) fn best_transactions(&self) -> BestTransactions { + pub fn best_transactions(&self) -> BestTransactions { self.get_pool_data().best_transactions() } /// Returns an iterator that yields transactions that are ready to be included in the block with /// the given base fee and optional blob fee attributes. 
- pub(crate) fn best_transactions_with_attributes( + pub fn best_transactions_with_attributes( &self, best_transactions_attributes: BestTransactionsAttributes, ) -> Box>>> @@ -691,7 +687,7 @@ where } /// Returns only the first `max` transactions in the pending pool. - pub(crate) fn pending_transactions_max( + pub fn pending_transactions_max( &self, max: usize, ) -> Vec>> { @@ -699,17 +695,17 @@ where } /// Returns all transactions from the pending sub-pool - pub(crate) fn pending_transactions(&self) -> Vec>> { + pub fn pending_transactions(&self) -> Vec>> { self.get_pool_data().pending_transactions() } /// Returns all transactions from parked pools - pub(crate) fn queued_transactions(&self) -> Vec>> { + pub fn queued_transactions(&self) -> Vec>> { self.get_pool_data().queued_transactions() } /// Returns all transactions in the pool - pub(crate) fn all_transactions(&self) -> AllPoolTransactions { + pub fn all_transactions(&self) -> AllPoolTransactions { let pool = self.get_pool_data(); AllPoolTransactions { pending: pool.pending_transactions(), @@ -718,7 +714,7 @@ where } /// Removes and returns all matching transactions from the pool. - pub(crate) fn remove_transactions( + pub fn remove_transactions( &self, hashes: Vec, ) -> Vec>> { @@ -736,7 +732,7 @@ where /// Removes and returns all matching transactions and their dependent transactions from the /// pool. - pub(crate) fn remove_transactions_and_descendants( + pub fn remove_transactions_and_descendants( &self, hashes: Vec, ) -> Vec>> { @@ -752,7 +748,8 @@ where removed } - pub(crate) fn remove_transactions_by_sender( + /// Removes and returns all transactions by the specified sender from the pool. + pub fn remove_transactions_by_sender( &self, sender: Address, ) -> Vec>> { @@ -767,7 +764,7 @@ where } /// Removes and returns all transactions that are present in the pool. 
- pub(crate) fn retain_unknown(&self, announcement: &mut A) + pub fn retain_unknown(&self, announcement: &mut A) where A: HandleMempoolData, { @@ -779,15 +776,12 @@ where } /// Returns the transaction by hash. - pub(crate) fn get( - &self, - tx_hash: &TxHash, - ) -> Option>> { + pub fn get(&self, tx_hash: &TxHash) -> Option>> { self.get_pool_data().get(tx_hash) } /// Returns all transactions of the address - pub(crate) fn get_transactions_by_sender( + pub fn get_transactions_by_sender( &self, sender: Address, ) -> Vec>> { @@ -796,7 +790,7 @@ where } /// Returns all queued transactions of the address by sender - pub(crate) fn get_queued_transactions_by_sender( + pub fn get_queued_transactions_by_sender( &self, sender: Address, ) -> Vec>> { @@ -805,7 +799,7 @@ where } /// Returns all pending transactions filtered by predicate - pub(crate) fn pending_transactions_with_predicate( + pub fn pending_transactions_with_predicate( &self, predicate: impl FnMut(&ValidPoolTransaction) -> bool, ) -> Vec>> { @@ -813,7 +807,7 @@ where } /// Returns all pending transactions of the address by sender - pub(crate) fn get_pending_transactions_by_sender( + pub fn get_pending_transactions_by_sender( &self, sender: Address, ) -> Vec>> { @@ -822,7 +816,7 @@ where } /// Returns the highest transaction of the address - pub(crate) fn get_highest_transaction_by_sender( + pub fn get_highest_transaction_by_sender( &self, sender: Address, ) -> Option>> { @@ -831,7 +825,7 @@ where } /// Returns the transaction with the highest nonce that is executable given the on chain nonce. 
- pub(crate) fn get_highest_consecutive_transaction_by_sender( + pub fn get_highest_consecutive_transaction_by_sender( &self, sender: Address, on_chain_nonce: u64, @@ -842,8 +836,16 @@ where ) } + /// Returns the transaction given a [`TransactionId`] + pub fn get_transaction_by_transaction_id( + &self, + transaction_id: &TransactionId, + ) -> Option>> { + self.get_pool_data().all().get(transaction_id).map(|tx| tx.transaction.clone()) + } + /// Returns all transactions that where submitted with the given [`TransactionOrigin`] - pub(crate) fn get_transactions_by_origin( + pub fn get_transactions_by_origin( &self, origin: TransactionOrigin, ) -> Vec>> { @@ -851,7 +853,7 @@ where } /// Returns all pending transactions filted by [`TransactionOrigin`] - pub(crate) fn get_pending_transactions_by_origin( + pub fn get_pending_transactions_by_origin( &self, origin: TransactionOrigin, ) -> Vec>> { @@ -861,10 +863,7 @@ where /// Returns all the transactions belonging to the hashes. /// /// If no transaction exists, it is skipped. - pub(crate) fn get_all( - &self, - txs: Vec, - ) -> Vec>> { + pub fn get_all(&self, txs: Vec) -> Vec>> { if txs.is_empty() { return Vec::new() } @@ -872,7 +871,7 @@ where } /// Notify about propagated transactions. - pub(crate) fn on_propagated(&self, txs: PropagatedTransactions) { + pub fn on_propagated(&self, txs: PropagatedTransactions) { if txs.0.is_empty() { return } @@ -882,17 +881,17 @@ where } /// Number of transactions in the entire pool - pub(crate) fn len(&self) -> usize { + pub fn len(&self) -> usize { self.get_pool_data().len() } /// Whether the pool is empty - pub(crate) fn is_empty(&self) -> bool { + pub fn is_empty(&self) -> bool { self.get_pool_data().is_empty() } /// Returns whether or not the pool is over its configured size and transaction count limits. 
- pub(crate) fn is_exceeded(&self) -> bool { + pub fn is_exceeded(&self) -> bool { self.pool.read().is_exceeded() } @@ -900,7 +899,7 @@ where /// /// If some of the transactions are blob transactions, they are also removed from the blob /// store. - pub(crate) fn discard_worst(&self) -> HashSet { + pub fn discard_worst(&self) -> HashSet { let discarded = self.pool.write().discard_worst(); // delete any blobs associated with discarded blob transactions @@ -921,17 +920,17 @@ where } /// Delete a blob from the blob store - pub(crate) fn delete_blob(&self, blob: TxHash) { + pub fn delete_blob(&self, blob: TxHash) { let _ = self.blob_store.delete(blob); } /// Delete all blobs from the blob store - pub(crate) fn delete_blobs(&self, txs: Vec) { + pub fn delete_blobs(&self, txs: Vec) { let _ = self.blob_store.delete_all(txs); } /// Cleans up the blob store - pub(crate) fn cleanup_blobs(&self) { + pub fn cleanup_blobs(&self) { let stat = self.blob_store.cleanup(); self.blob_store_metrics.blobstore_failed_deletes.increment(stat.delete_failed as u64); self.update_blob_store_metrics(); From 53243a29f30021ee6061f1b11cb3560de1cd9ac2 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Wed, 4 Dec 2024 20:13:35 +0700 Subject: [PATCH 872/970] perf: avoid cloning bytecode when converting revm's `&AccountInfo` to reth's `Account` (#13126) --- crates/evm/execution-types/src/execution_outcome.rs | 2 +- crates/primitives-traits/src/account.rs | 13 +++++++++++-- crates/revm/src/witness.rs | 2 +- crates/storage/provider/src/test_utils/blocks.rs | 2 +- crates/storage/provider/src/writer/mod.rs | 2 +- crates/trie/trie/benches/hash_post_state.rs | 2 +- crates/trie/trie/src/state.rs | 4 ++-- 7 files changed, 18 insertions(+), 9 deletions(-) diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 1dca5f2fc9e..830508dc92d 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs 
+++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -147,7 +147,7 @@ impl ExecutionOutcome { /// Get account if account is known. pub fn account(&self, address: &Address) -> Option> { - self.bundle.account(address).map(|a| a.info.clone().map(Into::into)) + self.bundle.account(address).map(|a| a.info.as_ref().map(Into::into)) } /// Get storage if value is known. diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index 398294b09d8..17f7f6f58ca 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -178,11 +178,20 @@ impl From<&GenesisAccount> for Account { impl From for Account { fn from(revm_acc: AccountInfo) -> Self { - let code_hash = revm_acc.code_hash; Self { balance: revm_acc.balance, nonce: revm_acc.nonce, - bytecode_hash: (code_hash != KECCAK_EMPTY).then_some(code_hash), + bytecode_hash: (!revm_acc.is_empty_code_hash()).then_some(revm_acc.code_hash), + } + } +} + +impl From<&AccountInfo> for Account { + fn from(revm_acc: &AccountInfo) -> Self { + Self { + balance: revm_acc.balance, + nonce: revm_acc.nonce, + bytecode_hash: (!revm_acc.is_empty_code_hash()).then_some(revm_acc.code_hash), } } } diff --git a/crates/revm/src/witness.rs b/crates/revm/src/witness.rs index c40c87d324b..6140de9d48a 100644 --- a/crates/revm/src/witness.rs +++ b/crates/revm/src/witness.rs @@ -45,7 +45,7 @@ impl ExecutionWitnessRecord { let hashed_address = keccak256(address); self.hashed_state .accounts - .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); + .insert(hashed_address, account.account.as_ref().map(|a| (&a.info).into())); let storage = self .hashed_state diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index fdded2807aa..b5c0ba7a120 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -171,7 +171,7 @@ fn 
bundle_state_root(execution_outcome: &ExecutionOutcome) -> B256 { ( address, ( - Into::::into(info.clone()), + Into::::into(info), storage_root_unhashed( account .storage diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index dc5af491efc..7ab6499cc3e 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -357,7 +357,7 @@ mod tests { let reth_account_a = account_a.into(); let reth_account_b = account_b.into(); - let reth_account_b_changed = account_b_changed.clone().into(); + let reth_account_b_changed = (&account_b_changed).into(); // Check plain state assert_eq!( diff --git a/crates/trie/trie/benches/hash_post_state.rs b/crates/trie/trie/benches/hash_post_state.rs index 7111a785f46..da47d01e15c 100644 --- a/crates/trie/trie/benches/hash_post_state.rs +++ b/crates/trie/trie/benches/hash_post_state.rs @@ -29,7 +29,7 @@ fn from_bundle_state_seq(state: &HashMap) -> HashedPostS for (address, account) in state { let hashed_address = keccak256(address); - this.accounts.insert(hashed_address, account.info.clone().map(Into::into)); + this.accounts.insert(hashed_address, account.info.as_ref().map(Into::into)); let hashed_storage = HashedStorage::from_iter( account.status.was_destroyed(), diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 3e390bf97bc..cc5c9d15eac 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -34,7 +34,7 @@ impl HashedPostState { .into_par_iter() .map(|(address, account)| { let hashed_address = KH::hash_key(address); - let hashed_account = account.info.clone().map(Into::into); + let hashed_account = account.info.as_ref().map(Into::into); let hashed_storage = HashedStorage::from_plain_storage( account.status, account.storage.iter().map(|(slot, value)| (slot, &value.present_value)), @@ -61,7 +61,7 @@ impl HashedPostState { .into_par_iter() .map(|(address, account)| { let hashed_address = 
KH::hash_key(address); - let hashed_account = account.account.as_ref().map(|a| a.info.clone().into()); + let hashed_account = account.account.as_ref().map(|a| (&a.info).into()); let hashed_storage = HashedStorage::from_plain_storage( account.status, account.account.as_ref().map(|a| a.storage.iter()).into_iter().flatten(), From 589fc2a68d6f8974234d92a383ea550ae382f2a0 Mon Sep 17 00:00:00 2001 From: Woolfgm <160153877+Dahka2321@users.noreply.github.com> Date: Wed, 4 Dec 2024 14:45:51 +0100 Subject: [PATCH 873/970] Fix typos in documentation (#13094) --- .config/zepter.yaml | 2 +- examples/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.config/zepter.yaml b/.config/zepter.yaml index f5d320b4af9..22b0bf609e6 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -33,7 +33,7 @@ help: text: | Reth uses the Zepter CLI to detect abnormalities in Cargo features, e.g. missing propagation. - It looks like one more more checks failed; please check the console output. + It looks like one more checks failed; please check the console output. You can try to automatically address them by installing zepter (`cargo install zepter --locked`) and simply running `zepter` in the workspace root. 
links: diff --git a/examples/README.md b/examples/README.md index b7847c904a8..0b42c0c488b 100644 --- a/examples/README.md +++ b/examples/README.md @@ -30,7 +30,7 @@ See examples in a [dedicated repository](https://github.com/paradigmxyz/reth-exe | Example | Description | | ----------------------- | --------------------------------------------------------------------------- | -| [DB over RPC](./rpc-db) | Illustrates how to run a standalone RPC server over a Rethdatabase instance | +| [DB over RPC](./rpc-db) | Illustrates how to run a standalone RPC server over a Reth database instance | ## Database From 24af0a83a0714e6c446d6dabd40e4d088d7c2956 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 4 Dec 2024 07:56:05 -0600 Subject: [PATCH 874/970] Extend revm spec tests for holocene (#13111) --- crates/optimism/evm/src/config.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index f2d35ba56c4..4a37860efc6 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -92,6 +92,10 @@ mod tests { let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)).into(); f(cs).build() } + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.holocene_activated()), 0), + revm_primitives::HOLOCENE + ); assert_eq!( revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.granite_activated()), 0), revm_primitives::GRANITE @@ -125,6 +129,10 @@ mod tests { let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)).into(); f(cs).build() } + assert_eq!( + revm_spec(&op_cs(|cs| cs.holocene_activated()), &Head::default()), + revm_primitives::HOLOCENE + ); assert_eq!( revm_spec(&op_cs(|cs| cs.granite_activated()), &Head::default()), revm_primitives::GRANITE From d298fb1b81b8300c794bd14e9aa47a23c081f866 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 4 Dec 2024 15:27:49 +0100 Subject: [PATCH 875/970] fix(op): add missing op 
consensus validation check (#13122) --- crates/optimism/bin/Cargo.toml | 2 +- crates/optimism/chainspec/src/lib.rs | 39 ++++++++++++------- crates/optimism/cli/Cargo.toml | 2 +- crates/optimism/consensus/Cargo.toml | 5 ++- crates/optimism/consensus/src/lib.rs | 31 ++++++++++++--- crates/optimism/evm/Cargo.toml | 2 +- crates/optimism/node/Cargo.toml | 2 +- crates/optimism/primitives/Cargo.toml | 5 ++- crates/optimism/primitives/src/lib.rs | 2 +- .../primitives/src/transaction/signed.rs | 24 ++++++------ crates/optimism/rpc/Cargo.toml | 2 +- crates/storage/provider/Cargo.toml | 2 +- 12 files changed, 75 insertions(+), 43 deletions(-) diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 60fde90f191..b182a4f278a 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -45,7 +45,7 @@ optimism = [ "reth-optimism-payload-builder/optimism", "reth-optimism-rpc/optimism", "reth-provider/optimism", - "reth-optimism-primitives/op", + "reth-optimism-primitives/optimism", ] dev = [ diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index a3dab80705e..0ee86bc7d24 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -21,7 +21,7 @@ use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; use alloy_consensus::Header; use alloy_genesis::Genesis; -use alloy_primitives::{Bytes, B256, U256}; +use alloy_primitives::{B256, U256}; pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; use derive_more::{Constructor, Deref, Display, From, Into}; @@ -185,6 +185,28 @@ pub struct OpChainSpec { } impl OpChainSpec { + /// Extracts the Holcene 1599 parameters from the encoded extradata from the parent header. + /// + /// Caution: Caller must ensure that holocene is active in the parent header. 
+ /// + /// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) + pub fn decode_holocene_base_fee( + &self, + parent: &Header, + timestamp: u64, + ) -> Result { + let (denominator, elasticity) = decode_holocene_1559_params(&parent.extra_data)?; + let base_fee = if elasticity == 0 && denominator == 0 { + parent + .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp)) + .unwrap_or_default() + } else { + let base_fee_params = BaseFeeParams::new(denominator as u128, elasticity as u128); + parent.next_block_base_fee(base_fee_params).unwrap_or_default() + }; + Ok(base_fee) + } + /// Read from parent to determine the base fee for the next block /// /// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) @@ -204,16 +226,7 @@ impl OpChainSpec { // from the parent block's extra data. // Else, use the base fee params (default values) from chainspec if is_holocene_activated { - let (denominator, elasticity) = decode_holocene_1559_params(parent.extra_data.clone())?; - if elasticity == 0 && denominator == 0 { - return Ok(U256::from( - parent - .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp)) - .unwrap_or_default(), - )); - } - let base_fee_params = BaseFeeParams::new(denominator as u128, elasticity as u128); - Ok(U256::from(parent.next_block_base_fee(base_fee_params).unwrap_or_default())) + Ok(U256::from(self.decode_holocene_base_fee(parent, timestamp)?)) } else { Ok(U256::from( parent @@ -247,7 +260,7 @@ impl core::error::Error for DecodeError { /// Extracts the Holcene 1599 parameters from the encoded form: /// -pub fn decode_holocene_1559_params(extra_data: Bytes) -> Result<(u32, u32), DecodeError> { +pub fn decode_holocene_1559_params(extra_data: &[u8]) -> Result<(u32, u32), DecodeError> { if extra_data.len() < 9 { return Err(DecodeError::InsufficientData); } 
@@ -492,7 +505,7 @@ mod tests { use std::sync::Arc; use alloy_genesis::{ChainConfig, Genesis}; - use alloy_primitives::b256; + use alloy_primitives::{b256, Bytes}; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head}; use reth_optimism_forks::{OpHardfork, OpHardforks}; diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 48ea2d07dec..4e18b51160e 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -95,7 +95,7 @@ optimism = [ "reth-execution-types/optimism", "reth-db/optimism", "reth-db-api/optimism", - "reth-optimism-primitives/op", + "reth-optimism-primitives/optimism", "reth-downloaders/optimism" ] asm-keccak = [ diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 30f16e4eb22..faece6eacf8 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -22,7 +22,8 @@ reth-trie-common.workspace = true # op-reth reth-optimism-forks.workspace = true reth-optimism-chainspec.workspace = true -reth-optimism-primitives.workspace = true +# TODO: remove this after feature cleanup +reth-optimism-primitives = { workspace = true, features = ["serde"] } # ethereum alloy-primitives.workspace = true @@ -36,4 +37,4 @@ alloy-primitives.workspace = true reth-optimism-chainspec.workspace = true [features] -optimism = ["reth-primitives/optimism"] +optimism = ["reth-primitives/optimism", "reth-optimism-primitives/optimism"] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index b50efd5f6f2..6d457f42c90 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,7 +9,7 @@ // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")] -use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{BlockHeader, Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus::{ @@ -112,11 +112,30 @@ impl HeaderValidator for OpBeaconConsensus { validate_against_parent_timestamp(header.header(), parent.header())?; } - validate_against_parent_eip1559_base_fee( - header.header(), - parent.header(), - &self.chain_spec, - )?; + // EIP1559 base fee validation + // + // > if Holocene is active in parent_header.timestamp, then the parameters from + // > parent_header.extraData are used. + if self.chain_spec.is_holocene_active_at_timestamp(parent.timestamp) { + let header_base_fee = + header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; + let expected_base_fee = self + .chain_spec + .decode_holocene_base_fee(parent, header.timestamp) + .map_err(|_| ConsensusError::BaseFeeMissing)?; + if expected_base_fee != header_base_fee { + return Err(ConsensusError::BaseFeeDiff(GotExpected { + expected: expected_base_fee, + got: header_base_fee, + })) + } + } else { + validate_against_parent_eip1559_base_fee( + header.header(), + parent.header(), + &self.chain_spec, + )?; + } // ensure that the blob gas fields for this block if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 309ddc1cb4e..ab22e3e3e81 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -75,5 +75,5 @@ optimism = [ "reth-optimism-consensus/optimism", "revm/optimism", "revm-primitives/optimism", - "reth-optimism-primitives/op", + "reth-optimism-primitives/optimism", ] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index b0b7065f336..b833342282a 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -97,7 +97,7 @@ optimism = [ "reth-db/optimism", 
"reth-optimism-node/optimism", "reth-node-core/optimism", - "reth-optimism-primitives/op", + "reth-optimism-primitives/optimism", ] asm-keccak = [ "reth-primitives/asm-keccak", diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 38f76aa6256..ed8e9686fa7 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -101,6 +101,7 @@ arbitrary = [ "revm-primitives/arbitrary", "rand", ] -op = [ - "revm-primitives/optimism", +optimism = [ + "revm-primitives/optimism", + "reth-primitives/optimism" ] diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index df504211021..b1f029d20bc 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -7,7 +7,7 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] // The `optimism` feature must be enabled to use this crate. -#![cfg(feature = "op")] +#![cfg(feature = "optimism")] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(not(feature = "std"), no_std)] diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs index 2dc72026e7c..26581214e67 100644 --- a/crates/optimism/primitives/src/transaction/signed.rs +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -1,13 +1,7 @@ //! A signed Optimism transaction. 
+use crate::{OpTransaction, OpTxType}; use alloc::vec::Vec; -use core::{ - hash::{Hash, Hasher}, - mem, -}; -#[cfg(feature = "std")] -use std::sync::OnceLock; - use alloy_consensus::{ transaction::RlpEcdsaTx, SignableTransaction, Transaction, TxEip1559, TxEip2930, TxEip7702, }; @@ -20,6 +14,10 @@ use alloy_primitives::{ keccak256, Address, Bytes, PrimitiveSignature as Signature, TxHash, TxKind, Uint, B256, U256, }; use alloy_rlp::Header; +use core::{ + hash::{Hash, Hasher}, + mem, +}; use derive_more::{AsRef, Deref}; #[cfg(not(feature = "std"))] use once_cell::sync::OnceCell as OnceLock; @@ -32,8 +30,8 @@ use reth_primitives::{ }; use reth_primitives_traits::{FillTxEnv, InMemorySize, SignedTransaction}; use revm_primitives::{AuthorizationList, OptimismFields, TxEnv}; - -use crate::{OpTransaction, OpTxType}; +#[cfg(feature = "std")] +use std::sync::OnceLock; /// Signed transaction. #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] @@ -105,10 +103,6 @@ impl SignedTransaction for OpTransactionSigned { recover_signer_unchecked(signature, signature_hash) } - fn recalculate_hash(&self) -> B256 { - keccak256(self.encoded_2718()) - } - fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec) -> Option

{ // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. @@ -119,6 +113,10 @@ impl SignedTransaction for OpTransactionSigned { let signature_hash = keccak256(buf); recover_signer_unchecked(&self.signature, signature_hash) } + + fn recalculate_hash(&self) -> B256 { + keccak256(self.encoded_2718()) + } } impl FillTxEnv for OpTransactionSigned { diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 9894dd8a3db..968beaf9e83 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -73,5 +73,5 @@ optimism = [ "revm/optimism", "reth-optimism-consensus/optimism", "reth-optimism-payload-builder/optimism", - "reth-optimism-primitives/op", + "reth-optimism-primitives/optimism", ] diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index f6d577aadbe..84808ed7c38 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -96,7 +96,7 @@ optimism = [ "reth-db/optimism", "reth-db-api/optimism", "revm/optimism", - "reth-optimism-primitives/op", + "reth-optimism-primitives/optimism", ] serde = [ "dashmap/serde", From 3091386fd1bd80564530a1d712f1b3828b6db694 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 4 Dec 2024 15:45:23 +0100 Subject: [PATCH 876/970] fix(witness): collect witness using sparse trie (#13072) --- crates/evm/execution-errors/src/trie.rs | 11 +- crates/trie/sparse/src/state.rs | 2 +- crates/trie/trie/src/proof/blinded.rs | 34 +++ crates/trie/trie/src/witness.rs | 335 ++++++++---------------- 4 files changed, 153 insertions(+), 229 deletions(-) diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index 83210faab52..8d04f97e8ea 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -118,7 +118,7 @@ pub enum SparseTrieError { } /// Trie witness errors. 
-#[derive(Error, PartialEq, Eq, Clone, Debug)] +#[derive(Error, Debug)] pub enum TrieWitnessError { /// Error gather proofs. #[error(transparent)] @@ -126,15 +126,12 @@ pub enum TrieWitnessError { /// RLP decoding error. #[error(transparent)] Rlp(#[from] alloy_rlp::Error), + /// Sparse state trie error. + #[error(transparent)] + Sparse(#[from] SparseStateTrieError), /// Missing account. #[error("missing account {_0}")] MissingAccount(B256), - /// Missing target node. - #[error("target node missing from proof {_0:?}")] - MissingTargetNode(Nibbles), - /// Unexpected empty root. - #[error("unexpected empty root: {_0:?}")] - UnexpectedEmptyRoot(Nibbles), } impl From for ProviderError { diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 9b4b3800251..7c79b58e867 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -36,10 +36,10 @@ pub struct SparseStateTrie Self { Self { + provider_factory: Default::default(), state: Default::default(), storages: Default::default(), revealed: Default::default(), - provider_factory: Default::default(), retain_updates: false, account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), } diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/blinded.rs index 5fd3ecdc08e..a7b60bc6b27 100644 --- a/crates/trie/trie/src/proof/blinded.rs +++ b/crates/trie/trie/src/proof/blinded.rs @@ -20,6 +20,17 @@ pub struct ProofBlindedProviderFactory { prefix_sets: Arc, } +impl ProofBlindedProviderFactory { + /// Create new proof-based blinded provider factory. 
+ pub const fn new( + trie_cursor_factory: T, + hashed_cursor_factory: H, + prefix_sets: Arc, + ) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets } + } +} + impl BlindedProviderFactory for ProofBlindedProviderFactory where T: TrieCursorFactory + Clone, @@ -57,6 +68,17 @@ pub struct ProofBlindedAccountProvider { prefix_sets: Arc, } +impl ProofBlindedAccountProvider { + /// Create new proof-based blinded account node provider. + pub const fn new( + trie_cursor_factory: T, + hashed_cursor_factory: H, + prefix_sets: Arc, + ) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets } + } +} + impl BlindedProvider for ProofBlindedAccountProvider where T: TrieCursorFactory + Clone, @@ -89,6 +111,18 @@ pub struct ProofBlindedStorageProvider { account: B256, } +impl ProofBlindedStorageProvider { + /// Create new proof-based blinded storage node provider. + pub const fn new( + trie_cursor_factory: T, + hashed_cursor_factory: H, + prefix_sets: Arc, + account: B256, + ) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets, account } + } +} + impl BlindedProvider for ProofBlindedStorageProvider where T: TrieCursorFactory + Clone, diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 46f85c4d82e..e8f5b8741a5 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -1,23 +1,25 @@ use crate::{ hashed_cursor::{HashedCursor, HashedCursorFactory}, prefix_set::TriePrefixSetsMut, - proof::{Proof, StorageProof}, + proof::{Proof, ProofBlindedProviderFactory}, trie_cursor::TrieCursorFactory, - HashedPostState, TRIE_ACCOUNT_RLP_MAX_SIZE, + HashedPostState, }; -use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{ keccak256, - map::{HashMap, HashSet}, + map::{Entry, HashMap, HashSet}, Bytes, B256, }; -use alloy_rlp::{BufMut, Decodable, Encodable}; -use itertools::{Either, Itertools}; -use reth_execution_errors::{StateProofError, TrieWitnessError}; -use 
reth_trie_common::{ - BranchNode, HashBuilder, Nibbles, StorageMultiProof, TrieAccount, TrieNode, CHILD_INDEX_RANGE, +use itertools::Itertools; +use reth_execution_errors::{ + SparseStateTrieError, SparseTrieError, StateProofError, TrieWitnessError, }; -use std::collections::BTreeMap; +use reth_trie_common::Nibbles; +use reth_trie_sparse::{ + blinded::{BlindedProvider, BlindedProviderFactory}, + SparseStateTrie, +}; +use std::sync::{mpsc, Arc}; /// State transition witness for the trie. #[derive(Debug)] @@ -90,107 +92,74 @@ where } let proof_targets = self.get_proof_targets(&state)?; - let mut account_multiproof = + let multiproof = Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) .with_prefix_sets_mut(self.prefix_sets.clone()) .multiproof(proof_targets.clone())?; - // Attempt to compute state root from proofs and gather additional - // information for the witness. - let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); - let mut account_trie_nodes = BTreeMap::default(); - for (hashed_address, hashed_slots) in proof_targets { - let storage_multiproof = account_multiproof - .storages - .remove(&hashed_address) - .unwrap_or_else(StorageMultiProof::empty); + // Record all nodes from multiproof in the witness + for account_node in multiproof.account_subtree.values() { + if let Entry::Vacant(entry) = self.witness.entry(keccak256(account_node.as_ref())) { + entry.insert(account_node.clone()); + } + } + for storage_node in multiproof.storages.values().flat_map(|s| s.subtree.values()) { + if let Entry::Vacant(entry) = self.witness.entry(keccak256(storage_node.as_ref())) { + entry.insert(storage_node.clone()); + } + } - // Gather and record account trie nodes. 
- let account = state - .accounts - .get(&hashed_address) - .ok_or(TrieWitnessError::MissingAccount(hashed_address))?; - let value = - (account.is_some() || storage_multiproof.root != EMPTY_ROOT_HASH).then(|| { - account_rlp.clear(); - TrieAccount::from((account.unwrap_or_default(), storage_multiproof.root)) - .encode(&mut account_rlp as &mut dyn BufMut); - account_rlp.clone() - }); - let key = Nibbles::unpack(hashed_address); - account_trie_nodes.extend(target_nodes( - key.clone(), - value, - Some(&mut self.witness), - account_multiproof - .account_subtree - .matching_nodes_iter(&key) - .sorted_by(|a, b| a.0.cmp(b.0)), - )?); + let (tx, rx) = mpsc::channel(); + let proof_provider_factory = ProofBlindedProviderFactory::new( + self.trie_cursor_factory, + self.hashed_cursor_factory, + Arc::new(self.prefix_sets), + ); + let mut sparse_trie = + SparseStateTrie::new(WitnessBlindedProviderFactory::new(proof_provider_factory, tx)); + sparse_trie.reveal_multiproof(proof_targets.clone(), multiproof)?; - // Gather and record storage trie nodes for this account. - let mut storage_trie_nodes = BTreeMap::default(); + // Attempt to update state trie to gather additional information for the witness. + for (hashed_address, hashed_slots) in + proof_targets.into_iter().sorted_unstable_by_key(|(ha, _)| *ha) + { + // Update storage trie first. 
let storage = state.storages.get(&hashed_address); - for hashed_slot in hashed_slots { - let slot_nibbles = Nibbles::unpack(hashed_slot); - let slot_value = storage + let storage_trie = sparse_trie + .storage_trie_mut(&hashed_address) + .ok_or(SparseStateTrieError::Sparse(SparseTrieError::Blind))?; + for hashed_slot in hashed_slots.into_iter().sorted_unstable() { + let storage_nibbles = Nibbles::unpack(hashed_slot); + let maybe_leaf_value = storage .and_then(|s| s.storage.get(&hashed_slot)) .filter(|v| !v.is_zero()) .map(|v| alloy_rlp::encode_fixed_size(v).to_vec()); - storage_trie_nodes.extend(target_nodes( - slot_nibbles.clone(), - slot_value, - Some(&mut self.witness), - storage_multiproof - .subtree - .matching_nodes_iter(&slot_nibbles) - .sorted_by(|a, b| a.0.cmp(b.0)), - )?); + + if let Some(value) = maybe_leaf_value { + storage_trie + .update_leaf(storage_nibbles, value) + .map_err(SparseStateTrieError::Sparse)?; + } else { + storage_trie + .remove_leaf(&storage_nibbles) + .map_err(SparseStateTrieError::Sparse)?; + } } - next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { - // Right pad the target with 0s. - let mut padded_key = key.pack(); - padded_key.resize(32, 0); - let target_key = B256::from_slice(&padded_key); - let storage_prefix_set = self - .prefix_sets - .storage_prefix_sets - .get(&hashed_address) - .cloned() - .unwrap_or_default(); - let proof = StorageProof::new_hashed( - self.trie_cursor_factory.clone(), - self.hashed_cursor_factory.clone(), - hashed_address, - ) - .with_prefix_set_mut(storage_prefix_set) - .storage_multiproof(HashSet::from_iter([target_key]))?; + // Calculate storage root after updates. + storage_trie.root(); - // The subtree only contains the proof for a single target. 
- let node = - proof.subtree.get(&key).ok_or(TrieWitnessError::MissingTargetNode(key))?; - self.witness.insert(keccak256(node.as_ref()), node.clone()); // record in witness - Ok(node.clone()) - })?; - } - - next_root_from_proofs(account_trie_nodes, |key: Nibbles| { - // Right pad the target with 0s. - let mut padded_key = key.pack(); - padded_key.resize(32, 0); - let targets = HashMap::from_iter([(B256::from_slice(&padded_key), HashSet::default())]); - let proof = - Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) - .with_prefix_sets_mut(self.prefix_sets.clone()) - .multiproof(targets)?; + let account = state + .accounts + .get(&hashed_address) + .ok_or(TrieWitnessError::MissingAccount(hashed_address))? + .unwrap_or_default(); + sparse_trie.update_account(hashed_address, account)?; - // The subtree only contains the proof for a single target. - let node = - proof.account_subtree.get(&key).ok_or(TrieWitnessError::MissingTargetNode(key))?; - self.witness.insert(keccak256(node.as_ref()), node.clone()); // record in witness - Ok(node.clone()) - })?; + while let Ok(node) = rx.try_recv() { + self.witness.insert(keccak256(&node), node); + } + } Ok(self.witness) } @@ -225,141 +194,65 @@ where } } -/// Decodes and unrolls all nodes from the proof. Returns only sibling nodes -/// in the path of the target and the final leaf node with updated value. -pub fn target_nodes<'b>( - key: Nibbles, - value: Option>, - mut witness: Option<&mut HashMap>, - proof: impl IntoIterator, -) -> Result>>, TrieWitnessError> { - let mut trie_nodes = BTreeMap::default(); - let mut proof_iter = proof.into_iter().enumerate().peekable(); - while let Some((idx, (path, encoded))) = proof_iter.next() { - // Record the node in witness. - if let Some(witness) = witness.as_mut() { - witness.insert(keccak256(encoded.as_ref()), encoded.clone()); - } +#[derive(Debug)] +struct WitnessBlindedProviderFactory { + /// Blinded node provider factory. 
+ provider_factory: F, + /// Sender for forwarding fetched blinded node. + tx: mpsc::Sender, +} - let mut next_path = path.clone(); - match TrieNode::decode(&mut &encoded[..])? { - TrieNode::Branch(branch) => { - next_path.push(key[path.len()]); - let children = branch_node_children(path.clone(), &branch); - for (child_path, value) in children { - if !key.starts_with(&child_path) { - let value = if value.len() < B256::len_bytes() { - Either::Right(value.to_vec()) - } else { - Either::Left(B256::from_slice(&value[1..])) - }; - trie_nodes.insert(child_path, value); - } - } - } - TrieNode::Extension(extension) => { - next_path.extend_from_slice(&extension.key); - } - TrieNode::Leaf(leaf) => { - next_path.extend_from_slice(&leaf.key); - if next_path != key { - trie_nodes - .insert(next_path.clone(), Either::Right(leaf.value.as_slice().to_vec())); - } - } - TrieNode::EmptyRoot => { - if idx != 0 || proof_iter.peek().is_some() { - return Err(TrieWitnessError::UnexpectedEmptyRoot(next_path)) - } - } - }; +impl WitnessBlindedProviderFactory { + const fn new(provider_factory: F, tx: mpsc::Sender) -> Self { + Self { provider_factory, tx } } +} - if let Some(value) = value { - trie_nodes.insert(key, Either::Right(value)); +impl BlindedProviderFactory for WitnessBlindedProviderFactory +where + F: BlindedProviderFactory, + F::AccountNodeProvider: BlindedProvider, + F::StorageNodeProvider: BlindedProvider, +{ + type AccountNodeProvider = WitnessBlindedProvider; + type StorageNodeProvider = WitnessBlindedProvider; + + fn account_node_provider(&self) -> Self::AccountNodeProvider { + let provider = self.provider_factory.account_node_provider(); + WitnessBlindedProvider::new(provider, self.tx.clone()) } - Ok(trie_nodes) + fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { + let provider = self.provider_factory.storage_node_provider(account); + WitnessBlindedProvider::new(provider, self.tx.clone()) + } } -/// Computes the next root hash of a trie by 
processing a set of trie nodes and -/// their provided values. -pub fn next_root_from_proofs( - trie_nodes: BTreeMap>>, - mut trie_node_provider: impl FnMut(Nibbles) -> Result, -) -> Result { - // Ignore branch child hashes in the path of leaves or lower child hashes. - let mut keys = trie_nodes.keys().peekable(); - let mut ignored = HashSet::::default(); - while let Some(key) = keys.next() { - if keys.peek().is_some_and(|next| next.starts_with(key)) { - ignored.insert(key.clone()); - } - } +#[derive(Debug)] +struct WitnessBlindedProvider

{ + /// Proof-based blinded. + provider: P, + /// Sender for forwarding fetched blinded node. + tx: mpsc::Sender, +} - let mut hash_builder = HashBuilder::default(); - let mut trie_nodes = trie_nodes.into_iter().filter(|e| !ignored.contains(&e.0)).peekable(); - while let Some((path, value)) = trie_nodes.next() { - match value { - Either::Left(branch_hash) => { - let parent_branch_path = path.slice(..path.len() - 1); - if hash_builder.key.starts_with(&parent_branch_path) || - trie_nodes.peek().is_some_and(|next| next.0.starts_with(&parent_branch_path)) - { - hash_builder.add_branch(path, branch_hash, false); - } else { - // Parent is a branch node that needs to be turned into an extension node. - let mut path = path.clone(); - loop { - let node = trie_node_provider(path.clone())?; - match TrieNode::decode(&mut &node[..])? { - TrieNode::Branch(branch) => { - let children = branch_node_children(path, &branch); - for (child_path, value) in children { - if value.len() < B256::len_bytes() { - hash_builder.add_leaf(child_path, value); - } else { - let hash = B256::from_slice(&value[1..]); - hash_builder.add_branch(child_path, hash, false); - } - } - break - } - TrieNode::Leaf(leaf) => { - let mut child_path = path; - child_path.extend_from_slice(&leaf.key); - hash_builder.add_leaf(child_path, &leaf.value); - break - } - TrieNode::Extension(ext) => { - path.extend_from_slice(&ext.key); - } - TrieNode::EmptyRoot => { - return Err(TrieWitnessError::UnexpectedEmptyRoot(path)) - } - } - } - } - } - Either::Right(leaf_value) => { - hash_builder.add_leaf(path, &leaf_value); - } - } +impl

WitnessBlindedProvider

{ + const fn new(provider: P, tx: mpsc::Sender) -> Self { + Self { provider, tx } } - Ok(hash_builder.root()) } -/// Returned branch node children with keys in order. -fn branch_node_children(prefix: Nibbles, node: &BranchNode) -> Vec<(Nibbles, &[u8])> { - let mut children = Vec::with_capacity(node.state_mask.count_ones() as usize); - let mut stack_ptr = node.as_ref().first_child_index(); - for index in CHILD_INDEX_RANGE { - if node.state_mask.is_bit_set(index) { - let mut child_path = prefix.clone(); - child_path.push(index); - children.push((child_path, &node.stack[stack_ptr][..])); - stack_ptr += 1; +impl

BlindedProvider for WitnessBlindedProvider

+where + P: BlindedProvider, +{ + type Error = P::Error; + + fn blinded_node(&mut self, path: Nibbles) -> Result, Self::Error> { + let maybe_node = self.provider.blinded_node(path)?; + if let Some(node) = &maybe_node { + self.tx.send(node.clone()).map_err(|error| SparseTrieError::Other(Box::new(error)))?; } + Ok(maybe_node) } - children } From 0daa456f3a93ccb7d02dd6e5d617eaef7d1ea466 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 4 Dec 2024 17:56:47 +0000 Subject: [PATCH 877/970] chore: add container question to bug template (#13134) --- .github/ISSUE_TEMPLATE/bug.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 1142a5bf251..b3e50defe16 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -60,6 +60,18 @@ body: - Mac (Apple Silicon) - Windows (x86) - Windows (ARM) + - type: dropdown + id: container_type + attributes: + label: Container Type + description: Were you running it in a container? 
+ multiple: true + options: + - None + - Docker + - Kubernetes + - LXC/LXD + - Other validations: required: true - type: textarea From fbd2d6eeda99eeff7eb0d708c6598384e2c028d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Wed, 4 Dec 2024 19:27:58 +0100 Subject: [PATCH 878/970] refactor: split `NetworkEventListenerProvider` (#12972) --- Cargo.lock | 5 +- crates/e2e-test-utils/Cargo.toml | 2 +- crates/e2e-test-utils/src/network.rs | 9 +- crates/net/network-api/src/events.rs | 154 +++++++++----- crates/net/network/src/manager.rs | 21 +- crates/net/network/src/network.rs | 13 ++ crates/net/network/src/test_utils/testnet.rs | 28 ++- crates/net/network/src/transactions/mod.rs | 212 ++++++++----------- crates/net/network/tests/it/connect.rs | 28 ++- crates/net/network/tests/it/session.rs | 15 +- crates/net/network/tests/it/txgossip.rs | 11 +- docs/crates/network.md | 87 ++++---- examples/bsc-p2p/src/main.rs | 10 +- examples/polygon-p2p/Cargo.toml | 1 + examples/polygon-p2p/src/main.rs | 4 +- 15 files changed, 346 insertions(+), 254 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b10c4d55d1c..ea4ffec0dbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3067,6 +3067,7 @@ dependencies = [ "reth-chainspec", "reth-discv4", "reth-network", + "reth-network-api", "reth-primitives", "reth-tracing", "secp256k1", @@ -9560,9 +9561,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41bbeb6004cc4ed48d27756f0479011df91a6f5642a3abab9309eda5ce67c4ad" +checksum = "0b7f5f8a2deafb3c76f357bbf9e71b73bddb915c4994bbbe3208fbfbe8fc7f8e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index bedacbecd75..7cb8516816b 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -14,6 +14,7 
@@ workspace = true reth-chainspec.workspace = true reth-tracing.workspace = true reth-db = { workspace = true, features = ["test-utils"] } +reth-network-api.workspace = true reth-rpc-layer.workspace = true reth-rpc-server-types.workspace = true reth-rpc-eth-api.workspace = true @@ -23,7 +24,6 @@ reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-primitives.workspace = true reth-provider.workspace = true -reth-network-api.workspace = true reth-network.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 2efc8d47f2d..ce9d0b94612 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -1,6 +1,7 @@ use futures_util::StreamExt; use reth_network_api::{ - test_utils::PeersHandleProvider, NetworkEvent, NetworkEventListenerProvider, PeersInfo, + events::PeerEvent, test_utils::PeersHandleProvider, NetworkEvent, NetworkEventListenerProvider, + PeersInfo, }; use reth_network_peers::{NodeRecord, PeerId}; use reth_tokio_util::EventStream; @@ -28,7 +29,7 @@ where self.network.peers_handle().add_peer(node_record.id, node_record.tcp_addr()); match self.network_events.next().await { - Some(NetworkEvent::PeerAdded(_)) => (), + Some(NetworkEvent::Peer(PeerEvent::PeerAdded(_))) => (), ev => panic!("Expected a peer added event, got: {ev:?}"), } } @@ -42,7 +43,9 @@ where pub async fn next_session_established(&mut self) -> Option { while let Some(ev) = self.network_events.next().await { match ev { - NetworkEvent::SessionEstablished { peer_id, .. } => { + NetworkEvent::ActivePeerSession { info, .. 
} | + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { + let peer_id = info.peer_id; info!("Session established with peer: {:?}", peer_id); return Some(peer_id) } diff --git a/crates/net/network-api/src/events.rs b/crates/net/network-api/src/events.rs index 624c43f5e1b..e17cedef11f 100644 --- a/crates/net/network-api/src/events.rs +++ b/crates/net/network-api/src/events.rs @@ -1,7 +1,5 @@ //! API related to listening for network events. -use std::{fmt, net::SocketAddr, sync::Arc}; - use reth_eth_wire_types::{ message::RequestPair, BlockBodies, BlockHeaders, Capabilities, DisconnectReason, EthMessage, EthNetworkPrimitives, EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData, @@ -13,26 +11,70 @@ use reth_network_p2p::error::{RequestError, RequestResult}; use reth_network_peers::PeerId; use reth_network_types::PeerAddr; use reth_tokio_util::EventStream; +use std::{ + fmt, + net::SocketAddr, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; use tokio::sync::{mpsc, oneshot}; -use tokio_stream::wrappers::UnboundedReceiverStream; +use tokio_stream::{wrappers::UnboundedReceiverStream, Stream, StreamExt}; -/// Provides event subscription for the network. -#[auto_impl::auto_impl(&, Arc)] -pub trait NetworkEventListenerProvider: Send + Sync { - /// Creates a new [`NetworkEvent`] listener channel. - fn event_listener(&self) -> EventStream; - /// Returns a new [`DiscoveryEvent`] stream. - /// - /// This stream yields [`DiscoveryEvent`]s for each peer that is discovered. - fn discovery_listener(&self) -> UnboundedReceiverStream; +/// A boxed stream of network peer events that provides a type-erased interface. 
+pub struct PeerEventStream(Pin + Send + Sync>>); + +impl fmt::Debug for PeerEventStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("PeerEventStream").finish_non_exhaustive() + } +} + +impl PeerEventStream { + /// Create a new stream [`PeerEventStream`] by converting the provided stream's items into peer + /// events [`PeerEvent`] + pub fn new(stream: S) -> Self + where + S: Stream + Send + Sync + 'static, + T: Into + 'static, + { + let mapped_stream = stream.map(Into::into); + Self(Box::pin(mapped_stream)) + } } -/// (Non-exhaustive) Events emitted by the network that are of interest for subscribers. +impl Stream for PeerEventStream { + type Item = PeerEvent; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.0.as_mut().poll_next(cx) + } +} + +/// Represents information about an established peer session. +#[derive(Debug, Clone)] +pub struct SessionInfo { + /// The identifier of the peer to which a session was established. + pub peer_id: PeerId, + /// The remote addr of the peer to which a session was established. + pub remote_addr: SocketAddr, + /// The client version of the peer to which a session was established. + pub client_version: Arc, + /// Capabilities the peer announced. + pub capabilities: Arc, + /// The status of the peer to which a session was established. + pub status: Arc, + /// Negotiated eth version of the session. + pub version: EthVersion, +} + +/// (Non-exhaustive) List of the different events emitted by the network that are of interest for +/// subscribers. /// /// This includes any event types that may be relevant to tasks, for metrics, keep track of peers /// etc. -#[derive(Debug)] -pub enum NetworkEvent { +#[derive(Debug, Clone)] +pub enum PeerEvent { /// Closed the peer session. SessionClosed { /// The identifier of the peer to which a session was closed. @@ -41,57 +83,65 @@ pub enum NetworkEvent { reason: Option, }, /// Established a new session with the given peer. 
- SessionEstablished { - /// The identifier of the peer to which a session was established. - peer_id: PeerId, - /// The remote addr of the peer to which a session was established. - remote_addr: SocketAddr, - /// The client version of the peer to which a session was established. - client_version: Arc, - /// Capabilities the peer announced - capabilities: Arc, - /// A request channel to the session task. - messages: PeerRequestSender, - /// The status of the peer to which a session was established. - status: Arc, - /// negotiated eth version of the session - version: EthVersion, - }, + SessionEstablished(SessionInfo), /// Event emitted when a new peer is added PeerAdded(PeerId), /// Event emitted when a new peer is removed PeerRemoved(PeerId), } +/// (Non-exhaustive) Network events representing peer lifecycle events and session requests. +#[derive(Debug)] +pub enum NetworkEvent { + /// Basic peer lifecycle event. + Peer(PeerEvent), + /// Session established with requests. + ActivePeerSession { + /// Session information + info: SessionInfo, + /// A request channel to the session task. 
+ messages: PeerRequestSender, + }, +} + impl Clone for NetworkEvent { fn clone(&self) -> Self { match self { - Self::SessionClosed { peer_id, reason } => { - Self::SessionClosed { peer_id: *peer_id, reason: *reason } + Self::Peer(event) => Self::Peer(event.clone()), + Self::ActivePeerSession { info, messages } => { + Self::ActivePeerSession { info: info.clone(), messages: messages.clone() } } - Self::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - } => Self::SessionEstablished { - peer_id: *peer_id, - remote_addr: *remote_addr, - client_version: client_version.clone(), - capabilities: capabilities.clone(), - messages: messages.clone(), - status: status.clone(), - version: *version, - }, - Self::PeerAdded(peer) => Self::PeerAdded(*peer), - Self::PeerRemoved(peer) => Self::PeerRemoved(*peer), } } } +impl From> for PeerEvent { + fn from(event: NetworkEvent) -> Self { + match event { + NetworkEvent::Peer(peer_event) => peer_event, + NetworkEvent::ActivePeerSession { info, .. } => Self::SessionEstablished(info), + } + } +} + +/// Provides peer event subscription for the network. +#[auto_impl::auto_impl(&, Arc)] +pub trait NetworkPeersEvents: Send + Sync { + /// Creates a new peer event listener stream. + fn peer_events(&self) -> PeerEventStream; +} + +/// Provides event subscription for the network. +#[auto_impl::auto_impl(&, Arc)] +pub trait NetworkEventListenerProvider: NetworkPeersEvents { + /// Creates a new [`NetworkEvent`] listener channel. + fn event_listener(&self) -> EventStream>; + /// Returns a new [`DiscoveryEvent`] stream. + /// + /// This stream yields [`DiscoveryEvent`]s for each peer that is discovered. + fn discovery_listener(&self) -> UnboundedReceiverStream; +} + /// Events produced by the `Discovery` manager. 
#[derive(Debug, Clone, PartialEq, Eq)] pub enum DiscoveryEvent { diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index bad6ecba5fa..89e21b9dd2d 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -44,7 +44,9 @@ use reth_eth_wire::{ use reth_fs_util::{self as fs, FsPathError}; use reth_metrics::common::mpsc::UnboundedMeteredSender; use reth_network_api::{ - test_utils::PeersHandle, EthProtocolInfo, NetworkEvent, NetworkStatus, PeerInfo, PeerRequest, + events::{PeerEvent, SessionInfo}, + test_utils::PeersHandle, + EthProtocolInfo, NetworkEvent, NetworkStatus, PeerInfo, PeerRequest, }; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::ReputationChangeKind; @@ -712,24 +714,26 @@ impl NetworkManager { self.update_active_connection_metrics(); - self.event_sender.notify(NetworkEvent::SessionEstablished { + let session_info = SessionInfo { peer_id, remote_addr, client_version, capabilities, - version, status, - messages, - }); + version, + }; + + self.event_sender + .notify(NetworkEvent::ActivePeerSession { info: session_info, messages }); } SwarmEvent::PeerAdded(peer_id) => { trace!(target: "net", ?peer_id, "Peer added"); - self.event_sender.notify(NetworkEvent::PeerAdded(peer_id)); + self.event_sender.notify(NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id))); self.metrics.tracked_peers.set(self.swarm.state().peers().num_known_peers() as f64); } SwarmEvent::PeerRemoved(peer_id) => { trace!(target: "net", ?peer_id, "Peer dropped"); - self.event_sender.notify(NetworkEvent::PeerRemoved(peer_id)); + self.event_sender.notify(NetworkEvent::Peer(PeerEvent::PeerRemoved(peer_id))); self.metrics.tracked_peers.set(self.swarm.state().peers().num_known_peers() as f64); } SwarmEvent::SessionClosed { peer_id, remote_addr, error } => { @@ -772,7 +776,8 @@ impl NetworkManager { .saturating_sub(1) as f64, ); - self.event_sender.notify(NetworkEvent::SessionClosed { peer_id, reason }); + 
self.event_sender + .notify(NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason })); } SwarmEvent::IncomingPendingSessionClosed { remote_addr, error } => { trace!( diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 7e0b000cf34..225b6332e0e 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -4,6 +4,7 @@ use crate::{ }; use alloy_primitives::B256; use enr::Enr; +use futures::StreamExt; use parking_lot::Mutex; use reth_discv4::{Discv4, NatResolver}; use reth_discv5::Discv5; @@ -13,6 +14,7 @@ use reth_eth_wire::{ }; use reth_ethereum_forks::Head; use reth_network_api::{ + events::{NetworkPeersEvents, PeerEvent, PeerEventStream}, test_utils::{PeersHandle, PeersHandleProvider}, BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerInfo, PeerRequest, Peers, @@ -192,6 +194,17 @@ impl NetworkHandle { // === API Implementations === +impl NetworkPeersEvents for NetworkHandle { + /// Returns an event stream of peer-specific network events. + fn peer_events(&self) -> PeerEventStream { + let peer_events = self.inner.event_sender.new_listener().map(|event| match event { + NetworkEvent::Peer(peer_event) => peer_event, + NetworkEvent::ActivePeerSession { info, .. 
} => PeerEvent::SessionEstablished(info), + }); + PeerEventStream::new(peer_events) + } +} + impl NetworkEventListenerProvider for NetworkHandle { fn event_listener(&self) -> EventStream>> { self.inner.event_sender.new_listener() diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 08bf24b8853..a27df7e7202 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -13,6 +13,7 @@ use pin_project::pin_project; use reth_chainspec::{Hardforks, MAINNET}; use reth_eth_wire::{protocol::Protocol, DisconnectReason, HelloMessageWithProtocols}; use reth_network_api::{ + events::{PeerEvent, SessionInfo}, test_utils::{PeersHandle, PeersHandleProvider}, NetworkEvent, NetworkEventListenerProvider, NetworkInfo, Peers, }; @@ -641,7 +642,9 @@ impl NetworkEventStream { pub async fn next_session_closed(&mut self) -> Option<(PeerId, Option)> { while let Some(ev) = self.inner.next().await { match ev { - NetworkEvent::SessionClosed { peer_id, reason } => return Some((peer_id, reason)), + NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason }) => { + return Some((peer_id, reason)) + } _ => continue, } } @@ -652,7 +655,10 @@ impl NetworkEventStream { pub async fn next_session_established(&mut self) -> Option { while let Some(ev) = self.inner.next().await { match ev { - NetworkEvent::SessionEstablished { peer_id, .. } => return Some(peer_id), + NetworkEvent::ActivePeerSession { info, .. } | + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { + return Some(info.peer_id) + } _ => continue, } } @@ -667,7 +673,7 @@ impl NetworkEventStream { let mut peers = Vec::with_capacity(num); while let Some(ev) = self.inner.next().await { match ev { - NetworkEvent::SessionEstablished { peer_id, .. } => { + NetworkEvent::ActivePeerSession { info: SessionInfo { peer_id, .. }, .. 
} => { peers.push(peer_id); num -= 1; if num == 0 { @@ -680,18 +686,24 @@ impl NetworkEventStream { peers } - /// Ensures that the first two events are a [`NetworkEvent::PeerAdded`] and - /// [`NetworkEvent::SessionEstablished`], returning the [`PeerId`] of the established + /// Ensures that the first two events are a [`NetworkEvent::Peer(PeerEvent::PeerAdded`] and + /// [`NetworkEvent::ActivePeerSession`], returning the [`PeerId`] of the established /// session. pub async fn peer_added_and_established(&mut self) -> Option { let peer_id = match self.inner.next().await { - Some(NetworkEvent::PeerAdded(peer_id)) => peer_id, + Some(NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id))) => peer_id, _ => return None, }; match self.inner.next().await { - Some(NetworkEvent::SessionEstablished { peer_id: peer_id2, .. }) => { - debug_assert_eq!(peer_id, peer_id2, "PeerAdded peer_id {peer_id} does not match SessionEstablished peer_id {peer_id2}"); + Some(NetworkEvent::ActivePeerSession { + info: SessionInfo { peer_id: peer_id2, .. }, + .. + }) => { + debug_assert_eq!( + peer_id, peer_id2, + "PeerAdded peer_id {peer_id} does not match SessionEstablished peer_id {peer_id2}" + ); Some(peer_id) } _ => None, diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index a1097dacf55..2e6e2f08b65 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -40,6 +40,7 @@ use reth_eth_wire::{ }; use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{ + events::{PeerEvent, SessionInfo}, NetworkEvent, NetworkEventListenerProvider, PeerRequest, PeerRequestSender, Peers, }; use reth_network_p2p::{ @@ -1050,55 +1051,81 @@ where } } + /// Handles session establishment and peer transactions initialization. + fn handle_peer_session( + &mut self, + info: SessionInfo, + messages: PeerRequestSender>, + ) { + let SessionInfo { peer_id, client_version, version, .. 
} = info; + + // Insert a new peer into the peerset. + let peer = PeerMetadata::::new( + messages, + version, + client_version, + self.config.max_transactions_seen_by_peer_history, + ); + let peer = match self.peers.entry(peer_id) { + Entry::Occupied(mut entry) => { + entry.insert(peer); + entry.into_mut() + } + Entry::Vacant(entry) => entry.insert(peer), + }; + + // Send a `NewPooledTransactionHashes` to the peer with up to + // `SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE` + // transactions in the pool. + if self.network.is_initially_syncing() || self.network.tx_gossip_disabled() { + trace!(target: "net::tx", ?peer_id, "Skipping transaction broadcast: node syncing or gossip disabled"); + return + } + + // Get transactions to broadcast + let pooled_txs = self.pool.pooled_transactions_max( + SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE, + ); + if pooled_txs.is_empty() { + trace!(target: "net::tx", ?peer_id, "No transactions in the pool to broadcast"); + return; + } + + // Build and send transaction hashes message + let mut msg_builder = PooledTransactionsHashesBuilder::new(version); + for pooled_tx in pooled_txs { + peer.seen_transactions.insert(*pooled_tx.hash()); + msg_builder.push_pooled(pooled_tx); + } + + debug!(target: "net::tx", ?peer_id, tx_count = msg_builder.is_empty(), "Broadcasting transaction hashes"); + let msg = msg_builder.build(); + self.network.send_transactions_hashes(peer_id, msg); + } + /// Handles a received event related to common network events. fn on_network_event(&mut self, event_result: NetworkEvent>) { match event_result { - NetworkEvent::SessionClosed { peer_id, .. } => { + NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. }) => { // remove the peer self.peers.remove(&peer_id); self.transaction_fetcher.remove_peer(&peer_id); } - NetworkEvent::SessionEstablished { - peer_id, client_version, messages, version, .. - } => { - // Insert a new peer into the peerset. 
- let peer = PeerMetadata::new( - messages, - version, - client_version, - self.config.max_transactions_seen_by_peer_history, - ); - let peer = match self.peers.entry(peer_id) { - Entry::Occupied(mut entry) => { - entry.insert(peer); - entry.into_mut() + NetworkEvent::ActivePeerSession { info, messages } => { + // process active peer session and broadcast available transaction from the pool + self.handle_peer_session(info, messages); + } + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { + let peer_id = info.peer_id; + // get messages from existing peer + let messages = match self.peers.get(&peer_id) { + Some(p) => p.request_tx.clone(), + None => { + debug!(target: "net::tx", ?peer_id, "No peer request sender found"); + return; } - Entry::Vacant(entry) => entry.insert(peer), }; - - // Send a `NewPooledTransactionHashes` to the peer with up to - // `SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE` - // transactions in the pool. - if self.network.is_initially_syncing() || self.network.tx_gossip_disabled() { - return - } - - let pooled_txs = self.pool.pooled_transactions_max( - SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE, - ); - if pooled_txs.is_empty() { - // do not send a message if there are no transactions in the pool - return - } - - let mut msg_builder = PooledTransactionsHashesBuilder::new(version); - for pooled_tx in pooled_txs { - peer.seen_transactions.insert(*pooled_tx.hash()); - msg_builder.push_pooled(pooled_tx); - } - - let msg = msg_builder.build(); - self.network.send_transactions_hashes(peer_id, msg); + self.handle_peer_session(info, messages); } _ => {} } @@ -1987,27 +2014,12 @@ mod tests { let mut established = listener0.take(2); while let Some(ev) = established.next().await { match ev { - NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - } => { + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { // to 
insert a new peer in transactions peerset - transactions.on_network_event(NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - }) + transactions + .on_network_event(NetworkEvent::Peer(PeerEvent::SessionEstablished(info))) } - NetworkEvent::PeerAdded(_peer_id) => continue, + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, ev => { error!("unexpected event {ev:?}") } @@ -2073,28 +2085,13 @@ mod tests { let mut established = listener0.take(2); while let Some(ev) = established.next().await { match ev { - NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - } => { + NetworkEvent::ActivePeerSession { .. } | + NetworkEvent::Peer(PeerEvent::SessionEstablished(_)) => { // to insert a new peer in transactions peerset - transactions.on_network_event(NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - }) + transactions.on_network_event(ev); } - NetworkEvent::PeerAdded(_peer_id) => continue, - ev => { + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, + _ => { error!("unexpected event {ev:?}") } } @@ -2157,27 +2154,12 @@ mod tests { let mut established = listener0.take(2); while let Some(ev) = established.next().await { match ev { - NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - } => { + NetworkEvent::ActivePeerSession { .. 
} | + NetworkEvent::Peer(PeerEvent::SessionEstablished(_)) => { // to insert a new peer in transactions peerset - transactions.on_network_event(NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - }) + transactions.on_network_event(ev); } - NetworkEvent::PeerAdded(_peer_id) => continue, + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, ev => { error!("unexpected event {ev:?}") } @@ -2248,24 +2230,11 @@ mod tests { let mut established = listener0.take(2); while let Some(ev) = established.next().await { match ev { - NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - } => transactions.on_network_event(NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - }), - NetworkEvent::PeerAdded(_peer_id) => continue, + NetworkEvent::ActivePeerSession { .. } | + NetworkEvent::Peer(PeerEvent::SessionEstablished(_)) => { + transactions.on_network_event(ev); + } + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, ev => { error!("unexpected event {ev:?}") } @@ -2495,17 +2464,18 @@ mod tests { network.handle().update_sync_state(SyncState::Idle); // mock a peer - let (tx, _rx) = mpsc::channel(1); - tx_manager.on_network_event(NetworkEvent::SessionEstablished { + let (tx, _rx) = mpsc::channel::(1); + let session_info = SessionInfo { peer_id, remote_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), client_version: Arc::from(""), capabilities: Arc::new(vec![].into()), - messages: PeerRequestSender::new(peer_id, tx), status: Arc::new(Default::default()), version: EthVersion::Eth68, - }); - + }; + let messages: PeerRequestSender = PeerRequestSender::new(peer_id, tx); + tx_manager + .on_network_event(NetworkEvent::ActivePeerSession { info: session_info, messages }); let mut propagate = vec![]; let mut factory = 
MockTransactionFactory::default(); let eip1559_tx = Arc::new(factory.create_eip1559()); diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index 0a17cbd563e..77044f4b72d 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -15,7 +15,10 @@ use reth_network::{ BlockDownloaderProvider, NetworkConfigBuilder, NetworkEvent, NetworkEventListenerProvider, NetworkManager, PeersConfig, }; -use reth_network_api::{NetworkInfo, Peers, PeersInfo}; +use reth_network_api::{ + events::{PeerEvent, SessionInfo}, + NetworkInfo, Peers, PeersInfo, +}; use reth_network_p2p::{ headers::client::{HeadersClient, HeadersRequest}, sync::{NetworkSyncUpdater, SyncState}, @@ -59,13 +62,15 @@ async fn test_establish_connections() { let mut established = listener0.take(4); while let Some(ev) = established.next().await { match ev { - NetworkEvent::SessionClosed { .. } | NetworkEvent::PeerRemoved(_) => { + NetworkEvent::Peer(PeerEvent::SessionClosed { .. } | PeerEvent::PeerRemoved(_)) => { panic!("unexpected event") } - NetworkEvent::SessionEstablished { peer_id, .. } => { - assert!(expected_connections.remove(&peer_id)) + NetworkEvent::ActivePeerSession { info, .. } | + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { + let SessionInfo { peer_id, .. } = info; + assert!(expected_connections.remove(&peer_id)); } - NetworkEvent::PeerAdded(peer_id) => { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { assert!(expected_peers.remove(&peer_id)) } } @@ -496,11 +501,16 @@ async fn test_geth_disconnect() { handle.add_peer(geth_peer_id, geth_socket); match events.next().await { - Some(NetworkEvent::PeerAdded(peer_id)) => assert_eq!(peer_id, geth_peer_id), + Some(NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id))) => { + assert_eq!(peer_id, geth_peer_id) + } _ => panic!("Expected a peer added event"), } - if let Some(NetworkEvent::SessionEstablished { peer_id, .. 
}) = events.next().await { + if let Some(NetworkEvent::Peer(PeerEvent::SessionEstablished(session_info))) = + events.next().await + { + let SessionInfo { peer_id, .. } = session_info; assert_eq!(peer_id, geth_peer_id); } else { panic!("Expected a session established event"); @@ -510,7 +520,9 @@ async fn test_geth_disconnect() { handle.disconnect_peer(geth_peer_id); // wait for a disconnect from geth - if let Some(NetworkEvent::SessionClosed { peer_id, .. }) = events.next().await { + if let Some(NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. })) = + events.next().await + { assert_eq!(peer_id, geth_peer_id); } else { panic!("Expected a session closed event"); diff --git a/crates/net/network/tests/it/session.rs b/crates/net/network/tests/it/session.rs index 3f74db3d37f..71152c29bb8 100644 --- a/crates/net/network/tests/it/session.rs +++ b/crates/net/network/tests/it/session.rs @@ -6,7 +6,10 @@ use reth_network::{ test_utils::{PeerConfig, Testnet}, NetworkEvent, NetworkEventListenerProvider, }; -use reth_network_api::{NetworkInfo, Peers}; +use reth_network_api::{ + events::{PeerEvent, SessionInfo}, + NetworkInfo, Peers, +}; use reth_provider::test_utils::NoopProvider; #[tokio::test(flavor = "multi_thread")] @@ -28,10 +31,11 @@ async fn test_session_established_with_highest_version() { while let Some(event) = events.next().await { match event { - NetworkEvent::PeerAdded(peer_id) => { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { assert_eq!(handle1.peer_id(), &peer_id); } - NetworkEvent::SessionEstablished { peer_id, status, .. } => { + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { peer_id, status, .. 
} = info; assert_eq!(handle1.peer_id(), &peer_id); assert_eq!(status.version, EthVersion::Eth68); } @@ -66,10 +70,11 @@ async fn test_session_established_with_different_capability() { while let Some(event) = events.next().await { match event { - NetworkEvent::PeerAdded(peer_id) => { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { assert_eq!(handle1.peer_id(), &peer_id); } - NetworkEvent::SessionEstablished { peer_id, status, .. } => { + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { peer_id, status, .. } = info; assert_eq!(handle1.peer_id(), &peer_id); assert_eq!(status.version, EthVersion::Eth66); } diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index ebde61ef8ea..c9911885ad8 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -7,7 +7,7 @@ use alloy_primitives::{PrimitiveSignature as Signature, U256}; use futures::StreamExt; use rand::thread_rng; use reth_network::{test_utils::Testnet, NetworkEvent, NetworkEventListenerProvider}; -use reth_network_api::PeersInfo; +use reth_network_api::{events::PeerEvent, PeersInfo}; use reth_primitives::TransactionSigned; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; @@ -139,16 +139,17 @@ async fn test_sending_invalid_transactions() { // await disconnect for bad tx spam if let Some(ev) = peer1_events.next().await { match ev { - NetworkEvent::SessionClosed { peer_id, .. } => { + NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. }) => { assert_eq!(peer_id, *peer0.peer_id()); } - NetworkEvent::SessionEstablished { .. } => { + NetworkEvent::ActivePeerSession { .. } | + NetworkEvent::Peer(PeerEvent::SessionEstablished { .. 
}) => { panic!("unexpected SessionEstablished event") } - NetworkEvent::PeerAdded(_) => { + NetworkEvent::Peer(PeerEvent::PeerAdded(_)) => { panic!("unexpected PeerAdded event") } - NetworkEvent::PeerRemoved(_) => { + NetworkEvent::Peer(PeerEvent::PeerRemoved(_)) => { panic!("unexpected PeerRemoved event") } } diff --git a/docs/crates/network.md b/docs/crates/network.md index be2c7cb3b14..7e38ac5d601 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -787,8 +787,24 @@ The `TransactionsManager.network_events` stream is the first to have all of its The events received in this channel are of type `NetworkEvent`: [File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/manager.rs) + +```rust,ignore +pub enum NetworkEvent { + /// Basic peer lifecycle event. + Peer(PeerEvent), + /// Session established with requests. + ActivePeerSession { + /// Session information + info: SessionInfo, + /// A request channel to the session task. + messages: PeerRequestSender, + }, +} +``` + +and with ```rust,ignore -pub enum NetworkEvent { +pub enum PeerEvent { /// Closed the peer session. SessionClosed { /// The identifier of the peer to which a session was closed. @@ -797,29 +813,29 @@ pub enum NetworkEvent { reason: Option, }, /// Established a new session with the given peer. - SessionEstablished { - /// The identifier of the peer to which a session was established. - peer_id: PeerId, - /// Capabilities the peer announced - capabilities: Arc, - /// A request channel to the session task. - messages: PeerRequestSender, - /// The status of the peer to which a session was established. 
- status: Status, - }, + SessionEstablished(SessionInfo), /// Event emitted when a new peer is added PeerAdded(PeerId), /// Event emitted when a new peer is removed PeerRemoved(PeerId), } ``` +[File: crates/net/network-api/src/events.rs](https://github.com/paradigmxyz/reth/blob/c46b5fc1157d12184d1dceb4dc45e26cf74b2bc6/crates/net/network-api/src/events.rs) -They're handled with the `on_network_event` method, which responds to the two variants of the `NetworkEvent` enum in the following ways: +They're handled with the `on_network_event` method, which processes session events through both `NetworkEvent::Peer(PeerEvent::SessionClosed)`, `NetworkEvent::Peer(PeerEvent::SessionEstablished)`, and `NetworkEvent::ActivePeerSession` for initializing peer connections and transaction broadcasting. -**`NetworkEvent::SessionClosed`** +Variants of the `PeerEvent` enum are defined in the following ways: + +**`PeerEvent::PeerAdded`** +Adds a peer to the network node via network handle + +**`PeerEvent::PeerRemoved`** Removes the peer given by `NetworkEvent::SessionClosed.peer_id` from the `TransactionsManager.peers` map. -**`NetworkEvent::SessionEstablished`** +**`PeerEvent::SessionClosed`** +Closes the peer session after disconnection + +**`PeerEvent::SessionEstablished`** Begins by inserting a `Peer` into `TransactionsManager.peers` by `peer_id`, which is a struct of the following form: [File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) @@ -840,33 +856,30 @@ After the `Peer` is added to `TransactionsManager.peers`, the hashes of all of t [File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore -fn on_network_event(&mut self, event: NetworkEvent) { - match event { - NetworkEvent::SessionClosed { peer_id, .. 
} => { +fn on_network_event(&mut self, event_result: NetworkEvent) { + match event_result { + NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. }) => { // remove the peer self.peers.remove(&peer_id); + self.transaction_fetcher.remove_peer(&peer_id); } - NetworkEvent::SessionEstablished { peer_id, messages, .. } => { - // insert a new peer - self.peers.insert( - peer_id, - Peer { - transactions: LruCache::new( - NonZeroUsize::new(PEER_TRANSACTION_CACHE_LIMIT).unwrap(), - ), - request_tx: messages, - }, - ); - - // Send a `NewPooledTransactionHashes` to the peer with _all_ transactions in the - // pool - let msg = NewPooledTransactionHashes(self.pool.pooled_transactions()); - self.network.send_message(NetworkHandleMessage::SendPooledTransactionHashes { - peer_id, - msg, - }) + NetworkEvent::ActivePeerSession { info, messages } => { + // process active peer session and broadcast available transaction from the pool + self.handle_peer_session(info, messages); + } + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { + let peer_id = info.peer_id; + // get messages from existing peer + let messages = match self.peers.get(&peer_id) { + Some(p) => p.request_tx.clone(), + None => { + debug!(target: "net::tx", ?peer_id, "No peer request sender found"); + return; + } + }; + self.handle_peer_session(info, messages); } - _ => {} + _ => {} } } ``` diff --git a/examples/bsc-p2p/src/main.rs b/examples/bsc-p2p/src/main.rs index 9e83f34e92f..cea87918322 100644 --- a/examples/bsc-p2p/src/main.rs +++ b/examples/bsc-p2p/src/main.rs @@ -17,7 +17,10 @@ use reth_discv4::Discv4ConfigBuilder; use reth_network::{ EthNetworkPrimitives, NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager, }; -use reth_network_api::PeersInfo; +use reth_network_api::{ + events::{PeerEvent, SessionInfo}, + PeersInfo, +}; use reth_primitives::{ForkHash, ForkId}; use reth_tracing::{ tracing::info, tracing_subscriber::filter::LevelFilter, LayerInfo, LogFormat, RethTracer, @@ 
-78,10 +81,11 @@ async fn main() { // For the sake of the example we only print the session established event // with the chain specific details match evt { - NetworkEvent::SessionEstablished { status, client_version, peer_id, .. } => { + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { status, client_version, peer_id, .. } = info; info!(peers=%net_handle.num_connected_peers() , %peer_id, chain = %status.chain, ?client_version, "Session established with a new peer."); } - NetworkEvent::SessionClosed { peer_id, reason } => { + NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason }) => { info!(peers=%net_handle.num_connected_peers() , %peer_id, ?reason, "Session closed."); } diff --git a/examples/polygon-p2p/Cargo.toml b/examples/polygon-p2p/Cargo.toml index e18f32a6473..34536ed52d7 100644 --- a/examples/polygon-p2p/Cargo.toml +++ b/examples/polygon-p2p/Cargo.toml @@ -16,6 +16,7 @@ secp256k1 = { workspace = true, features = [ tokio.workspace = true reth-network.workspace = true reth-chainspec.workspace = true +reth-network-api.workspace = true reth-primitives.workspace = true serde_json.workspace = true reth-tracing.workspace = true diff --git a/examples/polygon-p2p/src/main.rs b/examples/polygon-p2p/src/main.rs index bcc17a24f8d..bae5399d9cd 100644 --- a/examples/polygon-p2p/src/main.rs +++ b/examples/polygon-p2p/src/main.rs @@ -15,6 +15,7 @@ use reth_network::{ config::NetworkMode, EthNetworkPrimitives, NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager, }; +use reth_network_api::events::SessionInfo; use reth_tracing::{ tracing::info, tracing_subscriber::filter::LevelFilter, LayerInfo, LogFormat, RethTracer, Tracer, @@ -71,7 +72,8 @@ async fn main() { while let Some(evt) = events.next().await { // For the sake of the example we only print the session established event // with the chain specific details - if let NetworkEvent::SessionEstablished { status, client_version, .. 
} = evt { + if let NetworkEvent::ActivePeerSession { info, .. } = evt { + let SessionInfo { status, client_version, .. } = info; let chain = status.chain; info!(?chain, ?client_version, "Session established with a new peer."); } From 337272c88b8857cb0a36b2e9ce38c36a17b9a7e8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 4 Dec 2024 21:20:45 +0100 Subject: [PATCH 879/970] chore: add typed receipt for Receipt (#13140) --- crates/primitives-traits/src/receipt.rs | 5 ++--- crates/primitives/src/receipt.rs | 8 +++++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 435748c4e1b..1c115981e3e 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -3,7 +3,7 @@ use alloc::vec::Vec; use core::fmt; -use alloy_consensus::TxReceipt; +use alloy_consensus::{TxReceipt, Typed2718}; use alloy_primitives::B256; use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; @@ -25,12 +25,11 @@ pub trait Receipt: + TxReceipt + alloy_rlp::Encodable + alloy_rlp::Decodable + + Typed2718 + MaybeSerde + InMemorySize + MaybeArbitrary { - /// Returns transaction type. - fn tx_type(&self) -> u8; } /// Extension if [`Receipt`] used in block execution. 
diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 79e15b89d7d..2e8e269e711 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -4,7 +4,7 @@ use reth_primitives_traits::InMemorySize; use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, - Eip658Value, TxReceipt, + Eip658Value, TxReceipt, Typed2718, }; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Bloom, Log, B256}; @@ -96,12 +96,14 @@ impl TxReceipt for Receipt { } } -impl reth_primitives_traits::Receipt for Receipt { - fn tx_type(&self) -> u8 { +impl Typed2718 for Receipt { + fn ty(&self) -> u8 { self.tx_type as u8 } } +impl reth_primitives_traits::Receipt for Receipt {} + impl ReceiptExt for Receipt { fn receipts_root(_receipts: &[&Self]) -> B256 { #[cfg(feature = "optimism")] From 27dab59ceb1f0b8c5e74cfc040deadd1f9469972 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 4 Dec 2024 20:34:37 +0000 Subject: [PATCH 880/970] feat(trie): collect branch node hash masks when calculating a proof (#13129) --- crates/trie/common/src/proofs.rs | 31 +++++++++++++--- crates/trie/parallel/src/proof.rs | 28 +++++++++++++-- crates/trie/trie/src/proof/mod.rs | 59 ++++++++++++++++++++++++++++--- 3 files changed, 108 insertions(+), 10 deletions(-) diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 517f9fb7ca8..99b315d2467 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -11,7 +11,7 @@ use alloy_rlp::{encode_fixed_size, Decodable, EMPTY_STRING_CODE}; use alloy_trie::{ nodes::TrieNode, proof::{verify_proof, ProofNodes, ProofVerificationError}, - EMPTY_ROOT_HASH, + TrieMask, EMPTY_ROOT_HASH, }; use itertools::Itertools; use reth_primitives_traits::Account; @@ -23,6 +23,8 @@ use reth_primitives_traits::Account; pub struct MultiProof { /// State trie multiproof for requested accounts. 
pub account_subtree: ProofNodes, + /// The hash masks of the branch nodes in the account proof. + pub branch_node_hash_masks: HashMap, /// Storage trie multiproofs. pub storages: HashMap, } @@ -108,11 +110,15 @@ impl MultiProof { pub fn extend(&mut self, other: Self) { self.account_subtree.extend_from(other.account_subtree); + self.branch_node_hash_masks.extend(other.branch_node_hash_masks); + for (hashed_address, storage) in other.storages { match self.storages.entry(hashed_address) { hash_map::Entry::Occupied(mut entry) => { debug_assert_eq!(entry.get().root, storage.root); - entry.get_mut().subtree.extend_from(storage.subtree); + let entry = entry.get_mut(); + entry.subtree.extend_from(storage.subtree); + entry.branch_node_hash_masks.extend(storage.branch_node_hash_masks); } hash_map::Entry::Vacant(entry) => { entry.insert(storage); @@ -129,6 +135,8 @@ pub struct StorageMultiProof { pub root: B256, /// Storage multiproof for requested slots. pub subtree: ProofNodes, + /// The hash masks of the branch nodes in the storage proof. 
+ pub branch_node_hash_masks: HashMap, } impl StorageMultiProof { @@ -140,6 +148,7 @@ impl StorageMultiProof { Nibbles::default(), Bytes::from([EMPTY_STRING_CODE]), )]), + branch_node_hash_masks: HashMap::default(), } } @@ -380,14 +389,28 @@ mod tests { Nibbles::from_nibbles(vec![0]), alloy_rlp::encode_fixed_size(&U256::from(42)).to_vec().into(), ); - proof1.storages.insert(addr, StorageMultiProof { root, subtree: subtree1 }); + proof1.storages.insert( + addr, + StorageMultiProof { + root, + subtree: subtree1, + branch_node_hash_masks: HashMap::default(), + }, + ); let mut subtree2 = ProofNodes::default(); subtree2.insert( Nibbles::from_nibbles(vec![1]), alloy_rlp::encode_fixed_size(&U256::from(43)).to_vec().into(), ); - proof2.storages.insert(addr, StorageMultiProof { root, subtree: subtree2 }); + proof2.storages.insert( + addr, + StorageMultiProof { + root, + subtree: subtree2, + branch_node_hash_masks: HashMap::default(), + }, + ); proof1.extend(proof2); diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index f90a53fa99a..148f7cd5d4d 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -35,6 +35,8 @@ pub struct ParallelProof { view: ConsistentDbView, /// Trie input. input: Arc, + /// Flag indicating whether to include branch node hash masks in the proof. + collect_branch_node_hash_masks: bool, /// Parallel state root metrics. #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics, @@ -46,10 +48,17 @@ impl ParallelProof { Self { view, input, + collect_branch_node_hash_masks: false, #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics::default(), } } + + /// Set the flag indicating whether to include branch node hash masks in the proof. 
+ pub const fn with_branch_node_hash_masks(mut self, branch_node_hash_masks: bool) -> Self { + self.collect_branch_node_hash_masks = branch_node_hash_masks; + self + } } impl ParallelProof @@ -125,6 +134,7 @@ where hashed_address, ) .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().cloned())) + .with_branch_node_hash_masks(self.collect_branch_node_hash_masks) .storage_multiproof(target_slots) .map_err(|e| { ParallelStateRootError::StorageRoot(StorageRootError::Database( @@ -158,7 +168,9 @@ where // Create a hash builder to rebuild the root node since it is not available in the database. let retainer: ProofRetainer = targets.keys().map(Nibbles::unpack).collect(); - let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(self.collect_branch_node_hash_masks); let mut storages = HashMap::default(); let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); @@ -222,7 +234,19 @@ where #[cfg(feature = "metrics")] self.metrics.record_state_trie(tracker.finish()); - Ok(MultiProof { account_subtree: hash_builder.take_proof_nodes(), storages }) + let account_subtree = hash_builder.take_proof_nodes(); + let branch_node_hash_masks = if self.collect_branch_node_hash_masks { + hash_builder + .updated_branch_nodes + .unwrap_or_default() + .into_iter() + .map(|(path, node)| (path, node.hash_mask)) + .collect() + } else { + HashMap::default() + }; + + Ok(MultiProof { account_subtree, branch_node_hash_masks, storages }) } } diff --git a/crates/trie/trie/src/proof/mod.rs b/crates/trie/trie/src/proof/mod.rs index c344ec76239..8e3d0aec2ab 100644 --- a/crates/trie/trie/src/proof/mod.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -33,6 +33,8 @@ pub struct Proof { hashed_cursor_factory: H, /// A set of prefix sets that have changes. prefix_sets: TriePrefixSetsMut, + /// Flag indicating whether to include branch node hash masks in the proof. 
+ collect_branch_node_hash_masks: bool, } impl Proof { @@ -42,6 +44,7 @@ impl Proof { trie_cursor_factory: t, hashed_cursor_factory: h, prefix_sets: TriePrefixSetsMut::default(), + collect_branch_node_hash_masks: false, } } @@ -51,6 +54,7 @@ impl Proof { trie_cursor_factory, hashed_cursor_factory: self.hashed_cursor_factory, prefix_sets: self.prefix_sets, + collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, } } @@ -60,6 +64,7 @@ impl Proof { trie_cursor_factory: self.trie_cursor_factory, hashed_cursor_factory, prefix_sets: self.prefix_sets, + collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, } } @@ -68,6 +73,12 @@ impl Proof { self.prefix_sets = prefix_sets; self } + + /// Set the flag indicating whether to include branch node hash masks in the proof. + pub const fn with_branch_node_hash_masks(mut self, branch_node_hash_masks: bool) -> Self { + self.collect_branch_node_hash_masks = branch_node_hash_masks; + self + } } impl Proof @@ -104,7 +115,9 @@ where // Create a hash builder to rebuild the root node since it is not available in the database. let retainer = targets.keys().map(Nibbles::unpack).collect(); - let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(self.collect_branch_node_hash_masks); // Initialize all storage multiproofs as empty. // Storage multiproofs for non empty tries will be overwritten if necessary. 
@@ -131,6 +144,7 @@ where hashed_address, ) .with_prefix_set_mut(storage_prefix_set) + .with_branch_node_hash_masks(self.collect_branch_node_hash_masks) .storage_multiproof(proof_targets.unwrap_or_default())?; // Encode account @@ -149,7 +163,19 @@ where } } let _ = hash_builder.root(); - Ok(MultiProof { account_subtree: hash_builder.take_proof_nodes(), storages }) + let account_subtree = hash_builder.take_proof_nodes(); + let branch_node_hash_masks = if self.collect_branch_node_hash_masks { + hash_builder + .updated_branch_nodes + .unwrap_or_default() + .into_iter() + .map(|(path, node)| (path, node.hash_mask)) + .collect() + } else { + HashMap::default() + }; + + Ok(MultiProof { account_subtree, branch_node_hash_masks, storages }) } } @@ -164,6 +190,8 @@ pub struct StorageProof { hashed_address: B256, /// The set of storage slot prefixes that have changed. prefix_set: PrefixSetMut, + /// Flag indicating whether to include branch node hash masks in the proof. + collect_branch_node_hash_masks: bool, } impl StorageProof { @@ -179,6 +207,7 @@ impl StorageProof { hashed_cursor_factory: h, hashed_address, prefix_set: PrefixSetMut::default(), + collect_branch_node_hash_masks: false, } } @@ -189,6 +218,7 @@ impl StorageProof { hashed_cursor_factory: self.hashed_cursor_factory, hashed_address: self.hashed_address, prefix_set: self.prefix_set, + collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, } } @@ -199,6 +229,7 @@ impl StorageProof { hashed_cursor_factory, hashed_address: self.hashed_address, prefix_set: self.prefix_set, + collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, } } @@ -207,6 +238,12 @@ impl StorageProof { self.prefix_set = prefix_set; self } + + /// Set the flag indicating whether to include branch node hash masks in the proof. 
+ pub const fn with_branch_node_hash_masks(mut self, branch_node_hash_masks: bool) -> Self { + self.collect_branch_node_hash_masks = branch_node_hash_masks; + self + } } impl StorageProof @@ -243,7 +280,9 @@ where let walker = TrieWalker::new(trie_cursor, self.prefix_set.freeze()); let retainer = ProofRetainer::from_iter(target_nibbles); - let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(self.collect_branch_node_hash_masks); let mut storage_node_iter = TrieNodeIter::new(walker, hashed_storage_cursor); while let Some(node) = storage_node_iter.try_next()? { match node { @@ -260,6 +299,18 @@ where } let root = hash_builder.root(); - Ok(StorageMultiProof { root, subtree: hash_builder.take_proof_nodes() }) + let subtree = hash_builder.take_proof_nodes(); + let branch_node_hash_masks = if self.collect_branch_node_hash_masks { + hash_builder + .updated_branch_nodes + .unwrap_or_default() + .into_iter() + .map(|(path, node)| (path, node.hash_mask)) + .collect() + } else { + HashMap::default() + }; + + Ok(StorageMultiProof { root, subtree, branch_node_hash_masks }) } } From 874cf8902286a006aec0088f75923fb5361caa4d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 4 Dec 2024 22:07:43 +0100 Subject: [PATCH 881/970] feat: add OpBuilder config (#13132) --- crates/optimism/payload/src/builder.rs | 43 +++++++++++++++----------- crates/optimism/payload/src/config.rs | 38 +++++++++++++++++++++++ 2 files changed, 63 insertions(+), 18 deletions(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index f370ed496f0..27778da8f42 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,12 +1,17 @@ //! Optimism payload builder implementation. 
-use std::{fmt::Display, sync::Arc}; - +use crate::{ + config::OpBuilderConfig, + error::OpPayloadBuilderError, + payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, +}; use alloy_consensus::{Header, Transaction, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip4895::Withdrawals, merge::BEACON_NONCE}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; +use op_alloy_consensus::DepositTransaction; +use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; @@ -26,9 +31,10 @@ use reth_provider::{ HashedPostStateProvider, ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider, }; -use reth_revm::database::StateProviderDatabase; +use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, PoolTransaction, TransactionPool, + noop::NoopTransactionPool, pool::BestPayloadTransactions, BestTransactionsAttributes, + PoolTransaction, TransactionPool, }; use revm::{ db::{states::bundle_state::BundleRetention, State}, @@ -38,25 +44,19 @@ use revm::{ }, Database, DatabaseCommit, }; +use std::{fmt::Display, sync::Arc}; use tracing::{debug, trace, warn}; -use crate::{ - error::OpPayloadBuilderError, - payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, -}; -use op_alloy_consensus::DepositTransaction; -use op_alloy_rpc_types_engine::OpPayloadAttributes; -use reth_revm::witness::ExecutionWitnessRecord; -use reth_transaction_pool::pool::BestPayloadTransactions; - /// Optimism's payload builder -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone)] pub struct OpPayloadBuilder { /// The rollup's compute pending block configuration option. // TODO(clabby): Implement this feature. 
pub compute_pending_block: bool, /// The type responsible for creating the evm. pub evm_config: EvmConfig, + /// Settings for the builder, e.g. DA settings. + pub config: OpBuilderConfig, /// The type responsible for yielding the best transactions for the payload if mempool /// transactions are allowed. pub best_transactions: Txs, @@ -64,8 +64,15 @@ pub struct OpPayloadBuilder { impl OpPayloadBuilder { /// `OpPayloadBuilder` constructor. - pub const fn new(evm_config: EvmConfig) -> Self { - Self { compute_pending_block: true, evm_config, best_transactions: () } + /// + /// Configures the builder with the default settings. + pub fn new(evm_config: EvmConfig) -> Self { + Self::with_builder_config(evm_config, Default::default()) + } + + /// Configures the builder with the given [`OpBuilderConfig`]. + pub const fn with_builder_config(evm_config: EvmConfig, config: OpBuilderConfig) -> Self { + Self { compute_pending_block: true, evm_config, config, best_transactions: () } } } @@ -82,8 +89,8 @@ impl OpPayloadBuilder { self, best_transactions: T, ) -> OpPayloadBuilder { - let Self { compute_pending_block, evm_config, .. } = self; - OpPayloadBuilder { compute_pending_block, evm_config, best_transactions } + let Self { compute_pending_block, evm_config, config, .. } = self; + OpPayloadBuilder { compute_pending_block, evm_config, best_transactions, config } } /// Enables the rollup's compute pending block configuration option. diff --git a/crates/optimism/payload/src/config.rs b/crates/optimism/payload/src/config.rs index 5055c05c42e..469bfc9fe31 100644 --- a/crates/optimism/payload/src/config.rs +++ b/crates/optimism/payload/src/config.rs @@ -2,7 +2,34 @@ use std::sync::{atomic::AtomicU64, Arc}; +/// Settings for the OP builder. +#[derive(Debug, Clone, Default)] +pub struct OpBuilderConfig { + /// Data availability configuration for the OP builder. 
+ pub da_config: OpDAConfig, +} + +impl OpBuilderConfig { + /// Creates a new OP builder configuration with the given data availability configuration. + pub const fn new(da_config: OpDAConfig) -> Self { + Self { da_config } + } + + /// Returns the Data Availability configuration for the OP builder, if it has configured + /// constraints. + pub fn constrained_da_config(&self) -> Option<&OpDAConfig> { + if self.da_config.is_empty() { + None + } else { + Some(&self.da_config) + } + } +} + /// Contains the Data Availability configuration for the OP builder. +/// +/// This type is shareable and can be used to update the DA configuration for the OP payload +/// builder. #[derive(Debug, Clone, Default)] pub struct OpDAConfig { inner: Arc, @@ -16,6 +43,11 @@ impl OpDAConfig { this } + /// Returns whether the configuration is empty. + pub fn is_empty(&self) -> bool { + self.max_da_tx_size().is_none() && self.max_da_block_size().is_none() + } + /// Returns the max allowed data availability size per transactions, if any. 
pub fn max_da_tx_size(&self) -> Option { let val = self.inner.max_da_tx_size.load(std::sync::atomic::Ordering::Relaxed); @@ -84,4 +116,10 @@ mod tests { assert_eq!(da.max_da_tx_size(), None); assert_eq!(da.max_da_block_size(), None); } + + #[test] + fn test_da_constrained() { + let config = OpBuilderConfig::default(); + assert!(config.constrained_da_config().is_none()); + } } From 1f6b7d1239f41772899fff4b71fe37ed9c67c1ce Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 4 Dec 2024 17:51:19 -0500 Subject: [PATCH 882/970] feat: wire network primitives to remaining components (#13143) --- crates/net/eth-wire/src/ethstream.rs | 6 +++--- crates/net/network/src/builder.rs | 4 +--- crates/net/network/src/config.rs | 12 +++++------ crates/net/network/src/eth_requests.rs | 10 +++++---- crates/net/network/src/fetch/mod.rs | 15 ++++++++----- crates/net/network/src/lib.rs | 15 ++++++++----- crates/net/network/src/manager.rs | 9 +++++--- crates/net/network/src/network.rs | 4 ++-- crates/net/network/src/session/conn.rs | 9 ++++---- crates/net/network/src/state.rs | 4 ++-- crates/net/network/src/test_utils/testnet.rs | 22 +++++++++++--------- crates/net/network/src/transactions/mod.rs | 16 +++++++------- crates/net/network/tests/it/connect.rs | 15 +++++++++---- crates/node/core/src/cli/config.rs | 4 ++-- examples/network-txpool/src/main.rs | 6 ++++-- 15 files changed, 87 insertions(+), 64 deletions(-) diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 675ea19a5ce..77266c1b703 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -537,7 +537,7 @@ mod tests { async fn can_write_and_read_cleartext() { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); - let test_msg: EthMessage = EthMessage::NewBlockHashes( + let test_msg = EthMessage::::NewBlockHashes( vec![ BlockHashNumber { hash: 
B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -572,7 +572,7 @@ mod tests { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); let server_key = SecretKey::new(&mut rand::thread_rng()); - let test_msg: EthMessage = EthMessage::NewBlockHashes( + let test_msg = EthMessage::::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -614,7 +614,7 @@ mod tests { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); let server_key = SecretKey::new(&mut rand::thread_rng()); - let test_msg: EthMessage = EthMessage::NewBlockHashes( + let test_msg = EthMessage::::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index 13c932d4644..64b864ef957 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -65,15 +65,13 @@ impl NetworkBuilder { let request_handler = EthRequestHandler::new(client, peers, rx); NetworkBuilder { network, request_handler, transactions } } -} -impl NetworkBuilder { /// Creates a new [`TransactionsManager`] and wires it to the network. pub fn transactions( self, pool: Pool, transactions_manager_config: TransactionsManagerConfig, - ) -> NetworkBuilder, Eth> { + ) -> NetworkBuilder, Eth, N> { let Self { mut network, request_handler, .. 
} = self; let (tx, rx) = mpsc::unbounded_channel(); network.set_transactions(tx); diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index a9ce67821b9..7d3f932b418 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -145,19 +145,17 @@ where } } -impl NetworkConfig +impl NetworkConfig where - C: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, - > + HeaderProvider + N: NetworkPrimitives, + C: BlockReader + + HeaderProvider + Clone + Unpin + 'static, { /// Starts the networking stack given a [`NetworkConfig`] and returns a handle to the network. - pub async fn start_network(self) -> Result { + pub async fn start_network(self) -> Result, NetworkError> { let client = self.client.clone(); let (handle, network, _txpool, eth) = NetworkManager::builder::(self) .await? diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index ee8640daaa9..bf0110f4270 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -78,8 +78,9 @@ impl EthRequestHandler { } } -impl EthRequestHandler +impl EthRequestHandler where + N: NetworkPrimitives, C: BlockReader + HeaderProvider + ReceiptProvider, { /// Returns the list of requested headers @@ -222,10 +223,11 @@ where /// An endless future. /// /// This should be spawned or used as part of `tokio::select!`. -impl Future for EthRequestHandler +impl Future for EthRequestHandler where - C: BlockReader - + HeaderProvider

+ N: NetworkPrimitives, + C: BlockReader + + HeaderProvider
+ Unpin, { type Output = (); diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index c5474587adf..345df4f2e09 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -477,7 +477,8 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn test_poll_fetcher() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher = + StateFetcher::::new(manager.handle(), Default::default()); poll_fn(move |cx| { assert!(fetcher.poll(cx).is_pending()); @@ -497,7 +498,8 @@ mod tests { #[tokio::test] async fn test_peer_rotation() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher = + StateFetcher::::new(manager.handle(), Default::default()); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -520,7 +522,8 @@ mod tests { #[tokio::test] async fn test_peer_prioritization() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher = + StateFetcher::::new(manager.handle(), Default::default()); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -545,7 +548,8 @@ mod tests { #[tokio::test] async fn test_on_block_headers_response() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher = + StateFetcher::::new(manager.handle(), Default::default()); let peer_id = B512::random(); assert_eq!(fetcher.on_block_headers_response(peer_id, Ok(vec![Header::default()])), None); @@ -575,7 +579,8 @@ mod tests { #[tokio::test] async fn test_header_response_outcome() { let manager = 
PeersManager::new(PeersConfig::default()); - let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher = + StateFetcher::::new(manager.handle(), Default::default()); let peer_id = B512::random(); let request_pair = || { diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 0eae99e7c50..fadeb1f6519 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -58,10 +58,12 @@ //! // The key that's used for encrypting sessions and to identify our node. //! let local_key = rng_secret_key(); //! -//! let config = NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client); +//! let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key) +//! .boot_nodes(mainnet_nodes()) +//! .build(client); //! //! // create the network instance -//! let network = NetworkManager::::new(config).await.unwrap(); +//! let network = NetworkManager::new(config).await.unwrap(); //! //! // keep a handle to the network and spawn it //! let handle = network.handle().clone(); @@ -73,7 +75,9 @@ //! ### Configure all components of the Network with the [`NetworkBuilder`] //! //! ``` -//! use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; +//! use reth_network::{ +//! config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager, +//! }; //! use reth_network_peers::mainnet_nodes; //! use reth_provider::test_utils::NoopProvider; //! use reth_transaction_pool::TransactionPool; @@ -84,8 +88,9 @@ //! // The key that's used for encrypting sessions and to identify our node. //! let local_key = rng_secret_key(); //! -//! let config = -//! NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone()); +//! let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key) +//! .boot_nodes(mainnet_nodes()) +//! .build(client.clone()); //! let transactions_manager_config = config.transactions_manager_config.clone(); //! 
//! // create the network instance diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 89e21b9dd2d..5e580df883f 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -292,7 +292,9 @@ impl NetworkManager { /// components of the network /// /// ``` - /// use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; + /// use reth_network::{ + /// config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager, + /// }; /// use reth_network_peers::mainnet_nodes; /// use reth_provider::test_utils::NoopProvider; /// use reth_transaction_pool::TransactionPool; @@ -303,8 +305,9 @@ impl NetworkManager { /// // The key that's used for encrypting sessions and to identify our node. /// let local_key = rng_secret_key(); /// - /// let config = - /// NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone()); + /// let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key) + /// .boot_nodes(mainnet_nodes()) + /// .build(client.clone()); /// let transactions_manager_config = config.transactions_manager_config.clone(); /// /// // create the network instance diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 225b6332e0e..68c57724f0d 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -205,8 +205,8 @@ impl NetworkPeersEvents for NetworkHandle { } } -impl NetworkEventListenerProvider for NetworkHandle { - fn event_listener(&self) -> EventStream>> { +impl NetworkEventListenerProvider> for NetworkHandle { + fn event_listener(&self) -> EventStream>> { self.inner.event_sender.new_listener() } diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 45b83d1c487..6f87c26d6f5 100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -143,15 +143,16 @@ impl Sink> for EthRlpxConnection { mod tests { 
use super::*; - const fn assert_eth_stream() + const fn assert_eth_stream() where - St: Stream> + Sink, + N: NetworkPrimitives, + St: Stream, EthStreamError>> + Sink>, { } #[test] const fn test_eth_stream_variants() { - assert_eth_stream::(); - assert_eth_stream::(); + assert_eth_stream::>(); + assert_eth_stream::>(); } } diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 5d7c0a9f654..4dffadb2547 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -563,7 +563,7 @@ mod tests { use alloy_consensus::Header; use alloy_primitives::B256; - use reth_eth_wire::{BlockBodies, Capabilities, Capability, EthVersion}; + use reth_eth_wire::{BlockBodies, Capabilities, Capability, EthNetworkPrimitives, EthVersion}; use reth_network_api::PeerRequestSender; use reth_network_p2p::{bodies::client::BodiesClient, error::RequestError}; use reth_network_peers::PeerId; @@ -581,7 +581,7 @@ mod tests { }; /// Returns a testing instance of the [`NetworkState`]. 
- fn state() -> NetworkState { + fn state() -> NetworkState { let peers = PeersManager::default(); let handle = peers.handle(); NetworkState { diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index a27df7e7202..249cd1a6beb 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -11,7 +11,9 @@ use crate::{ use futures::{FutureExt, StreamExt}; use pin_project::pin_project; use reth_chainspec::{Hardforks, MAINNET}; -use reth_eth_wire::{protocol::Protocol, DisconnectReason, HelloMessageWithProtocols}; +use reth_eth_wire::{ + protocol::Protocol, DisconnectReason, EthNetworkPrimitives, HelloMessageWithProtocols, +}; use reth_network_api::{ events::{PeerEvent, SessionInfo}, test_utils::{PeersHandle, PeersHandleProvider}, @@ -140,7 +142,7 @@ where } /// Returns all handles to the networks - pub fn handles(&self) -> impl Iterator + '_ { + pub fn handles(&self) -> impl Iterator> + '_ { self.peers.iter().map(|p| p.handle()) } @@ -346,11 +348,11 @@ impl TestnetHandle { #[derive(Debug)] pub struct Peer { #[pin] - network: NetworkManager, + network: NetworkManager, #[pin] - request_handler: Option>, + request_handler: Option>, #[pin] - transactions_manager: Option>, + transactions_manager: Option>, pool: Option, client: C, secret_key: SecretKey, @@ -393,12 +395,12 @@ where } /// Returns mutable access to the network. - pub fn network_mut(&mut self) -> &mut NetworkManager { + pub fn network_mut(&mut self) -> &mut NetworkManager { &mut self.network } /// Returns the [`NetworkHandle`] of this peer. - pub fn handle(&self) -> NetworkHandle { + pub fn handle(&self) -> NetworkHandle { self.network.handle().clone() } @@ -506,8 +508,8 @@ pub struct PeerConfig { /// A handle to a peer in the [`Testnet`]. 
#[derive(Debug)] pub struct PeerHandle { - network: NetworkHandle, - transactions: Option, + network: NetworkHandle, + transactions: Option>, pool: Option, } @@ -545,7 +547,7 @@ impl PeerHandle { } /// Returns the [`NetworkHandle`] of this peer. - pub const fn network(&self) -> &NetworkHandle { + pub const fn network(&self) -> &NetworkHandle { &self.network } } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 2e6e2f08b65..3864c0fcc0b 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -282,14 +282,14 @@ pub struct TransactionsManager TransactionsManager { +impl TransactionsManager { /// Sets up a new instance. /// /// Note: This expects an existing [`NetworkManager`](crate::NetworkManager) instance. pub fn new( - network: NetworkHandle, + network: NetworkHandle, pool: Pool, - from_network: mpsc::UnboundedReceiver, + from_network: mpsc::UnboundedReceiver>, transactions_manager_config: TransactionsManagerConfig, ) -> Self { let network_events = network.event_listener(); @@ -332,9 +332,7 @@ impl TransactionsManager { metrics, } } -} -impl TransactionsManager { /// Returns a new handle that can send commands to this type. 
pub fn handle(&self) -> TransactionsHandle { TransactionsHandle { manager_tx: self.command_tx.clone() } @@ -1928,7 +1926,9 @@ mod tests { use tests::fetcher::TxFetchMetadata; use tracing::error; - async fn new_tx_manager() -> (TransactionsManager, NetworkManager) { + async fn new_tx_manager( + ) -> (TransactionsManager, NetworkManager) + { let secret_key = SecretKey::new(&mut rand::thread_rng()); let client = NoopProvider::default(); @@ -1959,7 +1959,7 @@ mod tests { pub(super) fn new_mock_session( peer_id: PeerId, version: EthVersion, - ) -> (PeerMetadata, mpsc::Receiver) { + ) -> (PeerMetadata, mpsc::Receiver) { let (to_mock_session_tx, to_mock_session_rx) = mpsc::channel(1); ( @@ -1991,7 +1991,7 @@ mod tests { let client = NoopProvider::default(); let pool = testing_pool(); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .disable_discovery() .listener_port(0) .build(client); diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index 77044f4b72d..cc545b626a0 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -232,7 +232,9 @@ async fn test_connect_with_builder() { discv4.add_boot_nodes(mainnet_nodes()); let client = NoopProvider::default(); - let config = NetworkConfigBuilder::new(secret_key).discovery(discv4).build(client); + let config = NetworkConfigBuilder::::new(secret_key) + .discovery(discv4) + .build(client); let (handle, network, _, requests) = NetworkManager::new(config) .await .unwrap() @@ -268,7 +270,9 @@ async fn test_connect_to_trusted_peer() { let discv4 = Discv4Config::builder(); let client = NoopProvider::default(); - let config = NetworkConfigBuilder::new(secret_key).discovery(discv4).build(client); + let config = NetworkConfigBuilder::::new(secret_key) + .discovery(discv4) + .build(client); let transactions_manager_config = config.transactions_manager_config.clone(); let (handle, network, 
transactions, requests) = NetworkManager::new(config) .await @@ -470,7 +474,7 @@ async fn test_geth_disconnect() { tokio::time::timeout(GETH_TIMEOUT, async move { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .disable_discovery() .build(NoopProvider::default()); @@ -694,7 +698,10 @@ async fn test_rejected_by_already_connect() { assert_eq!(handle.num_connected_peers(), 2); } -async fn new_random_peer(max_in_bound: usize, trusted_nodes: Vec) -> NetworkManager { +async fn new_random_peer( + max_in_bound: usize, + trusted_nodes: Vec, +) -> NetworkManager { let secret_key = SecretKey::new(&mut rand::thread_rng()); let peers_config = PeersConfig::default().with_max_inbound(max_in_bound).with_trusted_nodes(trusted_nodes); diff --git a/crates/node/core/src/cli/config.rs b/crates/node/core/src/cli/config.rs index 73ada50fcd2..27325632db9 100644 --- a/crates/node/core/src/cli/config.rs +++ b/crates/node/core/src/cli/config.rs @@ -1,7 +1,7 @@ //! Config traits for various node components. use alloy_primitives::Bytes; -use reth_network::protocol::IntoRlpxSubProtocol; +use reth_network::{protocol::IntoRlpxSubProtocol, NetworkPrimitives}; use reth_transaction_pool::PoolConfig; use std::{borrow::Cow, time::Duration}; @@ -49,7 +49,7 @@ pub trait RethNetworkConfig { // TODO add more network config methods here } -impl RethNetworkConfig for reth_network::NetworkManager { +impl RethNetworkConfig for reth_network::NetworkManager { fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { Self::add_rlpx_sub_protocol(self, protocol); } diff --git a/examples/network-txpool/src/main.rs b/examples/network-txpool/src/main.rs index e66185ad828..716e6cc57c9 100644 --- a/examples/network-txpool/src/main.rs +++ b/examples/network-txpool/src/main.rs @@ -7,7 +7,7 @@ //! cargo run --release -p network-txpool -- node //! 
``` -use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; +use reth_network::{config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, validate::ValidTransaction, CoinbaseTipOrdering, @@ -34,7 +34,9 @@ async fn main() -> eyre::Result<()> { let local_key = rng_secret_key(); // Configure the network - let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client); + let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key) + .mainnet_boot_nodes() + .build(client); let transactions_manager_config = config.transactions_manager_config.clone(); // create the network instance let (_handle, network, txpool, _) = NetworkManager::builder(config) From 33f8e5cd012c9cb3c1b7d63fa8bac3af1afbdfb7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 5 Dec 2024 02:39:52 -0600 Subject: [PATCH 883/970] Bump `op-alloy` (#13142) --- Cargo.lock | 242 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 8 +- 2 files changed, 126 insertions(+), 124 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea4ffec0dbc..4edd2bd2e76 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,9 +91,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.21" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" [[package]] name = "alloy-chains" @@ -161,7 +161,7 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -265,7 +265,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", "tracing", ] @@ -291,7 +291,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 
2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -319,7 +319,7 @@ dependencies = [ "rand 0.8.5", "serde_json", "tempfile", - "thiserror 2.0.4", + "thiserror 2.0.3", "tracing", "url", ] @@ -341,7 +341,7 @@ dependencies = [ "getrandom 0.2.15", "hashbrown 0.15.2", "hex-literal", - "indexmap 2.7.0", + "indexmap 2.6.0", "itoa", "k256", "keccak-asm", @@ -390,7 +390,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", "url", @@ -523,7 +523,7 @@ dependencies = [ "alloy-serde", "serde", "serde_with", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -604,7 +604,7 @@ dependencies = [ "alloy-serde", "serde", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -642,7 +642,7 @@ dependencies = [ "auto_impl", "elliptic-curve", "k256", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -660,7 +660,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -686,7 +686,7 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.7.0", + "indexmap 2.6.0", "proc-macro-error2", "proc-macro2", "quote", @@ -745,7 +745,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tower 0.5.1", "tracing", @@ -897,9 +897,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "aquamarine" @@ -1385,7 +1385,7 @@ dependencies = [ "bitflags 2.6.0", "boa_interner", "boa_macros", - "indexmap 2.7.0", + "indexmap 2.6.0", "num-bigint", "rustc-hash 2.1.0", ] @@ -1411,7 +1411,7 @@ dependencies = [ "fast-float", "hashbrown 0.14.5", "icu_normalizer", - "indexmap 2.7.0", + "indexmap 2.6.0", 
"intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1457,7 +1457,7 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.14.5", - "indexmap 2.7.0", + "indexmap 2.6.0", "once_cell", "phf", "rustc-hash 2.1.0", @@ -1783,9 +1783,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.22" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69371e34337c4c984bbe322360c2547210bf632eb2814bbe78a6e87a2935bd2b" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -1793,9 +1793,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.22" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e24c1b4099818523236a8ca881d2b45db98dadfb4625cf6608c12069fcbbde1" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstream", "anstyle", @@ -2655,7 +2655,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", "walkdir", ] @@ -2811,7 +2811,7 @@ dependencies = [ "reth-node-ethereum", "serde", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -2899,7 +2899,7 @@ dependencies = [ "reth-tracing", "reth-trie-db", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", ] @@ -3539,7 +3539,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3700,9 +3700,9 @@ dependencies = [ [[package]] name = "http" -version = "1.2.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -4134,9 +4134,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.0" +version = "2.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "arbitrary", "equivalent", @@ -4163,7 +4163,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.7.0", + "indexmap 2.6.0", "is-terminal", "itoa", "log", @@ -4892,7 +4892,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", - "indexmap 2.7.0", + "indexmap 2.6.0", "metrics", "metrics-util", "quanta", @@ -4924,7 +4924,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.2", - "indexmap 2.7.0", + "indexmap 2.6.0", "metrics", "ordered-float", "quanta", @@ -5330,9 +5330,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77284451ec70602f148f4f3bc6d1106fdfefd57c11ff459c4b2985e400ed1a18" +checksum = "78f0daa0d0936d436a21b57571b1e27c5663aa2ab62f6edae5ba5be999f9f93e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5343,14 +5343,14 @@ dependencies = [ "derive_more 1.0.0", "serde", "serde_with", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] name = "op-alloy-genesis" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c912ec93ec839076e8bbaaf7bd3d80aeedbe38cd5e8e3e76dfc67d217637e651" +checksum = "3eb0964932faa7050b74689f017aca66ffa3e52501080278a81bb0a43836c8dd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5358,14 +5358,14 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", - "thiserror 
2.0.4", + "thiserror 2.0.3", ] [[package]] name = "op-alloy-network" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef4620ba6309ecc18e1aaa339836ca839b001a420ca245add040a3bde1ae9b1" +checksum = "cd9a690fcc404e44c3589dd39cf22895df42f7ef8671a07828b8c376c39be46a" dependencies = [ "alloy-consensus", "alloy-network", @@ -5378,9 +5378,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ab24c1b9c21cedd691938b5667c951b04ae8b89429d7cb7a88f30afb79cbbf1" +checksum = "6d8c057c1a5bdf72d1f86c470a4d90f2d2ad1b273caa547c04cd6affe45b466d" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5395,16 +5395,16 @@ dependencies = [ "op-alloy-consensus", "op-alloy-genesis", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", "tracing", "unsigned-varint", ] [[package]] name = "op-alloy-rpc-types" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bdc32eba4d43bbd23f1f16dece7afd991d41ab4ffc2494a72b048e9f38db622" +checksum = "73741855ffaa2041b33cb616d7db7180c1149b648c68c23bee9e15501073fb32" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5421,9 +5421,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07175fcfd9d03a587ece7ce79fc288331e6d9ae523464eb677c751d5737713b" +checksum = "ebedc32e24013c8b3faea62d091bccbb90f871286fe2238c6f7e2ff29974df8e" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5432,10 +5432,11 @@ dependencies = [ "derive_more 1.0.0", "ethereum_ssz", "op-alloy-consensus", + "op-alloy-genesis", "op-alloy-protocol", "serde", "snap", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -5515,9 +5516,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = 
"3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" dependencies = [ "arbitrary", "arrayvec", @@ -5526,19 +5527,20 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", + "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.90", ] [[package]] @@ -6058,7 +6060,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -6077,7 +6079,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.4", + "thiserror 2.0.3", "tinyvec", "tracing", "web-time", @@ -6535,7 +6537,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "schnellru", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -6571,7 +6573,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tower 0.4.13", "tracing", @@ -6625,7 +6627,7 @@ dependencies = [ "reth-execution-errors", "reth-primitives", "reth-storage-errors", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -6781,7 +6783,7 @@ dependencies = [ "reth-fs-util", "secp256k1", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", "tikv-jemallocator", "tracy-client", ] @@ -6926,7 +6928,7 @@ dependencies = [ "sysinfo", "tempfile", "test-fuzz", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -6983,7 +6985,7 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", 
- "thiserror 2.0.4", + "thiserror 2.0.3", "tracing", ] @@ -7025,7 +7027,7 @@ dependencies = [ "schnellru", "secp256k1", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -7050,7 +7052,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -7076,7 +7078,7 @@ dependencies = [ "secp256k1", "serde", "serde_with", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -7115,7 +7117,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7192,7 +7194,7 @@ dependencies = [ "secp256k1", "sha2 0.10.8", "sha3", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7248,7 +7250,7 @@ dependencies = [ "reth-primitives-traits", "reth-trie", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", ] @@ -7275,7 +7277,7 @@ dependencies = [ "reth-prune", "reth-stages-api", "reth-tasks", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", ] @@ -7330,7 +7332,7 @@ dependencies = [ "reth-trie-parallel", "reth-trie-sparse", "revm-primitives", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -7376,7 +7378,7 @@ dependencies = [ "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -7409,7 +7411,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7438,7 +7440,7 @@ dependencies = [ "reth-primitives", "reth-primitives-traits", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -7504,7 +7506,7 @@ dependencies = [ "proptest-derive", "rustc-hash 2.1.0", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -7605,7 +7607,7 @@ dependencies = [ "reth-prune-types", "reth-storage-errors", 
"revm-primitives", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -7700,7 +7702,7 @@ dependencies = [ "reth-transaction-pool", "reth-trie-db", "tempfile", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", ] @@ -7727,7 +7729,7 @@ version = "1.1.2" dependencies = [ "serde", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ -7770,7 +7772,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7787,7 +7789,7 @@ dependencies = [ "criterion", "dashmap 6.1.0", "derive_more 1.0.0", - "indexmap 2.7.0", + "indexmap 2.6.0", "parking_lot", "pprof", "rand 0.8.5", @@ -7795,7 +7797,7 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.4", + "thiserror 2.0.3", "tracing", ] @@ -7834,7 +7836,7 @@ dependencies = [ "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -7894,7 +7896,7 @@ dependencies = [ "serial_test", "smallvec", "tempfile", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7919,7 +7921,7 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", ] @@ -7957,7 +7959,7 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "url", ] @@ -7988,7 +7990,7 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.4", + "thiserror 2.0.3", "tracing", "zstd", ] @@ -8122,7 +8124,7 @@ dependencies = [ "serde", "shellexpand", "strum", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "toml", "tracing", @@ -8450,7 +8452,7 @@ dependencies = [ "reth-transaction-pool", "revm", "sha2 0.10.8", - "thiserror 2.0.4", + "thiserror 2.0.3", "tracing", ] @@ -8517,7 +8519,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", ] @@ 
-8582,7 +8584,7 @@ dependencies = [ "reth-primitives", "revm-primitives", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", ] @@ -8757,7 +8759,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.1.0", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -8778,7 +8780,7 @@ dependencies = [ "serde", "serde_json", "test-fuzz", - "thiserror 2.0.4", + "thiserror 2.0.3", "toml", ] @@ -8864,7 +8866,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tower 0.4.13", @@ -8958,7 +8960,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-util", "tower 0.4.13", @@ -8999,7 +9001,7 @@ dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -9083,7 +9085,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -9183,7 +9185,7 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -9210,7 +9212,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -9316,7 +9318,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", "tracing-futures", @@ -9400,7 +9402,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -9515,7 +9517,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "thiserror 2.0.4", + "thiserror 2.0.3", "tokio", "tracing", ] @@ -9541,7 +9543,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "smallvec", - "thiserror 2.0.4", + "thiserror 2.0.3", ] [[package]] @@ 
-9561,9 +9563,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.12.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7f5f8a2deafb3c76f357bbf9e71b73bddb915c4994bbbe3208fbfbe8fc7f8e" +checksum = "41bbeb6004cc4ed48d27756f0479011df91a6f5642a3abab9309eda5ce67c4ad" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -10180,7 +10182,7 @@ version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.6.0", "itoa", "memchr", "ryu", @@ -10240,7 +10242,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.7.0", + "indexmap 2.6.0", "serde", "serde_derive", "serde_json", @@ -10484,9 +10486,9 @@ dependencies = [ [[package]] name = "soketto" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" +checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" dependencies = [ "base64 0.22.1", "bytes", @@ -10775,11 +10777,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.4" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f49a1853cf82743e3b7950f77e0f4d622ca36cf4317cba00c767838bac8d490" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" dependencies = [ - "thiserror-impl 2.0.4", + "thiserror-impl 2.0.3", ] [[package]] @@ -10795,9 +10797,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.4" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8381894bb3efe0c4acac3ded651301ceee58a15d47c2e34885ed1908ad667061" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", @@ 
-10856,9 +10858,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -10880,9 +10882,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -10934,9 +10936,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.42.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -11042,7 +11044,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", diff --git a/Cargo.toml b/Cargo.toml index 550a7135206..efc186864a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -470,10 +470,10 @@ alloy-transport-ipc = { version = "0.7.2", default-features = false } alloy-transport-ws = { version = "0.7.2", default-features = false } # op -op-alloy-rpc-types = "0.7.2" -op-alloy-rpc-types-engine = "0.7.2" -op-alloy-network = "0.7.2" -op-alloy-consensus = "0.7.2" +op-alloy-rpc-types = "0.7.3" +op-alloy-rpc-types-engine = "0.7.3" 
+op-alloy-network = "0.7.3" +op-alloy-consensus = "0.7.3" # misc aquamarine = "0.6" From 08c9df8cb37300b45fdc107fe37163f04429e833 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 5 Dec 2024 11:15:03 +0100 Subject: [PATCH 884/970] chore: bump pprof (#13148) --- Cargo.lock | 141 ++++++++++++++++++++++++++++++----------------------- Cargo.toml | 4 +- 2 files changed, 82 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4edd2bd2e76..d645996b5c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -74,6 +74,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e0966165eaf052580bd70eb1b32cb3d6245774c0104d1b2793e9650bf83b52a" +dependencies = [ + "equator", +] + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -124,7 +133,7 @@ dependencies = [ "arbitrary", "auto_impl", "c-kzg", - "derive_more 1.0.0", + "derive_more", "rand 0.8.5", "serde", "serde_with", @@ -175,7 +184,7 @@ dependencies = [ "alloy-sol-type-parser", "alloy-sol-types", "const-hex", - "derive_more 1.0.0", + "derive_more", "itoa", "serde", "serde_json", @@ -204,7 +213,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", - "derive_more 1.0.0", + "derive_more", "k256", "rand 0.8.5", "serde", @@ -224,7 +233,7 @@ dependencies = [ "alloy-serde", "arbitrary", "c-kzg", - "derive_more 1.0.0", + "derive_more", "ethereum_ssz", "ethereum_ssz_derive", "once_cell", @@ -336,7 +345,7 @@ dependencies = [ "cfg-if", "const-hex", "derive_arbitrary", - "derive_more 1.0.0", + "derive_more", "foldhash", "getrandom 0.2.15", "hashbrown 0.15.2", @@ -547,7 +556,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-serde", - "derive_more 1.0.0", + "derive_more", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -572,7 +581,7 @@ dependencies = [ "alloy-serde", "alloy-sol-types", "arbitrary", - "derive_more 1.0.0", + "derive_more", "itertools 0.13.0", 
"jsonrpsee-types", "serde", @@ -816,7 +825,7 @@ dependencies = [ "arbitrary", "arrayvec", "derive_arbitrary", - "derive_more 1.0.0", + "derive_more", "nybbles", "proptest", "proptest-derive", @@ -2426,17 +2435,6 @@ dependencies = [ "syn 2.0.90", ] -[[package]] -name = "derive_more" -version = "0.99.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.90", -] - [[package]] name = "derive_more" version = "1.0.0" @@ -2733,6 +2731,26 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "equator" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c35da53b5a021d2484a7cc49b2ac7f2d840f8236a286f84202369bd338d761ea" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "equivalent" version = "1.0.1" @@ -3854,23 +3872,23 @@ dependencies = [ [[package]] name = "iai-callgrind" -version = "0.13.4" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bd871e6374d5ca2d9b48dd23b3c7ef63a4201728621f6d75937dfcc66e91809" +checksum = "22275f8051874cd2f05b2aa1e0098d5cbec34df30ff92f1a1e2686a4cefed870" dependencies = [ "bincode", - "derive_more 0.99.18", + "derive_more", "iai-callgrind-macros", "iai-callgrind-runner", ] [[package]] name = "iai-callgrind-macros" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397649417510422ded7033f86132f833cca8c2e5081d0dfbec939b2353da7021" +checksum = "e8e6677dc52bd798b988e62ffd6831bf7eb46e4348cb1c74c1164954ebd0e5a1" dependencies = [ - "derive_more 0.99.18", + "derive_more", 
"proc-macro-error2", "proc-macro2", "quote", @@ -3881,9 +3899,9 @@ dependencies = [ [[package]] name = "iai-callgrind-runner" -version = "0.13.4" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3783c337f9e931af702b5d5835ff2a6824bf55e416461a4e042dfb4b8fdbbea" +checksum = "a02dd95fe4949513b45a328b5b18f527ee02e96f3428b48090aa7cf9043ab0b8" dependencies = [ "serde", ] @@ -5340,7 +5358,7 @@ dependencies = [ "alloy-rlp", "alloy-serde", "arbitrary", - "derive_more 1.0.0", + "derive_more", "serde", "serde_with", "thiserror 2.0.3", @@ -5413,7 +5431,7 @@ dependencies = [ "alloy-rpc-types-eth", "alloy-serde", "arbitrary", - "derive_more 1.0.0", + "derive_more", "op-alloy-consensus", "serde", "serde_json", @@ -5429,7 +5447,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", "alloy-serde", - "derive_more 1.0.0", + "derive_more", "ethereum_ssz", "op-alloy-consensus", "op-alloy-genesis", @@ -5784,10 +5802,11 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "pprof" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb" +checksum = "ebbe2f8898beba44815fdc9e5a4ae9c929e21c5dc29b0c774a15555f7f58d6d0" dependencies = [ + "aligned-vec", "backtrace", "cfg-if", "criterion", @@ -6639,7 +6658,7 @@ dependencies = [ "alloy-primitives", "alloy-signer", "alloy-signer-local", - "derive_more 1.0.0", + "derive_more", "metrics", "parking_lot", "pin-project", @@ -6671,7 +6690,7 @@ dependencies = [ "alloy-rlp", "alloy-trie", "auto_impl", - "derive_more 1.0.0", + "derive_more", "once_cell", "reth-ethereum-forks", "reth-network-peers", @@ -6845,7 +6864,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "auto_impl", - "derive_more 1.0.0", + "derive_more", "reth-primitives", "reth-primitives-traits", ] @@ -6900,7 +6919,7 @@ dependencies = [ "assert_matches", 
"bytes", "criterion", - "derive_more 1.0.0", + "derive_more", "eyre", "iai-callgrind", "metrics", @@ -6940,7 +6959,7 @@ dependencies = [ "alloy-primitives", "arbitrary", "bytes", - "derive_more 1.0.0", + "derive_more", "metrics", "modular-bitfield", "parity-scale-codec", @@ -7039,7 +7058,7 @@ version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", - "derive_more 1.0.0", + "derive_more", "discv5", "enr", "futures", @@ -7137,7 +7156,7 @@ dependencies = [ "alloy-rpc-types-eth", "alloy-signer", "alloy-signer-local", - "derive_more 1.0.0", + "derive_more", "eyre", "futures-util", "jsonrpsee", @@ -7392,7 +7411,7 @@ dependencies = [ "arbitrary", "async-stream", "bytes", - "derive_more 1.0.0", + "derive_more", "futures", "pin-project", "proptest", @@ -7430,7 +7449,7 @@ dependencies = [ "alloy-rlp", "arbitrary", "bytes", - "derive_more 1.0.0", + "derive_more", "proptest", "proptest-arbitrary-interop", "rand 0.8.5", @@ -7788,7 +7807,7 @@ dependencies = [ "byteorder", "criterion", "dashmap 6.1.0", - "derive_more 1.0.0", + "derive_more", "indexmap 2.6.0", "parking_lot", "pprof", @@ -7854,7 +7873,7 @@ dependencies = [ "aquamarine", "auto_impl", "criterion", - "derive_more 1.0.0", + "derive_more", "discv5", "enr", "futures", @@ -7911,7 +7930,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", "auto_impl", - "derive_more 1.0.0", + "derive_more", "enr", "futures", "reth-eth-wire-types", @@ -7934,7 +7953,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "auto_impl", - "derive_more 1.0.0", + "derive_more", "futures", "parking_lot", "reth-consensus", @@ -7983,7 +8002,7 @@ version = "1.1.2" dependencies = [ "anyhow", "bincode", - "derive_more 1.0.0", + "derive_more", "lz4_flex", "memmap2", "rand 0.8.5", @@ -8090,7 +8109,7 @@ dependencies = [ "alloy-rpc-types-engine", "clap", "const_format", - "derive_more 1.0.0", + "derive_more", "dirs-next", "eyre", "futures", @@ -8246,7 +8265,7 @@ dependencies = [ "alloy-eips", "alloy-genesis", "alloy-primitives", - 
"derive_more 1.0.0", + "derive_more", "once_cell", "op-alloy-rpc-types", "reth-chainspec", @@ -8266,7 +8285,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "clap", - "derive_more 1.0.0", + "derive_more", "eyre", "futures-util", "op-alloy-consensus", @@ -8334,7 +8353,7 @@ dependencies = [ "alloy-eips", "alloy-genesis", "alloy-primitives", - "derive_more 1.0.0", + "derive_more", "op-alloy-consensus", "reth-chainspec", "reth-consensus", @@ -8466,7 +8485,7 @@ dependencies = [ "alloy-rlp", "arbitrary", "bytes", - "derive_more 1.0.0", + "derive_more", "op-alloy-consensus", "proptest", "proptest-arbitrary-interop", @@ -8626,7 +8645,7 @@ dependencies = [ "bytes", "c-kzg", "criterion", - "derive_more 1.0.0", + "derive_more", "k256", "modular-bitfield", "once_cell", @@ -8668,7 +8687,7 @@ dependencies = [ "bincode", "byteorder", "bytes", - "derive_more 1.0.0", + "derive_more", "modular-bitfield", "op-alloy-consensus", "proptest", @@ -8772,7 +8791,7 @@ dependencies = [ "arbitrary", "assert_matches", "bytes", - "derive_more 1.0.0", + "derive_more", "modular-bitfield", "proptest", "proptest-arbitrary-interop", @@ -8826,7 +8845,7 @@ dependencies = [ "alloy-signer", "alloy-signer-local", "async-trait", - "derive_more 1.0.0", + "derive_more", "futures", "http", "http-body", @@ -9058,7 +9077,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-sol-types", - "derive_more 1.0.0", + "derive_more", "futures", "itertools 0.13.0", "jsonrpsee-core", @@ -9265,7 +9284,7 @@ version = "1.1.2" dependencies = [ "alloy-primitives", "clap", - "derive_more 1.0.0", + "derive_more", "serde", "strum", ] @@ -9301,7 +9320,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "derive_more 1.0.0", + "derive_more", "reth-fs-util", "reth-primitives-traits", "reth-static-file-types", @@ -9451,7 +9470,7 @@ dependencies = [ "bincode", "bytes", "criterion", - "derive_more 1.0.0", + "derive_more", "hash-db", "itertools 0.13.0", "nybbles", @@ -9473,7 +9492,7 @@ 
dependencies = [ "alloy-consensus", "alloy-primitives", "alloy-rlp", - "derive_more 1.0.0", + "derive_more", "metrics", "proptest", "proptest-arbitrary-interop", @@ -9502,7 +9521,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "criterion", - "derive_more 1.0.0", + "derive_more", "itertools 0.13.0", "metrics", "proptest", diff --git a/Cargo.toml b/Cargo.toml index efc186864a7..18b2ab655ab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -595,8 +595,8 @@ toml = "0.8" arbitrary = "1.3" assert_matches = "1.5.0" criterion = "0.5" -iai-callgrind = "0.13" -pprof = "0.13" +iai-callgrind = "0.14" +pprof = "0.14" proptest = "1.4" proptest-derive = "0.5" serial_test = { default-features = false, version = "3" } From 305ca2f9a5a74a8470d4120c8b307ac97dc2b58e Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 5 Dec 2024 10:34:12 +0000 Subject: [PATCH 885/970] docs: add docker and error code 13 to troubleshooting chapter (#13133) --- book/run/troubleshooting.md | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/book/run/troubleshooting.md b/book/run/troubleshooting.md index cab39cb1165..7b8ec6ba19c 100644 --- a/book/run/troubleshooting.md +++ b/book/run/troubleshooting.md @@ -2,8 +2,40 @@ This page tries to answer how to deal with the most popular issues. 
+- [Troubleshooting](#troubleshooting) + - [Database](#database) + - [Docker](#docker) + - [Error code 13](#error-code-13) + - [Slow database inserts and updates](#slow-database-inserts-and-updates) + - [Compact the database](#compact-the-database) + - [Re-sync from scratch](#re-sync-from-scratch) + - [Database write error](#database-write-error) + - [Concurrent database access error (using containers/Docker)](#concurrent-database-access-error-using-containersdocker) + - [Hardware Performance Testing](#hardware-performance-testing) + - [Disk Speed Testing with IOzone](#disk-speed-testing-with-iozone) + + ## Database +### Docker + +Externally accessing a `datadir` inside a named docker volume will usually come with folder/file ownership/permissions issues. + +**It is not recommended** to use the path to the named volume as it will trigger an error code 13. `RETH_DB_PATH: /var/lib/docker/volumes/named_volume/_data/eth/db cargo r --examples db-access --path ` is **DISCOURAGED** and a mounted volume with the right permissions should be used instead. + +### Error code 13 + +`the environment opened in read-only code: 13` + +Externally accessing a database in a read-only folder is not supported, **UNLESS** there's no `mdbx.lck` present, and it's called with `exclusive` on calling `open_db_read_only`. Meaning that there's no node syncing concurrently. + +If the error persists, ensure that you have the right `rx` permissions on the `datadir` **and its parent** folders. Eg. 
the following command should succeed: + +```bash,ignore +stat /full/path/datadir +``` + + ### Slow database inserts and updates If you're: From 0016d91ed6e3028a7ff707d6b2ed50ae303367b5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 5 Dec 2024 11:35:09 +0100 Subject: [PATCH 886/970] feat: install op miner endpoint (#13147) --- Cargo.lock | 14 ++++++ Cargo.toml | 1 + crates/node/builder/src/rpc.rs | 6 +-- crates/optimism/node/src/node.rs | 77 ++++++++++++++++++++++++++------ crates/optimism/rpc/Cargo.toml | 1 + crates/optimism/rpc/src/lib.rs | 1 + crates/optimism/rpc/src/miner.rs | 32 +++++++++++++ 7 files changed, 115 insertions(+), 17 deletions(-) create mode 100644 crates/optimism/rpc/src/miner.rs diff --git a/Cargo.lock b/Cargo.lock index d645996b5c2..306e6a992fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5418,6 +5418,19 @@ dependencies = [ "unsigned-varint", ] +[[package]] +name = "op-alloy-rpc-jsonrpsee" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a98debc5266443e64e03195cd1a3b6cdbe8d8679e9d8c4b76a3670d24b2e267a" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "jsonrpsee", + "op-alloy-rpc-types", + "op-alloy-rpc-types-engine", +] + [[package]] name = "op-alloy-rpc-types" version = "0.7.3" @@ -8512,6 +8525,7 @@ dependencies = [ "jsonrpsee-types", "op-alloy-consensus", "op-alloy-network", + "op-alloy-rpc-jsonrpsee", "op-alloy-rpc-types", "op-alloy-rpc-types-engine", "parking_lot", diff --git a/Cargo.toml b/Cargo.toml index 18b2ab655ab..f9fc6f2ca26 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -472,6 +472,7 @@ alloy-transport-ws = { version = "0.7.2", default-features = false } # op op-alloy-rpc-types = "0.7.3" op-alloy-rpc-types-engine = "0.7.3" +op-alloy-rpc-jsonrpsee = "0.7.3" op-alloy-network = "0.7.3" op-alloy-consensus = "0.7.3" diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 24b7db77d88..edc10fecc58 100644 --- a/crates/node/builder/src/rpc.rs +++ 
b/crates/node/builder/src/rpc.rs @@ -419,7 +419,7 @@ where ext: F, ) -> eyre::Result> where - F: FnOnce(&mut TransportRpcModules) -> eyre::Result<()>, + F: FnOnce(&mut TransportRpcModules, &mut AuthRpcModule) -> eyre::Result<()>, { let Self { eth_api_builder, engine_validator_builder, hooks, _pd: _ } = self; @@ -477,7 +477,7 @@ where let RpcHooks { on_rpc_started, extend_rpc_modules } = hooks; - ext(ctx.modules)?; + ext(ctx.modules, ctx.auth_module)?; extend_rpc_modules.extend_rpc_modules(ctx)?; let server_config = config.rpc.rpc_server_config(); @@ -537,7 +537,7 @@ where type Handle = RpcHandle; async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { - self.launch_add_ons_with(ctx, |_| Ok(())).await + self.launch_add_ons_with(ctx, |_, _| Ok(())).await } } diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index b2203331ddf..c7552135233 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -27,9 +27,10 @@ use reth_node_builder::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; -use reth_optimism_payload_builder::builder::OpPayloadTransactions; +use reth_optimism_payload_builder::{builder::OpPayloadTransactions, config::OpDAConfig}; use reth_optimism_primitives::OpPrimitives; use reth_optimism_rpc::{ + miner::{MinerApiExtServer, OpMinerExtApi}, witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, OpEthApi, SequencerClient, }; @@ -112,12 +113,25 @@ impl ChainStorage for OpStorage { pub struct OpNode { /// Additional Optimism args pub args: RollupArgs, + /// Data availability configuration for the OP builder. + /// + /// Used to throttle the size of the data availability payloads (configured by the batcher via + /// the `miner_` api). + /// + /// By default no throttling is applied. 
+ pub da_config: OpDAConfig, } impl OpNode { /// Creates a new instance of the Optimism node type. - pub const fn new(args: RollupArgs) -> Self { - Self { args } + pub fn new(args: RollupArgs) -> Self { + Self { args, da_config: OpDAConfig::default() } + } + + /// Configure the data availability configuration for the OP builder. + pub fn with_da_config(mut self, da_config: OpDAConfig) -> Self { + self.da_config = da_config; + self } /// Returns the components for the given [`RollupArgs`]. @@ -182,7 +196,10 @@ where } fn add_ons(&self) -> Self::AddOns { - Self::AddOns::builder().with_sequencer(self.args.sequencer_http.clone()).build() + Self::AddOns::builder() + .with_sequencer(self.args.sequencer_http.clone()) + .with_da_config(self.da_config.clone()) + .build() } } @@ -199,7 +216,13 @@ impl NodeTypesWithEngine for OpNode { /// Add-ons w.r.t. optimism. #[derive(Debug)] -pub struct OpAddOns(pub RpcAddOns, OpEngineValidatorBuilder>); +pub struct OpAddOns { + /// Rpc add-ons responsible for launching the RPC servers and instantiating the RPC handlers + /// and eth-api. + pub rpc_add_ons: RpcAddOns, OpEngineValidatorBuilder>, + /// Data availability configuration for the OP builder. 
+ pub da_config: OpDAConfig, +} impl>> Default for OpAddOns { fn default() -> Self { @@ -228,14 +251,29 @@ where self, ctx: reth_node_api::AddOnsContext<'_, N>, ) -> eyre::Result { + let Self { rpc_add_ons, da_config } = self; // install additional OP specific rpc methods let debug_ext = OpDebugWitnessApi::new(ctx.node.provider().clone(), ctx.node.evm_config().clone()); + let miner_ext = OpMinerExtApi::new(da_config); - self.0 - .launch_add_ons_with(ctx, move |modules| { + rpc_add_ons + .launch_add_ons_with(ctx, move |modules, auth_modules| { debug!(target: "reth::cli", "Installing debug payload witness rpc endpoint"); modules.merge_if_module_configured(RethRpcModule::Debug, debug_ext.into_rpc())?; + + // extend the miner namespace if configured in the regular http server + modules.merge_if_module_configured( + RethRpcModule::Miner, + miner_ext.clone().into_rpc(), + )?; + + // install the miner extension in the authenticated if configured + if modules.module_config().contains_any(&RethRpcModule::Miner) { + debug!(target: "reth::cli", "Installing miner DA rpc enddpoint"); + auth_modules.merge_auth_methods(miner_ext.into_rpc())?; + } + Ok(()) }) .await @@ -253,7 +291,7 @@ where type EthApi = OpEthApi; fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks { - self.0.hooks_mut() + self.rpc_add_ons.hooks_mut() } } @@ -276,6 +314,8 @@ pub struct OpAddOnsBuilder { /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. sequencer_client: Option, + /// Data availability configuration for the OP builder. + da_config: Option, } impl OpAddOnsBuilder { @@ -284,6 +324,12 @@ impl OpAddOnsBuilder { self.sequencer_client = sequencer_client.map(SequencerClient::new); self } + + /// Configure the data availability configuration for the OP builder. 
+ pub fn with_da_config(mut self, da_config: OpDAConfig) -> Self { + self.da_config = Some(da_config); + self + } } impl OpAddOnsBuilder { @@ -292,12 +338,15 @@ impl OpAddOnsBuilder { where N: FullNodeComponents>, { - let Self { sequencer_client, .. } = self; - - OpAddOns(RpcAddOns::new( - move |ctx| OpEthApi::::builder().with_sequencer(sequencer_client).build(ctx), - Default::default(), - )) + let Self { sequencer_client, da_config } = self; + + OpAddOns { + rpc_add_ons: RpcAddOns::new( + move |ctx| OpEthApi::::builder().with_sequencer(sequencer_client).build(ctx), + Default::default(), + ), + da_config: da_config.unwrap_or_default(), + } } } diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 968beaf9e83..d4a0b1fce27 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -45,6 +45,7 @@ alloy-consensus.workspace = true op-alloy-network.workspace = true op-alloy-rpc-types.workspace = true op-alloy-rpc-types-engine.workspace = true +op-alloy-rpc-jsonrpsee.workspace = true op-alloy-consensus.workspace = true revm.workspace = true diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index 0fa0debdf33..b76058ce531 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -12,6 +12,7 @@ pub mod error; pub mod eth; +pub mod miner; pub mod sequencer; pub mod witness; diff --git a/crates/optimism/rpc/src/miner.rs b/crates/optimism/rpc/src/miner.rs new file mode 100644 index 00000000000..bfdee467647 --- /dev/null +++ b/crates/optimism/rpc/src/miner.rs @@ -0,0 +1,32 @@ +//! Miner API extension for OP. + +use alloy_primitives::U64; +use jsonrpsee_core::{async_trait, RpcResult}; +pub use op_alloy_rpc_jsonrpsee::traits::MinerApiExtServer; +use reth_optimism_payload_builder::config::OpDAConfig; +use tracing::debug; + +/// Miner API extension for OP, exposes settings for the data availability configuration via the +/// `miner_` API. 
+#[derive(Debug, Clone)] +pub struct OpMinerExtApi { + da_config: OpDAConfig, +} + +impl OpMinerExtApi { + /// Instantiate the miner API extension with the given, sharable data availability + /// configuration. + pub const fn new(da_config: OpDAConfig) -> Self { + Self { da_config } + } +} + +#[async_trait] +impl MinerApiExtServer for OpMinerExtApi { + /// Handler for `miner_setMaxDASize` RPC method. + async fn set_max_da_size(&self, max_tx_size: U64, max_block_size: U64) -> RpcResult<()> { + debug!(target: "rpc", "Setting max DA size: tx={}, block={}", max_tx_size, max_block_size); + self.da_config.set_max_da_size(max_tx_size.to(), max_block_size.to()); + Ok(()) + } +} From e022b6fd92a33cd44e3ae51ee2fc2ecc0f773222 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 5 Dec 2024 11:51:26 +0100 Subject: [PATCH 887/970] chore: bump alloy 073 (#13150) --- Cargo.lock | 113 +++++++++--------- Cargo.toml | 56 ++++----- crates/net/downloaders/src/bodies/bodies.rs | 2 +- crates/net/downloaders/src/bodies/request.rs | 1 + .../net/downloaders/src/bodies/test_utils.rs | 1 + crates/primitives-traits/src/block/header.rs | 8 +- crates/stages/stages/src/stages/bodies.rs | 30 ++--- 7 files changed, 101 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 306e6a992fc..ce20350b60d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -121,9 +121,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73dd0ab7003dfa3efd252e423873cd3bc241d1456147e752f995cc8aabd1d1f6" +checksum = "a101d4d016f47f13890a74290fdd17b05dd175191d9337bc600791fb96e4dea8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -155,9 +155,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a01f5593f6878452c6dde102ece391b60cba79801c5f606f8fe898ff57cd5d7" 
+checksum = "2869e4fb31331d3b8c58c7db567d1e4e4e94ef64640beda3b6dd9b7045690941" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -222,9 +222,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c242de43a1869bcb2fbce3b377130959d10dfd562b87ac7aa2f04d98baac51" +checksum = "8b6755b093afef5925f25079dd5a7c8d096398b804ba60cb5275397b06b31689" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -243,12 +243,13 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd39b72f860cb0c542fac925f91d1939c2b14a0970b39d0ae304b5b7574a0ac" +checksum = "aeec8e6eab6e52b7c9f918748c9b811e87dbef7312a2e3a2ca1729a92966a6af" dependencies = [ "alloy-primitives", "alloy-serde", + "alloy-trie", "serde", ] @@ -266,9 +267,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c15c11661571a19a06896663c93e804ccf013159275a89a98e892014df514d8" +checksum = "4fa077efe0b834bcd89ff4ba547f48fb081e4fdc3673dd7da1b295a2cf2bb7b7" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -280,9 +281,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60dd0b99eaa5e715dd90d42021f7f08a0a70976ea84f41a0ad233770e0c1962b" +checksum = "209a1882a08e21aca4aac6e2a674dc6fcf614058ef8cb02947d63782b1899552" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -318,9 +319,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a04cf8f3a19b024b2bc71b5774d423cd2edda7f67df6029daa1368c5c02da5" +checksum = 
"bffcf33dd319f21cd6f066d81cbdef0326d4bdaaf7cfe91110bc090707858e9f" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -367,9 +368,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4933c761f10e44d5e901804b56efb2ce6e0945e6c57d2fa1e5ace303fae6f74a" +checksum = "9eefa6f4c798ad01f9b4202d02cea75f5ec11fa180502f4701e2b47965a8c0bb" dependencies = [ "alloy-chains", "alloy-consensus", @@ -408,9 +409,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808719714bfb2aa24b0eb2a38411ce8e654ba11c0ebf2a6648fcbe9fabfe696d" +checksum = "aac9a7210e0812b1d814118f426f57eb7fc260a419224dd1c76d169879c06907" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -449,9 +450,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce26c25efb8290b6ba559ae6c40bf6630d337e107ae242e5790501420dba7b7" +checksum = "ed30bf1041e84cabc5900f52978ca345dd9969f2194a945e6fdec25b0620705c" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -474,9 +475,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41080ce2640928f0df45c41d2af629b88db3cb31af3abbe614964ae10001ddac" +checksum = "5ab686b0fa475d2a4f5916c5f07797734a691ec58e44f0f55d4746ea39cbcefb" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -487,9 +488,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db981579da4d597d9d35f56ad7641b929bf8f551ab696715132f554863c83540" +checksum = 
"1f0874a976ccdf83a178ad93b64bec5b8c91a47428d714d544ca70258acfa07b" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -499,9 +500,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "252b7433e731e5d24f7eb7a54a368bc813a1086aaf84643ab10e99599a6ff16c" +checksum = "d33bc190844626c08e21897736dbd7956ab323c09e6f141b118d1c8b7aff689e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -522,9 +523,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c8db5fb70d2fece7bc1cd5adf42e72fc8a23547adeff8f558d9063f1e7788c" +checksum = "cc37861dc8cbf5da35d346139fbe6e03ee7823cc21138a2c4a590d3b0b4b24be" dependencies = [ "alloy-eips", "alloy-primitives", @@ -537,9 +538,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea3a662ced0bfbe582d26ed85d6a0092310787331555c8f7a86f843c7ca272ef" +checksum = "f0294b553785eb3fa7fff2e8aec45e82817258e7e6c9365c034a90cb6baeebc9" dependencies = [ "alloy-primitives", "serde", @@ -547,9 +548,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3b000c7f3469e7faa575ba70207294cf07e91dfd6ce4d04d5d5d8069f974a66" +checksum = "5d297268357e3eae834ddd6888b15f764cbc0f4b3be9265f5f6ec239013f3d68" dependencies = [ "alloy-consensus", "alloy-eips", @@ -568,9 +569,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3468e7385fbb86b0fde5497d685c02f765ea09d36f7e07c5d1c9a52b077d38e2" +checksum = 
"a0600b8b5e2dc0cab12cbf91b5a885c35871789fb7b3a57b434bd4fced5b7a8b" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -590,9 +591,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26988fb56d87414c96b8fd9b69ad6ce3768bc9acc953ed02c18a66f74ab98c66" +checksum = "093d618d5a42808e7ae26062f415a1e816fc27d3d32662c6ed52d0871b154894" dependencies = [ "alloy-eips", "alloy-primitives", @@ -604,9 +605,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a90be1bc8e3659db1c9512191873a268a917efbc62b8bd39a92c12bf613b193" +checksum = "4e073ab0e67429c60be281e181731132fd07d82e091c10c29ace6935101034bb" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -618,9 +619,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beade2858d292442f5be6fce452c923072a7ac4d3898d333abf42703945444d0" +checksum = "7435f6bfb93912f16d64bb61f4278fa698469e054784f477337ef87ec0b2527b" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -630,9 +631,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42de6002e2154b50b3568aea27e26bd9caf7b754658f43065f2e9b6ee0a8c839" +checksum = "9afa753a97002a33b2ccb707d9f15f31c81b8c1b786c95b73cc62bb1d1fd0c3f" dependencies = [ "alloy-primitives", "arbitrary", @@ -642,9 +643,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f288a9a25e2578dab17845fd8d2be1d32de33565783ed185ded161a65f92381b" +checksum = 
"9b2cbff01a673936c2efd7e00d4c0e9a4dbbd6d600e2ce298078d33efbb19cd7" dependencies = [ "alloy-primitives", "async-trait", @@ -656,9 +657,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d8081f589ddc11a959605e30c723d51cad2562d9072305f8e3ef311f077e5eb" +checksum = "bd6d988cb6cd7d2f428a74476515b1a6e901e08c796767f9f93311ab74005c8b" dependencies = [ "alloy-consensus", "alloy-network", @@ -744,9 +745,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90352f4cf78017905c3244f48b38fadc345970bbc9095087c0f985a580550488" +checksum = "d69d36982b9e46075ae6b792b0f84208c6c2c15ad49f6c500304616ef67b70e0" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -764,9 +765,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d26c94d51fa8b1aee3d15db113dd0773776c02bb36dbaa2590b900dadd7e7d0" +checksum = "2e02ffd5d93ffc51d72786e607c97de3b60736ca3e636ead0ec1f7dce68ea3fd" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -779,9 +780,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c498fcdec50650be6b6a22ce7928a1b2738086b4f94f31b132e83498d45bbb" +checksum = "1b6f8b87cb84bae6d81ae6604b37741c8116f84f9784a0ecc6038c302e679d23" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -798,9 +799,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7b21335b55c9f715e2acca0228dc1d6880d961756916c13a9ce70f9f413e70" +checksum = 
"9c085c4e1e7680b723ffc558f61a22c061ed3f70eb3436f93f3936779c59cec1" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -816,9 +817,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b2e366c0debf0af77766c23694a3f863b02633050e71e096e257ffbd395e50" +checksum = "3a5fd8fea044cc9a8c8a50bb6f28e31f0385d820f116c5b98f6f4e55d6e5590b" dependencies = [ "alloy-primitives", "alloy-rlp", diff --git a/Cargo.toml b/Cargo.toml index f9fc6f2ca26..4233750d076 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -434,40 +434,40 @@ alloy-rlp = { version = "0.3.4", default-features = false } alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.7.2", default-features = false } -alloy-contract = { version = "0.7.2", default-features = false } -alloy-eips = { version = "0.7.2", default-features = false } -alloy-genesis = { version = "0.7.2", default-features = false } -alloy-json-rpc = { version = "0.7.2", default-features = false } -alloy-network = { version = "0.7.2", default-features = false } -alloy-network-primitives = { version = "0.7.2", default-features = false } -alloy-node-bindings = { version = "0.7.2", default-features = false } -alloy-provider = { version = "0.7.2", features = [ +alloy-consensus = { version = "0.7.3", default-features = false } +alloy-contract = { version = "0.7.3", default-features = false } +alloy-eips = { version = "0.7.3", default-features = false } +alloy-genesis = { version = "0.7.3", default-features = false } +alloy-json-rpc = { version = "0.7.3", default-features = false } +alloy-network = { version = "0.7.3", default-features = false } +alloy-network-primitives = { version = "0.7.3", default-features = false } +alloy-node-bindings = { version = "0.7.3", default-features = false } +alloy-provider = { version = "0.7.3", features = [ "reqwest", ], default-features = false } 
-alloy-pubsub = { version = "0.7.2", default-features = false } -alloy-rpc-client = { version = "0.7.2", default-features = false } -alloy-rpc-types = { version = "0.7.2", features = [ +alloy-pubsub = { version = "0.7.3", default-features = false } +alloy-rpc-client = { version = "0.7.3", default-features = false } +alloy-rpc-types = { version = "0.7.3", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.7.2", default-features = false } -alloy-rpc-types-anvil = { version = "0.7.2", default-features = false } -alloy-rpc-types-beacon = { version = "0.7.2", default-features = false } -alloy-rpc-types-debug = { version = "0.7.2", default-features = false } -alloy-rpc-types-engine = { version = "0.7.2", default-features = false } -alloy-rpc-types-eth = { version = "0.7.2", default-features = false } -alloy-rpc-types-mev = { version = "0.7.2", default-features = false } -alloy-rpc-types-trace = { version = "0.7.2", default-features = false } -alloy-rpc-types-txpool = { version = "0.7.2", default-features = false } -alloy-serde = { version = "0.7.2", default-features = false } -alloy-signer = { version = "0.7.2", default-features = false } -alloy-signer-local = { version = "0.7.2", default-features = false } -alloy-transport = { version = "0.7.2" } -alloy-transport-http = { version = "0.7.2", features = [ +alloy-rpc-types-admin = { version = "0.7.3", default-features = false } +alloy-rpc-types-anvil = { version = "0.7.3", default-features = false } +alloy-rpc-types-beacon = { version = "0.7.3", default-features = false } +alloy-rpc-types-debug = { version = "0.7.3", default-features = false } +alloy-rpc-types-engine = { version = "0.7.3", default-features = false } +alloy-rpc-types-eth = { version = "0.7.3", default-features = false } +alloy-rpc-types-mev = { version = "0.7.3", default-features = false } +alloy-rpc-types-trace = { version = "0.7.3", default-features = false } +alloy-rpc-types-txpool = { version = "0.7.3", 
default-features = false } +alloy-serde = { version = "0.7.3", default-features = false } +alloy-signer = { version = "0.7.3", default-features = false } +alloy-signer-local = { version = "0.7.3", default-features = false } +alloy-transport = { version = "0.7.3" } +alloy-transport-http = { version = "0.7.3", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.7.2", default-features = false } -alloy-transport-ws = { version = "0.7.2", default-features = false } +alloy-transport-ipc = { version = "0.7.3", default-features = false } +alloy-transport-ws = { version = "0.7.3", default-features = false } # op op-alloy-rpc-types = "0.7.3" diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 682995e7eb3..1ee94929913 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -15,7 +15,7 @@ use reth_network_p2p::{ error::{DownloadError, DownloadResult}, }; use reth_primitives::SealedHeader; -use reth_primitives_traits::{size::InMemorySize, BlockHeader as _}; +use reth_primitives_traits::size::InMemorySize; use reth_storage_api::HeaderProvider; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 66287624f89..40fa9c309ba 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -1,4 +1,5 @@ use crate::metrics::{BodyDownloaderMetrics, ResponseMetrics}; +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use futures::{Future, FutureExt}; use reth_consensus::Consensus; diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs index af4bf8145af..781d1d93ba5 100644 --- a/crates/net/downloaders/src/bodies/test_utils.rs +++ b/crates/net/downloaders/src/bodies/test_utils.rs @@ -2,6 +2,7 @@ 
#![allow(dead_code)] +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, transaction::DbTxMut}; diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 47d50a45bb5..42d0153b19c 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -33,12 +33,6 @@ pub trait BlockHeader: + AsRef + 'static { - /// Returns whether this header corresponds to an empty block. - fn is_empty(&self) -> bool; } -impl BlockHeader for alloy_consensus::Header { - fn is_empty(&self) -> bool { - self.is_empty() - } -} +impl BlockHeader for alloy_consensus::Header {} diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 0f90ff69e46..83be3f36fcf 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -1,17 +1,11 @@ -use std::{ - cmp::Ordering, - task::{ready, Context, Poll}, -}; - +use super::missing_static_data_error; use futures_util::TryStreamExt; use reth_codecs::Compact; -use reth_primitives_traits::{Block, BlockBody}; -use tracing::*; - use reth_db::{tables, transaction::DbTx}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTxMut}; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; +use reth_primitives_traits::{Block, BlockBody}; use reth_provider::{ providers::StaticFileWriter, BlockReader, BlockWriter, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, StorageLocation, @@ -21,8 +15,11 @@ use reth_stages_api::{ UnwindInput, UnwindOutput, }; use reth_storage_errors::provider::ProviderResult; - -use super::missing_static_data_error; +use std::{ + cmp::Ordering, + task::{ready, Context, Poll}, +}; +use tracing::*; /// The body stage downloads block bodies. 
/// @@ -264,18 +261,15 @@ where #[cfg(test)] mod tests { + use super::*; + use crate::test_utils::{ + stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, + }; use assert_matches::assert_matches; - use reth_provider::StaticFileProviderFactory; use reth_stages_api::StageUnitCheckpoint; use test_utils::*; - use crate::test_utils::{ - stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, - }; - - use super::*; - stage_test_suite_ext!(BodyTestRunner, body); /// Checks that the stage downloads at most `batch_size` blocks. @@ -490,7 +484,7 @@ mod tests { UnwindStageTestRunner, }, }; - use alloy_consensus::Header; + use alloy_consensus::{BlockHeader, Header}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use futures_util::Stream; use reth_db::{static_file::HeaderWithHashMask, tables}; From 659448241bd015153d9b851058ab75743bca0ee4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 5 Dec 2024 12:44:48 +0100 Subject: [PATCH 888/970] chore: reuse alloy proof fns (#13091) --- crates/primitives/src/proofs.rs | 36 +++++++++------------------------ 1 file changed, 10 insertions(+), 26 deletions(-) diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 81c26d7180e..2a1d5b6982b 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,26 +1,22 @@ //! Helper function for calculating Merkle proofs and hashes. use crate::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; -use alloc::{borrow::Borrow, vec::Vec}; -use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; -use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawal}; -use alloy_primitives::{keccak256, B256}; -use alloy_trie::root::{ordered_trie_root, ordered_trie_root_with_encoder}; +use alloy_primitives::B256; +use alloy_trie::root::ordered_trie_root_with_encoder; /// Calculate a transaction root. /// /// `(rlp(index), encoded(tx))` pairs. 
-pub fn calculate_transaction_root(transactions: &[T]) -> B256 -where - T: Encodable2718, -{ - ordered_trie_root_with_encoder(transactions, |tx, buf| tx.borrow().encode_2718(buf)) -} +#[doc(inline)] +pub use alloy_consensus::proofs::calculate_transaction_root; /// Calculates the root hash of the withdrawals. -pub fn calculate_withdrawals_root(withdrawals: &[Withdrawal]) -> B256 { - ordered_trie_root(withdrawals) -} +#[doc(inline)] +pub use alloy_consensus::proofs::calculate_withdrawals_root; + +/// Calculates the root hash for ommer/uncle headers. +#[doc(inline)] +pub use alloy_consensus::proofs::calculate_ommers_root; /// Calculates the receipt root for a header. pub fn calculate_receipt_root(receipts: &[ReceiptWithBloom]) -> B256 { @@ -41,18 +37,6 @@ pub fn calculate_receipt_root_no_memo(receipts: &[&Receipt]) -> B256 { }) } -/// Calculates the root hash for ommer/uncle headers. -pub fn calculate_ommers_root(ommers: &[Header]) -> B256 { - // Check if `ommers` list is empty - if ommers.is_empty() { - return EMPTY_OMMER_ROOT_HASH - } - // RLP Encode - let mut ommers_rlp = Vec::new(); - alloy_rlp::encode_list(ommers, &mut ommers_rlp); - keccak256(ommers_rlp) -} - #[cfg(test)] mod tests { use super::*; From 804dc99ef44c352d469087adf4673bed56f60dfa Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 5 Dec 2024 13:13:14 +0100 Subject: [PATCH 889/970] feat: impl compact for alloy txtype (#13152) --- Cargo.lock | 1 + crates/storage/codecs/Cargo.toml | 1 + .../codecs/src/alloy/transaction/mod.rs | 3 +- .../codecs/src/alloy/transaction/txtype.rs | 97 +++++++++++++++++++ 4 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 crates/storage/codecs/src/alloy/transaction/txtype.rs diff --git a/Cargo.lock b/Cargo.lock index ce20350b60d..df7737b06b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6837,6 +6837,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "reth-codecs-derive", + "rstest", "serde", "serde_json", "test-fuzz", diff --git 
a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 8fbf1632403..c3210b21ae2 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -48,6 +48,7 @@ serde_json.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true +rstest.workspace = true [features] default = ["std", "alloy"] diff --git a/crates/storage/codecs/src/alloy/transaction/mod.rs b/crates/storage/codecs/src/alloy/transaction/mod.rs index fe31293cd18..5c829ddf56b 100644 --- a/crates/storage/codecs/src/alloy/transaction/mod.rs +++ b/crates/storage/codecs/src/alloy/transaction/mod.rs @@ -5,7 +5,8 @@ cond_mod!( eip2930, eip4844, eip7702, - legacy + legacy, + txtype ); diff --git a/crates/storage/codecs/src/alloy/transaction/txtype.rs b/crates/storage/codecs/src/alloy/transaction/txtype.rs new file mode 100644 index 00000000000..63f80bfaf20 --- /dev/null +++ b/crates/storage/codecs/src/alloy/transaction/txtype.rs @@ -0,0 +1,97 @@ +//! Compact implementation for [`TxType`] + +use crate::txtype::{COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY}; +use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; +use alloy_consensus::TxType; + +impl crate::Compact for TxType { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + use crate::txtype::*; + + match self { + Self::Legacy => COMPACT_IDENTIFIER_LEGACY, + Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, + Self::Eip1559 => COMPACT_IDENTIFIER_EIP1559, + Self::Eip4844 => { + buf.put_u8(EIP4844_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + Self::Eip7702 => { + buf.put_u8(EIP7702_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + } + } + + // For backwards compatibility purposes only 2 bits of the type are encoded in the identifier + // parameter. 
In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type + // is read from the buffer as a single byte. + fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + ( + match identifier { + COMPACT_IDENTIFIER_LEGACY => Self::Legacy, + COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, + COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, + COMPACT_EXTENDED_IDENTIFIER_FLAG => { + let extended_identifier = buf.get_u8(); + match extended_identifier { + EIP4844_TX_TYPE_ID => Self::Eip4844, + EIP7702_TX_TYPE_ID => Self::Eip7702, + _ => panic!("Unsupported TxType identifier: {extended_identifier}"), + } + } + _ => panic!("Unknown identifier for TxType: {identifier}"), + }, + buf, + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rstest::rstest; + + use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; + use crate::Compact; + + + #[rstest] + #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] + #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + fn test_txtype_to_compact( + #[case] tx_type: TxType, + #[case] expected_identifier: usize, + #[case] expected_buf: Vec, + ) { + let mut buf = vec![]; + let identifier = tx_type.to_compact(&mut buf); + + assert_eq!(identifier, expected_identifier, "Unexpected identifier for TxType {tx_type:?}",); + assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}",); + } + + #[rstest] + #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] + #[case(TxType::Eip7702, 
COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + fn test_txtype_from_compact( + #[case] expected_type: TxType, + #[case] identifier: usize, + #[case] buf: Vec, + ) { + let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier); + + assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}"); + assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}"); + } +} \ No newline at end of file From b4124dd1b033407a4defbf1c7173918ddb44a241 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 5 Dec 2024 17:14:51 +0400 Subject: [PATCH 890/970] feat: make RPC cache generic over primitives (#13146) --- Cargo.lock | 1 + crates/optimism/evm/Cargo.toml | 2 + crates/optimism/evm/src/l1.rs | 6 +- crates/optimism/rpc/src/eth/block.rs | 8 +- crates/optimism/rpc/src/eth/call.rs | 9 +- crates/optimism/rpc/src/eth/mod.rs | 50 +++--- crates/optimism/rpc/src/eth/transaction.rs | 12 +- crates/rpc/rpc-builder/src/eth.rs | 4 +- crates/rpc/rpc-builder/src/lib.rs | 7 +- .../rpc-eth-api/src/helpers/transaction.rs | 14 +- crates/rpc/rpc-eth-api/src/node.rs | 7 +- crates/rpc/rpc-eth-types/src/builder/ctx.rs | 28 +-- crates/rpc/rpc-eth-types/src/cache/mod.rs | 163 +++++++++--------- crates/rpc/rpc-eth-types/src/fee_history.rs | 73 ++++---- crates/rpc/rpc-eth-types/src/gas_oracle.rs | 50 +++--- crates/rpc/rpc-eth-types/src/logs_utils.rs | 20 ++- crates/rpc/rpc/src/eth/core.rs | 47 +++-- crates/rpc/rpc/src/eth/filter.rs | 25 ++- crates/rpc/rpc/src/eth/helpers/block.rs | 9 +- crates/rpc/rpc/src/eth/helpers/call.rs | 14 +- crates/rpc/rpc/src/eth/helpers/fees.rs | 10 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 4 +- crates/rpc/rpc/src/eth/helpers/receipt.rs | 3 +- crates/rpc/rpc/src/eth/helpers/signer.rs | 3 + crates/rpc/rpc/src/eth/helpers/spec.rs | 3 +- crates/rpc/rpc/src/eth/helpers/state.rs | 25 +-- crates/rpc/rpc/src/eth/helpers/trace.rs | 6 +- crates/rpc/rpc/src/eth/helpers/transaction.rs | 11 +- 
crates/storage/storage-api/src/block.rs | 3 + crates/storage/storage-api/src/receipts.rs | 6 +- .../storage/storage-api/src/transactions.rs | 5 +- 31 files changed, 354 insertions(+), 274 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df7737b06b8..666293e58e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8382,6 +8382,7 @@ dependencies = [ "reth-optimism-forks", "reth-optimism-primitives", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-revm", "revm", diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index ab22e3e3e81..7afb3b50e67 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -16,6 +16,7 @@ reth-chainspec.workspace = true reth-ethereum-forks.workspace = true reth-evm.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-revm.workspace = true reth-execution-errors.workspace = true reth-execution-types.workspace = true @@ -63,6 +64,7 @@ std = [ "alloy-genesis/std", "alloy-primitives/std", "revm-primitives/std", + "reth-primitives-traits/std", "revm/std", "reth-optimism-primitives/std", "reth-ethereum-forks/std", diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index ef8c3f3b3db..1194dd63c2b 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -8,7 +8,7 @@ use reth_chainspec::ChainSpec; use reth_execution_errors::BlockExecutionError; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardfork; -use reth_primitives::BlockBody; +use reth_primitives_traits::BlockBody; use revm::{ primitives::{Bytecode, HashMap, SpecId}, DatabaseCommit, L1BlockInfo, @@ -32,9 +32,9 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// transaction in the L2 block. /// /// Returns an error if the L1 info transaction is not found, if the block is empty. 
-pub fn extract_l1_info(body: &BlockBody) -> Result { +pub fn extract_l1_info(body: &B) -> Result { let l1_info_tx_data = body - .transactions + .transactions() .first() .ok_or_else(|| OpBlockExecutionError::L1BlockInfoError { message: "could not find l1 block info tx in the L2 block".to_string(), diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 92b4353ec9e..a37a8a15264 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -9,10 +9,10 @@ use reth_primitives::TransactionMeta; use reth_provider::HeaderProvider; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - RpcNodeCore, RpcReceipt, + RpcReceipt, }; -use crate::{OpEthApi, OpEthApiError, OpReceiptBuilder}; +use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, OpReceiptBuilder}; impl EthBlocks for OpEthApi where @@ -20,7 +20,7 @@ where Error = OpEthApiError, NetworkTypes: Network, >, - N: RpcNodeCore + HeaderProvider>, + N: OpNodeCore + HeaderProvider>, { async fn block_receipts( &self, @@ -77,6 +77,6 @@ where impl LoadBlock for OpEthApi where Self: LoadPendingBlock + SpawnBlocking, - N: RpcNodeCore, + N: OpNodeCore, { } diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index c5e96bb87d1..c9d874f7392 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,3 +1,4 @@ +use super::OpNodeCore; use crate::{OpEthApi, OpEthApiError}; use alloy_consensus::Header; use alloy_primitives::{Bytes, TxKind, U256}; @@ -5,7 +6,7 @@ use alloy_rpc_types_eth::transaction::TransactionRequest; use reth_evm::ConfigureEvm; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, - FromEthApiError, IntoEthApiError, RpcNodeCore, + FromEthApiError, IntoEthApiError, }; use reth_rpc_eth_types::{revm_utils::CallFees, RpcInvalidTransactionError}; use 
revm::primitives::{BlockEnv, OptimismFields, TxEnv}; @@ -13,7 +14,7 @@ use revm::primitives::{BlockEnv, OptimismFields, TxEnv}; impl EthCall for OpEthApi where Self: EstimateCall + LoadPendingBlock, - N: RpcNodeCore, + N: OpNodeCore, { } @@ -21,7 +22,7 @@ impl EstimateCall for OpEthApi where Self: Call, Self::Error: From, - N: RpcNodeCore, + N: OpNodeCore, { } @@ -29,7 +30,7 @@ impl Call for OpEthApi where Self: LoadState> + SpawnBlocking, Self::Error: From, - N: RpcNodeCore, + N: OpNodeCore, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 27672804839..0e657bf0440 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -8,6 +8,7 @@ mod call; mod pending_block; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; +use reth_node_api::NodePrimitives; use reth_optimism_primitives::OpPrimitives; use std::{fmt, sync::Arc}; @@ -21,7 +22,8 @@ use reth_network_api::NetworkInfo; use reth_node_builder::EthApiBuilderCtx; use reth_provider::{ BlockNumReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, - EvmEnvProvider, StageCheckpointReader, StateProviderFactory, + EvmEnvProvider, NodePrimitivesProvider, ProviderBlock, ProviderReceipt, StageCheckpointReader, + StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -48,6 +50,10 @@ pub type EthApiNodeBackend = EthApiInner< ::Evm, >; +/// A helper trait with requirements for [`RpcNodeCore`] to be used in [`OpEthApi`]. +pub trait OpNodeCore: RpcNodeCore {} +impl OpNodeCore for T where T: RpcNodeCore {} + /// OP-Reth `Eth` API implementation. /// /// This type provides the functionality for handling `eth_` related requests. 
@@ -59,14 +65,14 @@ pub type EthApiNodeBackend = EthApiInner< /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. #[derive(Clone)] -pub struct OpEthApi { +pub struct OpEthApi { /// Gateway to node's core components. inner: Arc>, } impl OpEthApi where - N: RpcNodeCore< + N: OpNodeCore< Provider: BlockReaderIdExt + ChainSpecProvider + CanonStateSubscriptions @@ -83,7 +89,7 @@ where impl EthApiTypes for OpEthApi where Self: Send + Sync, - N: RpcNodeCore, + N: OpNodeCore, { type Error = OpEthApiError; type NetworkTypes = Optimism; @@ -96,7 +102,7 @@ where impl RpcNodeCore for OpEthApi where - N: RpcNodeCore, + N: OpNodeCore, { type Provider = N::Provider; type Pool = N::Pool; @@ -132,17 +138,17 @@ where impl RpcNodeCoreExt for OpEthApi where - N: RpcNodeCore, + N: OpNodeCore, { #[inline] - fn cache(&self) -> &EthStateCache { + fn cache(&self) -> &EthStateCache, ProviderReceipt> { self.inner.eth_api.cache() } } impl EthApiSpec for OpEthApi where - N: RpcNodeCore< + N: OpNodeCore< Provider: ChainSpecProvider + BlockNumReader + StageCheckpointReader, @@ -163,7 +169,7 @@ where impl SpawnBlocking for OpEthApi where Self: Send + Sync + Clone + 'static, - N: RpcNodeCore, + N: OpNodeCore, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -184,7 +190,7 @@ where impl LoadFee for OpEthApi where Self: LoadBlock, - N: RpcNodeCore< + N: OpNodeCore< Provider: BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider @@ -203,7 +209,7 @@ where } impl LoadState for OpEthApi where - N: RpcNodeCore< + N: OpNodeCore< Provider: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, > @@ -213,7 +219,7 @@ impl LoadState for OpEthApi where impl EthState for OpEthApi where Self: LoadState + SpawnBlocking, - N: RpcNodeCore, + N: OpNodeCore, { #[inline] fn max_proof_window(&self) -> u64 { @@ -224,27 +230,27 @@ where impl EthFees for OpEthApi where Self: LoadFee, - N: 
RpcNodeCore, + N: OpNodeCore, { } impl Trace for OpEthApi where Self: RpcNodeCore + LoadState>, - N: RpcNodeCore, + N: OpNodeCore, { } impl AddDevSigners for OpEthApi where - N: RpcNodeCore, + N: OpNodeCore, { fn with_dev_accounts(&self) { *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) } } -impl fmt::Debug for OpEthApi { +impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() } @@ -252,7 +258,7 @@ impl fmt::Debug for OpEthApi { /// Container type `OpEthApi` #[allow(missing_debug_implementations)] -struct OpEthApiInner { +struct OpEthApiInner { /// Gateway to node's core components. eth_api: EthApiNodeBackend, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP @@ -285,10 +291,12 @@ impl OpEthApiBuilder { /// Builds an instance of [`OpEthApi`] pub fn build(self, ctx: &EthApiBuilderCtx) -> OpEthApi where - N: RpcNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + CanonStateSubscriptions + N: OpNodeCore< + Provider: BlockReaderIdExt< + Block = <::Primitives as NodePrimitives>::Block, + Receipt = <::Primitives as NodePrimitives>::Receipt, + > + ChainSpecProvider + + CanonStateSubscriptions + Clone + 'static, >, diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 3ba5edead55..b5d4ce2bc55 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -10,17 +10,17 @@ use reth_primitives::{RecoveredTx, TransactionSigned}; use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FromEthApiError, FullEthApiTypes, RpcNodeCore, TransactionCompat, + FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, TransactionCompat, }; use reth_rpc_eth_types::utils::recover_raw_transaction; use 
reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -use crate::{OpEthApi, OpEthApiError, SequencerClient}; +use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, SequencerClient}; impl EthTransactions for OpEthApi where Self: LoadTransaction, - N: RpcNodeCore, + N: OpNodeCore, { fn signers(&self) -> &parking_lot::RwLock>> { self.inner.eth_api.signers() @@ -56,15 +56,15 @@ where impl LoadTransaction for OpEthApi where - Self: SpawnBlocking + FullEthApiTypes, - N: RpcNodeCore, + Self: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt, + N: OpNodeCore, Self::Pool: TransactionPool, { } impl OpEthApi where - N: RpcNodeCore, + N: OpNodeCore, { /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 283fba6e957..453efb0ddb4 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -15,11 +15,11 @@ pub type DynEthApiBuilder { +pub struct EthHandlers { /// Main `eth_` request handler pub api: EthApi, /// The async caching layer used by the eth handlers - pub cache: EthStateCache, + pub cache: EthStateCache, /// Polling based filter handler available on all transports pub filter: EthFilter, /// Handler for subscriptions only available for transports that support it (ws, ipc) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 031f960096f..1d8bfb9c297 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -925,7 +925,7 @@ impl RpcModuleConfigBuilder { /// A Helper type the holds instances of the configured modules. #[derive(Debug, Clone)] pub struct RpcRegistryInner< - Provider, + Provider: BlockReader, Pool, Network, Tasks, @@ -1029,6 +1029,7 @@ where impl RpcRegistryInner where + Provider: BlockReader, EthApi: EthApiTypes, { /// Returns a reference to the installed [`EthApi`](reth_rpc::eth::EthApi). 
@@ -1045,7 +1046,7 @@ where /// /// This will spawn exactly one [`EthStateCache`] service if this is the first time the cache is /// requested. - pub const fn eth_cache(&self) -> &EthStateCache { + pub const fn eth_cache(&self) -> &EthStateCache { &self.eth.cache } @@ -1089,7 +1090,7 @@ impl where Network: NetworkInfo + Clone + 'static, EthApi: EthApiTypes, - Provider: ChainSpecProvider, + Provider: BlockReader + ChainSpecProvider, BlockExecutor: BlockExecutorProvider, { /// Instantiates `AdminApi` diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 3b4ecb9de27..f73d761600e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -8,12 +8,13 @@ use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; +use reth_node_api::BlockBody; use reth_primitives::{ transaction::SignedTransactionIntoRecoveredExt, SealedBlockWithSenders, TransactionMeta, TransactionSigned, }; use reth_provider::{ - BlockNumReader, BlockReaderIdExt, ProviderReceipt, ProviderTx, ReceiptProvider, + BlockNumReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider, }; use reth_rpc_eth_types::{ @@ -79,15 +80,17 @@ pub trait EthTransactions: LoadTransaction { /// Get all transactions in the block with the given hash. /// /// Returns `None` if block does not exist. 
+ #[expect(clippy::type_complexity)] fn transactions_by_block( &self, block: B256, - ) -> impl Future>, Self::Error>> + Send { + ) -> impl Future>>, Self::Error>> + Send + { async move { self.cache() .get_sealed_block_with_senders(block) .await - .map(|b| b.map(|b| b.body.transactions.clone())) + .map(|b| b.map(|b| b.body.transactions().to_vec())) .map_err(Self::Error::from_eth_err) } } @@ -568,7 +571,10 @@ pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt { hash: B256, ) -> impl Future< Output = Result< - Option<(TransactionSource>, Arc)>, + Option<( + TransactionSource>, + Arc>>, + )>, Self::Error, >, > + Send { diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs index 12dbe8f6664..538cb2ead8a 100644 --- a/crates/rpc/rpc-eth-api/src/node.rs +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -1,6 +1,7 @@ //! Helper trait for interfacing with [`FullNodeComponents`]. use reth_node_api::FullNodeComponents; +use reth_provider::{BlockReader, ProviderBlock, ProviderReceipt}; use reth_rpc_eth_types::EthStateCache; /// Helper trait to relax trait bounds on [`FullNodeComponents`]. @@ -76,7 +77,9 @@ where /// Additional components, asides the core node components, needed to run `eth_` namespace API /// server. -pub trait RpcNodeCoreExt: RpcNodeCore { +pub trait RpcNodeCoreExt: RpcNodeCore { /// Returns handle to RPC cache service. 
- fn cache(&self) -> &EthStateCache; + fn cache( + &self, + ) -> &EthStateCache, ProviderReceipt>; } diff --git a/crates/rpc/rpc-eth-types/src/builder/ctx.rs b/crates/rpc/rpc-eth-types/src/builder/ctx.rs index db2beb4a454..f9710882f2b 100644 --- a/crates/rpc/rpc-eth-types/src/builder/ctx.rs +++ b/crates/rpc/rpc-eth-types/src/builder/ctx.rs @@ -3,7 +3,7 @@ use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; use reth_primitives::NodePrimitives; -use reth_storage_api::BlockReaderIdExt; +use reth_storage_api::{BlockReader, BlockReaderIdExt}; use reth_tasks::TaskSpawner; use crate::{ @@ -13,7 +13,10 @@ use crate::{ /// Context for building the `eth` namespace API. #[derive(Debug, Clone)] -pub struct EthApiBuilderCtx { +pub struct EthApiBuilderCtx +where + Provider: BlockReader, +{ /// Database handle. pub provider: Provider, /// Mempool handle. @@ -29,7 +32,7 @@ pub struct EthApiBuilderCtx { /// Events handle. pub events: Events, /// RPC cache handle. - pub cache: EthStateCache, + pub cache: EthStateCache, } impl @@ -38,27 +41,24 @@ where Provider: BlockReaderIdExt + Clone, { /// Returns a new [`FeeHistoryCache`] for the context. 
- pub fn new_fee_history_cache(&self) -> FeeHistoryCache + pub fn new_fee_history_cache(&self) -> FeeHistoryCache where - Provider: ChainSpecProvider + 'static, + N: NodePrimitives, Tasks: TaskSpawner, - Events: CanonStateSubscriptions< - Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - >, - >, + Events: CanonStateSubscriptions, + Provider: + BlockReaderIdExt + ChainSpecProvider + 'static, { - let fee_history_cache = - FeeHistoryCache::new(self.cache.clone(), self.config.fee_history_cache); + let fee_history_cache = FeeHistoryCache::new(self.config.fee_history_cache); let new_canonical_blocks = self.events.canonical_state_stream(); let fhc = fee_history_cache.clone(); let provider = self.provider.clone(); + let cache = self.cache.clone(); self.executor.spawn_critical( "cache canonical blocks for fee history task", Box::pin(async move { - fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider).await; + fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider, cache).await; }), ); diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 7a0d9dfa0f0..16863887240 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,14 +1,14 @@ //! 
Async caching support for eth RPC use super::{EthStateCacheConfig, MultiConsumerLruCache}; -use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::{future::Either, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::Chain; -use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSigned}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; +use reth_primitives_traits::{Block, BlockBody}; use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use schnellru::{ByLength, Limiter}; @@ -30,41 +30,49 @@ pub mod metrics; pub mod multi_consumer; /// The type that can send the response to a requested [`SealedBlockWithSenders`] -type BlockTransactionsResponseSender = - oneshot::Sender>>>; +type BlockTransactionsResponseSender = oneshot::Sender>>>; /// The type that can send the response to a requested [`SealedBlockWithSenders`] -type BlockWithSendersResponseSender = - oneshot::Sender>>>; +type BlockWithSendersResponseSender = + oneshot::Sender>>>>; /// The type that can send the response to the requested receipts of a block. 
-type ReceiptsResponseSender = oneshot::Sender>>>>; +type ReceiptsResponseSender = oneshot::Sender>>>>; /// The type that can send the response to a requested header -type HeaderResponseSender = oneshot::Sender>; +type HeaderResponseSender = oneshot::Sender>; -type BlockLruCache = MultiConsumerLruCache< +type BlockLruCache = MultiConsumerLruCache< B256, - Arc, + Arc>, L, - Either, + Either< + BlockWithSendersResponseSender, + BlockTransactionsResponseSender<<::Body as BlockBody>::Transaction>, + >, >; -type ReceiptsLruCache = - MultiConsumerLruCache>, L, ReceiptsResponseSender>; +type ReceiptsLruCache = + MultiConsumerLruCache>, L, ReceiptsResponseSender>; -type HeaderLruCache = MultiConsumerLruCache; +type HeaderLruCache = MultiConsumerLruCache>; /// Provides async access to cached eth data /// /// This is the frontend for the async caching service which manages cached data on a different /// task. -#[derive(Debug, Clone)] -pub struct EthStateCache { - to_service: UnboundedSender, +#[derive(Debug)] +pub struct EthStateCache { + to_service: UnboundedSender>, } -impl EthStateCache { +impl Clone for EthStateCache { + fn clone(&self) -> Self { + Self { to_service: self.to_service.clone() } + } +} + +impl EthStateCache { /// Creates and returns both [`EthStateCache`] frontend and the memory bound service. 
fn create( provider: Provider, @@ -73,7 +81,10 @@ impl EthStateCache { max_receipts: u32, max_headers: u32, max_concurrent_db_operations: usize, - ) -> (Self, EthStateCacheService) { + ) -> (Self, EthStateCacheService) + where + Provider: BlockReader, + { let (to_service, rx) = unbounded_channel(); let service = EthStateCacheService { provider, @@ -95,14 +106,8 @@ impl EthStateCache { /// See also [`Self::spawn_with`] pub fn spawn(provider: Provider, config: EthStateCacheConfig) -> Self where - Provider: StateProviderFactory - + BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, - > + Clone - + Unpin - + 'static, + Provider: + StateProviderFactory + BlockReader + Clone + Unpin + 'static, { Self::spawn_with(provider, config, TokioTaskExecutor::default()) } @@ -117,14 +122,8 @@ impl EthStateCache { executor: Tasks, ) -> Self where - Provider: StateProviderFactory - + BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, - > + Clone - + Unpin - + 'static, + Provider: + StateProviderFactory + BlockReader + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, { let EthStateCacheConfig { @@ -151,19 +150,16 @@ impl EthStateCache { pub async fn get_sealed_block_with_senders( &self, block_hash: B256, - ) -> ProviderResult>> { + ) -> ProviderResult>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? } - /// Requests the [Receipt] for the block hash + /// Requests the receipts for the block hash /// /// Returns `None` if the block was not found. 
- pub async fn get_receipts( - &self, - block_hash: B256, - ) -> ProviderResult>>> { + pub async fn get_receipts(&self, block_hash: B256) -> ProviderResult>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetReceipts { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? @@ -173,7 +169,7 @@ impl EthStateCache { pub async fn get_block_and_receipts( &self, block_hash: B256, - ) -> ProviderResult, Arc>)>> { + ) -> ProviderResult>, Arc>)>> { let block = self.get_sealed_block_with_senders(block_hash); let receipts = self.get_receipts(block_hash); @@ -185,7 +181,7 @@ impl EthStateCache { /// Requests the header for the given hash. /// /// Returns an error if the header is not found. - pub async fn get_header(&self, block_hash: B256) -> ProviderResult
{ + pub async fn get_header(&self, block_hash: B256) -> ProviderResult { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetHeader { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? @@ -216,25 +212,26 @@ pub(crate) struct EthStateCacheService< LimitReceipts = ByLength, LimitHeaders = ByLength, > where - LimitBlocks: Limiter>, - LimitReceipts: Limiter>>, - LimitHeaders: Limiter, + Provider: BlockReader, + LimitBlocks: Limiter>>, + LimitReceipts: Limiter>>, + LimitHeaders: Limiter, { /// The type used to lookup data from disk provider: Provider, /// The LRU cache for full blocks grouped by their hash. - full_block_cache: BlockLruCache, + full_block_cache: BlockLruCache, /// The LRU cache for full blocks grouped by their hash. - receipts_cache: ReceiptsLruCache, + receipts_cache: ReceiptsLruCache, /// The LRU cache for headers. /// /// Headers are cached because they are required to populate the environment for execution /// (evm). - headers_cache: HeaderLruCache, + headers_cache: HeaderLruCache, /// Sender half of the action channel. - action_tx: UnboundedSender, + action_tx: UnboundedSender>, /// Receiver half of the action channel. 
- action_rx: UnboundedReceiverStream, + action_rx: UnboundedReceiverStream>, /// The type that's used to spawn tasks that do the actual work action_task_spawner: Tasks, /// Rate limiter @@ -249,7 +246,7 @@ where fn on_new_block( &mut self, block_hash: B256, - res: ProviderResult>>, + res: ProviderResult>>>, ) { if let Some(queued) = self.full_block_cache.remove(&block_hash) { // send the response to queued senders @@ -260,7 +257,7 @@ where } Either::Right(transaction_tx) => { let _ = transaction_tx.send(res.clone().map(|maybe_block| { - maybe_block.map(|block| block.block.body.transactions.clone()) + maybe_block.map(|block| block.block.body.transactions().to_vec()) })); } } @@ -276,7 +273,7 @@ where fn on_new_receipts( &mut self, block_hash: B256, - res: ProviderResult>>>, + res: ProviderResult>>>, ) { if let Some(queued) = self.receipts_cache.remove(&block_hash) { // send the response to queued senders @@ -294,7 +291,7 @@ where fn on_reorg_block( &mut self, block_hash: B256, - res: ProviderResult>, + res: ProviderResult>>, ) { let res = res.map(|b| b.map(Arc::new)); if let Some(queued) = self.full_block_cache.remove(&block_hash) { @@ -306,7 +303,7 @@ where } Either::Right(transaction_tx) => { let _ = transaction_tx.send(res.clone().map(|maybe_block| { - maybe_block.map(|block| block.block.body.transactions.clone()) + maybe_block.map(|block| block.block.body.transactions().to_vec()) })); } } @@ -317,7 +314,7 @@ where fn on_reorg_receipts( &mut self, block_hash: B256, - res: ProviderResult>>>, + res: ProviderResult>>>, ) { if let Some(queued) = self.receipts_cache.remove(&block_hash) { // send the response to queued senders @@ -336,14 +333,7 @@ where impl Future for EthStateCacheService where - Provider: StateProviderFactory - + BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, - > + Clone - + Unpin - + 'static, + Provider: StateProviderFactory + BlockReader + Clone + Unpin + 'static, Tasks: 
TaskSpawner + Clone + 'static, { type Output = (); @@ -504,52 +494,55 @@ where } /// All message variants sent through the channel -enum CacheAction { +enum CacheAction { GetBlockWithSenders { block_hash: B256, - response_tx: BlockWithSendersResponseSender, + response_tx: BlockWithSendersResponseSender, }, GetHeader { block_hash: B256, - response_tx: HeaderResponseSender, + response_tx: HeaderResponseSender, }, GetReceipts { block_hash: B256, - response_tx: ReceiptsResponseSender, + response_tx: ReceiptsResponseSender, }, BlockWithSendersResult { block_hash: B256, - res: ProviderResult>>, + res: ProviderResult>>>, }, ReceiptsResult { block_hash: B256, - res: ProviderResult>>>, + res: ProviderResult>>>, }, HeaderResult { block_hash: B256, - res: Box>, + res: Box>, }, CacheNewCanonicalChain { - chain_change: ChainChange, + chain_change: ChainChange, }, RemoveReorgedChain { - chain_change: ChainChange, + chain_change: ChainChange, }, } -struct BlockReceipts { +struct BlockReceipts { block_hash: B256, - receipts: Vec>, + receipts: Vec>, } /// A change of the canonical chain -struct ChainChange { - blocks: Vec, - receipts: Vec, +struct ChainChange { + blocks: Vec>, + receipts: Vec>, } -impl ChainChange { - fn new(chain: Arc) -> Self { +impl ChainChange { + fn new(chain: Arc>) -> Self + where + N: NodePrimitives, + { let (blocks, receipts): (Vec<_>, Vec<_>) = chain .blocks_and_receipts() .map(|(block, receipts)| { @@ -566,9 +559,11 @@ impl ChainChange { /// immediately before they need to be fetched from disk. /// /// Reorged blocks are removed from the cache. 
-pub async fn cache_new_blocks_task(eth_state_cache: EthStateCache, mut events: St) -where - St: Stream + Unpin + 'static, +pub async fn cache_new_blocks_task( + eth_state_cache: EthStateCache, + mut events: St, +) where + St: Stream> + Unpin + 'static, { while let Some(event) = events.next().await { if let Some(reverted) = event.reverted() { diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 35233e6c219..2c365ae90bf 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -6,6 +6,7 @@ use std::{ sync::{atomic::Ordering::SeqCst, Arc}, }; +use alloy_consensus::{BlockHeader, Transaction, TxReceipt}; use alloy_eips::eip1559::calc_next_block_base_fee; use alloy_primitives::B256; use alloy_rpc_types_eth::TxGasAndReward; @@ -16,8 +17,8 @@ use futures::{ use metrics::atomics::AtomicU64; use reth_chain_state::CanonStateNotification; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_primitives::{NodePrimitives, Receipt, SealedBlock, TransactionSigned}; -use reth_primitives_traits::{Block, BlockBody}; +use reth_primitives::{NodePrimitives, SealedBlock}; +use reth_primitives_traits::BlockBody; use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; use reth_storage_api::BlockReaderIdExt; use revm_primitives::{calc_blob_gasprice, calc_excess_blob_gas}; @@ -36,13 +37,12 @@ pub struct FeeHistoryCache { impl FeeHistoryCache { /// Creates new `FeeHistoryCache` instance, initialize it with the more recent data, set bounds - pub fn new(eth_cache: EthStateCache, config: FeeHistoryCacheConfig) -> Self { + pub fn new(config: FeeHistoryCacheConfig) -> Self { let inner = FeeHistoryCacheInner { lower_bound: Default::default(), upper_bound: Default::default(), config, entries: Default::default(), - eth_cache, }; Self { inner: Arc::new(inner) } } @@ -73,9 +73,12 @@ impl FeeHistoryCache { } /// Insert block data into the cache. 
- async fn insert_blocks<'a, I>(&self, blocks: I) + async fn insert_blocks<'a, I, H, B, R>(&self, blocks: I) where - I: IntoIterator>)>, + H: BlockHeader + 'a, + B: BlockBody, + R: TxReceipt, + I: IntoIterator, Arc>)>, { let mut entries = self.inner.entries.write().await; @@ -87,11 +90,11 @@ impl FeeHistoryCache { &percentiles, fee_history_entry.gas_used, fee_history_entry.base_fee_per_gas, - &block.body.transactions, + block.body.transactions(), &receipts, ) .unwrap_or_default(); - entries.insert(block.number, fee_history_entry); + entries.insert(block.number(), fee_history_entry); } // enforce bounds by popping the oldest entries @@ -200,7 +203,6 @@ struct FeeHistoryCacheInner { config: FeeHistoryCacheConfig, /// Stores the entries of the cache entries: tokio::sync::RwLock>, - eth_cache: EthStateCache, } /// Awaits for new chain events and directly inserts them into the cache so they're available @@ -209,10 +211,12 @@ pub async fn fee_history_cache_new_blocks_task( fee_history_cache: FeeHistoryCache, mut events: St, provider: Provider, + cache: EthStateCache, ) where St: Stream> + Unpin + 'static, - Provider: BlockReaderIdExt + ChainSpecProvider + 'static, - N: NodePrimitives, + Provider: + BlockReaderIdExt + ChainSpecProvider + 'static, + N: NodePrimitives, { // We're listening for new blocks emitted when the node is in live sync. 
// If the node transitions to stage sync, we need to fetch the missing blocks @@ -225,12 +229,7 @@ pub async fn fee_history_cache_new_blocks_task( trace!(target: "rpc::fee", ?block_number, "Fetching missing block for fee history cache"); if let Ok(Some(hash)) = provider.block_hash(block_number) { // fetch missing block - fetch_missing_block = fee_history_cache - .inner - .eth_cache - .get_block_and_receipts(hash) - .boxed() - .fuse(); + fetch_missing_block = cache.get_block_and_receipts(hash).boxed().fuse(); } } } @@ -270,13 +269,17 @@ pub async fn fee_history_cache_new_blocks_task( /// the corresponding rewards for the transactions at each percentile. /// /// The results are returned as a vector of U256 values. -pub fn calculate_reward_percentiles_for_block( +pub fn calculate_reward_percentiles_for_block( percentiles: &[f64], gas_used: u64, base_fee_per_gas: u64, - transactions: &[TransactionSigned], - receipts: &[Receipt], -) -> Result, EthApiError> { + transactions: &[T], + receipts: &[R], +) -> Result, EthApiError> +where + T: Transaction, + R: TxReceipt, +{ let mut transactions = transactions .iter() .zip(receipts) @@ -287,12 +290,12 @@ pub fn calculate_reward_percentiles_for_block( // While we will sum up the gas again later, it is worth // noting that the order of the transactions will be different, // so the sum will also be different for each receipt. - let gas_used = receipt.cumulative_gas_used - *previous_gas; - *previous_gas = receipt.cumulative_gas_used; + let gas_used = receipt.cumulative_gas_used() - *previous_gas; + *previous_gas = receipt.cumulative_gas_used(); Some(TxGasAndReward { - gas_used, - reward: tx.effective_tip_per_gas(Some(base_fee_per_gas)).unwrap_or_default(), + gas_used: gas_used as u64, + reward: tx.effective_tip_per_gas(base_fee_per_gas).unwrap_or_default(), }) }) .collect::>(); @@ -361,20 +364,20 @@ impl FeeHistoryEntry { /// Creates a new entry from a sealed block. /// /// Note: This does not calculate the rewards for the block. 
- pub fn new(block: &SealedBlock) -> Self { + pub fn new(block: &SealedBlock) -> Self { Self { - base_fee_per_gas: block.base_fee_per_gas.unwrap_or_default(), - gas_used_ratio: block.gas_used as f64 / block.gas_limit as f64, - base_fee_per_blob_gas: block.blob_fee(), - blob_gas_used_ratio: block.body().blob_gas_used() as f64 / + base_fee_per_gas: block.base_fee_per_gas().unwrap_or_default(), + gas_used_ratio: block.gas_used() as f64 / block.gas_limit() as f64, + base_fee_per_blob_gas: block.excess_blob_gas().map(calc_blob_gasprice), + blob_gas_used_ratio: block.body.blob_gas_used() as f64 / alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, - excess_blob_gas: block.excess_blob_gas, - blob_gas_used: block.blob_gas_used, - gas_used: block.gas_used, + excess_blob_gas: block.excess_blob_gas(), + blob_gas_used: block.blob_gas_used(), + gas_used: block.gas_used(), header_hash: block.hash(), - gas_limit: block.gas_limit, + gas_limit: block.gas_limit(), rewards: Vec::new(), - timestamp: block.timestamp, + timestamp: block.timestamp(), } } diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 73cab209fd5..ed49d7c6701 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -1,13 +1,13 @@ //! An implementation of the eth gas price oracle, used for providing gas price estimates based on //! previous blocks. 
-use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader}; +use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader, Transaction}; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockId; use derive_more::{Deref, DerefMut, From, Into}; use itertools::Itertools; -use reth_primitives_traits::SignedTransaction; +use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_rpc_server_types::{ constants, constants::gas_oracle::{ @@ -15,7 +15,7 @@ use reth_rpc_server_types::{ DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, }, }; -use reth_storage_api::BlockReaderIdExt; +use reth_storage_api::{BlockReader, BlockReaderIdExt}; use schnellru::{ByLength, LruMap}; use serde::{Deserialize, Serialize}; use std::fmt::{self, Debug, Formatter}; @@ -70,11 +70,14 @@ impl Default for GasPriceOracleConfig { /// Calculates a gas price depending on recent blocks. #[derive(Debug)] -pub struct GasPriceOracle { +pub struct GasPriceOracle +where + Provider: BlockReader, +{ /// The type used to subscribe to block events and get block info provider: Provider, /// The cache for blocks - cache: EthStateCache, + cache: EthStateCache, /// The config for the oracle oracle_config: GasPriceOracleConfig, /// The price under which the sample will be ignored. 
@@ -92,7 +95,7 @@ where pub fn new( provider: Provider, mut oracle_config: GasPriceOracleConfig, - cache: EthStateCache, + cache: EthStateCache, ) -> Self { // sanitize the percentile to be less than 100 if oracle_config.percentile > 100 { @@ -220,43 +223,44 @@ where None => return Ok(None), }; - let base_fee_per_gas = block.base_fee_per_gas; - let parent_hash = block.parent_hash; + let base_fee_per_gas = block.base_fee_per_gas(); + let parent_hash = block.parent_hash(); // sort the functions by ascending effective tip first - let sorted_transactions = block - .body - .transactions - .iter() - .sorted_by_cached_key(|tx| tx.effective_tip_per_gas(base_fee_per_gas)); + let sorted_transactions = block.body.transactions().iter().sorted_by_cached_key(|tx| { + if let Some(base_fee) = base_fee_per_gas { + (*tx).effective_tip_per_gas(base_fee) + } else { + Some((*tx).priority_fee_or_price()) + } + }); let mut prices = Vec::with_capacity(limit); for tx in sorted_transactions { - let mut effective_gas_tip = None; + let effective_tip = if let Some(base_fee) = base_fee_per_gas { + tx.effective_tip_per_gas(base_fee) + } else { + Some(tx.priority_fee_or_price()) + }; + // ignore transactions with a tip under the configured threshold if let Some(ignore_under) = self.ignore_price { - let tip = tx.effective_tip_per_gas(base_fee_per_gas); - effective_gas_tip = Some(tip); - if tip < Some(ignore_under) { + if effective_tip < Some(ignore_under) { continue } } // check if the sender was the coinbase, if so, ignore if let Some(sender) = tx.recover_signer() { - if sender == block.beneficiary { + if sender == block.beneficiary() { continue } } // a `None` effective_gas_tip represents a transaction where the max_fee_per_gas is // less than the base fee which would be invalid - let effective_gas_tip = effective_gas_tip - .unwrap_or_else(|| tx.effective_tip_per_gas(base_fee_per_gas)) - .ok_or(RpcInvalidTransactionError::FeeCapTooLow)?; - - prices.push(U256::from(effective_gas_tip)); + 
prices.push(U256::from(effective_tip.ok_or(RpcInvalidTransactionError::FeeCapTooLow)?)); // we have enough entries if prices.len() >= limit { diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 2e41c7a1183..6078d32e894 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -2,14 +2,15 @@ //! //! Log parsing for building filter. +use alloy_consensus::TxReceipt; use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::TxHash; use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; use reth_primitives::{Receipt, SealedBlockWithSenders}; -use reth_primitives_traits::SignedTransaction; -use reth_storage_api::BlockReader; +use reth_primitives_traits::{BlockBody, SignedTransaction}; +use reth_storage_api::{BlockReader, ProviderBlock}; use std::sync::Arc; /// Returns all matching of a block's receipts when the transaction hashes are known. @@ -54,20 +55,23 @@ pub enum ProviderOrBlock<'a, P: BlockReader> { /// Provider Provider(&'a P), /// [`SealedBlockWithSenders`] - Block(Arc), + Block(Arc>>), } /// Appends all matching logs of a block's receipts. /// If the log matches, look up the corresponding transaction hash. -pub fn append_matching_block_logs>( +pub fn append_matching_block_logs

( all_logs: &mut Vec, provider_or_block: ProviderOrBlock<'_, P>, filter: &FilteredParams, block_num_hash: BlockNumHash, - receipts: &[Receipt], + receipts: &[P::Receipt], removed: bool, block_timestamp: u64, -) -> Result<(), ProviderError> { +) -> Result<(), ProviderError> +where + P: BlockReader, +{ // Tracks the index of a log in the entire block. let mut log_index: u64 = 0; @@ -81,13 +85,13 @@ pub fn append_matching_block_logs // The transaction hash of the current receipt. let mut transaction_hash = None; - for log in &receipt.logs { + for log in receipt.logs() { if log_matches_filter(block_num_hash, log, filter) { // if this is the first match in the receipt's logs, look up the transaction hash if transaction_hash.is_none() { transaction_hash = match &provider_or_block { ProviderOrBlock::Block(block) => { - block.body.transactions.get(receipt_idx).map(|t| t.hash()) + block.body.transactions().get(receipt_idx).map(|t| t.trie_hash()) } ProviderOrBlock::Provider(provider) => { let first_tx_num = match loaded_first_tx_num { diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 092d9485162..0a17e5e5f2b 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -9,7 +9,10 @@ use alloy_network::Ethereum; use alloy_primitives::U256; use derive_more::Deref; use reth_primitives::NodePrimitives; -use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; +use reth_provider::{ + BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ProviderBlock, + ProviderReceipt, +}; use reth_rpc_eth_api::{ helpers::{EthSigner, SpawnBlocking}, node::RpcNodeCoreExt, @@ -37,7 +40,7 @@ use crate::eth::EthTxBuilder; /// This way [`EthApi`] is not limited to [`jsonrpsee`] and can be used standalone or in other /// network handlers (for example ipc). #[derive(Deref)] -pub struct EthApi { +pub struct EthApi { /// All nested fields bundled together. 
#[deref] pub(super) inner: Arc>, @@ -45,7 +48,10 @@ pub struct EthApi { pub tx_resp_builder: EthTxBuilder, } -impl Clone for EthApi { +impl Clone for EthApi +where + Provider: BlockReader, +{ fn clone(&self) -> Self { Self { inner: self.inner.clone(), tx_resp_builder: EthTxBuilder } } @@ -61,7 +67,7 @@ where provider: Provider, pool: Pool, network: Network, - eth_cache: EthStateCache, + eth_cache: EthStateCache, gas_oracle: GasPriceOracle, gas_cap: impl Into, max_simulate_blocks: u64, @@ -106,8 +112,8 @@ where Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions< Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, + Block = ProviderBlock, + Receipt = ProviderReceipt, >, >, { @@ -137,6 +143,7 @@ where impl EthApiTypes for EthApi where Self: Send + Sync, + Provider: BlockReader, { type Error = EthApiError; type NetworkTypes = Ethereum; @@ -149,7 +156,7 @@ where impl RpcNodeCore for EthApi where - Provider: Send + Sync + Clone + Unpin, + Provider: BlockReader + Send + Sync + Clone + Unpin, Pool: Send + Sync + Clone + Unpin, Network: Send + Sync + Clone, EvmConfig: Send + Sync + Clone + Unpin, @@ -184,16 +191,21 @@ where impl RpcNodeCoreExt for EthApi where - Self: RpcNodeCore, + Provider: BlockReader + Send + Sync + Clone + Unpin, + Pool: Send + Sync + Clone + Unpin, + Network: Send + Sync + Clone, + EvmConfig: Send + Sync + Clone + Unpin, { #[inline] - fn cache(&self) -> &EthStateCache { + fn cache(&self) -> &EthStateCache, ProviderReceipt> { self.inner.cache() } } impl std::fmt::Debug for EthApi +where + Provider: BlockReader, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthApi").finish_non_exhaustive() @@ -204,6 +216,7 @@ impl SpawnBlocking for EthApi where Self: Clone + Send + Sync + 'static, + Provider: BlockReader, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -223,7 +236,7 @@ where /// Container type `EthApi` 
#[allow(missing_debug_implementations)] -pub struct EthApiInner { +pub struct EthApiInner { /// The transaction pool. pool: Pool, /// The provider that can interact with the chain. @@ -233,7 +246,7 @@ pub struct EthApiInner { /// All configured Signers signers: parking_lot::RwLock>>, /// The async cache frontend for eth related data - eth_cache: EthStateCache, + eth_cache: EthStateCache, /// The async gas oracle frontend for gas price suggestions gas_oracle: GasPriceOracle, /// Maximum gas limit for `eth_call` and call tracing RPC methods. @@ -269,7 +282,7 @@ where provider: Provider, pool: Pool, network: Network, - eth_cache: EthStateCache, + eth_cache: EthStateCache, gas_oracle: GasPriceOracle, gas_cap: impl Into, max_simulate_blocks: u64, @@ -312,7 +325,10 @@ where } } -impl EthApiInner { +impl EthApiInner +where + Provider: BlockReader, +{ /// Returns a handle to data on disk. #[inline] pub const fn provider(&self) -> &Provider { @@ -321,7 +337,7 @@ impl EthApiInner &EthStateCache { + pub const fn cache(&self) -> &EthStateCache { &self.eth_cache } @@ -455,8 +471,7 @@ mod tests { ) -> EthApi { let evm_config = EthEvmConfig::new(provider.chain_spec()); let cache = EthStateCache::spawn(provider.clone(), Default::default()); - let fee_history_cache = - FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); + let fee_history_cache = FeeHistoryCache::new(FeeHistoryCacheConfig::default()); let gas_cap = provider.chain_spec().max_gas_limit(); EthApi::new( diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 6ed72b6ca74..8f50fefcb61 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -9,8 +9,8 @@ use alloy_rpc_types_eth::{ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; -use reth_primitives::{Receipt, SealedBlockWithSenders}; -use reth_provider::{BlockIdReader, BlockReader, ProviderError}; +use 
reth_primitives::SealedBlockWithSenders; +use reth_provider::{BlockIdReader, BlockReader, ProviderBlock, ProviderError, ProviderReceipt}; use reth_rpc_eth_api::{ EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat, }; @@ -40,7 +40,7 @@ use tracing::{error, trace}; const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb /// `Eth` filter RPC implementation. -pub struct EthFilter { +pub struct EthFilter { /// All nested fields bundled together inner: Arc>>, /// Assembles response data w.r.t. network. @@ -50,6 +50,7 @@ pub struct EthFilter { impl Clone for EthFilter where Eth: EthApiTypes, + Provider: BlockReader, { fn clone(&self) -> Self { Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } @@ -58,7 +59,7 @@ where impl EthFilter where - Provider: Send + Sync + 'static, + Provider: BlockReader + Send + Sync + 'static, Pool: Send + Sync + 'static, Eth: EthApiTypes + 'static, { @@ -73,7 +74,7 @@ where pub fn new( provider: Provider, pool: Pool, - eth_cache: EthStateCache, + eth_cache: EthStateCache, config: EthFilterConfig, task_spawner: Box, tx_resp_builder: Eth::TransactionCompat, @@ -334,6 +335,7 @@ where impl std::fmt::Debug for EthFilter where Eth: EthApiTypes, + Provider: BlockReader, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthFilter").finish_non_exhaustive() @@ -342,7 +344,7 @@ where /// Container type `EthFilter` #[derive(Debug)] -struct EthFilterInner { +struct EthFilterInner { /// The transaction pool. pool: Pool, /// The provider that can interact with the chain. @@ -356,7 +358,7 @@ struct EthFilterInner { /// Maximum number of logs that can be returned in a response max_logs_per_response: usize, /// The async cache frontend for eth related data - eth_cache: EthStateCache, + eth_cache: EthStateCache, /// maximum number of headers to read at once for range filter max_headers_range: u64, /// The type that can spawn tasks. 
@@ -536,8 +538,13 @@ where &self, block_num_hash: &BlockNumHash, best_number: u64, - ) -> Result>, Option>)>, EthFilterError> - { + ) -> Result< + Option<( + Arc>>, + Option>>>, + )>, + EthFilterError, + > { // The last 4 blocks are most likely cached, so we can just fetch them let cached_range = best_number.saturating_sub(4)..=best_number; let receipts_block = if cached_range.contains(&block_num_hash.number) { diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index f6aae34b961..409a3095abd 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -2,10 +2,10 @@ use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; use reth_primitives::TransactionMeta; -use reth_provider::{BlockReaderIdExt, HeaderProvider}; +use reth_provider::{BlockReader, HeaderProvider}; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - RpcReceipt, + RpcNodeCoreExt, RpcReceipt, }; use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; @@ -18,6 +18,7 @@ where NetworkTypes: alloy_network::Network, Provider: HeaderProvider, >, + Provider: BlockReader, { async fn block_receipts( &self, @@ -62,7 +63,7 @@ where impl LoadBlock for EthApi where - Self: LoadPendingBlock + SpawnBlocking, - Provider: BlockReaderIdExt, + Self: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt, + Provider: BlockReader, { } diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index c0594c023fa..3835503a4c8 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -3,12 +3,15 @@ use crate::EthApi; use alloy_consensus::Header; use reth_evm::ConfigureEvm; +use reth_provider::BlockReader; use reth_rpc_eth_api::helpers::{ estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking, }; -impl EthCall for EthApi where - Self: EstimateCall + LoadPendingBlock +impl EthCall for EthApi 
+where + Self: EstimateCall + LoadPendingBlock, + Provider: BlockReader, { } @@ -16,6 +19,7 @@ impl Call for EthApi> + SpawnBlocking, EvmConfig: ConfigureEvm

, + Provider: BlockReader, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -28,7 +32,9 @@ where } } -impl EstimateCall for EthApi where - Self: Call +impl EstimateCall for EthApi +where + Self: Call, + Provider: BlockReader, { } diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index e1a17ef647c..045d6dcb545 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -1,14 +1,18 @@ //! Contains RPC handler implementations for fee history. use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_provider::{ + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory, +}; use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; use crate::EthApi; -impl EthFees for EthApi where - Self: LoadFee +impl EthFees for EthApi +where + Self: LoadFee, + Provider: BlockReader, { } diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index afd69a2f404..8d8d15d2e46 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -4,7 +4,8 @@ use alloy_consensus::Header; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderTx, StateProviderFactory, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderTx, + StateProviderFactory, }; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, @@ -33,6 +34,7 @@ where >, Evm: ConfigureEvm
>, >, + Provider: BlockReader, { #[inline] fn pending_block(&self) -> &tokio::sync::Mutex> { diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index ae723fc5314..12fbf095734 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,7 +1,7 @@ //! Builds an RPC receipt response w.r.t. data layout of network. use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_provider::{ReceiptProvider, TransactionsProvider}; +use reth_provider::{BlockReader, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; @@ -13,6 +13,7 @@ where Provider: TransactionsProvider + ReceiptProvider, >, + Provider: BlockReader, { async fn build_transaction_receipt( &self, diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index 32645ba08d6..022c3153b01 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -11,11 +11,14 @@ use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use reth_primitives::TransactionSigned; +use reth_provider::BlockReader; use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; use reth_rpc_eth_types::SignError; impl AddDevSigners for EthApi +where + Provider: BlockReader, { fn with_dev_accounts(&self) { *self.inner.signers().write() = DevSigner::random_signers(20) diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index a44692e18a3..f7bc89ae2b1 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -1,7 +1,7 @@ use alloy_primitives::U256; use reth_chainspec::EthereumHardforks; use reth_network_api::NetworkInfo; -use reth_provider::{BlockNumReader, 
ChainSpecProvider, StageCheckpointReader}; +use reth_provider::{BlockNumReader, BlockReader, ChainSpecProvider, StageCheckpointReader}; use reth_rpc_eth_api::{helpers::EthApiSpec, RpcNodeCore}; use crate::EthApi; @@ -14,6 +14,7 @@ where + StageCheckpointReader, Network: NetworkInfo, >, + Provider: BlockReader, { fn starting_block(&self) -> U256 { self.inner.starting_block() diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 1b9c5bffd21..99d2856ad83 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -1,12 +1,12 @@ //! Contains RPC handler implementations specific to state. use reth_chainspec::EthereumHardforks; -use reth_provider::{ChainSpecProvider, StateProviderFactory}; +use reth_provider::{BlockReader, ChainSpecProvider, StateProviderFactory}; use reth_transaction_pool::TransactionPool; use reth_rpc_eth_api::{ helpers::{EthState, LoadState, SpawnBlocking}, - RpcNodeCore, + RpcNodeCoreExt, }; use crate::EthApi; @@ -14,17 +14,22 @@ use crate::EthApi; impl EthState for EthApi where Self: LoadState + SpawnBlocking, + Provider: BlockReader, { fn max_proof_window(&self) -> u64 { self.inner.eth_proof_window() } } -impl LoadState for EthApi where - Self: RpcNodeCore< - Provider: StateProviderFactory + ChainSpecProvider, +impl LoadState for EthApi +where + Self: RpcNodeCoreExt< + Provider: BlockReader + + StateProviderFactory + + ChainSpecProvider, Pool: TransactionPool, - > + >, + Provider: BlockReader, { } @@ -58,12 +63,12 @@ mod tests { pool, NoopNetwork::default(), cache.clone(), - GasPriceOracle::new(NoopProvider::default(), Default::default(), cache.clone()), + GasPriceOracle::new(NoopProvider::default(), Default::default(), cache), ETHEREUM_BLOCK_GAS_LIMIT, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_ETH_PROOF_WINDOW, BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), + 
FeeHistoryCache::new(FeeHistoryCacheConfig::default()), evm_config, DEFAULT_PROOF_PERMITS, ) @@ -84,12 +89,12 @@ mod tests { pool, (), cache.clone(), - GasPriceOracle::new(mock_provider, Default::default(), cache.clone()), + GasPriceOracle::new(mock_provider, Default::default(), cache), ETHEREUM_BLOCK_GAS_LIMIT, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_ETH_PROOF_WINDOW + 1, BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), + FeeHistoryCache::new(FeeHistoryCacheConfig::default()), evm_config, DEFAULT_PROOF_PERMITS, ) diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index 9c60a4c105f..ed7150153e5 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -7,7 +7,9 @@ use reth_rpc_eth_api::helpers::{LoadState, Trace}; use crate::EthApi; -impl Trace for EthApi where - Self: LoadState> +impl Trace for EthApi +where + Self: LoadState>, + Provider: BlockReader, { } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 7f2ca4f772a..647e16c25af 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -1,9 +1,9 @@ //! 
Contains RPC handler implementations specific to transactions -use reth_provider::{BlockReaderIdExt, TransactionsProvider}; +use reth_provider::{BlockReader, BlockReaderIdExt, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FullEthApiTypes, RpcNodeCore, + FullEthApiTypes, RpcNodeCoreExt, }; use reth_transaction_pool::TransactionPool; @@ -13,6 +13,7 @@ impl EthTransactions for EthApi where Self: LoadTransaction, + Provider: BlockReader, { #[inline] fn signers(&self) -> &parking_lot::RwLock>> { @@ -25,7 +26,8 @@ impl LoadTransaction where Self: SpawnBlocking + FullEthApiTypes - + RpcNodeCore, + + RpcNodeCoreExt, + Provider: BlockReader, { } @@ -58,8 +60,7 @@ mod tests { let evm_config = EthEvmConfig::new(noop_provider.chain_spec()); let cache = EthStateCache::spawn(noop_provider, Default::default()); - let fee_history_cache = - FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); + let fee_history_cache = FeeHistoryCache::new(FeeHistoryCacheConfig::default()); let eth_api = EthApi::new( noop_provider, pool.clone(), diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 917796038e9..43a86aaf750 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -40,6 +40,9 @@ impl BlockSource { } } +/// A helper type alias to access [`BlockReader::Block`]. +pub type ProviderBlock
<P> = <P as BlockReader>
::Block; + /// Api trait for fetching `Block` related data. /// /// If not requested otherwise, implementers of this trait should prioritize fetching blocks from diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs index 67257cce67c..fdb70323856 100644 --- a/crates/storage/storage-api/src/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -1,14 +1,18 @@ use crate::BlockIdReader; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{TxHash, TxNumber}; +use reth_primitives_traits::Receipt; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; +/// A helper type alias to access [`ReceiptProvider::Receipt`]. +pub type ProviderReceipt
<P> = <P as ReceiptProvider>
::Receipt; + /// Client trait for fetching receipt data. #[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProvider: Send + Sync { /// The receipt type. - type Receipt: Send + Sync; + type Receipt: Receipt; /// Get receipt by transaction number /// diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index 3bb20b7e161..ca2bcaeb469 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,4 +1,4 @@ -use crate::{BlockNumReader, BlockReader, ReceiptProvider}; +use crate::{BlockNumReader, BlockReader}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; use reth_primitives::TransactionMeta; @@ -84,9 +84,6 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { /// A helper type alias to access [`TransactionsProvider::Transaction`]. pub type ProviderTx
<P> = <P as TransactionsProvider>
::Transaction; -/// A helper type alias to access [`ReceiptProvider::Receipt`]. -pub type ProviderReceipt
<P> = <P as ReceiptProvider>
::Receipt; - /// Client trait for fetching additional transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProviderExt: BlockReader + Send + Sync { From da03b7989e61f5d3f2b2a576f0c153d996189b05 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 5 Dec 2024 20:45:43 +0700 Subject: [PATCH 891/970] perf(trie): avoid (de)allocating an extra prefix set (#13020) --- crates/trie/db/src/storage.rs | 4 +++- crates/trie/parallel/src/root.rs | 3 ++- crates/trie/trie/src/trie.rs | 13 +++++++------ 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/crates/trie/db/src/storage.rs b/crates/trie/db/src/storage.rs index 6a3bbe1b965..3e40b298fac 100644 --- a/crates/trie/db/src/storage.rs +++ b/crates/trie/db/src/storage.rs @@ -43,6 +43,7 @@ impl<'a, TX: DbTx> DatabaseStorageRoot<'a, TX> DatabaseTrieCursorFactory::new(tx), DatabaseHashedCursorFactory::new(tx), address, + Default::default(), #[cfg(feature = "metrics")] TrieRootMetrics::new(TrieType::Storage), ) @@ -53,6 +54,7 @@ impl<'a, TX: DbTx> DatabaseStorageRoot<'a, TX> DatabaseTrieCursorFactory::new(tx), DatabaseHashedCursorFactory::new(tx), hashed_address, + Default::default(), #[cfg(feature = "metrics")] TrieRootMetrics::new(TrieType::Storage), ) @@ -70,10 +72,10 @@ impl<'a, TX: DbTx> DatabaseStorageRoot<'a, TX> DatabaseTrieCursorFactory::new(tx), HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted), address, + prefix_set, #[cfg(feature = "metrics")] TrieRootMetrics::new(TrieType::Storage), ) - .with_prefix_set(prefix_set) .root() } } diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index e66d1f78213..2aace02ed9b 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -121,10 +121,10 @@ where trie_cursor_factory, hashed_state, hashed_address, + prefix_set, #[cfg(feature = "metrics")] metrics, ) - .with_prefix_set(prefix_set) 
.calculate(retain_updates)?) })(); let _ = tx.send(result); @@ -179,6 +179,7 @@ where trie_cursor_factory.clone(), hashed_cursor_factory.clone(), hashed_address, + Default::default(), #[cfg(feature = "metrics")] self.metrics.storage_trie.clone(), ) diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index 28517b23e90..953b8d31fc4 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -202,15 +202,13 @@ where self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone(), hashed_address, - #[cfg(feature = "metrics")] - self.metrics.storage_trie.clone(), - ) - .with_prefix_set( self.prefix_sets .storage_prefix_sets .get(&hashed_address) .cloned() .unwrap_or_default(), + #[cfg(feature = "metrics")] + self.metrics.storage_trie.clone(), ); let storage_root = if retain_updates { @@ -301,29 +299,32 @@ impl StorageRoot { trie_cursor_factory: T, hashed_cursor_factory: H, address: Address, + prefix_set: PrefixSet, #[cfg(feature = "metrics")] metrics: TrieRootMetrics, ) -> Self { Self::new_hashed( trie_cursor_factory, hashed_cursor_factory, keccak256(address), + prefix_set, #[cfg(feature = "metrics")] metrics, ) } /// Creates a new storage root calculator given a hashed address. 
- pub fn new_hashed( + pub const fn new_hashed( trie_cursor_factory: T, hashed_cursor_factory: H, hashed_address: B256, + prefix_set: PrefixSet, #[cfg(feature = "metrics")] metrics: TrieRootMetrics, ) -> Self { Self { trie_cursor_factory, hashed_cursor_factory, hashed_address, - prefix_set: PrefixSet::default(), + prefix_set, #[cfg(feature = "metrics")] metrics, } From 6f0cfefe517d8b1f14dbc9873e1e1ccface4002a Mon Sep 17 00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 5 Dec 2024 09:11:15 -0600 Subject: [PATCH 892/970] feat: Support sending raw capability messages (#13028) Co-authored-by: Matthias Seitz --- crates/net/eth-wire/src/capability.rs | 2 +- crates/net/eth-wire/src/ethstream.rs | 51 +++++++++++++++++++++++- crates/net/network/src/session/active.rs | 5 +++ crates/net/network/src/session/conn.rs | 9 +++++ 4 files changed, 65 insertions(+), 2 deletions(-) diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 1e1bb1b2012..0dc9119ce88 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -23,7 +23,7 @@ use std::{ pub struct RawCapabilityMessage { /// Identifier of the message. 
pub id: usize, - /// Actual payload + /// Actual __encoded__ payload pub payload: Bytes, } diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 77266c1b703..ccc80594b60 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -1,4 +1,5 @@ use crate::{ + capability::RawCapabilityMessage, errors::{EthHandshakeError, EthStreamError}, message::{EthBroadcastMessage, ProtocolBroadcastMessage}, p2pstream::HANDSHAKE_TIMEOUT, @@ -6,6 +7,7 @@ use crate::{ Status, }; use alloy_primitives::bytes::{Bytes, BytesMut}; +use alloy_rlp::Encodable; use futures::{ready, Sink, SinkExt, StreamExt}; use pin_project::pin_project; use reth_eth_wire_types::NetworkPrimitives; @@ -252,6 +254,16 @@ where Ok(()) } + + /// Sends a raw capability message directly over the stream + pub fn start_send_raw(&mut self, msg: RawCapabilityMessage) -> Result<(), EthStreamError> { + let mut bytes = Vec::new(); + msg.id.encode(&mut bytes); + bytes.extend_from_slice(&msg.payload); + + self.inner.start_send_unpin(bytes.into())?; + Ok(()) + } } impl Stream for EthStream @@ -361,13 +373,15 @@ mod tests { use crate::{ broadcast::BlockHashNumber, errors::{EthHandshakeError, EthStreamError}, + ethstream::RawCapabilityMessage, hello::DEFAULT_TCP_PORT, p2pstream::UnauthedP2PStream, EthMessage, EthStream, EthVersion, HelloMessageWithProtocols, PassthroughCodec, ProtocolVersion, Status, }; use alloy_chains::NamedChain; - use alloy_primitives::{B256, U256}; + use alloy_primitives::{bytes::Bytes, B256, U256}; + use alloy_rlp::Decodable; use futures::{SinkExt, StreamExt}; use reth_ecies::stream::ECIESStream; use reth_eth_wire_types::EthNetworkPrimitives; @@ -743,4 +757,39 @@ mod tests { matches!(handshake_result, Err(e) if e.to_string() == EthStreamError::StreamTimeout.to_string()) ); } + + #[tokio::test] + async fn can_write_and_read_raw_capability() { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let local_addr = 
listener.local_addr().unwrap(); + + let test_msg = RawCapabilityMessage { id: 0x1234, payload: Bytes::from(vec![1, 2, 3, 4]) }; + + let test_msg_clone = test_msg.clone(); + let handle = tokio::spawn(async move { + let (incoming, _) = listener.accept().await.unwrap(); + let stream = PassthroughCodec::default().framed(incoming); + let mut stream = EthStream::<_, EthNetworkPrimitives>::new(EthVersion::Eth67, stream); + + let bytes = stream.inner_mut().next().await.unwrap().unwrap(); + + // Create a cursor to track position while decoding + let mut id_bytes = &bytes[..]; + let decoded_id = ::decode(&mut id_bytes).unwrap(); + assert_eq!(decoded_id, test_msg_clone.id); + + // Get remaining bytes after ID decoding + let remaining = id_bytes; + assert_eq!(remaining, &test_msg_clone.payload[..]); + }); + + let outgoing = TcpStream::connect(local_addr).await.unwrap(); + let sink = PassthroughCodec::default().framed(outgoing); + let mut client_stream = EthStream::<_, EthNetworkPrimitives>::new(EthVersion::Eth67, sink); + + client_stream.start_send_raw(test_msg).unwrap(); + client_stream.inner_mut().flush().await.unwrap(); + + handle.await.unwrap(); + } } diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index af9bb2f0856..7b7837090cf 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -23,6 +23,7 @@ use alloy_primitives::Sealable; use futures::{stream::Fuse, SinkExt, StreamExt}; use metrics::Gauge; use reth_eth_wire::{ + capability::RawCapabilityMessage, errors::{EthHandshakeError, EthStreamError, P2PStreamError}, message::{EthBroadcastMessage, RequestPair}, Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, @@ -278,6 +279,7 @@ impl ActiveSession { } PeerMessage::Other(other) => { debug!(target: "net::session", message_id=%other.id, "Ignoring unsupported message"); + self.queued_outgoing.push_back(OutgoingMessage::Raw(other)); } } } @@ -559,6 +561,7 @@ 
impl Future for ActiveSession { let res = match msg { OutgoingMessage::Eth(msg) => this.conn.start_send_unpin(msg), OutgoingMessage::Broadcast(msg) => this.conn.start_send_broadcast(msg), + OutgoingMessage::Raw(msg) => this.conn.start_send_raw(msg), }; if let Err(err) = res { debug!(target: "net::session", %err, remote_peer_id=?this.remote_peer_id, "failed to send message"); @@ -738,6 +741,8 @@ pub(crate) enum OutgoingMessage { Eth(EthMessage), /// A message that may be shared by multiple sessions. Broadcast(EthBroadcastMessage), + /// A raw capability message + Raw(RawCapabilityMessage), } impl From> for OutgoingMessage { diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 6f87c26d6f5..c948937a04d 100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -3,6 +3,7 @@ use futures::{Sink, Stream}; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ + capability::RawCapabilityMessage, errors::EthStreamError, message::EthBroadcastMessage, multiplex::{ProtocolProxy, RlpxSatelliteStream}, @@ -84,6 +85,14 @@ impl EthRlpxConnection { Self::Satellite(conn) => conn.primary_mut().start_send_broadcast(item), } } + + /// Sends a raw capability message over the connection + pub fn start_send_raw(&mut self, msg: RawCapabilityMessage) -> Result<(), EthStreamError> { + match self { + Self::EthOnly(conn) => conn.start_send_raw(msg), + Self::Satellite(conn) => conn.primary_mut().start_send_raw(msg), + } + } } impl From> for EthRlpxConnection { From 3ccbda8e1a9a5fb8d492dd92b56abd65c30b5e2b Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 5 Dec 2024 15:21:01 +0000 Subject: [PATCH 893/970] chore: add reference to the troubleshooting page on `mdbx::Error::Access` (#13151) --- crates/storage/libmdbx-rs/src/error.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/error.rs 
b/crates/storage/libmdbx-rs/src/error.rs index a70488b0826..a8c74556597 100644 --- a/crates/storage/libmdbx-rs/src/error.rs +++ b/crates/storage/libmdbx-rs/src/error.rs @@ -97,7 +97,7 @@ pub enum Error { #[error("invalid parameter specified")] DecodeError, /// The environment opened in read-only. - #[error("the environment opened in read-only")] + #[error("the environment opened in read-only, check for more")] Access, /// Database is too large for the current system. #[error("database is too large for the current system")] @@ -238,7 +238,10 @@ mod tests { #[test] fn test_description() { - assert_eq!("the environment opened in read-only", Error::from_err_code(13).to_string()); + assert_eq!( + "the environment opened in read-only, check for more", + Error::from_err_code(13).to_string() + ); assert_eq!("file is not an MDBX file", Error::Invalid.to_string()); } From eb4aa2c07799e1c40038a99627edbe93bd2b9402 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 5 Dec 2024 16:32:32 +0100 Subject: [PATCH 894/970] fix(engine): wait for all state updates before returning state root task result (#13156) --- crates/engine/tree/src/tree/root.rs | 35 ++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index eea236fdaae..86b6b9f6036 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -18,6 +18,7 @@ use reth_trie_sparse::{ use revm_primitives::{keccak256, EvmState, B256}; use std::{ collections::BTreeMap, + ops::Deref, sync::{ mpsc::{self, Receiver, Sender}, Arc, @@ -84,6 +85,8 @@ pub(crate) enum StateRootMessage { /// Time taken to calculate the root elapsed: Duration, }, + /// Signals state update stream end. 
+ FinishedStateUpdates, } /// Handle to track proof calculation ordering @@ -152,6 +155,25 @@ impl ProofSequencer { } } +/// A wrapper for the sender that signals completion when dropped +#[allow(dead_code)] +pub(crate) struct StateHookSender(Sender); + +impl Deref for StateHookSender { + type Target = Sender; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Drop for StateHookSender { + fn drop(&mut self) { + // Send completion signal when the sender is dropped + let _ = self.0.send(StateRootMessage::FinishedStateUpdates); + } +} + /// Standalone task that receives a transaction state stream and updates relevant /// data structures to calculate state root. /// @@ -354,6 +376,7 @@ where let mut updates_received = 0; let mut proofs_processed = 0; let mut roots_calculated = 0; + let mut updates_finished = false; loop { match self.rx.recv() { @@ -375,6 +398,9 @@ where self.tx.clone(), ); } + StateRootMessage::FinishedStateUpdates => { + updates_finished = true; + } StateRootMessage::ProofCalculated { proof, state_update, sequence_number } => { proofs_processed += 1; trace!( @@ -434,7 +460,7 @@ where std::mem::take(&mut current_state_update), std::mem::take(&mut current_multiproof), ); - } else if all_proofs_received && no_pending { + } else if all_proofs_received && no_pending && updates_finished { debug!( target: "engine::root", total_updates = updates_received, @@ -710,10 +736,13 @@ mod tests { let task = StateRootTask::new(config, tx.clone(), rx); let handle = task.spawn(); + let state_hook_sender = StateHookSender(tx); for update in state_updates { - tx.send(StateRootMessage::StateUpdate(update)).expect("failed to send state"); + state_hook_sender + .send(StateRootMessage::StateUpdate(update)) + .expect("failed to send state"); } - drop(tx); + drop(state_hook_sender); let (root_from_task, _) = handle.wait_for_result().expect("task failed"); let root_from_base = state_root(accumulated_state); From d71a4be9821b5c4add501608a0895fb4ce581b63 Mon Sep 17 
00:00:00 2001 From: Steven <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 5 Dec 2024 09:42:52 -0600 Subject: [PATCH 895/970] feat: Add ratelimiting to OpWitness API (#12998) Co-authored-by: Matthias Seitz --- crates/optimism/node/src/node.rs | 7 ++++-- crates/optimism/rpc/src/witness.rs | 35 ++++++++++++++++++++++++------ crates/rpc/rpc-api/src/debug.rs | 4 ++-- 3 files changed, 35 insertions(+), 11 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index c7552135233..bd045320f52 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -253,8 +253,11 @@ where ) -> eyre::Result { let Self { rpc_add_ons, da_config } = self; // install additional OP specific rpc methods - let debug_ext = - OpDebugWitnessApi::new(ctx.node.provider().clone(), ctx.node.evm_config().clone()); + let debug_ext = OpDebugWitnessApi::new( + ctx.node.provider().clone(), + ctx.node.evm_config().clone(), + Box::new(ctx.node.task_executor().clone()), + ); let miner_ext = OpMinerExtApi::new(da_config); rpc_add_ons diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs index 8cc4bd98ef2..d533bb187d9 100644 --- a/crates/optimism/rpc/src/witness.rs +++ b/crates/optimism/rpc/src/witness.rs @@ -3,7 +3,7 @@ use alloy_consensus::Header; use alloy_primitives::B256; use alloy_rpc_types_debug::ExecutionWitness; -use jsonrpsee_core::RpcResult; +use jsonrpsee_core::{async_trait, RpcResult}; use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_chainspec::ChainSpecProvider; use reth_evm::ConfigureEvm; @@ -13,7 +13,9 @@ use reth_primitives::{SealedHeader, TransactionSigned}; use reth_provider::{BlockReaderIdExt, ProviderError, ProviderResult, StateProviderFactory}; pub use reth_rpc_api::DebugExecutionWitnessApiServer; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_tasks::TaskSpawner; use std::{fmt::Debug, sync::Arc}; +use tokio::sync::{oneshot, Semaphore}; 
/// An extension to the `debug_` namespace of the RPC API. pub struct OpDebugWitnessApi { @@ -22,9 +24,14 @@ pub struct OpDebugWitnessApi { impl OpDebugWitnessApi { /// Creates a new instance of the `OpDebugWitnessApi`. - pub fn new(provider: Provider, evm_config: EvmConfig) -> Self { + pub fn new( + provider: Provider, + evm_config: EvmConfig, + task_spawner: Box, + ) -> Self { let builder = OpPayloadBuilder::new(evm_config); - let inner = OpDebugWitnessApiInner { provider, builder }; + let semaphore = Arc::new(Semaphore::new(3)); + let inner = OpDebugWitnessApiInner { provider, builder, task_spawner, semaphore }; Self { inner: Arc::new(inner) } } } @@ -42,24 +49,36 @@ where } } +#[async_trait] impl DebugExecutionWitnessApiServer for OpDebugWitnessApi where Provider: BlockReaderIdExt
<Header = Header>
+ StateProviderFactory + ChainSpecProvider + + Clone + 'static, EvmConfig: ConfigureEvm
+ 'static, { - fn execute_payload( + async fn execute_payload( &self, parent_block_hash: B256, attributes: OpPayloadAttributes, ) -> RpcResult { + let _permit = self.inner.semaphore.acquire().await; + let parent_header = self.parent_header(parent_block_hash).to_rpc_result()?; - self.inner - .builder - .payload_witness(&self.inner.provider, parent_header, attributes) + + let (tx, rx) = oneshot::channel(); + let this = self.clone(); + self.inner.task_spawner.spawn_blocking(Box::pin(async move { + let res = + this.inner.builder.payload_witness(&this.inner.provider, parent_header, attributes); + let _ = tx.send(res); + })); + + rx.await + .map_err(|err| internal_rpc_err(err.to_string()))? .map_err(|err| internal_rpc_err(err.to_string())) } } @@ -78,4 +97,6 @@ impl Debug for OpDebugWitnessApi { struct OpDebugWitnessApiInner { provider: Provider, builder: OpPayloadBuilder, + task_spawner: Box, + semaphore: Arc, } diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 28ed9af5c13..c2d1c605ff1 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -401,8 +401,8 @@ pub trait DebugExecutionWitnessApi { /// /// The first argument is the parent block hash. The second argument is the payload /// attributes for the new block. 
- #[method(name = "executePayload", blocking)] - fn execute_payload( + #[method(name = "executePayload")] + async fn execute_payload( &self, parent_block_hash: B256, attributes: Attributes, From 56624f820f287a0eaa38eceac10a3514378c69c7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 5 Dec 2024 16:58:19 +0100 Subject: [PATCH 896/970] chore: add typed2718 to txtype (#13076) --- crates/primitives-traits/src/size.rs | 10 ++--- .../src/transaction/tx_type.rs | 39 ++----------------- crates/primitives/src/transaction/tx_type.rs | 38 ++++++------------ .../transaction-pool/src/blobstore/tracker.rs | 3 +- 4 files changed, 20 insertions(+), 70 deletions(-) diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs index f9065cda24a..a1978ff379e 100644 --- a/crates/primitives-traits/src/size.rs +++ b/crates/primitives-traits/src/size.rs @@ -1,4 +1,4 @@ -use alloy_consensus::{Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; +use alloy_consensus::{Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, TxType}; use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; /// Trait for calculating a heuristic for the in-memory size of a struct. @@ -28,7 +28,7 @@ macro_rules! impl_in_mem_size_size_of { }; } -impl_in_mem_size_size_of!(Signature, TxHash); +impl_in_mem_size_size_of!(Signature, TxHash, TxType); /// Implement `InMemorySize` for a type with a native `size` method. macro_rules! impl_in_mem_size { @@ -47,11 +47,7 @@ macro_rules! 
impl_in_mem_size { impl_in_mem_size!(Header, TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844); #[cfg(feature = "op")] -impl InMemorySize for op_alloy_consensus::OpTxType { - fn size(&self) -> usize { - 1 - } -} +impl_in_mem_size_size_of!(op_alloy_consensus::OpTxType); #[cfg(test)] mod tests { diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs index c2f2e04899d..c60cd9cb3af 100644 --- a/crates/primitives-traits/src/transaction/tx_type.rs +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -1,6 +1,7 @@ //! Abstraction of transaction envelope type ID. use crate::{InMemorySize, MaybeArbitrary, MaybeCompact}; +use alloy_consensus::Typed2718; use alloy_primitives::{U64, U8}; use core::fmt; @@ -30,24 +31,10 @@ pub trait TxType: + TryFrom + alloy_rlp::Encodable + alloy_rlp::Decodable + + Typed2718 + InMemorySize + MaybeArbitrary { - /// Returns `true` if this is a legacy transaction. - fn is_legacy(&self) -> bool; - - /// Returns `true` if this is an eip-2930 transaction. - fn is_eip2930(&self) -> bool; - - /// Returns `true` if this is an eip-1559 transaction. - fn is_eip1559(&self) -> bool; - - /// Returns `true` if this is an eip-4844 transaction. - fn is_eip4844(&self) -> bool; - - /// Returns `true` if this is an eip-7702 transaction. - fn is_eip7702(&self) -> bool; - /// Returns whether this transaction type can be __broadcasted__ as full transaction over the /// network. 
/// @@ -60,24 +47,6 @@ pub trait TxType: } #[cfg(feature = "op")] -impl TxType for op_alloy_consensus::OpTxType { - fn is_legacy(&self) -> bool { - matches!(self, Self::Legacy) - } - - fn is_eip2930(&self) -> bool { - matches!(self, Self::Eip2930) - } - - fn is_eip1559(&self) -> bool { - matches!(self, Self::Eip1559) - } +impl TxType for op_alloy_consensus::OpTxType {} - fn is_eip4844(&self) -> bool { - false - } - - fn is_eip7702(&self) -> bool { - matches!(self, Self::Eip7702) - } -} +impl TxType for alloy_consensus::TxType {} diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 1d709b902b5..d0a4786dcf1 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -1,6 +1,9 @@ -use alloy_consensus::constants::{ - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, +use alloy_consensus::{ + constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, + }, + Typed2718, }; use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; @@ -88,33 +91,14 @@ impl TxType { } } -impl reth_primitives_traits::TxType for TxType { - #[inline] - fn is_legacy(&self) -> bool { - matches!(self, Self::Legacy) - } - - #[inline] - fn is_eip2930(&self) -> bool { - matches!(self, Self::Eip2930) - } - - #[inline] - fn is_eip1559(&self) -> bool { - matches!(self, Self::Eip1559) - } - - #[inline] - fn is_eip4844(&self) -> bool { - matches!(self, Self::Eip4844) - } - - #[inline] - fn is_eip7702(&self) -> bool { - matches!(self, Self::Eip7702) +impl Typed2718 for TxType { + fn ty(&self) -> u8 { + (*self).into() } } +impl reth_primitives_traits::TxType for TxType {} + impl InMemorySize for TxType { /// Calculates a heuristic for the in-memory size of the [`TxType`]. 
#[inline] diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index 3fdcbe8b4ea..817114fcf25 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -1,9 +1,10 @@ //! Support for maintaining the blob pool. +use alloy_consensus::Typed2718; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{BlockNumber, B256}; use reth_execution_types::ChainBlocks; -use reth_primitives_traits::{Block, BlockBody, SignedTransaction, TxType}; +use reth_primitives_traits::{Block, BlockBody, SignedTransaction}; use std::collections::BTreeMap; /// The type that is used to track canonical blob transactions. From 4fe5c2a5775aa269dd2ac3dbfa2fd063f63c3fee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ahmet=20Yaz=C4=B1c=C4=B1?= <75089142+yaziciahmet@users.noreply.github.com> Date: Thu, 5 Dec 2024 19:24:28 +0100 Subject: [PATCH 897/970] Allow replacement txs with exactly price bump (#13161) --- crates/transaction-pool/src/validate/mod.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 84caae6e7ca..40f2deeafe8 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -405,8 +405,7 @@ impl ValidPoolTransaction { let price_bump = price_bumps.price_bump(self.tx_type()); // Check if the max fee per gas is underpriced. 
- if maybe_replacement.max_fee_per_gas() <= self.max_fee_per_gas() * (100 + price_bump) / 100 - { + if maybe_replacement.max_fee_per_gas() < self.max_fee_per_gas() * (100 + price_bump) / 100 { return true } @@ -418,7 +417,7 @@ impl ValidPoolTransaction { // Check max priority fee per gas (relevant for EIP-1559 transactions only) if existing_max_priority_fee_per_gas != 0 && replacement_max_priority_fee_per_gas != 0 && - replacement_max_priority_fee_per_gas <= + replacement_max_priority_fee_per_gas < existing_max_priority_fee_per_gas * (100 + price_bump) / 100 { return true @@ -429,7 +428,7 @@ impl ValidPoolTransaction { // This enforces that blob txs can only be replaced by blob txs let replacement_max_blob_fee_per_gas = maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or_default(); - if replacement_max_blob_fee_per_gas <= + if replacement_max_blob_fee_per_gas < existing_max_blob_fee_per_gas * (100 + price_bump) / 100 { return true From 8226fa0cacc42c5ca55f7638b3fed57a16cac008 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 5 Dec 2024 22:50:43 +0400 Subject: [PATCH 898/970] feat: use network tx for `Pool::Pooled` (#13159) Co-authored-by: Matthias Seitz --- crates/ethereum/node/src/node.rs | 10 ++- crates/net/network/src/test_utils/testnet.rs | 26 ++++-- crates/net/network/src/transactions/mod.rs | 21 ++--- crates/node/builder/src/builder/mod.rs | 10 ++- crates/optimism/node/src/node.rs | 10 ++- crates/optimism/rpc/src/eth/transaction.rs | 3 +- .../rpc-eth-api/src/helpers/transaction.rs | 2 +- crates/rpc/rpc-eth-types/src/utils.rs | 12 +-- crates/rpc/rpc/src/eth/bundle.rs | 2 +- crates/rpc/rpc/src/eth/sim_bundle.rs | 5 +- crates/rpc/rpc/src/trace.rs | 4 +- crates/transaction-pool/src/lib.rs | 15 +--- crates/transaction-pool/src/maintain.rs | 11 +-- crates/transaction-pool/src/noop.rs | 14 +--- crates/transaction-pool/src/pool/mod.rs | 20 +---- .../transaction-pool/src/test_utils/mock.rs | 41 +++++++--- crates/transaction-pool/src/traits.rs | 80 
++++++++++--------- 17 files changed, 148 insertions(+), 138 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index dd4f1e5802c..b2fc7e677ac 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -25,7 +25,7 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::EthPrimitives; +use reth_primitives::{EthPrimitives, PooledTransactionsElement}; use reth_provider::{CanonStateSubscriptions, EthStorage}; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; @@ -309,8 +309,12 @@ pub struct EthereumNetworkBuilder { impl NetworkBuilder for EthereumNetworkBuilder where Node: FullNodeTypes>, - Pool: TransactionPool>> - + Unpin + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TxTy, + Pooled = PooledTransactionsElement, + >, + > + Unpin + 'static, { async fn build_network( diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 249cd1a6beb..3a50d890e70 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -20,7 +20,7 @@ use reth_network_api::{ NetworkEvent, NetworkEventListenerProvider, NetworkInfo, Peers, }; use reth_network_peers::PeerId; -use reth_primitives::TransactionSigned; +use reth_primitives::{PooledTransactionsElement, TransactionSigned}; use reth_provider::{test_utils::NoopProvider, ChainSpecProvider}; use reth_storage_api::{BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory}; use reth_tasks::TokioTaskExecutor; @@ -206,8 +206,12 @@ where + Clone + Unpin + 'static, - Pool: TransactionPool> - + Unpin + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TransactionSigned, + Pooled = PooledTransactionsElement, + >, + > + Unpin + 'static, { /// Spawns the 
testnet to a separate task @@ -273,8 +277,12 @@ where > + HeaderProvider + Unpin + 'static, - Pool: TransactionPool> - + Unpin + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TransactionSigned, + Pooled = PooledTransactionsElement, + >, + > + Unpin + 'static, { type Output = (); @@ -476,8 +484,12 @@ where > + HeaderProvider + Unpin + 'static, - Pool: TransactionPool> - + Unpin + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TransactionSigned, + Pooled = PooledTransactionsElement, + >, + > + Unpin + 'static, { type Output = (); diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 3864c0fcc0b..fefff2bac35 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -49,9 +49,7 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{ - transaction::SignedTransactionIntoRecoveredExt, RecoveredTx, TransactionSigned, -}; +use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, TransactionSigned}; use reth_primitives_traits::{SignedTransaction, TxType}; use reth_tokio_util::EventStream; use reth_transaction_pool::{ @@ -703,10 +701,8 @@ where BroadcastedTransaction: SignedTransaction, PooledTransaction: SignedTransaction, >, - Pool::Transaction: PoolTransaction< - Consensus = N::BroadcastedTransaction, - Pooled: Into + From>, - >, + Pool::Transaction: + PoolTransaction, { /// Invoked when transactions in the local mempool are considered __pending__. 
/// @@ -991,13 +987,12 @@ where let _ = response.send(Ok(PooledTransactions::default())); return } - let transactions = self.pool.get_pooled_transactions_as::( + let transactions = self.pool.get_pooled_transaction_elements( request.0, GetPooledTransactionLimit::ResponseSizeSoftLimit( self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, ), ); - trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| tx.tx_hash()), "Sending requested transactions to peer"); // we sent a response at which point we assume that the peer is aware of the @@ -1247,7 +1242,7 @@ where } else { // this is a new transaction that should be imported into the pool - let pool_transaction = Pool::Transaction::from_pooled(tx.into()); + let pool_transaction = Pool::Transaction::from_pooled(tx); new_txs.push(pool_transaction); entry.insert(HashSet::from([peer_id])); @@ -1338,10 +1333,8 @@ where BroadcastedTransaction: SignedTransaction, PooledTransaction: SignedTransaction, >, - Pool::Transaction: PoolTransaction< - Consensus = N::BroadcastedTransaction, - Pooled: Into + From>, - >, + Pool::Transaction: + PoolTransaction, { type Output = (); diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 3cab01aa71b..e2b18f666c7 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -651,7 +651,10 @@ impl BuilderContext { pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle where Pool: TransactionPool< - Transaction: PoolTransaction, + Transaction: PoolTransaction< + Consensus = reth_primitives::TransactionSigned, + Pooled = reth_primitives::PooledTransactionsElement, + >, > + Unpin + 'static, Node::Provider: BlockReader< @@ -677,7 +680,10 @@ impl BuilderContext { ) -> NetworkHandle where Pool: TransactionPool< - Transaction: PoolTransaction, + Transaction: PoolTransaction< + Consensus = reth_primitives::TransactionSigned, + Pooled = 
reth_primitives::PooledTransactionsElement, + >, > + Unpin + 'static, Node::Provider: BlockReader< diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index bd045320f52..b7dcf2741c6 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -35,7 +35,7 @@ use reth_optimism_rpc::{ OpEthApi, SequencerClient, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{BlockBody, TransactionSigned}; +use reth_primitives::{BlockBody, PooledTransactionsElement, TransactionSigned}; use reth_provider::{ providers::ChainStorage, BlockBodyReader, BlockBodyWriter, CanonStateSubscriptions, ChainSpecProvider, DBProvider, EthStorage, ProviderResult, ReadBodyInput, @@ -633,8 +633,12 @@ impl OpNetworkBuilder { impl NetworkBuilder for OpNetworkBuilder where Node: FullNodeTypes>, - Pool: TransactionPool>> - + Unpin + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TxTy, + Pooled = PooledTransactionsElement, + >, + > + Unpin + 'static, { async fn build_network( diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index b5d4ce2bc55..cfc81ab644e 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -31,8 +31,7 @@ where /// Returns the hash of the transaction. async fn send_raw_transaction(&self, tx: Bytes) -> Result { let recovered = recover_raw_transaction(tx.clone())?; - let pool_transaction = - ::Transaction::from_pooled(recovered.into()); + let pool_transaction = ::Transaction::from_pooled(recovered); // On optimism, transactions are forwarded directly to the sequencer to be included in // blocks that it builds. 
diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index f73d761600e..5c6478540f7 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -344,7 +344,7 @@ pub trait EthTransactions: LoadTransaction { async move { let recovered = recover_raw_transaction(tx)?; let pool_transaction = - ::Transaction::from_pooled(recovered.into()); + ::Transaction::from_pooled(recovered); // submit the transaction to the pool with a `Local` origin let hash = self diff --git a/crates/rpc/rpc-eth-types/src/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs index 596acc74ce1..64f159ea0e8 100644 --- a/crates/rpc/rpc-eth-types/src/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -1,21 +1,21 @@ //! Commonly used code snippets -use alloy_eips::eip2718::Decodable2718; use alloy_primitives::Bytes; -use reth_primitives::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; +use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, RecoveredTx}; +use reth_primitives_traits::SignedTransaction; use std::future::Future; use super::{EthApiError, EthResult}; -/// Recovers a [`PooledTransactionsElementEcRecovered`] from an enveloped encoded byte stream. +/// Recovers a [`SignedTransaction`] from an enveloped encoded byte stream. 
/// -/// See [`Decodable2718::decode_2718`] -pub fn recover_raw_transaction(data: Bytes) -> EthResult { +/// See [`alloy_eips::eip2718::Decodable2718::decode_2718`] +pub fn recover_raw_transaction(data: Bytes) -> EthResult> { if data.is_empty() { return Err(EthApiError::EmptyRawTransactionData) } - let transaction = PooledTransactionsElement::decode_2718(&mut data.as_ref()) + let transaction = T::decode_2718(&mut data.as_ref()) .map_err(|_| EthApiError::FailedToDecodeSignedTransaction)?; transaction.try_into_ecrecovered().or(Err(EthApiError::InvalidTransactionSignature)) diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 2924e6ea25f..d5117650a13 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -79,7 +79,7 @@ where let transactions = txs .into_iter() - .map(recover_raw_transaction) + .map(recover_raw_transaction::) .collect::, _>>()? .into_iter() .map(|tx| tx.to_components()) diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 87778ec6e65..0702d7df0e4 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -10,7 +10,7 @@ use alloy_rpc_types_mev::{ use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::TransactionSigned; +use reth_primitives::{PooledTransactionsElement, TransactionSigned}; use reth_provider::{ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::MevSimApiServer; @@ -171,7 +171,8 @@ where match &body[idx] { BundleItem::Tx { tx, can_revert } => { let recovered_tx = - recover_raw_transaction(tx.clone()).map_err(EthApiError::from)?; + recover_raw_transaction::(tx.clone()) + .map_err(EthApiError::from)?; let (tx, signer) = recovered_tx.to_components(); let tx = tx.into_transaction(); diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 
f81eefdc5ff..009203f757d 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -19,6 +19,7 @@ use reth_consensus_common::calc::{ base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, }; use reth_evm::ConfigureEvmEnv; +use reth_primitives::PooledTransactionsElement; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; @@ -115,7 +116,8 @@ where trace_types: HashSet, block_id: Option, ) -> Result { - let tx = recover_raw_transaction(tx)?.into_ecrecovered_transaction(); + let tx = recover_raw_transaction::(tx)? + .into_ecrecovered_transaction(); let (cfg, block, at) = self.eth_api().evm_env_at(block_id.unwrap_or_default()).await?; diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 1c383e8edf0..0e069291e73 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -156,6 +156,7 @@ use alloy_primitives::{Address, TxHash, B256, U256}; use aquamarine as _; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; +use reth_primitives::RecoveredTx; use reth_storage_api::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; @@ -419,21 +420,11 @@ where self.pool.get_pooled_transaction_elements(tx_hashes, limit) } - fn get_pooled_transactions_as

( - &self, - tx_hashes: Vec, - limit: GetPooledTransactionLimit, - ) -> Vec

- where - ::Pooled: Into

, - { - self.pool.get_pooled_transactions_as(tx_hashes, limit) - } - fn get_pooled_transaction_element( &self, tx_hash: TxHash, - ) -> Option<<::Transaction as PoolTransaction>::Pooled> { + ) -> Option::Transaction as PoolTransaction>::Pooled>> + { self.pool.get_pooled_transaction_element(tx_hash) } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index fa7b75e34ad..96971b487f0 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -4,7 +4,7 @@ use crate::{ blobstore::{BlobStoreCanonTracker, BlobStoreUpdates}, error::PoolError, metrics::MaintainPoolMetrics, - traits::{CanonicalStateUpdate, TransactionPool, TransactionPoolExt}, + traits::{CanonicalStateUpdate, EthPoolTransaction, TransactionPool, TransactionPoolExt}, BlockInfo, PoolTransaction, PoolUpdateKind, }; use alloy_consensus::BlockHeader; @@ -20,8 +20,7 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives::{ - transaction::SignedTransactionIntoRecoveredExt, PooledTransactionsElementEcRecovered, - SealedHeader, TransactionSigned, + transaction::SignedTransactionIntoRecoveredExt, SealedHeader, TransactionSigned, }; use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; @@ -335,13 +334,9 @@ pub async fn maintain_transaction_pool( .flatten() .map(Arc::unwrap_or_clone) .and_then(|sidecar| { - PooledTransactionsElementEcRecovered::try_from_blob_transaction( +

::Transaction::try_from_eip4844( tx, sidecar, ) - .ok() - }) - .map(|tx| { -

::Transaction::from_pooled(tx.into()) }) } else {

::Transaction::try_from_consensus(tx).ok() diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 3a068d3a593..8d880994aa9 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -22,6 +22,7 @@ use alloy_eips::{ }; use alloy_primitives::{Address, TxHash, B256, U256}; use reth_eth_wire_types::HandleMempoolData; +use reth_primitives::RecoveredTx; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; @@ -139,21 +140,10 @@ impl TransactionPool for NoopTransactionPool { vec![] } - fn get_pooled_transactions_as( - &self, - _tx_hashes: Vec, - _limit: GetPooledTransactionLimit, - ) -> Vec - where - ::Pooled: Into, - { - vec![] - } - fn get_pooled_transaction_element( &self, _tx_hash: TxHash, - ) -> Option<::Pooled> { + ) -> Option::Pooled>> { None } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index fce1b9acef8..044b192fe59 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -88,6 +88,7 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use alloy_eips::eip4844::BlobTransactionSidecar; +use reth_primitives::RecoveredTx; use std::{ collections::{HashMap, HashSet}, fmt, @@ -312,7 +313,7 @@ where fn to_pooled_transaction( &self, transaction: Arc>, - ) -> Option<<::Transaction as PoolTransaction>::Pooled> + ) -> Option::Transaction as PoolTransaction>::Pooled>> where ::Transaction: EthPoolTransaction, { @@ -342,19 +343,6 @@ where ) -> Vec<<::Transaction as PoolTransaction>::Pooled> where ::Transaction: EthPoolTransaction, - { - self.get_pooled_transactions_as(tx_hashes, limit) - } - - /// Returns pooled transactions for the given transaction hashes as the requested type. - pub fn get_pooled_transactions_as

( - &self, - tx_hashes: Vec, - limit: GetPooledTransactionLimit, - ) -> Vec

- where - ::Transaction: EthPoolTransaction, - <::Transaction as PoolTransaction>::Pooled: Into

, { let transactions = self.get_all(tx_hashes); let mut elements = Vec::with_capacity(transactions.len()); @@ -366,7 +354,7 @@ where }; size += encoded_len; - elements.push(pooled.into()); + elements.push(pooled.into_signed()); if limit.exceeds(size) { break @@ -380,7 +368,7 @@ where pub fn get_pooled_transaction_element( &self, tx_hash: TxHash, - ) -> Option<<::Transaction as PoolTransaction>::Pooled> + ) -> Option::Transaction as PoolTransaction>::Pooled>> where ::Transaction: EthPoolTransaction, { diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 0e8b26faf83..d174c7b1604 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -25,8 +25,9 @@ use rand::{ prelude::Distribution, }; use reth_primitives::{ - transaction::TryFromRecoveredTransactionError, PooledTransactionsElementEcRecovered, - RecoveredTx, Transaction, TransactionSigned, TxType, + transaction::{SignedTransactionIntoRecoveredExt, TryFromRecoveredTransactionError}, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, RecoveredTx, Transaction, + TransactionSigned, TxType, }; use reth_primitives_traits::InMemorySize; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -594,7 +595,7 @@ impl PoolTransaction for MockTransaction { type Consensus = TransactionSigned; - type Pooled = PooledTransactionsElementEcRecovered; + type Pooled = PooledTransactionsElement; fn try_from_consensus( tx: RecoveredTx, @@ -606,14 +607,17 @@ impl PoolTransaction for MockTransaction { self.into() } - fn from_pooled(pooled: Self::Pooled) -> Self { + fn from_pooled(pooled: RecoveredTx) -> Self { pooled.into() } fn try_consensus_into_pooled( tx: RecoveredTx, - ) -> Result { - Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) + ) -> Result, Self::TryFromConsensusError> { + let (tx, signer) = tx.to_components(); + Self::Pooled::try_from(tx) 
+ .map(|tx| tx.with_signer(signer)) + .map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) } fn hash(&self) -> &TxHash { @@ -786,12 +790,25 @@ impl EthPoolTransaction for MockTransaction { } } - fn try_into_pooled_eip4844(self, sidecar: Arc) -> Option { - Self::Pooled::try_from_blob_transaction( - self.into_consensus(), - Arc::unwrap_or_clone(sidecar), - ) - .ok() + fn try_into_pooled_eip4844( + self, + sidecar: Arc, + ) -> Option> { + let (tx, signer) = self.into_consensus().to_components(); + Self::Pooled::try_from_blob_transaction(tx, Arc::unwrap_or_clone(sidecar)) + .map(|tx| tx.with_signer(signer)) + .ok() + } + + fn try_from_eip4844( + tx: RecoveredTx, + sidecar: BlobTransactionSidecar, + ) -> Option { + let (tx, signer) = tx.to_components(); + Self::Pooled::try_from_blob_transaction(tx, sidecar) + .map(|tx| tx.with_signer(signer)) + .ok() + .map(Self::from_pooled) } fn validate_blob( diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 15f824e7d43..aa238ded2f0 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -19,8 +19,10 @@ use futures_util::{ready, Stream}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ - kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, RecoveredTx, SealedBlock, Transaction, TransactionSigned, + kzg::KzgSettings, + transaction::{SignedTransactionIntoRecoveredExt, TryFromRecoveredTransactionError}, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, RecoveredTx, SealedBlock, + Transaction, TransactionSigned, }; use reth_primitives_traits::SignedTransaction; #[cfg(feature = "serde")] @@ -236,15 +238,6 @@ pub trait TransactionPool: Send + Sync + Clone { limit: GetPooledTransactionLimit, ) -> Vec<::Pooled>; - /// Returns the pooled transaction variant for the given transaction hash 
as the requested type. - fn get_pooled_transactions_as( - &self, - tx_hashes: Vec, - limit: GetPooledTransactionLimit, - ) -> Vec - where - ::Pooled: Into; - /// Returns the pooled transaction variant for the given transaction hash. /// /// This adheres to the expected behavior of @@ -260,15 +253,7 @@ pub trait TransactionPool: Send + Sync + Clone { fn get_pooled_transaction_element( &self, tx_hash: TxHash, - ) -> Option<::Pooled>; - - /// Returns the pooled transaction variant for the given transaction hash as the requested type. - fn get_pooled_transaction_as(&self, tx_hash: TxHash) -> Option - where - ::Pooled: Into, - { - self.get_pooled_transaction_element(tx_hash).map(Into::into) - } + ) -> Option::Pooled>>; /// Returns an iterator that yields transactions that are ready for block production. /// @@ -973,6 +958,7 @@ pub trait PoolTransaction: + Clone + TryFrom, Error = Self::TryFromConsensusError> + Into> + + From> { /// Associated error type for the `try_from_consensus` method. type TryFromConsensusError: fmt::Display; @@ -981,7 +967,7 @@ pub trait PoolTransaction: type Consensus; /// Associated type representing the recovered pooled variant of the transaction. - type Pooled: Encodable2718 + Into; + type Pooled: SignedTransaction; /// Define a method to convert from the `Consensus` type to `Self` fn try_from_consensus( @@ -1003,19 +989,19 @@ pub trait PoolTransaction: } /// Define a method to convert from the `Pooled` type to `Self` - fn from_pooled(pooled: Self::Pooled) -> Self { + fn from_pooled(pooled: RecoveredTx) -> Self { pooled.into() } /// Tries to convert the `Consensus` type into the `Pooled` type. - fn try_into_pooled(self) -> Result { + fn try_into_pooled(self) -> Result, Self::TryFromConsensusError> { Self::try_consensus_into_pooled(self.into_consensus()) } /// Tries to convert the `Consensus` type into the `Pooled` type. 
fn try_consensus_into_pooled( tx: RecoveredTx, - ) -> Result; + ) -> Result, Self::TryFromConsensusError>; /// Hash of the transaction. fn hash(&self) -> &TxHash; @@ -1144,13 +1130,7 @@ pub trait PoolTransaction: /// /// This extends the [`PoolTransaction`] trait with additional methods that are specific to the /// Ethereum pool. -pub trait EthPoolTransaction: - PoolTransaction< - Pooled: From - + Into - + Into, -> -{ +pub trait EthPoolTransaction: PoolTransaction { /// Extracts the blob sidecar from the transaction. fn take_blob(&mut self) -> EthBlobTransactionSidecar; @@ -1162,7 +1142,18 @@ pub trait EthPoolTransaction: /// /// This returns an option, but callers should ensure that the transaction is an EIP-4844 /// transaction: [`PoolTransaction::is_eip4844`]. - fn try_into_pooled_eip4844(self, sidecar: Arc) -> Option; + fn try_into_pooled_eip4844( + self, + sidecar: Arc, + ) -> Option>; + + /// Tries to convert the `Consensus` type with a blob sidecar into the `Pooled` type. + /// + /// Returns `None` if passed transaction is not a blob transaction. + fn try_from_eip4844( + tx: RecoveredTx, + sidecar: BlobTransactionSidecar, + ) -> Option; /// Validates the blob sidecar of the transaction with the given settings. 
fn validate_blob( @@ -1258,7 +1249,7 @@ impl PoolTransaction for EthPooledTransaction { type Consensus = TransactionSigned; - type Pooled = PooledTransactionsElementEcRecovered; + type Pooled = PooledTransactionsElement; fn clone_into_consensus(&self) -> RecoveredTx { self.transaction().clone() @@ -1266,8 +1257,11 @@ impl PoolTransaction for EthPooledTransaction { fn try_consensus_into_pooled( tx: RecoveredTx, - ) -> Result { - Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) + ) -> Result, Self::TryFromConsensusError> { + let (tx, signer) = tx.to_components(); + let pooled = PooledTransactionsElement::try_from_broadcast(tx) + .map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing)?; + Ok(RecoveredTx::from_signed_transaction(pooled, signer)) } /// Returns hash of the transaction. @@ -1395,7 +1389,10 @@ impl EthPoolTransaction for EthPooledTransaction { } } - fn try_into_pooled_eip4844(self, sidecar: Arc) -> Option { + fn try_into_pooled_eip4844( + self, + sidecar: Arc, + ) -> Option> { PooledTransactionsElementEcRecovered::try_from_blob_transaction( self.into_consensus(), Arc::unwrap_or_clone(sidecar), @@ -1403,6 +1400,17 @@ impl EthPoolTransaction for EthPooledTransaction { .ok() } + fn try_from_eip4844( + tx: RecoveredTx, + sidecar: BlobTransactionSidecar, + ) -> Option { + let (tx, signer) = tx.to_components(); + PooledTransactionsElement::try_from_blob_transaction(tx, sidecar) + .ok() + .map(|tx| tx.with_signer(signer)) + .map(Self::from_pooled) + } + fn validate_blob( &self, sidecar: &BlobTransactionSidecar, From d939876f39e4003fb15db208eff630ad1814f0ba Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 5 Dec 2024 23:45:28 +0100 Subject: [PATCH 899/970] chore(engine): add StateHookSender constructor (#13162) Co-authored-by: Matthias Seitz --- crates/engine/tree/src/tree/root.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/root.rs 
b/crates/engine/tree/src/tree/root.rs index 86b6b9f6036..bb936c28341 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -159,6 +159,13 @@ impl ProofSequencer { #[allow(dead_code)] pub(crate) struct StateHookSender(Sender); +#[allow(dead_code)] +impl StateHookSender { + pub(crate) const fn new(inner: Sender) -> Self { + Self(inner) + } +} + impl Deref for StateHookSender { type Target = Sender; @@ -736,7 +743,7 @@ mod tests { let task = StateRootTask::new(config, tx.clone(), rx); let handle = task.spawn(); - let state_hook_sender = StateHookSender(tx); + let state_hook_sender = StateHookSender::new(tx); for update in state_updates { state_hook_sender .send(StateRootMessage::StateUpdate(update)) From f82a20a616f3f1e6abdff973cb8337a8b844902a Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Fri, 6 Dec 2024 15:18:23 +0700 Subject: [PATCH 900/970] fix: txpool tests (#13172) --- crates/transaction-pool/src/pool/txpool.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index c9d4e0a488e..7dd64da7364 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -2322,7 +2322,9 @@ mod tests { let on_chain_nonce = 0; let mut f = MockTransactionFactory::default(); let mut pool = AllTransactions::default(); - let tx = MockTransaction::eip1559().inc_price().inc_limit(); + let mut tx = MockTransaction::eip1559().inc_price().inc_limit(); + tx.set_priority_fee(100); + tx.set_max_fee(100); let valid_tx = f.validated(tx.clone()); let InsertOk { updates, replaced_tx, move_to, state, .. 
} = pool.insert_tx(valid_tx.clone(), on_chain_balance, on_chain_nonce).unwrap(); @@ -2448,20 +2450,20 @@ mod tests { let first = f.validated(tx.clone()); let _ = pool.insert_tx(first.clone(), on_chain_balance, on_chain_nonce).unwrap(); let mut replacement = f.validated(tx.rng_hash().inc_price()); + // a price bump of 9% is not enough for a default min price bump of 10% replacement.transaction.set_priority_fee(109); replacement.transaction.set_max_fee(109); let err = pool.insert_tx(replacement.clone(), on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::Underpriced { .. })); - // ensure first tx is not removed assert!(pool.contains(first.hash())); assert_eq!(pool.len(), 1); - // price bump of 10% is also not enough because the bump should be strictly greater than 10% + // should also fail if the bump in max fee is not enough replacement.transaction.set_priority_fee(110); - replacement.transaction.set_max_fee(110); + replacement.transaction.set_max_fee(109); let err = pool.insert_tx(replacement.clone(), on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::Underpriced { .. })); @@ -2469,7 +2471,7 @@ mod tests { assert_eq!(pool.len(), 1); // should also fail if the bump in priority fee is not enough - replacement.transaction.set_priority_fee(111); + replacement.transaction.set_priority_fee(109); replacement.transaction.set_max_fee(110); let err = pool.insert_tx(replacement, on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::Underpriced { .. 
})); From 6fc4e8acd24c1c3b0c3aa933136fc70c76d7891b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 09:44:33 +0100 Subject: [PATCH 901/970] chore: make reth codec support optional (#13166) --- crates/stages/types/Cargo.toml | 16 +++++-- crates/stages/types/src/checkpoints.rs | 58 +++++++++++++++----------- crates/stages/types/src/execution.rs | 2 +- crates/stages/types/src/lib.rs | 4 +- crates/storage/db-api/Cargo.toml | 2 +- 5 files changed, 52 insertions(+), 30 deletions(-) diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index a466b21b6f9..d8ab6355257 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -12,23 +12,33 @@ description = "Commonly used types for stages usage in reth." workspace = true [dependencies] -reth-codecs.workspace = true +reth-codecs = { workspace = true, optional = true } reth-trie-common.workspace = true alloy-primitives.workspace = true -modular-bitfield.workspace = true -bytes.workspace = true serde.workspace = true arbitrary = { workspace = true, features = ["derive"], optional = true } +bytes = { workspace = true, optional = true } +modular-bitfield = { workspace = true, optional = true } + [dev-dependencies] +reth-codecs.workspace = true +alloy-primitives = { workspace = true, features = ["arbitrary", "rand"] } arbitrary = { workspace = true, features = ["derive"] } +modular-bitfield.workspace = true proptest.workspace = true proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true rand.workspace = true +bytes.workspace = true [features] +reth-codec = [ + "dep:reth-codecs", + "dep:bytes", + "dep:modular-bitfield", +] test-utils = [ "dep:arbitrary", "reth-codecs/test-utils", diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index 87225f1eec4..160c901e1cb 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -1,11 +1,9 @@ +use super::StageId; +use 
alloc::vec::Vec; use alloy_primitives::{Address, BlockNumber, B256}; -use bytes::Buf; -use reth_codecs::{add_arbitrary_tests, Compact}; +use core::ops::RangeInclusive; use reth_trie_common::{hash_builder::HashBuilderState, StoredSubNode}; use serde::{Deserialize, Serialize}; -use std::ops::RangeInclusive; - -use super::StageId; /// Saves the progress of Merkle stage. #[derive(Default, Debug, Clone, PartialEq, Eq)] @@ -32,7 +30,8 @@ impl MerkleCheckpoint { } } -impl Compact for MerkleCheckpoint { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for MerkleCheckpoint { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, @@ -56,6 +55,7 @@ impl Compact for MerkleCheckpoint { } fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; let target_block = buf.get_u64(); let last_account_key = B256::from_slice(&buf[..32]); @@ -75,9 +75,10 @@ impl Compact for MerkleCheckpoint { } /// Saves the progress of AccountHashing stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct AccountHashingCheckpoint { /// The next account to start hashing from. pub address: Option

, @@ -88,9 +89,10 @@ pub struct AccountHashingCheckpoint { } /// Saves the progress of StorageHashing stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct StorageHashingCheckpoint { /// The next account to start hashing from. pub address: Option
, @@ -103,9 +105,10 @@ pub struct StorageHashingCheckpoint { } /// Saves the progress of Execution stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct ExecutionCheckpoint { /// Block range which this checkpoint is valid for. pub block_range: CheckpointBlockRange, @@ -114,9 +117,10 @@ pub struct ExecutionCheckpoint { } /// Saves the progress of Headers stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct HeadersCheckpoint { /// Block range which this checkpoint is valid for. pub block_range: CheckpointBlockRange, @@ -125,9 +129,10 @@ pub struct HeadersCheckpoint { } /// Saves the progress of Index History stages. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct IndexHistoryCheckpoint { /// Block range which this checkpoint is valid for. 
pub block_range: CheckpointBlockRange, @@ -136,9 +141,10 @@ pub struct IndexHistoryCheckpoint { } /// Saves the progress of abstract stage iterating over or downloading entities. -#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct EntitiesCheckpoint { /// Number of entities already processed. pub processed: u64, @@ -165,9 +171,10 @@ impl EntitiesCheckpoint { /// Saves the block range. Usually, it's used to check the validity of some stage checkpoint across /// multiple executions. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct CheckpointBlockRange { /// The first block of the range, inclusive. pub from: BlockNumber, @@ -188,9 +195,10 @@ impl From<&RangeInclusive> for CheckpointBlockRange { } /// Saves the progress of a stage. 
-#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct StageCheckpoint { /// The maximum block processed by the stage. pub block_number: BlockNumber, @@ -255,9 +263,10 @@ impl StageCheckpoint { // TODO(alexey): add a merkle checkpoint. Currently it's hard because [`MerkleCheckpoint`] // is not a Copy type. /// Stage-specific checkpoint metrics. -#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub enum StageUnitCheckpoint { /// Saves the progress of AccountHashing stage. Account(AccountHashingCheckpoint), @@ -386,6 +395,7 @@ stage_unit_checkpoints!( mod tests { use super::*; use rand::Rng; + use reth_codecs::Compact; #[test] fn merkle_checkpoint_roundtrip() { diff --git a/crates/stages/types/src/execution.rs b/crates/stages/types/src/execution.rs index 61f7313a380..a334951abef 100644 --- a/crates/stages/types/src/execution.rs +++ b/crates/stages/types/src/execution.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use core::time::Duration; /// The thresholds at which the execution stage writes state changes to the database. 
/// diff --git a/crates/stages/types/src/lib.rs b/crates/stages/types/src/lib.rs index 0132c8b410d..4e01bf7dbf4 100644 --- a/crates/stages/types/src/lib.rs +++ b/crates/stages/types/src/lib.rs @@ -8,6 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +extern crate alloc; + mod id; use alloy_primitives::{BlockHash, BlockNumber}; pub use id::StageId; @@ -65,7 +67,7 @@ impl From for PipelineTarget { } } -impl std::fmt::Display for PipelineTarget { +impl core::fmt::Display for PipelineTarget { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Sync(block) => { diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 4f9c2d76b3f..c8d748b96f5 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -18,7 +18,7 @@ reth-db-models.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } reth-prune-types.workspace = true -reth-stages-types.workspace = true +reth-stages-types = { workspace = true, features = ["reth-codec"] } reth-storage-errors.workspace = true reth-trie-common.workspace = true From da982854697579dba801d4607700fa02c9e27483 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 09:45:30 +0100 Subject: [PATCH 902/970] feat: re-export used deps from reth-codecs (#13167) --- crates/storage/codecs/Cargo.toml | 3 +-- crates/storage/codecs/derive/src/compact/flags.rs | 9 +++++++-- crates/storage/codecs/derive/src/compact/generator.rs | 2 +- crates/storage/codecs/derive/src/compact/mod.rs | 9 +++++---- crates/storage/codecs/src/lib.rs | 5 +++++ crates/storage/codecs/src/private.rs | 3 +++ 6 files changed, 22 insertions(+), 9 deletions(-) create mode 100644 crates/storage/codecs/src/private.rs diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 
c3210b21ae2..76a3721629a 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -26,7 +26,7 @@ op-alloy-consensus = { workspace = true, optional = true } # misc bytes.workspace = true -modular-bitfield = { workspace = true, optional = true } +modular-bitfield.workspace = true visibility = { version = "0.1.1", optional = true} serde.workspace = true arbitrary = { workspace = true, features = ["derive"], optional = true } @@ -65,7 +65,6 @@ alloy = [ "dep:alloy-consensus", "dep:alloy-eips", "dep:alloy-genesis", - "dep:modular-bitfield", "dep:alloy-trie", ] op = ["alloy", "dep:op-alloy-consensus"] diff --git a/crates/storage/codecs/derive/src/compact/flags.rs b/crates/storage/codecs/derive/src/compact/flags.rs index 3242a611eb3..798c9ad53b4 100644 --- a/crates/storage/codecs/derive/src/compact/flags.rs +++ b/crates/storage/codecs/derive/src/compact/flags.rs @@ -1,9 +1,11 @@ use super::*; +use syn::Attribute; /// Generates the flag fieldset struct that is going to be used to store the length of fields and /// their potential presence. 
pub(crate) fn generate_flag_struct( ident: &Ident, + attrs: &[Attribute], has_lifetime: bool, fields: &FieldList, is_zstd: bool, @@ -13,6 +15,8 @@ pub(crate) fn generate_flag_struct( let flags_ident = format_ident!("{ident}Flags"); let mod_flags_ident = format_ident!("{ident}_flags"); + let reth_codecs = parse_reth_codecs_path(attrs).unwrap(); + let mut field_flags = vec![]; let total_bits = if is_enum { @@ -88,8 +92,9 @@ pub(crate) fn generate_flag_struct( pub use #mod_flags_ident::#flags_ident; #[allow(non_snake_case)] mod #mod_flags_ident { - use bytes::Buf; - use modular_bitfield::prelude::*; + use #reth_codecs::__private::Buf; + use #reth_codecs::__private::modular_bitfield; + use #reth_codecs::__private::modular_bitfield::prelude::*; #[doc = #docs] #[bitfield] diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index cf9bcc0c629..63fef05ad70 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -239,7 +239,7 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec< } /// Function to extract the crate path from `reth_codecs(crate = "...")` attribute. 
-fn parse_reth_codecs_path(attrs: &[Attribute]) -> syn::Result { +pub(crate) fn parse_reth_codecs_path(attrs: &[Attribute]) -> syn::Result { // let default_crate_path: syn::Path = syn::parse_str("reth-codecs").unwrap(); let mut reth_codecs_path: syn::Path = syn::parse_quote!(reth_codecs); for attr in attrs { diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index b9d5cf18d6b..1c1723d2ec9 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -48,7 +48,7 @@ pub fn derive(input: TokenStream, is_zstd: bool) -> TokenStream { let has_lifetime = has_lifetime(&generics); let fields = get_fields(&data); - output.extend(generate_flag_struct(&ident, has_lifetime, &fields, is_zstd)); + output.extend(generate_flag_struct(&ident, &attrs, has_lifetime, &fields, is_zstd)); output.extend(generate_from_to(&ident, &attrs, has_lifetime, &fields, is_zstd)); output.into() } @@ -235,7 +235,7 @@ mod tests { let mut output = quote! {}; let DeriveInput { ident, data, attrs, .. } = parse2(f_struct).unwrap(); let fields = get_fields(&data); - output.extend(generate_flag_struct(&ident, false, &fields, false)); + output.extend(generate_flag_struct(&ident, &attrs, false, &fields, false)); output.extend(generate_from_to(&ident, &attrs, false, &fields, false)); // Expected output in a TokenStream format. Commas matter! @@ -255,8 +255,9 @@ mod tests { #[allow(non_snake_case)] mod TestStruct_flags { - use bytes::Buf; - use modular_bitfield::prelude::*; + use reth_codecs::__private::Buf; + use reth_codecs::__private::modular_bitfield; + use reth_codecs::__private::modular_bitfield::prelude::*; #[doc = "Fieldset that facilitates compacting the parent type. 
Used bytes: 2 | Unused bits: 1"] #[bitfield] #[derive(Clone, Copy, Debug, Default)] diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 86d397ad24f..8c6ba5e4c76 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -39,6 +39,11 @@ pub mod txtype; #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; +// Used by generated code and doc tests. Not public API. +#[doc(hidden)] +#[path = "private.rs"] +pub mod __private; + /// Trait that implements the `Compact` codec. /// /// When deriving the trait for custom structs, be aware of certain limitations/recommendations: diff --git a/crates/storage/codecs/src/private.rs b/crates/storage/codecs/src/private.rs new file mode 100644 index 00000000000..6f54d9c9ca8 --- /dev/null +++ b/crates/storage/codecs/src/private.rs @@ -0,0 +1,3 @@ +pub use modular_bitfield; + +pub use bytes::Buf; From 6453b620949cca7df6661ca394e94e12854ff970 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 6 Dec 2024 08:45:08 +0000 Subject: [PATCH 903/970] feat(trie): use branch node hash masks in sparse trie (#13135) --- crates/trie/sparse/src/state.rs | 111 ++++++++++++----- crates/trie/sparse/src/trie.rs | 214 +++++++++++++++++++++----------- 2 files changed, 227 insertions(+), 98 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 7c79b58e867..edaca5c1cfc 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -103,12 +103,17 @@ impl SparseStateTrie { } /// Reveal unknown trie paths from provided leaf path and its proof for the account. + /// + /// Panics if trie updates retention is enabled. + /// /// NOTE: This method does not extensively validate the proof. 
pub fn reveal_account( &mut self, account: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { + assert!(!self.retain_updates); + if self.is_account_revealed(&account) { return Ok(()); } @@ -121,13 +126,14 @@ impl SparseStateTrie { let trie = self.state.reveal_root_with_provider( self.provider_factory.account_node_provider(), root_node, + None, self.retain_updates, )?; // Reveal the remaining proof nodes. for (path, bytes) in proof { let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path, node)?; + trie.reveal_node(path, node, None)?; } // Mark leaf path as revealed. @@ -137,6 +143,9 @@ impl SparseStateTrie { } /// Reveal unknown trie paths from provided leaf path and its proof for the storage slot. + /// + /// Panics if trie updates retention is enabled. + /// /// NOTE: This method does not extensively validate the proof. pub fn reveal_storage_slot( &mut self, @@ -144,6 +153,8 @@ impl SparseStateTrie { slot: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { + assert!(!self.retain_updates); + if self.is_storage_slot_revealed(&account, &slot) { return Ok(()); } @@ -156,13 +167,14 @@ impl SparseStateTrie { let trie = self.storages.entry(account).or_default().reveal_root_with_provider( self.provider_factory.storage_node_provider(account), root_node, + None, self.retain_updates, )?; // Reveal the remaining proof nodes. for (path, bytes) in proof { let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path, node)?; + trie.reveal_node(path, node, None)?; } // Mark leaf path as revealed. @@ -186,34 +198,48 @@ impl SparseStateTrie { let trie = self.state.reveal_root_with_provider( self.provider_factory.account_node_provider(), root_node, + multiproof.branch_node_hash_masks.get(&Nibbles::default()).copied(), self.retain_updates, )?; // Reveal the remaining proof nodes. 
for (path, bytes) in account_nodes { let node = TrieNode::decode(&mut &bytes[..])?; - trace!(target: "trie::sparse", ?path, ?node, "Revealing account node"); - trie.reveal_node(path, node)?; + let hash_mask = if let TrieNode::Branch(_) = node { + multiproof.branch_node_hash_masks.get(&path).copied() + } else { + None + }; + + trace!(target: "trie::sparse", ?path, ?node, ?hash_mask, "Revealing account node"); + trie.reveal_node(path, node, hash_mask)?; } } for (account, storage_subtree) in multiproof.storages { - let storage_subtree = storage_subtree.subtree.into_nodes_sorted(); - let mut storage_nodes = storage_subtree.into_iter().peekable(); + let subtree = storage_subtree.subtree.into_nodes_sorted(); + let mut nodes = subtree.into_iter().peekable(); - if let Some(root_node) = self.validate_root_node(&mut storage_nodes)? { + if let Some(root_node) = self.validate_root_node(&mut nodes)? { // Reveal root node if it wasn't already. let trie = self.storages.entry(account).or_default().reveal_root_with_provider( self.provider_factory.storage_node_provider(account), root_node, + storage_subtree.branch_node_hash_masks.get(&Nibbles::default()).copied(), self.retain_updates, )?; // Reveal the remaining proof nodes. 
- for (path, bytes) in storage_nodes { + for (path, bytes) in nodes { let node = TrieNode::decode(&mut &bytes[..])?; - trace!(target: "trie::sparse", ?account, ?path, ?node, "Revealing storage node"); - trie.reveal_node(path, node)?; + let hash_mask = if let TrieNode::Branch(_) = node { + storage_subtree.branch_node_hash_masks.get(&path).copied() + } else { + None + }; + + trace!(target: "trie::sparse", ?account, ?path, ?node, ?hash_mask, "Revealing storage node"); + trie.reveal_node(path, node, hash_mask)?; } } } @@ -394,27 +420,27 @@ mod tests { use rand::{rngs::StdRng, Rng, SeedableRng}; use reth_primitives_traits::Account; use reth_trie::{updates::StorageTrieUpdates, HashBuilder, TrieAccount, EMPTY_ROOT_HASH}; - use reth_trie_common::proof::ProofRetainer; + use reth_trie_common::{proof::ProofRetainer, StorageMultiProof, TrieMask}; #[test] - fn validate_proof_first_node_not_root() { + fn validate_root_node_first_node_not_root() { let sparse = SparseStateTrie::default(); let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; assert_matches!( - sparse.validate_root_node(&mut proof.into_iter().peekable()), + sparse.validate_root_node(&mut proof.into_iter().peekable(),), Err(SparseStateTrieError::InvalidRootNode { .. }) ); } #[test] - fn validate_proof_invalid_proof_with_empty_root() { + fn validate_root_node_invalid_proof_with_empty_root() { let sparse = SparseStateTrie::default(); let proof = [ (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), (Nibbles::from_nibbles([0x1]), Bytes::new()), ]; assert_matches!( - sparse.validate_root_node(&mut proof.into_iter().peekable()), + sparse.validate_root_node(&mut proof.into_iter().peekable(),), Err(SparseStateTrieError::InvalidRootNode { .. 
}) ); } @@ -429,6 +455,7 @@ mod tests { let mut sparse = SparseStateTrie::default(); assert_eq!(sparse.state, SparseTrie::Blind); + sparse.reveal_account(Default::default(), proofs.into_inner()).unwrap(); assert_eq!(sparse.state, SparseTrie::revealed_empty()); } @@ -443,6 +470,7 @@ mod tests { let mut sparse = SparseStateTrie::default(); assert!(sparse.storages.is_empty()); + sparse .reveal_storage_slot(Default::default(), Default::default(), proofs.into_inner()) .unwrap(); @@ -477,13 +505,15 @@ mod tests { slot_path_1.clone(), slot_path_2.clone(), ])); - storage_hash_builder.add_leaf(slot_path_1.clone(), &alloy_rlp::encode_fixed_size(&value_1)); - storage_hash_builder.add_leaf(slot_path_2.clone(), &alloy_rlp::encode_fixed_size(&value_2)); + storage_hash_builder.add_leaf(slot_path_1, &alloy_rlp::encode_fixed_size(&value_1)); + storage_hash_builder.add_leaf(slot_path_2, &alloy_rlp::encode_fixed_size(&value_2)); let storage_root = storage_hash_builder.root(); - let proof_nodes = storage_hash_builder.take_proof_nodes(); - let storage_proof_1 = proof_nodes.matching_nodes_sorted(&slot_path_1); - let storage_proof_2 = proof_nodes.matching_nodes_sorted(&slot_path_2); + let storage_proof_nodes = storage_hash_builder.take_proof_nodes(); + let storage_branch_node_hash_masks = HashMap::from_iter([ + (Nibbles::default(), TrieMask::new(0b010)), + (Nibbles::from_nibbles([0x1]), TrieMask::new(0b11)), + ]); let address_1 = b256!("1000000000000000000000000000000000000000000000000000000000000000"); let address_path_1 = Nibbles::unpack(address_1); @@ -504,16 +534,41 @@ mod tests { let root = hash_builder.root(); let proof_nodes = hash_builder.take_proof_nodes(); - let proof_1 = proof_nodes.matching_nodes_sorted(&address_path_1); - let proof_2 = proof_nodes.matching_nodes_sorted(&address_path_2); let mut sparse = SparseStateTrie::default().with_updates(true); - sparse.reveal_account(address_1, proof_1).unwrap(); - sparse.reveal_account(address_2, proof_2).unwrap(); - 
sparse.reveal_storage_slot(address_1, slot_1, storage_proof_1.clone()).unwrap(); - sparse.reveal_storage_slot(address_1, slot_2, storage_proof_2.clone()).unwrap(); - sparse.reveal_storage_slot(address_2, slot_1, storage_proof_1).unwrap(); - sparse.reveal_storage_slot(address_2, slot_2, storage_proof_2).unwrap(); + sparse + .reveal_multiproof( + HashMap::from_iter([ + (address_1, HashSet::from_iter([slot_1, slot_2])), + (address_2, HashSet::from_iter([slot_1, slot_2])), + ]), + MultiProof { + account_subtree: proof_nodes, + branch_node_hash_masks: HashMap::from_iter([( + Nibbles::from_nibbles([0x1]), + TrieMask::new(0b00), + )]), + storages: HashMap::from_iter([ + ( + address_1, + StorageMultiProof { + root, + subtree: storage_proof_nodes.clone(), + branch_node_hash_masks: storage_branch_node_hash_masks.clone(), + }, + ), + ( + address_2, + StorageMultiProof { + root, + subtree: storage_proof_nodes, + branch_node_hash_masks: storage_branch_node_hash_masks, + }, + ), + ]), + }, + ) + .unwrap(); assert_eq!(sparse.root(), Some(root)); diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index df5dd25486c..8fff0819bcb 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -59,9 +59,10 @@ impl SparseTrie { pub fn reveal_root( &mut self, root: TrieNode, + hash_mask: Option, retain_updates: bool, ) -> SparseTrieResult<&mut RevealedSparseTrie> { - self.reveal_root_with_provider(Default::default(), root, retain_updates) + self.reveal_root_with_provider(Default::default(), root, hash_mask, retain_updates) } } @@ -89,12 +90,14 @@ impl

SparseTrie

{ &mut self, provider: P, root: TrieNode, + hash_mask: Option, retain_updates: bool, ) -> SparseTrieResult<&mut RevealedSparseTrie

> { if self.is_blind() { *self = Self::Revealed(Box::new(RevealedSparseTrie::from_provider_and_root( provider, root, + hash_mask, retain_updates, )?)) } @@ -153,6 +156,8 @@ pub struct RevealedSparseTrie

{ provider: P, /// All trie nodes. nodes: HashMap, + /// All branch node hash masks. + branch_node_hash_masks: HashMap, /// All leaf values. values: HashMap>, /// Prefix set. @@ -167,6 +172,7 @@ impl

fmt::Debug for RevealedSparseTrie

{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RevealedSparseTrie") .field("nodes", &self.nodes) + .field("branch_hash_masks", &self.branch_node_hash_masks) .field("values", &self.values) .field("prefix_set", &self.prefix_set) .field("updates", &self.updates) @@ -180,6 +186,7 @@ impl Default for RevealedSparseTrie { Self { provider: Default::default(), nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), + branch_node_hash_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), updates: None, @@ -190,17 +197,22 @@ impl Default for RevealedSparseTrie { impl RevealedSparseTrie { /// Create new revealed sparse trie from the given root node. - pub fn from_root(node: TrieNode, retain_updates: bool) -> SparseTrieResult { + pub fn from_root( + node: TrieNode, + hash_mask: Option, + retain_updates: bool, + ) -> SparseTrieResult { let mut this = Self { provider: Default::default(), nodes: HashMap::default(), + branch_node_hash_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), updates: None, } .with_updates(retain_updates); - this.reveal_node(Nibbles::default(), node)?; + this.reveal_node(Nibbles::default(), node, hash_mask)?; Ok(this) } } @@ -210,18 +222,20 @@ impl

RevealedSparseTrie

{ pub fn from_provider_and_root( provider: P, node: TrieNode, + hash_mask: Option, retain_updates: bool, ) -> SparseTrieResult { let mut this = Self { provider, nodes: HashMap::default(), + branch_node_hash_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), updates: None, } .with_updates(retain_updates); - this.reveal_node(Nibbles::default(), node)?; + this.reveal_node(Nibbles::default(), node, hash_mask)?; Ok(this) } @@ -230,6 +244,7 @@ impl

RevealedSparseTrie

{ RevealedSparseTrie { provider, nodes: self.nodes, + branch_node_hash_masks: self.branch_node_hash_masks, values: self.values, prefix_set: self.prefix_set, updates: self.updates, @@ -261,7 +276,16 @@ impl

RevealedSparseTrie

{ } /// Reveal the trie node only if it was not known already. - pub fn reveal_node(&mut self, path: Nibbles, node: TrieNode) -> SparseTrieResult<()> { + pub fn reveal_node( + &mut self, + path: Nibbles, + node: TrieNode, + hash_mask: Option, + ) -> SparseTrieResult<()> { + if let Some(hash_mask) = hash_mask { + self.branch_node_hash_masks.insert(path.clone(), hash_mask); + } + match node { TrieNode::EmptyRoot => { debug_assert!(path.is_empty()); @@ -345,7 +369,7 @@ impl

RevealedSparseTrie

{ return Ok(()) } - self.reveal_node(path, TrieNode::decode(&mut &child[..])?) + self.reveal_node(path, TrieNode::decode(&mut &child[..])?, None) } /// Update the leaf node with provided value. @@ -720,25 +744,44 @@ impl

RevealedSparseTrie

{ // Update the masks only if we need to retain trie updates if self.updates.is_some() { // Set the trie mask - if node_type.store_in_db_trie() { + let tree_mask_value = if node_type.store_in_db_trie() { // A branch or an extension node explicitly set the // `store_in_db_trie` flag - tree_mask_values.push(true); + true } else { // Set the flag according to whether a child node was - // pre-calculated - // (`calculated = false`), meaning that it wasn't in the - // database - tree_mask_values.push(!calculated); - } - - // Set the hash mask. If a child node has a hash value AND is a - // branch node, set the hash mask and save the hash. - let hash = child.as_hash().filter(|_| node_type.is_branch()); - hash_mask_values.push(hash.is_some()); + // pre-calculated (`calculated = false`), meaning that it wasn't + // in the database + !calculated + }; + tree_mask_values.push(tree_mask_value); + + // Set the hash mask. If a child node is a revealed branch node OR + // is a blinded node that has its hash mask bit set according to the + // database, set the hash mask bit and save the hash. + let hash = child.as_hash().filter(|_| { + node_type.is_branch() || + (node_type.is_hash() && + self.branch_node_hash_masks + .get(&path) + .is_some_and(|mask| { + mask.is_bit_set(child_path.last().unwrap()) + })) + }); + let hash_mask_value = hash.is_some(); + hash_mask_values.push(hash_mask_value); if let Some(hash) = hash { hashes.push(hash); } + + trace!( + target: "trie::sparse", + ?path, + ?child_path, + ?tree_mask_value, + ?hash_mask_value, + "Updating branch node child masks" + ); } // Insert children in the resulting buffer in a normal order, @@ -933,7 +976,10 @@ where if let Some(node) = self.provider.blinded_node(child_path.clone())? 
{ let decoded = TrieNode::decode(&mut &node[..])?; trace!(target: "trie::sparse", ?child_path, ?decoded, "Revealing remaining blinded branch child"); - self.reveal_node(child_path.clone(), decoded)?; + // We'll never have to update the revealed branch node, only remove + // or do nothing, so we can safely ignore the hash mask here and + // pass `None`. + self.reveal_node(child_path.clone(), decoded, None)?; } } @@ -1029,6 +1075,10 @@ enum SparseNodeType { } impl SparseNodeType { + const fn is_hash(&self) -> bool { + matches!(self, Self::Hash) + } + const fn is_branch(&self) -> bool { matches!(self, Self::Branch { .. }) } @@ -1215,7 +1265,7 @@ mod tests { state: impl IntoIterator + Clone, destroyed_accounts: HashSet, proof_targets: impl IntoIterator, - ) -> (B256, TrieUpdates, ProofNodes) { + ) -> (B256, TrieUpdates, ProofNodes, HashMap) { let mut account_rlp = Vec::new(); let mut hash_builder = HashBuilder::default() @@ -1255,12 +1305,19 @@ mod tests { } let root = hash_builder.root(); let proof_nodes = hash_builder.take_proof_nodes(); + let branch_node_hash_masks = hash_builder + .updated_branch_nodes + .clone() + .unwrap_or_default() + .iter() + .map(|(path, node)| (path.clone(), node.hash_mask)) + .collect(); let mut trie_updates = TrieUpdates::default(); let removed_keys = node_iter.walker.take_removed_keys(); trie_updates.finalize(hash_builder, removed_keys, destroyed_accounts); - (root, trie_updates, proof_nodes) + (root, trie_updates, proof_nodes, branch_node_hash_masks) } /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal. 
@@ -1322,7 +1379,7 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = run_hash_builder([(key.clone(), value())], Default::default(), [key.clone()]); let mut sparse = RevealedSparseTrie::default().with_updates(true); @@ -1347,11 +1404,12 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( - paths.iter().cloned().zip(std::iter::repeat_with(value)), - Default::default(), - paths.clone(), - ); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(value)), + Default::default(), + paths.clone(), + ); let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { @@ -1375,11 +1433,12 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( - paths.iter().cloned().zip(std::iter::repeat_with(value)), - Default::default(), - paths.clone(), - ); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(value)), + Default::default(), + paths.clone(), + ); let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { @@ -1411,11 +1470,12 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( - paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(value)), - Default::default(), - paths.clone(), - ); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(value)), + Default::default(), + paths.clone(), + ); let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in 
&paths { @@ -1448,11 +1508,12 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( - paths.iter().cloned().zip(std::iter::repeat_with(|| old_value)), - Default::default(), - paths.clone(), - ); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(|| old_value)), + Default::default(), + paths.clone(), + ); let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { @@ -1465,11 +1526,12 @@ mod tests { assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( - paths.iter().cloned().zip(std::iter::repeat_with(|| new_value)), - Default::default(), - paths.clone(), - ); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(|| new_value)), + Default::default(), + paths.clone(), + ); for path in &paths { sparse.update_leaf(path.clone(), new_value_encoded.clone()).unwrap(); @@ -1737,15 +1799,17 @@ mod tests { TrieMask::new(0b11), )); - let mut sparse = RevealedSparseTrie::from_root(branch.clone(), false).unwrap(); + let mut sparse = + RevealedSparseTrie::from_root(branch.clone(), Some(TrieMask::new(0b01)), false) + .unwrap(); // Reveal a branch node and one of its children // // Branch (Mask = 11) // ├── 0 -> Hash (Path = 0) // └── 1 -> Leaf (Path = 1) - sparse.reveal_node(Nibbles::default(), branch).unwrap(); - sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf)).unwrap(); + sparse.reveal_node(Nibbles::default(), branch, Some(TrieMask::new(0b01))).unwrap(); + sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), None).unwrap(); // Removing a blinded leaf should result in 
an error assert_matches!( @@ -1768,15 +1832,17 @@ mod tests { TrieMask::new(0b11), )); - let mut sparse = RevealedSparseTrie::from_root(branch.clone(), false).unwrap(); + let mut sparse = + RevealedSparseTrie::from_root(branch.clone(), Some(TrieMask::new(0b01)), false) + .unwrap(); // Reveal a branch node and one of its children // // Branch (Mask = 11) // ├── 0 -> Hash (Path = 0) // └── 1 -> Leaf (Path = 1) - sparse.reveal_node(Nibbles::default(), branch).unwrap(); - sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf)).unwrap(); + sparse.reveal_node(Nibbles::default(), branch, Some(TrieMask::new(0b01))).unwrap(); + sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), None).unwrap(); // Removing a non-existent leaf should be a noop let sparse_old = sparse.clone(); @@ -1814,7 +1880,7 @@ mod tests { // Insert state updates into the hash builder and calculate the root state.extend(update); - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = run_hash_builder( state.clone(), Default::default(), @@ -1845,7 +1911,7 @@ mod tests { let sparse_root = updated_sparse.root(); let sparse_updates = updated_sparse.take_updates(); - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = run_hash_builder( state.clone(), Default::default(), @@ -1926,22 +1992,24 @@ mod tests { }; // Generate the proof for the root node and initialize the sparse trie with it - let (_, _, hash_builder_proof_nodes) = run_hash_builder( + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( [(key1(), value()), (key3(), value())], Default::default(), [Nibbles::default()], ); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + 
branch_node_hash_masks.get(&Nibbles::default()).copied(), false, ) .unwrap(); // Generate the proof for the first key and reveal it in the sparse trie - let (_, _, hash_builder_proof_nodes) = + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder([(key1(), value()), (key3(), value())], Default::default(), [key1()]); for (path, node) in hash_builder_proof_nodes.nodes_sorted() { - sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + let hash_mask = branch_node_hash_masks.get(&path).copied(); + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); } // Check that the branch node exists with only two nibbles set @@ -1960,10 +2028,11 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let (_, _, hash_builder_proof_nodes) = + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder([(key1(), value()), (key3(), value())], Default::default(), [key3()]); for (path, node) in hash_builder_proof_nodes.nodes_sorted() { - sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + let hash_mask = branch_node_hash_masks.get(&path).copied(); + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); } // Check that nothing changed in the branch node @@ -1974,7 +2043,7 @@ mod tests { // Generate the nodes for the full trie with all three key using the hash builder, and // compare them to the sparse trie - let (_, _, hash_builder_proof_nodes) = run_hash_builder( + let (_, _, hash_builder_proof_nodes, _) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], Default::default(), [key1(), key2(), key3()], @@ -2001,26 +2070,28 @@ mod tests { let value = || Account::default(); // Generate the proof for the root node and initialize the sparse trie with it - let (_, _, hash_builder_proof_nodes) = run_hash_builder( + let (_, _, hash_builder_proof_nodes, 
branch_node_hash_masks) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], Default::default(), [Nibbles::default()], ); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + branch_node_hash_masks.get(&Nibbles::default()).copied(), false, ) .unwrap(); // Generate the proof for the children of the root branch node and reveal it in the sparse // trie - let (_, _, hash_builder_proof_nodes) = run_hash_builder( + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], Default::default(), [key1(), Nibbles::from_nibbles_unchecked([0x01])], ); for (path, node) in hash_builder_proof_nodes.nodes_sorted() { - sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + let hash_mask = branch_node_hash_masks.get(&path).copied(); + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); } // Check that the branch node exists @@ -2039,13 +2110,14 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let (_, _, hash_builder_proof_nodes) = run_hash_builder( + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], Default::default(), [key2()], ); for (path, node) in hash_builder_proof_nodes.nodes_sorted() { - sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + let hash_mask = branch_node_hash_masks.get(&path).copied(); + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); } // Check that nothing changed in the extension node @@ -2076,13 +2148,14 @@ mod tests { }; // Generate the proof for the root node and initialize the sparse trie with it - let (_, _, hash_builder_proof_nodes) = run_hash_builder( + let (_, _, hash_builder_proof_nodes, 
branch_node_hash_masks) = run_hash_builder( [(key1(), value()), (key2(), value())], Default::default(), [Nibbles::default()], ); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + branch_node_hash_masks.get(&Nibbles::default()).copied(), false, ) .unwrap(); @@ -2103,10 +2176,11 @@ mod tests { ); // Generate the proof for the first key and reveal it in the sparse trie - let (_, _, hash_builder_proof_nodes) = + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder([(key1(), value()), (key2(), value())], Default::default(), [key1()]); for (path, node) in hash_builder_proof_nodes.nodes_sorted() { - sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); + let hash_mask = branch_node_hash_masks.get(&path).copied(); + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); } // Check that the branch node wasn't overwritten by the extension node in the proof @@ -2200,7 +2274,7 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, _) = run_hash_builder( + let (hash_builder_root, hash_builder_updates, _, _) = run_hash_builder( [(key1(), value()), (key2(), value())], Default::default(), [Nibbles::default()], From 242bbaa9c460d90b5a307926d9810a22d1585cfc Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 6 Dec 2024 09:05:10 +0000 Subject: [PATCH 904/970] fix: throw error if genesis header found on static files, but empty db (#13157) --- crates/node/builder/src/launch/common.rs | 6 ++-- crates/storage/db-common/src/init.rs | 42 +++++++++++++++--------- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index f4557bd2272..648edc1938e 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -14,7 +14,7 @@ use 
reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; -use reth_db_common::init::{init_genesis, InitDatabaseError}; +use reth_db_common::init::{init_genesis, InitStorageError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_engine_local::MiningMode; use reth_engine_tree::tree::{InvalidBlockHook, InvalidBlockHooks, NoopInvalidBlockHook}; @@ -542,13 +542,13 @@ where } /// Convenience function to [`Self::init_genesis`] - pub fn with_genesis(self) -> Result { + pub fn with_genesis(self) -> Result { init_genesis(self.provider_factory())?; Ok(self) } /// Write the genesis block and state if it has not already been written - pub fn init_genesis(&self) -> Result { + pub fn init_genesis(&self) -> Result { init_genesis(self.provider_factory()) } diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 493b27be780..95b2a5d5c4a 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -16,8 +16,8 @@ use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, writer::UnifiedStorageWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, - OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointWriter, StateWriter, - StaticFileProviderFactory, StorageLocation, TrieWriter, + OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, + StateWriter, StaticFileProviderFactory, StorageLocation, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; @@ -43,17 +43,20 @@ pub const 
AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228; /// Soft limit for the number of flushed updates after which to log progress summary. const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000; -/// Database initialization error type. +/// Storage initialization error type. #[derive(Debug, thiserror::Error, PartialEq, Eq, Clone)] -pub enum InitDatabaseError { +pub enum InitStorageError { + /// Genesis header found on static files but the database is empty. + #[error("static files found, but the database is uninitialized. If attempting to re-syncing, delete both.")] + UninitializedDatabase, /// An existing genesis block was found in the database, and its hash did not match the hash of /// the chainspec. - #[error("genesis hash in the database does not match the specified chainspec: chainspec is {chainspec_hash}, database is {database_hash}")] + #[error("genesis hash in the storage does not match the specified chainspec: chainspec is {chainspec_hash}, database is {storage_hash}")] GenesisHashMismatch { /// Expected genesis hash. chainspec_hash: B256, /// Actual genesis hash. - database_hash: B256, + storage_hash: B256, }, /// Provider error. 
#[error(transparent)] @@ -63,18 +66,19 @@ pub enum InitDatabaseError { StateRootMismatch(GotExpected), } -impl From for InitDatabaseError { +impl From for InitStorageError { fn from(error: DatabaseError) -> Self { Self::Provider(ProviderError::Database(error)) } } /// Write the genesis block if it has not already been written -pub fn init_genesis(factory: &PF) -> Result +pub fn init_genesis(factory: &PF) -> Result where PF: DatabaseProviderFactory + StaticFileProviderFactory> + ChainSpecProvider + + StageCheckpointReader + BlockHashReader, PF::ProviderRW: StaticFileProviderFactory + StageCheckpointWriter @@ -96,13 +100,21 @@ where Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {} Ok(Some(block_hash)) => { if block_hash == hash { + // Some users will at times attempt to re-sync from scratch by just deleting the + // database. Since `factory.block_hash` will only query the static files, we need to + // make sure that our database has been written to, and throw error if it's empty. + if factory.get_stage_checkpoint(StageId::Headers)?.is_none() { + error!(target: "reth::storage", "Genesis header found on static files, but database is uninitialized."); + return Err(InitStorageError::UninitializedDatabase) + } + debug!("Genesis already written, skipping."); return Ok(hash) } - return Err(InitDatabaseError::GenesisHashMismatch { + return Err(InitStorageError::GenesisHashMismatch { chainspec_hash: hash, - database_hash: block_hash, + storage_hash: block_hash, }) } Err(e) => { @@ -376,7 +388,7 @@ where ?expected_state_root, "State root from state dump does not match state root in current header." 
); - return Err(InitDatabaseError::StateRootMismatch(GotExpected { + return Err(InitStorageError::StateRootMismatch(GotExpected { got: dump_state_root, expected: expected_state_root, }) @@ -409,7 +421,7 @@ where "Computed state root does not match state root in state dump" ); - return Err(InitDatabaseError::StateRootMismatch(GotExpected { + return Err(InitStorageError::StateRootMismatch(GotExpected { got: computed_state_root, expected: expected_state_root, }) @@ -622,7 +634,7 @@ mod tests { fn collect_table_entries( tx: &::TX, - ) -> Result>, InitDatabaseError> + ) -> Result>, InitStorageError> where DB: Database, T: Table, @@ -672,9 +684,9 @@ mod tests { assert_eq!( genesis_hash.unwrap_err(), - InitDatabaseError::GenesisHashMismatch { + InitStorageError::GenesisHashMismatch { chainspec_hash: MAINNET_GENESIS_HASH, - database_hash: SEPOLIA_GENESIS_HASH + storage_hash: SEPOLIA_GENESIS_HASH } ) } From 44a66d32b5225c9d1f30f999bcafabaa3fba5e49 Mon Sep 17 00:00:00 2001 From: Skylar Ray <137945430+sky-coderay@users.noreply.github.com> Date: Fri, 6 Dec 2024 12:12:24 +0200 Subject: [PATCH 905/970] chore: Code Simplification and Optimization (#12840) Co-authored-by: Matthias Seitz --- testing/ef-tests/src/models.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 742498e81bf..160b0ec1d0c 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -168,10 +168,15 @@ impl State { }; tx.put::(address, reth_account)?; tx.put::(hashed_address, reth_account)?; + if let Some(code_hash) = code_hash { tx.put::(code_hash, Bytecode::new_raw(account.code.clone()))?; } - account.storage.iter().filter(|(_, v)| !v.is_zero()).try_for_each(|(k, v)| { + + for (k, v) in &account.storage { + if v.is_zero() { + continue + } let storage_key = B256::from_slice(&k.to_be_bytes::<32>()); tx.put::( address, @@ -180,10 +185,9 @@ impl State { tx.put::( hashed_address, StorageEntry 
{ key: keccak256(storage_key), value: *v }, - ) - })?; + )?; + } } - Ok(()) } } From d3e09c8c433795c43cade4d774022e1f670fe02f Mon Sep 17 00:00:00 2001 From: Elvis <43846394+Elvis339@users.noreply.github.com> Date: Fri, 6 Dec 2024 14:11:29 +0400 Subject: [PATCH 906/970] perf: profile TransactionsManager::poll hash fetching (#12975) Co-authored-by: Matthias Seitz --- crates/net/network/Cargo.toml | 7 +- .../benches/{bench.rs => broadcast.rs} | 0 .../benches/tx_manager_hash_fetching.rs | 97 +++++++++++++++++++ crates/net/network/src/test_utils/testnet.rs | 55 ++++++++++- crates/net/network/tests/it/main.rs | 1 + .../tests/it/transaction_hash_fetching.rs | 68 +++++++++++++ 6 files changed, 225 insertions(+), 3 deletions(-) rename crates/net/network/benches/{bench.rs => broadcast.rs} (100%) create mode 100644 crates/net/network/benches/tx_manager_hash_fetching.rs create mode 100644 crates/net/network/tests/it/transaction_hash_fetching.rs diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index a4eff9d3a90..9e8c59226b2 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -141,6 +141,11 @@ test-utils = [ ] [[bench]] -name = "bench" +name = "broadcast" +required-features = ["test-utils"] +harness = false + +[[bench]] +name = "tx_manager_hash_fetching" required-features = ["test-utils"] harness = false diff --git a/crates/net/network/benches/bench.rs b/crates/net/network/benches/broadcast.rs similarity index 100% rename from crates/net/network/benches/bench.rs rename to crates/net/network/benches/broadcast.rs diff --git a/crates/net/network/benches/tx_manager_hash_fetching.rs b/crates/net/network/benches/tx_manager_hash_fetching.rs new file mode 100644 index 00000000000..1ab9b8fc427 --- /dev/null +++ b/crates/net/network/benches/tx_manager_hash_fetching.rs @@ -0,0 +1,97 @@ +#![allow(missing_docs)] +use alloy_primitives::U256; +use criterion::*; +use pprof::criterion::{Output, PProfProfiler}; +use rand::thread_rng; +use 
reth_network::{ + test_utils::Testnet, + transactions::{ + TransactionFetcherConfig, TransactionPropagationMode::Max, TransactionsManagerConfig, + }, +}; +use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; +use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; +use tokio::runtime::Runtime as TokioRuntime; + +criterion_group!( + name = tx_fetch_benches; + config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); + targets = tx_fetch_bench +); + +pub fn tx_fetch_bench(c: &mut Criterion) { + let rt = TokioRuntime::new().unwrap(); + + let mut group = c.benchmark_group("Transaction Fetch"); + group.sample_size(10); + + group.bench_function("fetch_transactions", |b| { + b.to_async(&rt).iter_with_setup( + || { + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + let tx_manager_config = TransactionsManagerConfig { + propagation_mode: Max(0), + transaction_fetcher_config: TransactionFetcherConfig { + max_inflight_requests: 1, + ..Default::default() + }, + ..Default::default() + }; + + let provider = MockEthProvider::default(); + let num_peers = 10; + let net = Testnet::create_with(num_peers, provider.clone()).await; + + // install request handlers + let net = net.with_eth_pool_config(tx_manager_config); + let handle = net.spawn(); + + // connect all the peers first + handle.connect_peers().await; + + let listening_peer = &handle.peers()[num_peers - 1]; + let listening_peer_tx_listener = + listening_peer.pool().unwrap().pending_transactions_listener(); + + let num_tx_per_peer = 10; + + for i in 1..num_peers { + let peer = &handle.peers()[i]; + let peer_pool = peer.pool().unwrap(); + + for _ in 0..num_tx_per_peer { + let mut gen = TransactionGenerator::new(thread_rng()); + let tx = gen.gen_eip1559_pooled(); + let sender = tx.sender(); + provider.add_account( + sender, + ExtendedAccount::new(0, U256::from(100_000_000)), + ); + 
peer_pool.add_external_transaction(tx.clone()).await.unwrap(); + } + } + + // Total expected transactions + let total_expected_tx = num_tx_per_peer * (num_peers - 1); + + (listening_peer_tx_listener, total_expected_tx) + }) + }) + }, + |(mut listening_peer_tx_listener, total_expected_tx)| async move { + let mut received_tx = 0; + while listening_peer_tx_listener.recv().await.is_some() { + received_tx += 1; + if received_tx >= total_expected_tx { + break; + } + } + }, + ) + }); + + group.finish(); +} + +criterion_main!(tx_fetch_benches); diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 3a50d890e70..7fd9f690fde 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -194,6 +194,27 @@ where )) }) } + + /// Installs an eth pool on each peer with custom transaction manager config + pub fn with_eth_pool_config( + self, + tx_manager_config: TransactionsManagerConfig, + ) -> Testnet> { + self.map_pool(|peer| { + let blob_store = InMemoryBlobStore::default(); + let pool = TransactionValidationTaskExecutor::eth( + peer.client.clone(), + MAINNET.clone(), + blob_store.clone(), + TokioTaskExecutor::default(), + ); + + peer.map_transactions_manager_with_config( + EthTransactionPool::eth_pool(pool, blob_store, Default::default()), + tx_manager_config.clone(), + ) + }) + } } impl Testnet @@ -463,6 +484,36 @@ where secret_key, } } + + /// Map transactions manager with custom config + pub fn map_transactions_manager_with_config

( + self, + pool: P, + config: TransactionsManagerConfig, + ) -> Peer + where + P: TransactionPool, + { + let Self { mut network, request_handler, client, secret_key, .. } = self; + let (tx, rx) = unbounded_channel(); + network.set_transactions(tx); + + let transactions_manager = TransactionsManager::new( + network.handle().clone(), + pool.clone(), + rx, + config, // Use provided config + ); + + Peer { + network, + request_handler, + transactions_manager: Some(transactions_manager), + pool: Some(pool), + client, + secret_key, + } + } } impl Peer @@ -682,7 +733,7 @@ impl NetworkEventStream { /// Awaits the next `num` events for an established session pub async fn take_session_established(&mut self, mut num: usize) -> Vec { if num == 0 { - return Vec::new() + return Vec::new(); } let mut peers = Vec::with_capacity(num); while let Some(ev) = self.inner.next().await { @@ -691,7 +742,7 @@ impl NetworkEventStream { peers.push(peer_id); num -= 1; if num == 0 { - return peers + return peers; } } _ => continue, diff --git a/crates/net/network/tests/it/main.rs b/crates/net/network/tests/it/main.rs index ede445510c2..dd98c9624d6 100644 --- a/crates/net/network/tests/it/main.rs +++ b/crates/net/network/tests/it/main.rs @@ -6,6 +6,7 @@ mod multiplex; mod requests; mod session; mod startup; +mod transaction_hash_fetching; mod txgossip; const fn main() {} diff --git a/crates/net/network/tests/it/transaction_hash_fetching.rs b/crates/net/network/tests/it/transaction_hash_fetching.rs new file mode 100644 index 00000000000..7f1d7593a22 --- /dev/null +++ b/crates/net/network/tests/it/transaction_hash_fetching.rs @@ -0,0 +1,68 @@ +use alloy_primitives::U256; +use rand::thread_rng; +use reth_network::{ + test_utils::Testnet, + transactions::{TransactionPropagationMode::Max, TransactionsManagerConfig}, +}; +use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; +use reth_tracing::init_test_tracing; +use reth_transaction_pool::{test_utils::TransactionGenerator, 
PoolTransaction, TransactionPool}; +use tokio::time::Duration; + +#[tokio::test(flavor = "multi_thread")] +#[ignore] +async fn transaction_hash_fetching() { + init_test_tracing(); + + let mut config = TransactionsManagerConfig { propagation_mode: Max(0), ..Default::default() }; + config.transaction_fetcher_config.max_inflight_requests = 1; + + let provider = MockEthProvider::default(); + let num_peers = 10; + let net = Testnet::create_with(num_peers, provider.clone()).await; + + // install request handlers + let net = net.with_eth_pool_config(config); + let handle = net.spawn(); + + // connect all the peers first + handle.connect_peers().await; + + let listening_peer = &handle.peers()[num_peers - 1]; + let mut listening_peer_tx_listener = + listening_peer.pool().unwrap().pending_transactions_listener(); + + let num_tx_per_peer = 10; + + // Generate transactions for peers + for i in 1..num_peers { + let peer = &handle.peers()[i]; + let peer_pool = peer.pool().unwrap(); + + for _ in 0..num_tx_per_peer { + let mut gen = TransactionGenerator::new(thread_rng()); + let tx = gen.gen_eip1559_pooled(); + let sender = tx.sender(); + provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000))); + peer_pool.add_external_transaction(tx).await.unwrap(); + } + } + + // Total expected transactions + let total_expected_tx = num_tx_per_peer * (num_peers - 1); + let mut received_tx = 0; + + loop { + tokio::select! { + Some(_) = listening_peer_tx_listener.recv() => { + received_tx += 1; + if received_tx >= total_expected_tx { + break; + } + } + _ = tokio::time::sleep(Duration::from_secs(10)) => { + panic!("Timed out waiting for transactions. 
Received {received_tx}/{total_expected_tx}"); + } + } + } +} From ab87f22cabd4c7eb54e260c7158c6170eb951579 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 11:44:40 +0100 Subject: [PATCH 907/970] fix: push job front of queue (#13177) --- crates/exex/exex/src/backfill/job.rs | 3 ++- crates/exex/exex/src/backfill/stream.rs | 24 +++++++++++++++++------- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index f93c5efa721..5888368e3c2 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -25,7 +25,8 @@ pub(super) type BackfillJobResult = Result; /// Backfill job started for a specific range. /// /// It implements [`Iterator`] that executes blocks in batches according to the provided thresholds -/// and yields [`Chain`] +/// and yields [`Chain`]. In other words, this iterator can yield multiple items for the given range +/// depending on the configured thresholds. #[derive(Debug)] pub struct BackfillJob { pub(crate) executor: E, diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index 95da076c7c8..d88ca87e7ac 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -77,15 +77,24 @@ where self } - /// Spawns a new task calling the [`BackfillTaskIterator::next`] method and pushes it to the - /// [`BackfillTasks`] queue. - fn push_task(&mut self, mut job: BackfillTaskIterator) { + /// Spawns a new task calling the [`BackfillTaskIterator::next`] method and pushes it to the end + /// of the [`BackfillTasks`] queue. + fn push_back(&mut self, mut job: BackfillTaskIterator) { self.tasks.push_back(tokio::task::spawn_blocking(move || BackfillTaskOutput { result: job.next(), job, })); } + /// Spawns a new task calling the [`BackfillTaskIterator::next`] method and pushes it to the + /// front of the [`BackfillTasks`] queue. 
+ fn push_front(&mut self, mut job: BackfillTaskIterator) { + self.tasks.push_front(tokio::task::spawn_blocking(move || BackfillTaskOutput { + result: job.next(), + job, + })); + } + /// Polls the next task in the [`BackfillTasks`] queue until it returns a non-empty result. fn poll_next_task(&mut self, cx: &mut Context<'_>) -> Poll>> { while let Some(res) = ready!(self.tasks.poll_next_unpin(cx)) { @@ -93,8 +102,9 @@ where if let BackfillTaskOutput { result: Some(job_result), job } = task_result { // If the task returned a non-empty result, a new task advancing the job is created - // and pushed to the front of the queue. - self.push_task(job); + // and pushed to the __front__ of the queue, so that the next item of this returned + // next. + self.push_front(job); return Poll::Ready(Some(job_result)) }; @@ -130,7 +140,7 @@ where range: block_number..=block_number, stream_parallelism: this.parallelism, }) as BackfillTaskIterator<_>; - this.push_task(job); + this.push_back(job); } this.poll_next_task(cx) @@ -170,7 +180,7 @@ where range, stream_parallelism: this.parallelism, }) as BackfillTaskIterator<_>; - this.push_task(job); + this.push_back(job); } this.poll_next_task(cx) From cf2a6a1ee8e8375b107627e7525a726405a5053f Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 6 Dec 2024 16:30:50 +0400 Subject: [PATCH 908/970] feat: EthApi traits abstraction (#13170) --- Cargo.lock | 2 + crates/e2e-test-utils/src/node.rs | 4 +- crates/e2e-test-utils/src/rpc.rs | 5 +- crates/node/builder/src/launch/common.rs | 1 + crates/node/builder/src/rpc.rs | 24 ++- crates/optimism/rpc/src/eth/block.rs | 21 ++- crates/optimism/rpc/src/eth/call.rs | 4 +- crates/optimism/rpc/src/eth/mod.rs | 17 +- crates/optimism/rpc/src/eth/pending_block.rs | 116 +++++++++--- crates/optimism/rpc/src/eth/transaction.rs | 8 +- crates/primitives-traits/src/block/mod.rs | 3 + crates/primitives/src/block.rs | 26 --- crates/primitives/src/transaction/mod.rs | 5 + crates/primitives/src/transaction/pooled.rs 
| 12 ++ crates/rpc/rpc-api/src/otterscan.rs | 11 +- crates/rpc/rpc-builder/src/lib.rs | 54 ++++-- crates/rpc/rpc-builder/tests/it/http.rs | 160 +++++++++------- crates/rpc/rpc-builder/tests/it/middleware.rs | 4 +- crates/rpc/rpc-eth-api/Cargo.toml | 1 + crates/rpc/rpc-eth-api/src/core.rs | 30 ++- crates/rpc/rpc-eth-api/src/helpers/block.rs | 28 ++- crates/rpc/rpc-eth-api/src/helpers/call.rs | 21 ++- crates/rpc/rpc-eth-api/src/helpers/fee.rs | 3 +- .../rpc-eth-api/src/helpers/pending_block.rs | 178 +++++++----------- crates/rpc/rpc-eth-api/src/helpers/signer.rs | 11 +- crates/rpc/rpc-eth-api/src/helpers/spec.rs | 5 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 10 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 41 ++-- .../rpc-eth-api/src/helpers/transaction.rs | 14 +- crates/rpc/rpc-eth-api/src/lib.rs | 2 +- crates/rpc/rpc-eth-api/src/types.rs | 5 +- crates/rpc/rpc-eth-types/src/pending_block.rs | 10 +- crates/rpc/rpc-testing-util/src/debug.rs | 4 +- crates/rpc/rpc-testing-util/tests/it/trace.rs | 4 +- crates/rpc/rpc-types-compat/Cargo.toml | 1 + crates/rpc/rpc-types-compat/src/block.rs | 78 +++++--- crates/rpc/rpc/src/debug.rs | 56 +++--- crates/rpc/rpc/src/engine.rs | 3 +- crates/rpc/rpc/src/eth/bundle.rs | 31 +-- crates/rpc/rpc/src/eth/core.rs | 20 +- crates/rpc/rpc/src/eth/helpers/block.rs | 25 ++- crates/rpc/rpc/src/eth/helpers/call.rs | 4 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 94 ++++++++- crates/rpc/rpc/src/eth/helpers/signer.rs | 29 +-- crates/rpc/rpc/src/eth/helpers/spec.rs | 11 +- crates/rpc/rpc/src/eth/helpers/trace.rs | 11 +- crates/rpc/rpc/src/eth/helpers/transaction.rs | 6 +- crates/rpc/rpc/src/eth/sim_bundle.rs | 16 +- crates/rpc/rpc/src/otterscan.rs | 33 +++- crates/rpc/rpc/src/trace.rs | 65 ++++--- .../provider/src/providers/consistent.rs | 6 +- crates/storage/storage-api/src/header.rs | 3 + crates/transaction-pool/src/traits.rs | 10 +- 53 files changed, 843 insertions(+), 503 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
666293e58e6..37d05d1e0c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9052,6 +9052,7 @@ dependencies = [ "alloy-json-rpc", "alloy-network", "alloy-primitives", + "alloy-rlp", "alloy-rpc-types-eth", "alloy-rpc-types-mev", "alloy-serde", @@ -9171,6 +9172,7 @@ dependencies = [ "alloy-rpc-types-eth", "jsonrpsee-types", "reth-primitives", + "reth-primitives-traits", "serde", "serde_json", ] diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index dcd24df5c7a..b5dd44841dc 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -97,7 +97,9 @@ where where Engine::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, Engine::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, - AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt, + AddOns::EthApi: EthApiSpec> + + EthTransactions + + TraceExt, { let mut chain = Vec::with_capacity(length as usize); for i in 0..length { diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 37ee12987ca..cdc72a29538 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -4,6 +4,7 @@ use alloy_primitives::{Bytes, B256}; use reth_chainspec::EthereumHardforks; use reth_node_api::{FullNodeComponents, NodePrimitives}; use reth_node_builder::{rpc::RpcRegistry, NodeTypes}; +use reth_provider::BlockReader; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ helpers::{EthApiSpec, EthTransactions, TraceExt}, @@ -26,7 +27,9 @@ where >, >, >, - EthApi: EthApiSpec + EthTransactions + TraceExt, + EthApi: EthApiSpec> + + EthTransactions + + TraceExt, { /// Injects a raw transaction into the node tx pool via RPC server pub async fn inject_tx(&self, raw_tx: Bytes) -> Result { diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 648edc1938e..104ecef9e80 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -927,6 +927,7 
@@ where alloy_rpc_types::Transaction, alloy_rpc_types::Block, alloy_rpc_types::Receipt, + alloy_rpc_types::Header, >::chain_id(&client) .await })? diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index edc10fecc58..a4010e52db3 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -19,7 +19,7 @@ use reth_node_core::{ }; use reth_payload_builder::PayloadStore; use reth_primitives::EthPrimitives; -use reth_provider::providers::ProviderNodeTypes; +use reth_provider::{providers::ProviderNodeTypes, BlockReader}; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, EthApi, @@ -408,7 +408,16 @@ where PayloadBuilder: PayloadBuilder::Engine>, Pool: TransactionPool::Transaction>, >, - EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, + EthApi: EthApiTypes + + FullEthApiServer< + Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, + > + AddDevSigners + + Unpin + + 'static, EV: EngineValidatorBuilder, { /// Launches the RPC servers with the given context and an additional hook for extending @@ -531,7 +540,16 @@ where PayloadBuilder: PayloadBuilder::Engine>, Pool: TransactionPool::Transaction>, >, - EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, + EthApi: EthApiTypes + + FullEthApiServer< + Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, + > + AddDevSigners + + Unpin + + 'static, EV: EngineValidatorBuilder, { type Handle = RpcHandle; diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index a37a8a15264..3899e0b7f5c 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -1,12 +1,14 @@ //! Loads and formats OP block RPC response. 
+use alloy_consensus::BlockHeader; use alloy_rpc_types_eth::BlockId; use op_alloy_network::Network; use op_alloy_rpc_types::OpTransactionReceipt; use reth_chainspec::ChainSpecProvider; +use reth_node_api::BlockBody; use reth_optimism_chainspec::OpChainSpec; -use reth_primitives::TransactionMeta; -use reth_provider::HeaderProvider; +use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use reth_provider::{BlockReader, HeaderProvider}; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, RpcReceipt, @@ -19,6 +21,7 @@ where Self: LoadBlock< Error = OpEthApiError, NetworkTypes: Network, + Provider: BlockReader, >, N: OpNodeCore + HeaderProvider>, { @@ -30,22 +33,22 @@ where Self: LoadReceipt, { if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { - let block_number = block.number; - let base_fee = block.base_fee_per_gas; + let block_number = block.number(); + let base_fee = block.base_fee_per_gas(); let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas; - let timestamp = block.timestamp; + let excess_blob_gas = block.excess_blob_gas(); + let timestamp = block.timestamp(); let l1_block_info = reth_optimism_evm::extract_l1_info(&block.body).map_err(OpEthApiError::from)?; return block .body - .transactions - .into_iter() + .transactions() + .iter() .zip(receipts.iter()) .enumerate() - .map(|(idx, (ref tx, receipt))| -> Result<_, _> { + .map(|(idx, (tx, receipt))| -> Result<_, _> { let meta = TransactionMeta { tx_hash: tx.hash(), index: idx as u64, diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index c9d874f7392..f7691756408 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,9 +1,9 @@ use super::OpNodeCore; use crate::{OpEthApi, OpEthApiError}; -use alloy_consensus::Header; use alloy_primitives::{Bytes, TxKind, U256}; use alloy_rpc_types_eth::transaction::TransactionRequest; 
use reth_evm::ConfigureEvm; +use reth_provider::ProviderHeader; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, FromEthApiError, IntoEthApiError, @@ -28,7 +28,7 @@ where impl Call for OpEthApi where - Self: LoadState> + SpawnBlocking, + Self: LoadState>> + SpawnBlocking, Self::Error: From, N: OpNodeCore, { diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 0e657bf0440..4304a2a3741 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -13,7 +13,6 @@ use reth_optimism_primitives::OpPrimitives; use std::{fmt, sync::Arc}; -use alloy_consensus::Header; use alloy_primitives::U256; use op_alloy_network::Optimism; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -22,8 +21,8 @@ use reth_network_api::NetworkInfo; use reth_node_builder::EthApiBuilderCtx; use reth_provider::{ BlockNumReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, - EvmEnvProvider, NodePrimitivesProvider, ProviderBlock, ProviderReceipt, StageCheckpointReader, - StateProviderFactory, + EvmEnvProvider, NodePrimitivesProvider, ProviderBlock, ProviderHeader, ProviderReceipt, + ProviderTx, StageCheckpointReader, StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -155,13 +154,15 @@ where Network: NetworkInfo, >, { + type Transaction = ProviderTx; + #[inline] fn starting_block(&self) -> U256 { self.inner.eth_api.starting_block() } #[inline] - fn signers(&self) -> &parking_lot::RwLock>> { + fn signers(&self) -> &parking_lot::RwLock>>>> { self.inner.eth_api.signers() } } @@ -236,7 +237,13 @@ where impl Trace for OpEthApi where - Self: RpcNodeCore + LoadState>, + Self: RpcNodeCore + + LoadState< + Evm: ConfigureEvm< + Header = ProviderHeader, + Transaction = ProviderTx, + >, + >, N: OpNodeCore, { } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs 
b/crates/optimism/rpc/src/eth/pending_block.rs index eebb61c8cb0..5c437de76a7 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,28 +1,36 @@ //! Loads OP pending block for a RPC response. use crate::OpEthApi; -use alloy_consensus::Header; -use alloy_eips::BlockNumberOrTag; -use alloy_primitives::{BlockNumber, B256}; +use alloy_consensus::{ + constants::EMPTY_WITHDRAWALS, proofs::calculate_transaction_root, Header, EMPTY_OMMER_ROOT_HASH, +}; +use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, BlockNumberOrTag}; +use alloy_primitives::{B256, U256}; +use op_alloy_network::Network; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSigned}; +use reth_primitives::{logs_bloom, BlockBody, Receipt, SealedBlockWithSenders, TransactionSigned}; use reth_provider::{ - BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, ProviderTx, - ReceiptProvider, StateProviderFactory, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderBlock, + ProviderHeader, ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderFactory, }; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, - FromEthApiError, RpcNodeCore, + EthApiTypes, FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use revm::primitives::BlockEnv; +use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg, ExecutionResult, SpecId}; impl LoadPendingBlock for OpEthApi where - Self: SpawnBlocking, + Self: SpawnBlocking + + EthApiTypes< + NetworkTypes: Network< + HeaderResponse = alloy_rpc_types_eth::Header>, + >, + >, N: RpcNodeCore< Provider: BlockReaderIdExt< Transaction = reth_primitives::TransactionSigned, @@ -37,7 
+45,11 @@ where >, { #[inline] - fn pending_block(&self) -> &tokio::sync::Mutex> { + fn pending_block( + &self, + ) -> &tokio::sync::Mutex< + Option, ProviderReceipt>>, + > { self.inner.eth_api.pending_block() } @@ -68,20 +80,76 @@ where Ok(Some((block, receipts))) } - fn receipts_root( + fn assemble_block( + &self, + cfg: CfgEnvWithHandlerCfg, + block_env: BlockEnv, + parent_hash: B256, + state_root: B256, + transactions: Vec>, + receipts: &[ProviderReceipt], + ) -> reth_provider::ProviderBlock { + let chain_spec = self.provider().chain_spec(); + + let transactions_root = calculate_transaction_root(&transactions); + let receipts_root = calculate_receipt_root_no_memo_optimism( + &receipts.iter().collect::>(), + &chain_spec, + block_env.timestamp.to::(), + ); + + let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs)); + + let header = Header { + parent_hash, + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: block_env.coinbase, + state_root, + transactions_root, + receipts_root, + withdrawals_root: (cfg.handler_cfg.spec_id >= SpecId::SHANGHAI) + .then_some(EMPTY_WITHDRAWALS), + logs_bloom, + timestamp: block_env.timestamp.to::(), + mix_hash: block_env.prevrandao.unwrap_or_default(), + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(block_env.basefee.to::()), + number: block_env.number.to::(), + gas_limit: block_env.gas_limit.to::(), + difficulty: U256::ZERO, + gas_used: receipts.last().map(|r| r.cumulative_gas_used).unwrap_or_default(), + blob_gas_used: (cfg.handler_cfg.spec_id >= SpecId::CANCUN).then(|| { + transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum::() + }), + excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), + extra_data: Default::default(), + parent_beacon_block_root: (cfg.handler_cfg.spec_id >= SpecId::CANCUN) + .then_some(B256::ZERO), + requests_hash: (cfg.handler_cfg.spec_id >= SpecId::PRAGUE) + .then_some(EMPTY_REQUESTS_HASH), + target_blobs_per_block: None, + }; + + // seal the block + 
reth_primitives::Block { + header, + body: BlockBody { transactions, ommers: vec![], withdrawals: None }, + } + } + + fn assemble_receipt( &self, - block_env: &BlockEnv, - execution_outcome: &ExecutionOutcome, - block_number: BlockNumber, - ) -> B256 { - execution_outcome - .generic_receipts_root_slow(block_number, |receipts| { - calculate_receipt_root_no_memo_optimism( - receipts, - self.provider().chain_spec().as_ref(), - block_env.timestamp.to::(), - ) - }) - .expect("Block is present") + tx: &reth_primitives::RecoveredTx>, + result: ExecutionResult, + cumulative_gas_used: u64, + ) -> reth_provider::ProviderReceipt { + #[allow(clippy::needless_update)] + Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + ..Default::default() + } } } diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index cfc81ab644e..c1e0a730198 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -7,7 +7,9 @@ use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; use reth_node_api::FullNodeComponents; use reth_primitives::{RecoveredTx, TransactionSigned}; -use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; +use reth_provider::{ + BlockReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, +}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, TransactionCompat, @@ -20,9 +22,9 @@ use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, SequencerClient}; impl EthTransactions for OpEthApi where Self: LoadTransaction, - N: OpNodeCore, + N: OpNodeCore>>, { - fn signers(&self) -> &parking_lot::RwLock>> { + fn signers(&self) -> &parking_lot::RwLock>>>> { self.inner.eth_api.signers() } diff --git 
a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index fcbf02a76c6..53afc737768 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -4,6 +4,7 @@ pub mod body; pub mod header; use alloc::fmt; +use alloy_rlp::{Decodable, Encodable}; use crate::{ BlockBody, BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, @@ -39,6 +40,8 @@ pub trait Block: + InMemorySize + MaybeSerde + MaybeArbitrary + + Encodable + + Decodable { /// Header part of the block. type Header: BlockHeader; diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 9edbb2471ef..799e3e7a4c9 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -447,31 +447,6 @@ where } } -impl reth_primitives_traits::Block for SealedBlock -where - H: reth_primitives_traits::BlockHeader, - B: reth_primitives_traits::BlockBody, -{ - type Header = H; - type Body = B; - - fn new(header: Self::Header, body: Self::Body) -> Self { - Self { header: SealedHeader::seal(header), body } - } - - fn header(&self) -> &Self::Header { - self.header.header() - } - - fn body(&self) -> &Self::Body { - &self.body - } - - fn split(self) -> (Self::Header, Self::Body) { - (self.header.unseal(), self.body) - } -} - #[cfg(any(test, feature = "arbitrary"))] impl<'a, H, B> arbitrary::Arbitrary<'a> for SealedBlock where @@ -1022,7 +997,6 @@ mod tests { const fn _traits() { const fn assert_block() {} assert_block::(); - assert_block::(); } /// Check parsing according to EIP-1898. 
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index d0b88c4b179..f7211489e93 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1506,6 +1506,11 @@ impl RecoveredTx { pub const fn from_signed_transaction(signed_transaction: T, signer: Address) -> Self { Self { signed_transaction, signer } } + + /// Applies the given closure to the inner transactions. + pub fn map_transaction(self, f: impl FnOnce(T) -> Tx) -> RecoveredTx { + RecoveredTx::from_signed_transaction(f(self.signed_transaction), self.signer) + } } impl Encodable for RecoveredTx { diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index cdcc6b808dd..93a3c182322 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -654,6 +654,18 @@ impl TryFrom for PooledTransactionsElement { } } +impl From for TransactionSigned { + fn from(element: PooledTransactionsElement) -> Self { + match element { + PooledTransactionsElement::Legacy(tx) => tx.into(), + PooledTransactionsElement::Eip2930(tx) => tx.into(), + PooledTransactionsElement::Eip1559(tx) => tx.into(), + PooledTransactionsElement::Eip7702(tx) => tx.into(), + PooledTransactionsElement::BlobTransaction(blob_tx) => blob_tx.into_parts().0, + } + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { /// Generates an arbitrary `PooledTransactionsElement`. 
diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs index b4679ae5cec..eb2cb21a2ba 100644 --- a/crates/rpc/rpc-api/src/otterscan.rs +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -1,7 +1,6 @@ use alloy_eips::BlockId; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, TxHash, B256}; -use alloy_rpc_types_eth::Header; use alloy_rpc_types_trace::otterscan::{ BlockDetails, ContractCreator, InternalOperation, OtsBlockTransactions, TraceEntry, TransactionsWithReceipts, @@ -11,7 +10,7 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; /// Otterscan rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "ots"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "ots"))] -pub trait Otterscan { +pub trait Otterscan { /// Get the block header by block number, required by otterscan. /// Otterscan currently requires this endpoint, used as: /// @@ -20,7 +19,7 @@ pub trait Otterscan { /// /// Ref: #[method(name = "getHeaderByNumber", aliases = ["erigon_getHeaderByNumber"])] - async fn get_header_by_number(&self, block_number: u64) -> RpcResult>; + async fn get_header_by_number(&self, block_number: u64) -> RpcResult>; /// Check if a certain address contains a deployed code. #[method(name = "hasCode")] @@ -48,11 +47,11 @@ pub trait Otterscan { /// Tailor-made and expanded version of eth_getBlockByNumber for block details page in /// Otterscan. #[method(name = "getBlockDetails")] - async fn get_block_details(&self, block_number: u64) -> RpcResult; + async fn get_block_details(&self, block_number: u64) -> RpcResult>; /// Tailor-made and expanded version of eth_getBlockByHash for block details page in Otterscan. #[method(name = "getBlockDetailsByHash")] - async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult; + async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult>; /// Get paginated transactions for a certain block. Also remove some verbose fields like logs. 
#[method(name = "getBlockTransactions")] @@ -61,7 +60,7 @@ pub trait Otterscan { block_number: u64, page_number: usize, page_size: usize, - ) -> RpcResult>; + ) -> RpcResult>; /// Gets paginated inbound/outbound transaction calls for a certain address. #[method(name = "searchTransactionsBefore")] diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 1d8bfb9c297..df25564486f 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -207,7 +207,8 @@ use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_primitives::{EthPrimitives, NodePrimitives}; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, FullRpcProvider, HeaderProvider, ReceiptProvider, StateProviderFactory, + EvmEnvProvider, FullRpcProvider, HeaderProvider, ProviderBlock, ProviderHeader, + ProviderReceipt, ReceiptProvider, StateProviderFactory, }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, @@ -216,7 +217,7 @@ use reth_rpc::{ use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ helpers::{Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt}, - EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcReceipt, RpcTransaction, + EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, }; use reth_rpc_eth_types::{EthConfig, EthStateCache, EthSubscriptionIdProvider}; use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; @@ -276,9 +277,9 @@ pub async fn launch Result where Provider: FullRpcProvider< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ProviderBlock, + Receipt = ProviderReceipt, + Header = ProviderHeader, > + AccountReader + ChangeSetReader, Pool: TransactionPool::Transaction> + 'static, @@ -286,7 +287,13 @@ where Tasks: 
TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm

, - EthApi: FullEthApiServer, + EthApi: FullEthApiServer< + Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, + >, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< Block = reth_primitives::Block, @@ -672,7 +679,13 @@ where where EngineT: EngineTypes, EngineApi: EngineApiServer, - EthApi: FullEthApiServer, + EthApi: FullEthApiServer< + Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, + >, Provider: BlockReader< Block = ::Block, Receipt = ::Receipt, @@ -792,7 +805,13 @@ where eth: DynEthApiBuilder, ) -> TransportRpcModules<()> where - EthApi: FullEthApiServer, + EthApi: FullEthApiServer< + Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, + >, Provider: BlockReader< Block = ::Block, Receipt = ::Receipt, @@ -1134,6 +1153,7 @@ where RpcTransaction, RpcBlock, RpcReceipt, + RpcHeader, > + EthApiTypes, BlockExecutor: BlockExecutorProvider>, @@ -1170,10 +1190,16 @@ where /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] pub fn register_debug(&mut self) -> &mut Self where - EthApi: EthApiSpec + EthTransactions + TraceExt, + EthApi: EthApiSpec + + EthTransactions< + Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + > + TraceExt, Provider: BlockReader< Block = ::Block, - Receipt = reth_primitives::Receipt, + Receipt = ::Receipt, >, { let debug_api = self.debug_api(); @@ -1339,7 +1365,13 @@ where Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EthApi: FullEthApiServer, + EthApi: FullEthApiServer< + Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, + >, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< Block = reth_primitives::Block, diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index a8393b0a92e..357e3135e04 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -5,7 +5,7 @@ use crate::utils::{launch_http, launch_http_ws, launch_ws}; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{hex_literal::hex, Address, Bytes, TxHash, B256, B64, U256, U64}; use alloy_rpc_types_eth::{ - transaction::TransactionRequest, Block, FeeHistory, Filter, Index, Log, + transaction::TransactionRequest, Block, FeeHistory, Filter, Header, Index, Log, PendingTransactionFilterKind, SyncStatus, Transaction, TransactionReceipt, }; use alloy_rpc_types_trace::filter::TraceFilter; @@ -174,16 +174,24 @@ where .unwrap(); // Implemented - EthApiClient::::protocol_version(client).await.unwrap(); - EthApiClient::::chain_id(client).await.unwrap(); - EthApiClient::::accounts(client).await.unwrap(); - EthApiClient::::get_account(client, address, block_number.into()) + EthApiClient::::protocol_version(client).await.unwrap(); + 
EthApiClient::::chain_id(client).await.unwrap(); + EthApiClient::::accounts(client).await.unwrap(); + EthApiClient::::get_account( + client, + address, + block_number.into(), + ) + .await + .unwrap(); + EthApiClient::::block_number(client).await.unwrap(); + EthApiClient::::get_code(client, address, None) + .await + .unwrap(); + EthApiClient::::send_raw_transaction(client, tx) .await .unwrap(); - EthApiClient::::block_number(client).await.unwrap(); - EthApiClient::::get_code(client, address, None).await.unwrap(); - EthApiClient::::send_raw_transaction(client, tx).await.unwrap(); - EthApiClient::::fee_history( + EthApiClient::::fee_history( client, U64::from(0), block_number, @@ -191,11 +199,13 @@ where ) .await .unwrap(); - EthApiClient::::balance(client, address, None).await.unwrap(); - EthApiClient::::transaction_count(client, address, None) + EthApiClient::::balance(client, address, None) + .await + .unwrap(); + EthApiClient::::transaction_count(client, address, None) .await .unwrap(); - EthApiClient::::storage_at( + EthApiClient::::storage_at( client, address, U256::default().into(), @@ -203,64 +213,79 @@ where ) .await .unwrap(); - EthApiClient::::block_by_hash(client, hash, false).await.unwrap(); - EthApiClient::::block_by_number(client, block_number, false) + EthApiClient::::block_by_hash(client, hash, false) .await .unwrap(); - EthApiClient::::block_transaction_count_by_number( + EthApiClient::::block_by_number( client, block_number, + false, ) .await .unwrap(); - EthApiClient::::block_transaction_count_by_hash(client, hash) - .await - .unwrap(); - EthApiClient::::block_uncles_count_by_hash(client, hash) - .await - .unwrap(); - EthApiClient::::block_uncles_count_by_number(client, block_number) - .await - .unwrap(); - EthApiClient::::uncle_by_block_hash_and_index(client, hash, index) + EthApiClient::::block_transaction_count_by_number( + client, + block_number, + ) + .await + .unwrap(); + EthApiClient::::block_transaction_count_by_hash( + client, hash, + ) + 
.await + .unwrap(); + EthApiClient::::block_uncles_count_by_hash(client, hash) .await .unwrap(); - EthApiClient::::uncle_by_block_number_and_index( + EthApiClient::::block_uncles_count_by_number( + client, + block_number, + ) + .await + .unwrap(); + EthApiClient::::uncle_by_block_hash_and_index( + client, hash, index, + ) + .await + .unwrap(); + EthApiClient::::uncle_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::sign(client, address, bytes.clone()) - .await - .unwrap_err(); - EthApiClient::::sign_typed_data(client, address, typed_data) + EthApiClient::::sign(client, address, bytes.clone()) .await .unwrap_err(); - EthApiClient::::transaction_by_hash(client, tx_hash) + EthApiClient::::sign_typed_data( + client, address, typed_data, + ) + .await + .unwrap_err(); + EthApiClient::::transaction_by_hash(client, tx_hash) .await .unwrap(); - EthApiClient::::transaction_by_block_hash_and_index( + EthApiClient::::transaction_by_block_hash_and_index( client, hash, index, ) .await .unwrap(); - EthApiClient::::transaction_by_block_number_and_index( + EthApiClient::::transaction_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::create_access_list( + EthApiClient::::create_access_list( client, call_request.clone(), Some(block_number.into()), ) .await .unwrap_err(); - EthApiClient::::estimate_gas( + EthApiClient::::estimate_gas( client, call_request.clone(), Some(block_number.into()), @@ -268,7 +293,7 @@ where ) .await .unwrap_err(); - EthApiClient::::call( + EthApiClient::::call( client, call_request.clone(), Some(block_number.into()), @@ -277,44 +302,47 @@ where ) .await .unwrap_err(); - EthApiClient::::syncing(client).await.unwrap(); - EthApiClient::::send_transaction( + EthApiClient::::syncing(client).await.unwrap(); + EthApiClient::::send_transaction( client, transaction_request.clone(), ) .await .unwrap_err(); - EthApiClient::::sign_transaction(client, transaction_request) - .await - 
.unwrap_err(); - EthApiClient::::hashrate(client).await.unwrap(); - EthApiClient::::submit_hashrate( + EthApiClient::::sign_transaction( + client, + transaction_request, + ) + .await + .unwrap_err(); + EthApiClient::::hashrate(client).await.unwrap(); + EthApiClient::::submit_hashrate( client, U256::default(), B256::default(), ) .await .unwrap(); - EthApiClient::::gas_price(client).await.unwrap_err(); - EthApiClient::::max_priority_fee_per_gas(client) + EthApiClient::::gas_price(client).await.unwrap_err(); + EthApiClient::::max_priority_fee_per_gas(client) .await .unwrap_err(); - EthApiClient::::get_proof(client, address, vec![], None) + EthApiClient::::get_proof(client, address, vec![], None) .await .unwrap(); // Unimplemented assert!(is_unimplemented( - EthApiClient::::author(client).await.err().unwrap() + EthApiClient::::author(client).await.err().unwrap() )); assert!(is_unimplemented( - EthApiClient::::is_mining(client).await.err().unwrap() + EthApiClient::::is_mining(client).await.err().unwrap() )); assert!(is_unimplemented( - EthApiClient::::get_work(client).await.err().unwrap() + EthApiClient::::get_work(client).await.err().unwrap() )); assert!(is_unimplemented( - EthApiClient::::submit_work( + EthApiClient::::submit_work( client, B64::default(), B256::default(), @@ -402,28 +430,32 @@ where let nonce = 1; let block_hash = B256::default(); - OtterscanClient::::get_header_by_number(client, block_number).await.unwrap(); + OtterscanClient::::get_header_by_number(client, block_number) + .await + .unwrap(); - OtterscanClient::::has_code(client, address, None).await.unwrap(); - OtterscanClient::::has_code(client, address, Some(block_number.into())) + OtterscanClient::::has_code(client, address, None).await.unwrap(); + OtterscanClient::::has_code(client, address, Some(block_number.into())) .await .unwrap(); - OtterscanClient::::get_api_level(client).await.unwrap(); + OtterscanClient::::get_api_level(client).await.unwrap(); - 
OtterscanClient::::get_internal_operations(client, tx_hash).await.unwrap(); + OtterscanClient::::get_internal_operations(client, tx_hash).await.unwrap(); - OtterscanClient::::get_transaction_error(client, tx_hash).await.unwrap(); + OtterscanClient::::get_transaction_error(client, tx_hash).await.unwrap(); - OtterscanClient::::trace_transaction(client, tx_hash).await.unwrap(); + OtterscanClient::::trace_transaction(client, tx_hash).await.unwrap(); - OtterscanClient::::get_block_details(client, block_number).await.unwrap_err(); + OtterscanClient::::get_block_details(client, block_number) + .await + .unwrap_err(); - OtterscanClient::::get_block_details_by_hash(client, block_hash) + OtterscanClient::::get_block_details_by_hash(client, block_hash) .await .unwrap_err(); - OtterscanClient::::get_block_transactions( + OtterscanClient::::get_block_transactions( client, block_number, page_number, @@ -434,7 +466,7 @@ where .unwrap(); assert!(is_unimplemented( - OtterscanClient::::search_transactions_before( + OtterscanClient::::search_transactions_before( client, address, block_number, @@ -445,7 +477,7 @@ where .unwrap() )); assert!(is_unimplemented( - OtterscanClient::::search_transactions_after( + OtterscanClient::::search_transactions_after( client, address, block_number, @@ -455,13 +487,13 @@ where .err() .unwrap() )); - assert!(OtterscanClient::::get_transaction_by_sender_and_nonce( + assert!(OtterscanClient::::get_transaction_by_sender_and_nonce( client, sender, nonce ) .await .err() .is_none()); - assert!(OtterscanClient::::get_contract_creator(client, address) + assert!(OtterscanClient::::get_contract_creator(client, address) .await .unwrap() .is_none()); diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index 0e2186e56ee..96d818ed4f9 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -1,5 +1,5 @@ use crate::utils::{test_address, test_rpc_builder}; 
-use alloy_rpc_types_eth::{Block, Receipt, Transaction}; +use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction}; use jsonrpsee::{ server::{middleware::rpc::RpcServiceT, RpcServiceBuilder}, types::Request, @@ -75,7 +75,7 @@ async fn test_rpc_middleware() { .unwrap(); let client = handle.http_client().unwrap(); - EthApiClient::::protocol_version(&client).await.unwrap(); + EthApiClient::::protocol_version(&client).await.unwrap(); let count = mylayer.count.load(Ordering::Relaxed); assert_eq!(count, 1); } diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index bffd4fa308e..95ed98d8086 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -34,6 +34,7 @@ reth-node-api.workspace = true reth-trie-common = { workspace = true, features = ["eip1186"] } # ethereum +alloy-rlp.workspace = true alloy-serde.workspace = true alloy-eips.workspace = true alloy-dyn-abi = { workspace = true, features = ["eip712"] } diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 6500c304978..810400c6f6e 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -8,17 +8,18 @@ use alloy_rpc_types_eth::{ simulate::{SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, transaction::TransactionRequest, - BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, - Index, StateContext, SyncStatus, Work, + BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Index, + StateContext, SyncStatus, Work, }; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_provider::BlockReader; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; use crate::{ helpers::{EthApiSpec, EthBlocks, EthCall, EthFees, EthState, EthTransactions, FullEthApi}, - RpcBlock, RpcReceipt, RpcTransaction, + RpcBlock, RpcHeader, RpcReceipt, 
RpcTransaction, }; /// Helper trait, unifies functionality that must be supported to implement all RPC methods for @@ -28,6 +29,7 @@ pub trait FullEthApiServer: RpcTransaction, RpcBlock, RpcReceipt, + RpcHeader, > + FullEthApi + Clone { @@ -38,6 +40,7 @@ impl FullEthApiServer for T where RpcTransaction, RpcBlock, RpcReceipt, + RpcHeader, > + FullEthApi + Clone { @@ -46,7 +49,7 @@ impl FullEthApiServer for T where /// Eth rpc interface: #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] -pub trait EthApi { +pub trait EthApi { /// Returns the protocol version encoded as a string. #[method(name = "protocolVersion")] async fn protocol_version(&self) -> RpcResult; @@ -200,11 +203,11 @@ pub trait EthApi { /// Returns the block's header at given number. #[method(name = "getHeaderByNumber")] - async fn header_by_number(&self, hash: BlockNumberOrTag) -> RpcResult>; + async fn header_by_number(&self, hash: BlockNumberOrTag) -> RpcResult>; /// Returns the block's header at given hash. #[method(name = "getHeaderByHash")] - async fn header_by_hash(&self, hash: B256) -> RpcResult>; + async fn header_by_hash(&self, hash: B256) -> RpcResult>; /// `eth_simulateV1` executes an arbitrary number of transactions on top of the requested state. /// The transactions are packed into individual blocks. Overrides can be provided. 
@@ -366,9 +369,15 @@ impl RpcTransaction, RpcBlock, RpcReceipt, + RpcHeader, > for T where - T: FullEthApi, + T: FullEthApi< + Provider: BlockReader< + Header = alloy_consensus::Header, + Transaction = reth_primitives::TransactionSigned, + >, + >, jsonrpsee_types::error::ErrorObject<'static>: From, { /// Handler for: `eth_protocolVersion` @@ -607,13 +616,16 @@ where } /// Handler for: `eth_getHeaderByNumber` - async fn header_by_number(&self, block_number: BlockNumberOrTag) -> RpcResult> { + async fn header_by_number( + &self, + block_number: BlockNumberOrTag, + ) -> RpcResult>> { trace!(target: "rpc::eth", ?block_number, "Serving eth_getHeaderByNumber"); Ok(EthBlocks::rpc_block_header(self, block_number.into()).await?) } /// Handler for: `eth_getHeaderByHash` - async fn header_by_hash(&self, hash: B256) -> RpcResult> { + async fn header_by_hash(&self, hash: B256) -> RpcResult>> { trace!(target: "rpc::eth", ?hash, "Serving eth_getHeaderByHash"); Ok(EthBlocks::rpc_block_header(self, hash.into()).await?) 
} diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index cce0aa01b01..5f0d9f744ef 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -4,14 +4,17 @@ use std::sync::Arc; use alloy_consensus::BlockHeader; use alloy_eips::BlockId; -use alloy_rpc_types_eth::{Block, Header, Index}; +use alloy_primitives::Sealable; +use alloy_rlp::Encodable; +use alloy_rpc_types_eth::{Block, BlockTransactions, Header, Index}; use futures::Future; use reth_node_api::BlockBody; use reth_primitives::{SealedBlockFor, SealedBlockWithSenders}; use reth_provider::{ - BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider, ProviderReceipt, + BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider, ProviderHeader, ProviderReceipt, }; use reth_rpc_types_compat::block::from_block; +use revm_primitives::U256; use crate::{ node::RpcNodeCoreExt, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, @@ -35,10 +38,11 @@ pub type BlockAndReceiptsResult = Result< /// `eth_` namespace. pub trait EthBlocks: LoadBlock { /// Returns the block header for the given block id. + #[expect(clippy::type_complexity)] fn rpc_block_header( &self, block_id: BlockId, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>>, Self::Error>> + Send where Self: FullEthApiTypes, { @@ -113,7 +117,7 @@ pub trait EthBlocks: LoadBlock { .get_sealed_block_with_senders(block_hash) .await .map_err(Self::Error::from_eth_err)? - .map(|b| b.body.transactions.len())) + .map(|b| b.body.transactions().len())) } } @@ -173,10 +177,11 @@ pub trait EthBlocks: LoadBlock { /// Returns uncle headers of given block. /// /// Returns an empty vec if there are none. 
+ #[expect(clippy::type_complexity)] fn ommers( &self, block_id: BlockId, - ) -> Result>, Self::Error> { + ) -> Result>>, Self::Error> { self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err) } @@ -195,13 +200,22 @@ pub trait EthBlocks: LoadBlock { self.provider() .pending_block() .map_err(Self::Error::from_eth_err)? - .map(|block| block.body.ommers) + .and_then(|block| block.body.ommers().map(|o| o.to_vec())) } else { self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err)? } .unwrap_or_default(); - Ok(uncles.into_iter().nth(index.into()).map(Block::uncle_from_header)) + Ok(uncles.into_iter().nth(index.into()).map(|header| { + let block = alloy_consensus::Block::::uncle(header); + let size = U256::from(block.length()); + Block { + uncles: vec![], + header: Header::from_consensus(block.header.seal_slow(), None, Some(size)), + transactions: BlockTransactions::Uncle, + withdrawals: None, + } + })) } } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index aaa2ce131c9..f6d665121fc 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -6,7 +6,7 @@ use crate::{ helpers::estimate::EstimateCall, FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcBlock, RpcNodeCore, }; -use alloy_consensus::{BlockHeader, Header}; +use alloy_consensus::BlockHeader; use alloy_eips::{eip1559::calc_next_block_base_fee, eip2930::AccessListResult}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use alloy_rpc_types_eth::{ @@ -20,7 +20,9 @@ use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_node_api::BlockBody; use reth_primitives_traits::SignedTransaction; -use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; +use reth_provider::{ + BlockIdReader, BlockReader, ChainSpecProvider, HeaderProvider, ProviderHeader, +}; use reth_revm::{ database::StateProviderDatabase, 
db::CacheDB, @@ -70,7 +72,12 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { block: Option, ) -> impl Future> + Send where - Self: LoadBlock + FullEthApiTypes, + Self: LoadBlock< + Provider: BlockReader< + Header = alloy_consensus::Header, + Transaction = reth_primitives::TransactionSigned, + >, + > + FullEthApiTypes, { async move { if payload.block_state_calls.len() > self.max_simulate_blocks() as usize { @@ -456,7 +463,9 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { } /// Executes code on state. -pub trait Call: LoadState> + SpawnBlocking { +pub trait Call: + LoadState>> + SpawnBlocking +{ /// Returns default gas limit to use for `eth_call` and tracing RPC methods. /// /// Data access in default trait method implementations. @@ -616,7 +625,7 @@ pub trait Call: LoadState> + SpawnBlocking { // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in - let parent_block = block.parent_hash; + let parent_block = block.parent_hash(); let this = self.clone(); self.spawn_with_state_at_block(parent_block.into(), move |state| { @@ -629,7 +638,7 @@ pub trait Call: LoadState> + SpawnBlocking { cfg.clone(), block_env.clone(), block_txs, - tx.hash(), + *tx.tx_hash(), )?; let env = EnvWithHandlerCfg::new_with_cfg_env( diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 5843e945b8c..e0618cb6910 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -5,6 +5,7 @@ use alloy_primitives::U256; use alloy_rpc_types_eth::{BlockNumberOrTag, FeeHistory}; use futures::Future; use reth_chainspec::EthChainSpec; +use reth_primitives_traits::BlockBody; use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_rpc_eth_types::{ fee_history::calculate_reward_percentiles_for_block, EthApiError, FeeHistoryCache, @@ -183,7 +184,7 @@ pub trait EthFees: LoadFee { percentiles, 
header.gas_used(), header.base_fee_per_gas().unwrap_or_default(), - &block.body.transactions, + block.body.transactions(), &receipts, ) .unwrap_or_default(), diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 8a6e5c84be1..2ca6c028c31 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -3,12 +3,11 @@ use super::SpawnBlocking; use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; -use alloy_consensus::{BlockHeader, Header, EMPTY_OMMER_ROOT_HASH}; -use alloy_eips::{ - eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, -}; -use alloy_primitives::{BlockNumber, B256, U256}; -use alloy_rpc_types_eth::{BlockNumberOrTag, Withdrawals}; +use alloy_consensus::{BlockHeader, Transaction}; +use alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK; +use alloy_network::Network; +use alloy_primitives::B256; +use alloy_rpc_types_eth::BlockNumberOrTag; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_errors::RethError; @@ -17,19 +16,17 @@ use reth_evm::{ ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes, }; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{ - proofs::calculate_transaction_root, Block, BlockBody, BlockExt, InvalidTransactionError, - Receipt, RecoveredTx, SealedBlockWithSenders, -}; +use reth_primitives::{BlockExt, InvalidTransactionError, RecoveredTx, SealedBlockWithSenders}; +use reth_primitives_traits::receipt::ReceiptExt; use reth_provider::{ - BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, - ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderFactory, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderBlock, ProviderError, + ProviderHeader, ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderFactory, }; use reth_revm::{ database::StateProviderDatabase, 
primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, - ResultAndState, SpecId, + ResultAndState, }, }; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; @@ -46,29 +43,40 @@ use tracing::debug; /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. pub trait LoadPendingBlock: - EthApiTypes - + RpcNodeCore< - Provider: BlockReaderIdExt< - Transaction = reth_primitives::TransactionSigned, - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, - > + EvmEnvProvider + EthApiTypes< + NetworkTypes: Network< + HeaderResponse = alloy_rpc_types_eth::Header>, + >, + > + RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider> + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool>>, - Evm: ConfigureEvm
>, + Evm: ConfigureEvm< + Header = ProviderHeader, + Transaction = ProviderTx, + >, > { /// Returns a handle to the pending block. /// /// Data access in default (L1) trait method implementations. - fn pending_block(&self) -> &Mutex>; + #[expect(clippy::type_complexity)] + fn pending_block( + &self, + ) -> &Mutex, ProviderReceipt>>>; /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block - fn pending_block_env_and_cfg(&self) -> Result { + #[expect(clippy::type_complexity)] + fn pending_block_env_and_cfg( + &self, + ) -> Result< + PendingBlockEnv, ProviderReceipt>, + Self::Error, + > { if let Some(block) = self.provider().pending_block_with_senders().map_err(Self::Error::from_eth_err)? { @@ -154,8 +162,8 @@ pub trait LoadPendingBlock: // check if the block is still good if let Some(pending_block) = lock.as_ref() { // this is guaranteed to be the `latest` header - if pending.block_env.number.to::() == pending_block.block.number && - parent_hash == pending_block.block.parent_hash && + if pending.block_env.number.to::() == pending_block.block.number() && + parent_hash == pending_block.block.parent_hash() && now <= pending_block.expires_at { return Ok(Some((pending_block.block.clone(), pending_block.receipts.clone()))); @@ -188,34 +196,24 @@ pub trait LoadPendingBlock: } } - /// Assembles a [`Receipt`] for a transaction, based on its [`ExecutionResult`]. + /// Assembles a receipt for a transaction, based on its [`ExecutionResult`]. 
fn assemble_receipt( &self, - tx: &RecoveredTx, + tx: &RecoveredTx>, result: ExecutionResult, cumulative_gas_used: u64, - ) -> Receipt { - #[allow(clippy::needless_update)] - Receipt { - tx_type: tx.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs().into_iter().map(Into::into).collect(), - ..Default::default() - } - } + ) -> ProviderReceipt; - /// Calculates receipts root in block building. - /// - /// Panics if block is not in the [`ExecutionOutcome`]'s block range. - fn receipts_root( + /// Assembles a pending block. + fn assemble_block( &self, - _block_env: &BlockEnv, - execution_outcome: &ExecutionOutcome, - block_number: BlockNumber, - ) -> B256 { - execution_outcome.receipts_root_slow(block_number).expect("Block is present") - } + cfg: CfgEnvWithHandlerCfg, + block_env: BlockEnv, + parent_hash: revm_primitives::B256, + state_root: revm_primitives::B256, + transactions: Vec>, + receipts: &[ProviderReceipt], + ) -> ProviderBlock; /// Builds a pending block using the configured provider and pool. /// @@ -223,12 +221,19 @@ pub trait LoadPendingBlock: /// /// After Cancun, if the origin is the actual pending block, the block includes the EIP-4788 pre /// block contract call using the parent beacon block root received from the CL. 
+ #[expect(clippy::type_complexity)] fn build_block( &self, cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, parent_hash: B256, - ) -> Result<(SealedBlockWithSenders, Vec), Self::Error> + ) -> Result< + ( + SealedBlockWithSenders>, + Vec>, + ), + Self::Error, + > where EthApiError: From, { @@ -253,14 +258,10 @@ pub trait LoadPendingBlock: block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), )); - let withdrawals: Option = None; - let withdrawals_root = None; - let chain_spec = self.provider().chain_spec(); let mut system_caller = SystemCaller::new(self.evm_config().clone(), chain_spec.clone()); - let parent_beacon_block_root = None; system_caller .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, parent_hash) .map_err(|err| EthApiError::Internal(err.into()))?; @@ -301,8 +302,7 @@ pub trait LoadPendingBlock: // There's only limited amount of blob space available per block, so we need to check if // the EIP-4844 can still fit in the block - if let Some(blob_tx) = tx.transaction.as_eip4844() { - let tx_blob_gas = blob_tx.blob_gas(); + if let Some(tx_blob_gas) = tx.blob_gas_used() { if sum_blob_gas_used + tx_blob_gas > MAX_DATA_GAS_PER_BLOCK { // we can't fit this _blob_ transaction into the block, so we mark it as // invalid, which removes its dependent transactions from @@ -360,8 +360,7 @@ pub trait LoadPendingBlock: db.commit(state); // add to the total blob gas used if the transaction successfully executed - if let Some(blob_tx) = tx.transaction.as_eip4844() { - let tx_blob_gas = blob_tx.blob_gas(); + if let Some(tx_blob_gas) = tx.blob_gas_used() { sum_blob_gas_used += tx_blob_gas; // if we've reached the max data gas per block, we can skip blob txs entirely @@ -388,7 +387,7 @@ pub trait LoadPendingBlock: let balance_increments = post_block_withdrawals_balance_increments( chain_spec.as_ref(), block_env.timestamp.try_into().unwrap_or(u64::MAX), - &withdrawals.clone().unwrap_or_default(), + &[], ); // increment account balances for withdrawals 
@@ -397,66 +396,23 @@ pub trait LoadPendingBlock: // merge all transitions into bundle state. db.merge_transitions(BundleRetention::PlainState); - let execution_outcome = ExecutionOutcome::new( - db.take_bundle(), - vec![receipts.clone()].into(), - block_number, - Vec::new(), - ); + let execution_outcome: ExecutionOutcome> = + ExecutionOutcome::new( + db.take_bundle(), + vec![receipts.clone()].into(), + block_number, + Vec::new(), + ); let hashed_state = db.database.hashed_post_state(execution_outcome.state()); - let receipts_root = self.receipts_root(&block_env, &execution_outcome, block_number); - - let logs_bloom = - execution_outcome.block_logs_bloom(block_number).expect("Block is present"); - // calculate the state root let state_root = db.database.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; - // create the block header - let transactions_root = calculate_transaction_root(&executed_txs); - - // check if cancun is activated to set eip4844 header fields correctly - let blob_gas_used = - (cfg.handler_cfg.spec_id >= SpecId::CANCUN).then_some(sum_blob_gas_used); - - let requests_hash = chain_spec - .is_prague_active_at_timestamp(block_env.timestamp.to::()) - .then_some(EMPTY_REQUESTS_HASH); - - let header = Header { - parent_hash, - ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: block_env.coinbase, - state_root, - transactions_root, - receipts_root, - withdrawals_root, - logs_bloom, - timestamp: block_env.timestamp.to::(), - mix_hash: block_env.prevrandao.unwrap_or_default(), - nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(base_fee), - number: block_number, - gas_limit: block_gas_limit, - difficulty: U256::ZERO, - gas_used: cumulative_gas_used, - blob_gas_used: blob_gas_used.map(Into::into), - excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), - extra_data: Default::default(), - parent_beacon_block_root, - requests_hash, - target_blobs_per_block: None, - }; - // Convert Vec> to Vec - let receipts: Vec = 
receipts.into_iter().flatten().collect(); + let receipts: Vec<_> = receipts.into_iter().flatten().collect(); + let block = + self.assemble_block(cfg, block_env, parent_hash, state_root, executed_txs, &receipts); - // seal the block - let block = Block { - header, - body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals }, - }; Ok((SealedBlockWithSenders { block: block.seal_slow(), senders }, receipts)) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index dc8beab38a0..85c95414765 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -4,7 +4,6 @@ use alloy_dyn_abi::TypedData; use alloy_primitives::{Address, PrimitiveSignature as Signature}; use alloy_rpc_types_eth::TransactionRequest; use dyn_clone::DynClone; -use reth_primitives::TransactionSigned; use reth_rpc_eth_types::SignError; use std::result; @@ -13,7 +12,7 @@ pub type Result = result::Result; /// An Ethereum Signer used via RPC. #[async_trait::async_trait] -pub trait EthSigner: Send + Sync + DynClone { +pub trait EthSigner: Send + Sync + DynClone { /// Returns the available accounts for this signer. fn accounts(&self) -> Vec
; @@ -26,17 +25,13 @@ pub trait EthSigner: Send + Sync + DynClone { async fn sign(&self, address: Address, message: &[u8]) -> Result; /// signs a transaction request using the given account in request - async fn sign_transaction( - &self, - request: TransactionRequest, - address: &Address, - ) -> Result; + async fn sign_transaction(&self, request: TransactionRequest, address: &Address) -> Result; /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result; } -dyn_clone::clone_trait_object!(EthSigner); +dyn_clone::clone_trait_object!( EthSigner); /// Adds 20 random dev signers for access via the API. Used in dev mode. #[auto_impl::auto_impl(&)] diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs index 9957a00a41d..13ad9b778b2 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/spec.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs @@ -22,11 +22,14 @@ pub trait EthApiSpec: Network: NetworkInfo, > { + /// The transaction type signers are using. + type Transaction; + /// Returns the block node is started on. fn starting_block(&self) -> U256; /// Returns a handle to the signers owned by provider. - fn signers(&self) -> &parking_lot::RwLock>>; + fn signers(&self) -> &parking_lot::RwLock>>>; /// Returns the current ethereum protocol version. fn protocol_version(&self) -> impl Future> + Send { diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 50ff1b557b5..4c9ccecd363 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -2,7 +2,7 @@ //! RPC methods. 
use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; use crate::{EthApiTypes, FromEthApiError, RpcNodeCore, RpcNodeCoreExt}; -use alloy_consensus::{constants::KECCAK_EMPTY, Header}; +use alloy_consensus::{constants::KECCAK_EMPTY, BlockHeader}; use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types_eth::{Account, EIP1186AccountProofResponse}; @@ -12,8 +12,8 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::ConfigureEvmEnv; use reth_provider::{ - BlockIdReader, BlockNumReader, ChainSpecProvider, EvmEnvProvider as _, StateProvider, - StateProviderBox, StateProviderFactory, + BlockIdReader, BlockNumReader, ChainSpecProvider, EvmEnvProvider as _, ProviderHeader, + StateProvider, StateProviderBox, StateProviderFactory, }; use reth_rpc_eth_types::{EthApiError, PendingBlockEnv, RpcInvalidTransactionError}; use reth_transaction_pool::TransactionPool; @@ -247,14 +247,14 @@ pub trait LoadState: /// This is used for tracing raw blocks fn evm_env_for_raw_block( &self, - header: &Header, + header: &ProviderHeader, ) -> impl Future> + Send where Self: LoadPendingBlock + SpawnBlocking, { async move { // get the parent config first - let (cfg, mut block_env, _) = self.evm_env_at(header.parent_hash.into()).await?; + let (cfg, mut block_env, _) = self.evm_env_at(header.parent_hash().into()).await?; let after_merge = cfg.handler_cfg.spec_id >= SpecId::MERGE; self.evm_config().fill_block_env(&mut block_env, header, after_merge); diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 114b4c41d90..e000218e70e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -3,14 +3,15 @@ use std::{fmt::Display, sync::Arc}; use crate::{FromEvmError, RpcNodeCore}; -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_eth::{BlockId, 
TransactionInfo}; use futures::Future; use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::SealedBlockWithSenders; -use reth_provider::BlockReader; +use reth_primitives_traits::{BlockBody, SignedTransaction}; +use reth_provider::{BlockReader, ProviderBlock, ProviderHeader, ProviderTx}; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ -25,7 +26,15 @@ use revm_primitives::{ use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; /// Executes CPU heavy tasks. -pub trait Trace: LoadState> { +pub trait Trace: + LoadState< + Provider: BlockReader, + Evm: ConfigureEvm< + Header = ProviderHeader, + Transaction = ProviderTx, + >, +> +{ /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state /// changes. fn inspect( @@ -190,7 +199,7 @@ pub trait Trace: LoadState( &self, block_id: BlockId, - block: Option::Block>>>, + block: Option>>>, highest_index: Option, config: TracingInspectorConfig, f: F, @@ -271,7 +280,7 @@ pub trait Trace: LoadState( &self, block_id: BlockId, - block: Option>, + block: Option>>>, highest_index: Option, mut inspector_setup: Setup, f: F, @@ -304,7 +313,7 @@ pub trait Trace: LoadState(); @@ -329,7 +338,7 @@ pub trait Trace: LoadState( &self, block_id: BlockId, - block: Option>, + block: Option>>>, config: TracingInspectorConfig, f: F, ) -> impl Future>, Self::Error>> + Send @@ -428,7 +437,7 @@ pub trait Trace: LoadState( &self, block_id: BlockId, - block: Option>, + block: Option>>>, insp_setup: Setup, f: F, ) -> impl Future>, Self::Error>> + Send @@ -459,7 +468,7 @@ pub trait Trace: LoadState + DatabaseCommit>( &self, - block: &SealedBlockWithSenders, + block: &SealedBlockWithSenders>, db: &mut DB, cfg: &CfgEnvWithHandlerCfg, block_env: &BlockEnv, @@ -472,12 +481,12 @@ pub trait Trace: LoadState { /// 
Returns a handle for signing data. /// /// Singer access in default (L1) trait method implementations. - fn signers(&self) -> &parking_lot::RwLock>>; + #[expect(clippy::type_complexity)] + fn signers(&self) -> &parking_lot::RwLock>>>>; /// Returns the transaction by hash. /// @@ -213,7 +214,7 @@ pub trait EthTransactions: LoadTransaction { let base_fee_per_gas = block.base_fee_per_gas(); if let Some((signer, tx)) = block.transactions_with_sender().nth(index) { let tx_info = TransactionInfo { - hash: Some(tx.hash()), + hash: Some(*tx.tx_hash()), block_hash: Some(block_hash), block_number: Some(block_number), base_fee: base_fee_per_gas.map(u128::from), @@ -294,7 +295,7 @@ pub trait EthTransactions: LoadTransaction { .find(|(_, (signer, tx))| **signer == sender && (*tx).nonce() == nonce) .map(|(index, (signer, tx))| { let tx_info = TransactionInfo { - hash: Some(tx.hash()), + hash: Some(*tx.tx_hash()), block_hash: Some(block_hash), block_number: Some(block_number), base_fee: base_fee_per_gas.map(u128::from), @@ -414,7 +415,7 @@ pub trait EthTransactions: LoadTransaction { &self, from: &Address, txn: TransactionRequest, - ) -> impl Future> + Send { + ) -> impl Future, Self::Error>> + Send { async move { self.find_signer(from)? .sign_transaction(txn, from) @@ -467,10 +468,11 @@ pub trait EthTransactions: LoadTransaction { } /// Returns the signer for the given account, if found in configured signers. 
+ #[expect(clippy::type_complexity)] fn find_signer( &self, account: &Address, - ) -> Result, Self::Error> { + ) -> Result> + 'static)>, Self::Error> { self.signers() .read() .iter() diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index c4a255985cb..d9c7f39a440 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -29,7 +29,7 @@ pub use reth_rpc_eth_types::error::{ AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError, }; pub use reth_rpc_types_compat::TransactionCompat; -pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; +pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction}; #[cfg(feature = "client")] pub use bundle::{EthBundleApiClient, EthCallBundleApiClient}; diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index c97ea5735ee..2da1bdac281 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -24,7 +24,7 @@ pub trait EthApiTypes: Send + Sync + Clone { + Send + Sync; /// Blockchain primitive types, specific to network, e.g. block and transaction. - type NetworkTypes: Network; + type NetworkTypes: Network; /// Conversion methods for transaction RPC type. type TransactionCompat: Send + Sync + Clone + fmt::Debug; @@ -41,6 +41,9 @@ pub type RpcBlock = Block, ::HeaderResponse>; /// Adapter for network specific receipt type. pub type RpcReceipt = ::ReceiptResponse; +/// Adapter for network specific header type. +pub type RpcHeader = ::HeaderResponse; + /// Adapter for network specific error type. 
pub type RpcError = ::Error; diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index bd23e3f42ab..ef2a61dd720 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -14,13 +14,13 @@ use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. #[derive(Debug, Clone, Constructor)] -pub struct PendingBlockEnv { +pub struct PendingBlockEnv { /// Configured [`CfgEnvWithHandlerCfg`] for the pending block. pub cfg: CfgEnvWithHandlerCfg, /// Configured [`BlockEnv`] for the pending block. pub block_env: BlockEnv, /// Origin block for the config - pub origin: PendingBlockEnvOrigin, + pub origin: PendingBlockEnvOrigin, } /// The origin for a configured [`PendingBlockEnv`] @@ -77,11 +77,11 @@ impl PendingBlockEnvOrigin { /// Locally built pending block for `pending` tag. #[derive(Debug, Constructor)] -pub struct PendingBlock { +pub struct PendingBlock { /// Timestamp when the pending block is considered outdated. pub expires_at: Instant, /// The locally built pending block. 
- pub block: SealedBlockWithSenders, + pub block: SealedBlockWithSenders, /// The receipts for the pending block - pub receipts: Vec, + pub receipts: Vec, } diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index a18771af3b0..36a01fa5903 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -8,7 +8,7 @@ use std::{ use alloy_eips::BlockId; use alloy_primitives::{TxHash, B256}; -use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Transaction}; +use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Header, Transaction}; use alloy_rpc_types_trace::{ common::TraceResult, geth::{GethDebugTracerType, GethDebugTracingOptions, GethTrace}, @@ -77,7 +77,7 @@ pub trait DebugApiExt { impl DebugApiExt for T where - T: EthApiClient + DebugApiClient + Sync, + T: EthApiClient + DebugApiClient + Sync, { type Provider = T; diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index 4c5d2ccb2a6..e67946f7b0a 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -1,7 +1,7 @@ //! Integration tests for the trace API. 
use alloy_primitives::map::HashSet; -use alloy_rpc_types_eth::{Block, Transaction}; +use alloy_rpc_types_eth::{Block, Header, Transaction}; use alloy_rpc_types_trace::{ filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest, }; @@ -113,7 +113,7 @@ async fn debug_trace_block_entire_chain() { let client = HttpClientBuilder::default().build(url).unwrap(); let current_block: u64 = - >::block_number(&client) + >::block_number(&client) .await .unwrap() .try_into() diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index d3944356117..d4e1aac88bf 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-primitives-traits.workspace = true # ethereum alloy-eips.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 564f5a939fc..d3238757fc5 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -1,13 +1,13 @@ //! Compatibility functions for rpc `Block` type. 
-use alloy_consensus::Sealed; +use alloy_consensus::{BlockHeader, Sealable, Sealed}; use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{B256, U256}; -use alloy_rlp::Encodable; use alloy_rpc_types_eth::{ Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; -use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders, TransactionSigned}; +use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, BlockWithSenders}; +use reth_primitives_traits::{Block as BlockTrait, BlockBody, SignedTransaction}; use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; @@ -15,19 +15,24 @@ use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; /// [`BlockTransactionsKind`] /// /// If a `block_hash` is provided, then this is used, otherwise the block hash is computed. -pub fn from_block( - block: BlockWithSenders, +#[expect(clippy::type_complexity)] +pub fn from_block( + block: BlockWithSenders, total_difficulty: U256, kind: BlockTransactionsKind, block_hash: Option, tx_resp_builder: &T, -) -> Result, T::Error> { +) -> Result>, T::Error> +where + T: TransactionCompat<<::Body as BlockBody>::Transaction>, + B: BlockTrait, +{ match kind { BlockTransactionsKind::Hashes => { - Ok(from_block_with_tx_hashes::(block, total_difficulty, block_hash)) + Ok(from_block_with_tx_hashes::(block, total_difficulty, block_hash)) } BlockTransactionsKind::Full => { - from_block_full::(block, total_difficulty, block_hash, tx_resp_builder) + from_block_full::(block, total_difficulty, block_hash, tx_resp_builder) } } } @@ -37,13 +42,16 @@ pub fn from_block( /// /// This will populate the `transactions` field with only the hashes of the transactions in the /// block: [`BlockTransactions::Hashes`] -pub fn from_block_with_tx_hashes( - block: BlockWithSenders, +pub fn from_block_with_tx_hashes( + block: BlockWithSenders, total_difficulty: U256, block_hash: Option, -) -> Block { - let block_hash = 
block_hash.unwrap_or_else(|| block.header.hash_slow()); - let transactions = block.body.transactions.iter().map(|tx| tx.hash()).collect(); +) -> Block> +where + B: BlockTrait, +{ + let block_hash = block_hash.unwrap_or_else(|| block.header().hash_slow()); + let transactions = block.body().transactions().iter().map(|tx| *tx.tx_hash()).collect(); from_block_with_transactions( block.length(), @@ -59,25 +67,30 @@ pub fn from_block_with_tx_hashes( /// /// This will populate the `transactions` field with the _full_ /// [`TransactionCompat::Transaction`] objects: [`BlockTransactions::Full`] -pub fn from_block_full( - mut block: BlockWithSenders, +#[expect(clippy::type_complexity)] +pub fn from_block_full( + block: BlockWithSenders, total_difficulty: U256, block_hash: Option, tx_resp_builder: &T, -) -> Result, T::Error> { - let block_hash = block_hash.unwrap_or_else(|| block.block.header.hash_slow()); - let block_number = block.block.number; - let base_fee_per_gas = block.block.base_fee_per_gas; +) -> Result>, T::Error> +where + T: TransactionCompat<<::Body as BlockBody>::Transaction>, + B: BlockTrait, +{ + let block_hash = block_hash.unwrap_or_else(|| block.block.header().hash_slow()); + let block_number = block.block.header().number(); + let base_fee_per_gas = block.block.header().base_fee_per_gas(); // NOTE: we can safely remove the body here because not needed to finalize the `Block` in // `from_block_with_transactions`, however we need to compute the length before let block_length = block.block.length(); - let transactions = std::mem::take(&mut block.block.body.transactions); + let transactions = block.block.body().transactions().to_vec(); let transactions_with_senders = transactions.into_iter().zip(block.senders); let transactions = transactions_with_senders .enumerate() .map(|(idx, (tx, sender))| { - let tx_hash = tx.hash(); + let tx_hash = *tx.tx_hash(); let signed_tx_ec_recovered = tx.with_signer(sender); let tx_info = TransactionInfo { hash: Some(tx_hash), @@ 
-87,7 +100,7 @@ pub fn from_block_full( index: Some(idx as u64), }; - from_recovered_with_block_context::( + from_recovered_with_block_context::<_, T>( signed_tx_ec_recovered, tx_info, tx_resp_builder, @@ -105,23 +118,28 @@ pub fn from_block_full( } #[inline] -fn from_block_with_transactions( +fn from_block_with_transactions( block_length: usize, block_hash: B256, - block: PrimitiveBlock, + block: B, total_difficulty: U256, transactions: BlockTransactions, -) -> Block { +) -> Block> { let withdrawals = block - .header - .withdrawals_root + .header() + .withdrawals_root() .is_some() - .then(|| block.body.withdrawals.map(Withdrawals::into_inner).map(Into::into)) + .then(|| block.body().withdrawals().cloned().map(Withdrawals::into_inner).map(Into::into)) .flatten(); - let uncles = block.body.ommers.into_iter().map(|h| h.hash_slow()).collect(); + let uncles = block + .body() + .ommers() + .map(|o| o.iter().map(|h| h.hash_slow()).collect()) + .unwrap_or_default(); + let (header, _) = block.split(); let header = Header::from_consensus( - Sealed::new_unchecked(block.header, block_hash), + Sealed::new_unchecked(header, block_hash), Some(total_difficulty), Some(U256::from(block_length)), ); diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 765b7e719b8..91236ca9de5 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,3 +1,4 @@ +use alloy_consensus::BlockHeader; use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; @@ -18,11 +19,11 @@ use reth_evm::{ execute::{BlockExecutorProvider, Executor}, ConfigureEvmEnv, }; -use reth_primitives::{Block, BlockExt, NodePrimitives, SealedBlockWithSenders}; -use reth_primitives_traits::SignedTransaction; +use reth_primitives::{BlockExt, NodePrimitives, SealedBlockWithSenders}; +use reth_primitives_traits::{Block as _, BlockBody, SignedTransaction}; use reth_provider::{ - 
BlockReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, - StateProviderFactory, TransactionVariant, + BlockReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, ProviderBlock, + StateProofProvider, StateProviderFactory, TransactionVariant, }; use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; @@ -81,9 +82,8 @@ where + StateProviderFactory + 'static, Eth: EthApiTypes + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives::Provider as BlockReader>::Block>, - >, + BlockExecutor: + BlockExecutorProvider>>, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -93,7 +93,7 @@ where /// Trace the entire block asynchronously async fn trace_block( &self, - block: Arc, + block: Arc>>, cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, opts: GethDebugTracingOptions, @@ -101,8 +101,8 @@ where // replay all transactions of the block let this = self.clone(); self.eth_api() - .spawn_with_state_at_block(block.parent_hash.into(), move |state| { - let mut results = Vec::with_capacity(block.body.transactions.len()); + .spawn_with_state_at_block(block.parent_hash().into(), move |state| { + let mut results = Vec::with_capacity(block.body.transactions().len()); let mut db = CacheDB::new(StateProviderDatabase::new(state)); this.eth_api().apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; @@ -110,7 +110,7 @@ where let mut transactions = block.transactions_with_sender().enumerate().peekable(); let mut inspector = None; while let Some((index, (signer, tx))) = transactions.next() { - let tx_hash = tx.hash(); + let tx_hash = *tx.tx_hash(); let env = EnvWithHandlerCfg { env: Env::boxed( @@ -157,18 +157,22 @@ where rlp_block: Bytes, opts: GethDebugTracingOptions, ) -> Result, Eth::Error> { - let block = Block::decode(&mut rlp_block.as_ref()) + let block: ProviderBlock = Decodable::decode(&mut 
rlp_block.as_ref()) .map_err(BlockError::RlpDecodeRawBlock) .map_err(Eth::Error::from_eth_err)?; - let (cfg, block_env) = self.eth_api().evm_env_for_raw_block(&block.header).await?; + let (cfg, block_env) = self.eth_api().evm_env_for_raw_block(block.header()).await?; // Depending on EIP-2 we need to recover the transactions differently - let senders = if self.inner.provider.chain_spec().is_homestead_active_at_block(block.number) + let senders = if self + .inner + .provider + .chain_spec() + .is_homestead_active_at_block(block.header().number()) { block - .body - .transactions + .body() + .transactions() .iter() .map(|tx| { tx.recover_signer() @@ -178,8 +182,8 @@ where .collect::, Eth::Error>>()? } else { block - .body - .transactions + .body() + .transactions() .iter() .map(|tx| { tx.recover_signer_unchecked() @@ -237,7 +241,7 @@ where // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in - let state_at: BlockId = block.parent_hash.into(); + let state_at: BlockId = block.parent_hash().into(); let block_hash = block.hash(); let this = self.clone(); @@ -258,7 +262,7 @@ where cfg.clone(), block_env.clone(), block_txs, - tx.hash(), + *tx.tx_hash(), )?; let env = EnvWithHandlerCfg { @@ -277,7 +281,7 @@ where Some(TransactionContext { block_hash: Some(block_hash), tx_index: Some(index), - tx_hash: Some(tx.hash()), + tx_hash: Some(*tx.tx_hash()), }), &mut None, ) @@ -514,15 +518,15 @@ where // we're essentially replaying the transactions in the block here, hence we need the state // that points to the beginning of the block, which is the state at the parent block - let mut at = block.parent_hash; + let mut at = block.parent_hash(); let mut replay_block_txs = true; // if a transaction index is provided, we need to replay the transactions until the index - let num_txs = transaction_index.index().unwrap_or(block.body.transactions.len()); + let num_txs = transaction_index.index().unwrap_or_else(|| 
block.body.transactions().len()); // but if all transactions are to be replayed, we can use the state at the block itself // this works with the exception of the PENDING block, because its state might not exist if // built locally - if !target_block.is_pending() && num_txs == block.body.transactions.len() { + if !target_block.is_pending() && num_txs == block.body.transactions().len() { at = block.hash(); replay_block_txs = false; } @@ -622,7 +626,7 @@ where .ok_or(EthApiError::HeaderNotFound(block_id.into()))?; self.eth_api() - .spawn_with_state_at_block(block.parent_hash.into(), move |state_provider| { + .spawn_with_state_at_block(block.parent_hash().into(), move |state_provider| { let db = StateProviderDatabase::new(&state_provider); let block_executor = this.inner.block_executor.executor(db); @@ -630,7 +634,7 @@ where let _ = block_executor .execute_with_state_closure( - (&(*block).clone().unseal(), block.difficulty).into(), + (&(*block).clone().unseal(), block.difficulty()).into(), |statedb: &State<_>| { witness_record.record_executed_state(statedb); }, diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index fca78d62d63..a9c316571ac 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -9,7 +9,7 @@ use jsonrpsee::core::RpcResult as Result; use reth_rpc_api::{EngineEthApiServer, EthApiServer, EthFilterApiServer}; /// Re-export for convenience pub use reth_rpc_engine_api::EngineApi; -use reth_rpc_eth_api::{FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; +use reth_rpc_eth_api::{FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction}; use tracing_futures::Instrument; macro_rules! 
engine_span { @@ -41,6 +41,7 @@ where RpcTransaction, RpcBlock, RpcReceipt, + RpcHeader, > + FullEthApiTypes, EthFilter: EthFilterApiServer>, { diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index d5117650a13..4866033c4bc 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -1,12 +1,13 @@ //! `Eth` bundle implementation and helpers. -use alloy_consensus::Transaction as _; +use alloy_consensus::{BlockHeader, Transaction as _}; use alloy_primitives::{Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{PooledTransactionsElement, Transaction}; +use reth_primitives::PooledTransactionsElement; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_api::{ @@ -15,12 +16,13 @@ use reth_rpc_eth_api::{ }; use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError}; use reth_tasks::pool::BlockingTaskGuard; +use reth_transaction_pool::{PoolConsensusTx, PoolPooledTx, PoolTransaction, TransactionPool}; use revm::{ db::{CacheDB, DatabaseCommit, DatabaseRef}, primitives::{ResultAndState, TxEnv}, }; use revm_primitives::{EnvKzgSettings, EnvWithHandlerCfg, SpecId, MAX_BLOB_GAS_PER_BLOCK}; -use std::{ops::Deref, sync::Arc}; +use std::sync::Arc; /// `Eth` bundle implementation. 
pub struct EthBundle { @@ -42,7 +44,16 @@ impl EthBundle { impl EthBundle where - Eth: EthTransactions + LoadPendingBlock + Call + 'static, + Eth: EthTransactions< + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus: From, + Pooled = PooledTransactionsElement, + >, + >, + > + LoadPendingBlock + + Call + + 'static, { /// Simulates a bundle of transactions at the top of a given block number with the state of /// another (or the same) block. This can be used to simulate future blocks with the current @@ -79,7 +90,7 @@ where let transactions = txs .into_iter() - .map(recover_raw_transaction::) + .map(recover_raw_transaction::>) .collect::, _>>()? .into_iter() .map(|tx| tx.to_components()) @@ -192,12 +203,10 @@ where })?; } - let tx = tx.into_transaction(); + let tx: PoolConsensusTx = tx.into(); - hasher.update(tx.hash()); - let gas_price = Transaction::effective_tip_per_gas(tx.deref(), basefee) - .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) - .map_err(Eth::Error::from_eth_err)?; + hasher.update(*tx.tx_hash()); + let gas_price = tx.effective_gas_price(basefee); eth_api.evm_config().fill_tx_env(evm.tx_mut(), &tx, signer); let ResultAndState { result, state } = evm.transact().map_err(Eth::Error::from_evm_err)?; @@ -235,7 +244,7 @@ where gas_price: U256::from(gas_price), gas_used, to_address: tx.to(), - tx_hash: tx.hash(), + tx_hash: *tx.tx_hash(), value, revert, }; diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 0a17e5e5f2b..1fe08d1c57f 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -244,7 +244,7 @@ pub struct EthApiInner { /// An interface to interact with the network network: Network, /// All configured Signers - signers: parking_lot::RwLock>>, + signers: parking_lot::RwLock>>>, /// The async cache frontend for eth related data eth_cache: EthStateCache, /// The async gas oracle frontend for gas price suggestions @@ -260,7 +260,7 @@ pub struct EthApiInner { /// The type 
that can spawn tasks which would otherwise block. task_spawner: Box, /// Cached pending block if any - pending_block: Mutex>, + pending_block: Mutex>>, /// A pool dedicated to CPU heavy blocking tasks. blocking_task_pool: BlockingTaskPool, /// Cache for block fees history @@ -343,7 +343,9 @@ where /// Returns a handle to the pending block. #[inline] - pub const fn pending_block(&self) -> &Mutex> { + pub const fn pending_block( + &self, + ) -> &Mutex>> { &self.pending_block } @@ -397,7 +399,9 @@ where /// Returns a handle to the signers. #[inline] - pub const fn signers(&self) -> &parking_lot::RwLock>> { + pub const fn signers( + &self, + ) -> &parking_lot::RwLock>>> { &self.signers } @@ -579,7 +583,7 @@ mod tests { /// Invalid block range #[tokio::test] async fn test_fee_history_empty() { - let response = as EthApiServer<_, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _>>::fee_history( &build_test_eth_api(NoopProvider::default()), U64::from(1), BlockNumberOrTag::Latest, @@ -601,7 +605,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _>>::fee_history( ð_api, U64::from(newest_block + 1), newest_block.into(), @@ -624,7 +628,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _>>::fee_history( ð_api, U64::from(1), (newest_block + 1000).into(), @@ -647,7 +651,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _>>::fee_history( ð_api, U64::from(0), newest_block.into(), diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs 
b/crates/rpc/rpc/src/eth/helpers/block.rs index 409a3095abd..51a76f4e98f 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,8 +1,10 @@ //! Contains RPC handler implementations specific to blocks. +use alloy_consensus::BlockHeader; use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; use reth_primitives::TransactionMeta; -use reth_provider::{BlockReader, HeaderProvider}; +use reth_primitives_traits::{BlockBody, SignedTransaction}; +use reth_provider::BlockReader; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, RpcNodeCoreExt, RpcReceipt, @@ -16,7 +18,10 @@ where Self: LoadBlock< Error = EthApiError, NetworkTypes: alloy_network::Network, - Provider: HeaderProvider, + Provider: BlockReader< + Transaction = reth_primitives::TransactionSigned, + Receipt = reth_primitives::Receipt, + >, >, Provider: BlockReader, { @@ -28,21 +33,21 @@ where Self: LoadReceipt, { if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? 
{ - let block_number = block.number; - let base_fee = block.base_fee_per_gas; + let block_number = block.number(); + let base_fee = block.base_fee_per_gas(); let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas; - let timestamp = block.timestamp; + let excess_blob_gas = block.excess_blob_gas(); + let timestamp = block.timestamp(); return block .body - .transactions - .into_iter() + .transactions() + .iter() .zip(receipts.iter()) .enumerate() .map(|(idx, (tx, receipt))| { let meta = TransactionMeta { - tx_hash: tx.hash(), + tx_hash: *tx.tx_hash(), index: idx as u64, block_hash, block_number, @@ -50,7 +55,7 @@ where excess_blob_gas, timestamp, }; - EthReceiptBuilder::new(&tx, meta, receipt, &receipts) + EthReceiptBuilder::new(tx, meta, receipt, &receipts) .map(|builder| builder.build()) }) .collect::, Self::Error>>() diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 3835503a4c8..bddd2b1b8fc 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -3,7 +3,7 @@ use crate::EthApi; use alloy_consensus::Header; use reth_evm::ConfigureEvm; -use reth_provider::BlockReader; +use reth_provider::{BlockReader, ProviderHeader}; use reth_rpc_eth_api::helpers::{ estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking, }; @@ -17,7 +17,7 @@ where impl Call for EthApi where - Self: LoadState> + SpawnBlocking, + Self: LoadState>> + SpawnBlocking, EvmConfig: ConfigureEvm
, Provider: BlockReader, { diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 8d8d15d2e46..344f56da849 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,11 +1,18 @@ //! Support for building a pending block with transactions from local view of mempool. -use alloy_consensus::Header; +use alloy_consensus::{constants::EMPTY_WITHDRAWALS, Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE}; +use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; +use reth_primitives::{ + logs_bloom, + proofs::{calculate_receipt_root_no_memo, calculate_transaction_root}, + BlockBody, Receipt, +}; use reth_provider::{ - BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderTx, - StateProviderFactory, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderBlock, + ProviderReceipt, ProviderTx, StateProviderFactory, }; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, @@ -13,14 +20,16 @@ use reth_rpc_eth_api::{ }; use reth_rpc_eth_types::PendingBlock; use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId, B256}; use crate::EthApi; impl LoadPendingBlock for EthApi where - Self: SpawnBlocking - + RpcNodeCore< + Self: SpawnBlocking< + NetworkTypes: alloy_network::Network, + > + RpcNodeCore< Provider: BlockReaderIdExt< Transaction = reth_primitives::TransactionSigned, Block = reth_primitives::Block, @@ -34,10 +43,81 @@ where >, Evm: ConfigureEvm
>, >, - Provider: BlockReader, + Provider: BlockReader, { #[inline] - fn pending_block(&self) -> &tokio::sync::Mutex> { + fn pending_block( + &self, + ) -> &tokio::sync::Mutex< + Option, ProviderReceipt>>, + > { self.inner.pending_block() } + + fn assemble_block( + &self, + cfg: CfgEnvWithHandlerCfg, + block_env: BlockEnv, + parent_hash: revm_primitives::B256, + state_root: revm_primitives::B256, + transactions: Vec>, + receipts: &[ProviderReceipt], + ) -> reth_provider::ProviderBlock { + let transactions_root = calculate_transaction_root(&transactions); + let receipts_root = calculate_receipt_root_no_memo(&receipts.iter().collect::>()); + + let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs)); + + let header = Header { + parent_hash, + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: block_env.coinbase, + state_root, + transactions_root, + receipts_root, + withdrawals_root: (cfg.handler_cfg.spec_id >= SpecId::SHANGHAI) + .then_some(EMPTY_WITHDRAWALS), + logs_bloom, + timestamp: block_env.timestamp.to::(), + mix_hash: block_env.prevrandao.unwrap_or_default(), + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(block_env.basefee.to::()), + number: block_env.number.to::(), + gas_limit: block_env.gas_limit.to::(), + difficulty: U256::ZERO, + gas_used: receipts.last().map(|r| r.cumulative_gas_used).unwrap_or_default(), + blob_gas_used: (cfg.handler_cfg.spec_id >= SpecId::CANCUN).then(|| { + transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum::() + }), + excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), + extra_data: Default::default(), + parent_beacon_block_root: (cfg.handler_cfg.spec_id >= SpecId::CANCUN) + .then_some(B256::ZERO), + requests_hash: (cfg.handler_cfg.spec_id >= SpecId::PRAGUE) + .then_some(EMPTY_REQUESTS_HASH), + target_blobs_per_block: None, + }; + + // seal the block + reth_primitives::Block { + header, + body: BlockBody { transactions, ommers: vec![], withdrawals: None }, + } + } + + fn 
assemble_receipt( + &self, + tx: &reth_primitives::RecoveredTx>, + result: revm_primitives::ExecutionResult, + cumulative_gas_used: u64, + ) -> reth_provider::ProviderReceipt { + #[allow(clippy::needless_update)] + Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + ..Default::default() + } + } } diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index 022c3153b01..3528a966e3f 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -10,7 +10,6 @@ use alloy_primitives::{eip191_hash_message, Address, PrimitiveSignature as Signa use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; -use reth_primitives::TransactionSigned; use reth_provider::BlockReader; use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; use reth_rpc_eth_types::SignError; @@ -35,14 +34,14 @@ pub struct DevSigner { #[allow(dead_code)] impl DevSigner { /// Generates a random dev signer which satisfies [`EthSigner`] trait - pub fn random() -> Box { + pub fn random() -> Box> { let mut signers = Self::random_signers(1); signers.pop().expect("expect to generate at least one signer") } /// Generates provided number of random dev signers /// which satisfy [`EthSigner`] trait - pub fn random_signers(num: u32) -> Vec> { + pub fn random_signers(num: u32) -> Vec + 'static>> { let mut signers = Vec::with_capacity(num as usize); for _ in 0..num { let sk = PrivateKeySigner::random_with(&mut rand::thread_rng()); @@ -51,7 +50,7 @@ impl DevSigner { let addresses = vec![address]; let accounts = HashMap::from([(address, sk)]); - signers.push(Box::new(Self { addresses, accounts }) as Box); + signers.push(Box::new(Self { addresses, accounts }) as Box>); } signers } @@ -67,7 +66,7 @@ impl DevSigner { } #[async_trait::async_trait] -impl 
EthSigner for DevSigner { +impl EthSigner for DevSigner { fn accounts(&self) -> Vec
{ self.addresses.clone() } @@ -83,11 +82,7 @@ impl EthSigner for DevSigner { self.sign_hash(hash, address) } - async fn sign_transaction( - &self, - request: TransactionRequest, - address: &Address, - ) -> Result { + async fn sign_transaction(&self, request: TransactionRequest, address: &Address) -> Result { // create local signer wallet from signing key let signer = self.accounts.get(address).ok_or(SignError::NoAccount)?.clone(); let wallet = EthereumWallet::from(signer); @@ -98,7 +93,7 @@ impl EthSigner for DevSigner { // decode transaction into signed transaction type let encoded = txn_envelope.encoded_2718(); - let txn_signed = TransactionSigned::decode_2718(&mut encoded.as_ref()) + let txn_signed = T::decode_2718(&mut encoded.as_ref()) .map_err(|_| SignError::InvalidTransactionRequest)?; Ok(txn_signed) @@ -115,6 +110,7 @@ mod tests { use alloy_consensus::Transaction; use alloy_primitives::{Bytes, U256}; use alloy_rpc_types_eth::TransactionInput; + use reth_primitives::TransactionSigned; use revm_primitives::TxKind; use super::*; @@ -197,7 +193,9 @@ mod tests { let data: TypedData = serde_json::from_str(eip_712_example).unwrap(); let signer = build_signer(); let from = *signer.addresses.first().unwrap(); - let sig = signer.sign_typed_data(from, &data).unwrap(); + let sig = + EthSigner::::sign_typed_data(&signer, from, &data) + .unwrap(); let expected = Signature::new( U256::from_str_radix( "5318aee9942b84885761bb20e768372b76e7ee454fc4d39b59ce07338d15a06c", @@ -219,7 +217,9 @@ mod tests { let message = b"Test message"; let signer = build_signer(); let from = *signer.addresses.first().unwrap(); - let sig = signer.sign(from, message).await.unwrap(); + let sig = EthSigner::::sign(&signer, from, message) + .await + .unwrap(); let expected = Signature::new( U256::from_str_radix( "54313da7432e4058b8d22491b2e7dbb19c7186c35c24155bec0820a8a2bfe0c1", @@ -255,7 +255,8 @@ mod tests { nonce: Some(0u64), ..Default::default() }; - let txn_signed = 
signer.sign_transaction(request, &from).await; + let txn_signed: std::result::Result = + signer.sign_transaction(request, &from).await; assert!(txn_signed.is_ok()); assert_eq!(Bytes::from(message.to_vec()), txn_signed.unwrap().input().0); diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index f7bc89ae2b1..41c4a5b07c3 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -1,7 +1,9 @@ use alloy_primitives::U256; use reth_chainspec::EthereumHardforks; use reth_network_api::NetworkInfo; -use reth_provider::{BlockNumReader, BlockReader, ChainSpecProvider, StageCheckpointReader}; +use reth_provider::{ + BlockNumReader, BlockReader, ChainSpecProvider, ProviderTx, StageCheckpointReader, +}; use reth_rpc_eth_api::{helpers::EthApiSpec, RpcNodeCore}; use crate::EthApi; @@ -16,11 +18,16 @@ where >, Provider: BlockReader, { + type Transaction = ProviderTx; + fn starting_block(&self) -> U256 { self.inner.starting_block() } - fn signers(&self) -> &parking_lot::RwLock>> { + fn signers( + &self, + ) -> &parking_lot::RwLock>>> + { self.inner.signers() } } diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index ed7150153e5..69b4d9806bf 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -1,15 +1,20 @@ //! Contains RPC handler implementations specific to tracing. 
-use alloy_consensus::Header; use reth_evm::ConfigureEvm; -use reth_provider::BlockReader; +use reth_provider::{BlockReader, ProviderHeader, ProviderTx}; use reth_rpc_eth_api::helpers::{LoadState, Trace}; use crate::EthApi; impl Trace for EthApi where - Self: LoadState>, + Self: LoadState< + Provider: BlockReader, + Evm: ConfigureEvm< + Header = ProviderHeader, + Transaction = ProviderTx, + >, + >, Provider: BlockReader, { } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 647e16c25af..04ed812fab2 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -1,6 +1,6 @@ //! Contains RPC handler implementations specific to transactions -use reth_provider::{BlockReader, BlockReaderIdExt, TransactionsProvider}; +use reth_provider::{BlockReader, BlockReaderIdExt, ProviderTx, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, FullEthApiTypes, RpcNodeCoreExt, @@ -13,10 +13,10 @@ impl EthTransactions for EthApi where Self: LoadTransaction, - Provider: BlockReader, + Provider: BlockReader>, { #[inline] - fn signers(&self) -> &parking_lot::RwLock>> { + fn signers(&self) -> &parking_lot::RwLock>>>> { self.inner.signers() } } diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 0702d7df0e4..f4e77b4fbe5 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -1,5 +1,6 @@ //! `Eth` Sim bundle implementation and helpers. 
+use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; use alloy_primitives::U256; use alloy_rpc_types_eth::BlockId; @@ -10,8 +11,7 @@ use alloy_rpc_types_mev::{ use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{PooledTransactionsElement, TransactionSigned}; -use reth_provider::{ChainSpecProvider, HeaderProvider}; +use reth_provider::{ChainSpecProvider, HeaderProvider, ProviderTx}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::MevSimApiServer; use reth_rpc_eth_api::{ @@ -20,6 +20,7 @@ use reth_rpc_eth_api::{ }; use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_tasks::pool::BlockingTaskGuard; +use reth_transaction_pool::{PoolConsensusTx, PoolPooledTx, PoolTransaction, TransactionPool}; use revm::{ db::CacheDB, primitives::{Address, EnvWithHandlerCfg, ResultAndState, SpecId, TxEnv}, @@ -45,9 +46,9 @@ const SBUNDLE_PAYOUT_MAX_COST: u64 = 30_000; /// A flattened representation of a bundle item containing transaction and associated metadata. 
#[derive(Clone, Debug)] -pub struct FlattenedBundleItem { +pub struct FlattenedBundleItem { /// The signed transaction - pub tx: TransactionSigned, + pub tx: T, /// The address that signed the transaction pub signer: Address, /// Whether the transaction is allowed to revert @@ -93,7 +94,7 @@ where fn parse_and_flatten_bundle( &self, request: &SendBundleRequest, - ) -> Result, EthApiError> { + ) -> Result>>, EthApiError> { let mut items = Vec::new(); // Stack for processing bundles @@ -171,10 +172,11 @@ where match &body[idx] { BundleItem::Tx { tx, can_revert } => { let recovered_tx = - recover_raw_transaction::(tx.clone()) + recover_raw_transaction::>(tx.clone()) .map_err(EthApiError::from)?; let (tx, signer) = recovered_tx.to_components(); - let tx = tx.into_transaction(); + let tx: PoolConsensusTx = + ::Transaction::pooled_into_consensus(tx); let refund_percent = validity.as_ref().and_then(|v| v.refund.as_ref()).and_then(|refunds| { diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index d19dcf4d609..173a2ff3495 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,8 +1,8 @@ -use alloy_consensus::Transaction; +use alloy_consensus::{BlockHeader, Transaction}; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_network::{ReceiptResponse, TransactionResponse}; use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; -use alloy_rpc_types_eth::{BlockTransactions, Header, TransactionReceipt}; +use alloy_rpc_types_eth::{BlockTransactions, TransactionReceipt}; use alloy_rpc_types_trace::{ otterscan::{ BlockDetails, ContractCreator, InternalOperation, OperationType, OtsBlockTransactions, @@ -15,7 +15,7 @@ use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned}; use reth_rpc_api::{EthApiServer, OtterscanServer}; use reth_rpc_eth_api::{ helpers::{EthTransactions, TraceExt}, - FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction, TransactionCompat, + FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, 
RpcTransaction, TransactionCompat, }; use reth_rpc_eth_types::{utils::binary_search, EthApiError}; use reth_rpc_server_types::result::internal_rpc_err; @@ -49,7 +49,7 @@ where &self, block: RpcBlock, receipts: Vec>, - ) -> RpcResult { + ) -> RpcResult>> { // blob fee is burnt, so we don't need to calculate it let total_fees = receipts .iter() @@ -61,18 +61,23 @@ where } #[async_trait] -impl OtterscanServer> for OtterscanApi +impl OtterscanServer, RpcHeader> + for OtterscanApi where Eth: EthApiServer< RpcTransaction, RpcBlock, RpcReceipt, + RpcHeader, > + EthTransactions + TraceExt + 'static, { /// Handler for `{ots,erigon}_getHeaderByNumber` - async fn get_header_by_number(&self, block_number: u64) -> RpcResult> { + async fn get_header_by_number( + &self, + block_number: u64, + ) -> RpcResult>> { self.eth.header_by_number(BlockNumberOrTag::Number(block_number)).await } @@ -165,7 +170,10 @@ where } /// Handler for `ots_getBlockDetails` - async fn get_block_details(&self, block_number: u64) -> RpcResult> { + async fn get_block_details( + &self, + block_number: u64, + ) -> RpcResult>> { let block_id = block_number.into(); let block = self.eth.block_by_number(block_id, true); let block_id = block_id.into(); @@ -178,7 +186,10 @@ where } /// Handler for `getBlockDetailsByHash` - async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult> { + async fn get_block_details_by_hash( + &self, + block_hash: B256, + ) -> RpcResult>> { let block = self.eth.block_by_hash(block_hash, true); let block_id = block_hash.into(); let receipts = self.eth.block_receipts(block_id); @@ -195,7 +206,9 @@ where block_number: u64, page_number: usize, page_size: usize, - ) -> RpcResult, Header>> { + ) -> RpcResult< + OtsBlockTransactions, RpcHeader>, + > { let block_id = block_number.into(); // retrieve full block and its receipts let block = self.eth.block_by_number(block_id, true); @@ -236,7 +249,7 @@ where } // Crop receipts and transform them into OtsTransactionReceipt - let 
timestamp = Some(block.header.timestamp); + let timestamp = Some(block.header.timestamp()); let receipts = receipts .drain(page_start..page_end) .zip(transactions.iter().map(Transaction::ty)) diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 009203f757d..44867f51378 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,4 +1,4 @@ -use alloy_consensus::Header; +use alloy_consensus::BlockHeader as _; use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256, U256}; use alloy_rpc_types_eth::{ @@ -19,13 +19,14 @@ use reth_consensus_common::calc::{ base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, }; use reth_evm::ConfigureEvmEnv; -use reth_primitives::PooledTransactionsElement; +use reth_primitives_traits::{BlockBody, BlockHeader}; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError}; use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_tasks::pool::BlockingTaskGuard; +use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; use revm::{ db::{CacheDB, DatabaseCommit}, primitives::EnvWithHandlerCfg, @@ -116,8 +117,8 @@ where trace_types: HashSet, block_id: Option, ) -> Result { - let tx = recover_raw_transaction::(tx)? - .into_ecrecovered_transaction(); + let tx = recover_raw_transaction::>(tx)? + .map_transaction(::Transaction::pooled_into_consensus); let (cfg, block, at) = self.eth_api().evm_env_at(block_id.unwrap_or_default()).await?; @@ -313,11 +314,13 @@ where // add reward traces for all blocks for block in &blocks { - if let Some(base_block_reward) = self.calculate_base_block_reward(&block.header)? { + if let Some(base_block_reward) = + self.calculate_base_block_reward(block.header.header())? 
+ { all_traces.extend( self.extract_reward_traces( - &block.header, - &block.body.ommers, + block.header.header(), + block.body.ommers(), base_block_reward, ) .into_iter() @@ -393,10 +396,12 @@ where maybe_traces.map(|traces| traces.into_iter().flatten().collect::>()); if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) { - if let Some(base_block_reward) = self.calculate_base_block_reward(&block.header)? { + if let Some(base_block_reward) = + self.calculate_base_block_reward(block.header.header())? + { traces.extend(self.extract_reward_traces( - &block.header, - &block.body.ommers, + block.block.header(), + block.body.ommers(), base_block_reward, )); } @@ -490,7 +495,7 @@ where Ok(Some(BlockOpcodeGas { block_hash: block.hash(), - block_number: block.header.number, + block_number: block.header.number(), transactions, })) } @@ -500,25 +505,28 @@ where /// - if Paris hardfork is activated, no block rewards are given /// - if Paris hardfork is not activated, calculate block rewards with block number only /// - if Paris hardfork is unknown, calculate block rewards with block number and ttd - fn calculate_base_block_reward(&self, header: &Header) -> Result, Eth::Error> { + fn calculate_base_block_reward( + &self, + header: &H, + ) -> Result, Eth::Error> { let chain_spec = self.provider().chain_spec(); - let is_paris_activated = chain_spec.is_paris_active_at_block(header.number); + let is_paris_activated = chain_spec.is_paris_active_at_block(header.number()); Ok(match is_paris_activated { Some(true) => None, - Some(false) => Some(base_block_reward_pre_merge(&chain_spec, header.number)), + Some(false) => Some(base_block_reward_pre_merge(&chain_spec, header.number())), None => { // if Paris hardfork is unknown, we need to fetch the total difficulty at the // block's height and check if it is pre-merge to calculate the base block reward if let Some(header_td) = self .provider() - .header_td_by_number(header.number) + 
.header_td_by_number(header.number()) .map_err(Eth::Error::from_eth_err)? { base_block_reward( chain_spec.as_ref(), - header.number, - header.difficulty, + header.number(), + header.difficulty(), header_td, ) } else { @@ -531,30 +539,33 @@ where /// Extracts the reward traces for the given block: /// - block reward /// - uncle rewards - fn extract_reward_traces( + fn extract_reward_traces( &self, - header: &Header, - ommers: &[Header], + header: &H, + ommers: Option<&[H]>, base_block_reward: u128, ) -> Vec { - let mut traces = Vec::with_capacity(ommers.len() + 1); + let ommers_cnt = ommers.map(|o| o.len()).unwrap_or_default(); + let mut traces = Vec::with_capacity(ommers_cnt + 1); - let block_reward = block_reward(base_block_reward, ommers.len()); + let block_reward = block_reward(base_block_reward, ommers_cnt); traces.push(reward_trace( header, RewardAction { - author: header.beneficiary, + author: header.beneficiary(), reward_type: RewardType::Block, value: U256::from(block_reward), }, )); + let Some(ommers) = ommers else { return traces }; + for uncle in ommers { - let uncle_reward = ommer_reward(base_block_reward, header.number, uncle.number); + let uncle_reward = ommer_reward(base_block_reward, header.number(), uncle.number()); traces.push(reward_trace( header, RewardAction { - author: uncle.beneficiary, + author: uncle.beneficiary(), reward_type: RewardType::Uncle, value: U256::from(uncle_reward), }, @@ -715,10 +726,10 @@ struct TraceApiInner { /// Helper to construct a [`LocalizedTransactionTrace`] that describes a reward to the block /// beneficiary. 
-fn reward_trace(header: &Header, reward: RewardAction) -> LocalizedTransactionTrace { +fn reward_trace(header: &H, reward: RewardAction) -> LocalizedTransactionTrace { LocalizedTransactionTrace { block_hash: Some(header.hash_slow()), - block_number: Some(header.number), + block_number: Some(header.number()), transaction_hash: None, transaction_position: None, trace: TransactionTrace { diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 5aea5be27d4..9c7973b14eb 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -24,7 +24,7 @@ use reth_primitives::{ Account, BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, }; -use reth_primitives_traits::{Block, BlockBody}; +use reth_primitives_traits::BlockBody; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -1036,7 +1036,7 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block_ref().block().body().transactions().to_vec())), + |block_state| Ok(Some(block_state.block_ref().block().body.transactions().to_vec())), ) } @@ -1047,7 +1047,7 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| Some(block_state.block_ref().block().body().transactions().to_vec()), + |block_state, _| Some(block_state.block_ref().block().body.transactions().to_vec()), |_| true, ) } diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index 2f1c9750edb..b2d2c1663ed 100644 --- a/crates/storage/storage-api/src/header.rs +++ 
b/crates/storage/storage-api/src/header.rs @@ -5,6 +5,9 @@ use reth_primitives_traits::BlockHeader; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; +/// A helper type alias to access [`HeaderProvider::Header`]. +pub type ProviderHeader<P> = <P as HeaderProvider>::Header; + /// Client trait for fetching `Header` related data. #[auto_impl::auto_impl(&, Arc)] pub trait HeaderProvider: Send + Sync { diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index aa238ded2f0..2859d71b9d1 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -43,6 +43,9 @@ pub type PeerId = alloy_primitives::B512; /// Helper type alias to access [`PoolTransaction::Consensus`] for a given [`TransactionPool`]. pub type PoolConsensusTx<P> = <<P as TransactionPool>::Transaction as PoolTransaction>::Consensus; +/// Helper type alias to access [`PoolTransaction::Pooled`] for a given [`TransactionPool`]. +pub type PoolPooledTx<P> = <<P as TransactionPool>
::Transaction as PoolTransaction>::Pooled; + /// General purpose abstraction of a transaction-pool. /// /// This is intended to be used by API-consumers such as RPC that need inject new incoming, @@ -964,7 +967,7 @@ pub trait PoolTransaction: type TryFromConsensusError: fmt::Display; /// Associated type representing the raw consensus variant of the transaction. - type Consensus; + type Consensus: From<Self::Pooled>; /// Associated type representing the recovered pooled variant of the transaction. type Pooled: SignedTransaction; @@ -1003,6 +1006,11 @@ tx: RecoveredTx<Self::Consensus>, ) -> Result<RecoveredTx<Self::Pooled>, Self::TryFromConsensusError>; + /// Converts the `Pooled` type into the `Consensus` type. + fn pooled_into_consensus(tx: Self::Pooled) -> Self::Consensus { + tx.into() + } + /// Hash of the transaction. fn hash(&self) -> &TxHash; From 2f46fe6d48407044ad58a3ae37f59380b57044aa Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 13:49:57 +0100 Subject: [PATCH 909/970] chore: use leaner NoopProvider in network (#13178) --- crates/net/network/Cargo.toml | 12 ++-- crates/net/network/src/config.rs | 2 +- crates/net/network/src/lib.rs | 6 +- crates/net/network/src/manager.rs | 2 +- crates/net/network/src/state.rs | 24 ++++---- crates/net/network/src/test_utils/testnet.rs | 7 ++- crates/net/network/src/transactions/mod.rs | 2 +- crates/net/network/tests/it/connect.rs | 15 +++-- crates/net/network/tests/it/session.rs | 2 +- crates/net/network/tests/it/startup.rs | 2 +- crates/storage/storage-api/src/noop.rs | 60 ++++++++++++++++++-- 11 files changed, 91 insertions(+), 43 deletions(-) diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 9e8c59226b2..97be9b1708e 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -30,7 +30,6 @@ reth-ecies.workspace = true reth-tasks.workspace = true reth-transaction-pool.workspace = true reth-storage-api.workspace = true -reth-provider = { workspace = true, optional = true }
reth-tokio-util.workspace = true reth-consensus.workspace = true reth-network-peers = { workspace = true, features = ["net"] } @@ -90,7 +89,7 @@ reth-transaction-pool = { workspace = true, features = ["test-utils"] } # alloy deps for testing against nodes alloy-node-bindings.workspace = true -alloy-provider= { workspace = true, features = ["admin-api"] } +alloy-provider = { workspace = true, features = ["admin-api"] } alloy-consensus.workspace = true # misc @@ -112,7 +111,6 @@ serde = [ "reth-network-types/serde", "reth-dns-discovery/serde", "reth-eth-wire/serde", - "reth-provider?/serde", "reth-eth-wire-types/serde", "alloy-consensus/serde", "alloy-eips/serde", @@ -123,11 +121,10 @@ serde = [ "smallvec/serde", "url/serde", "reth-primitives-traits/serde", - "reth-ethereum-forks/serde" + "reth-ethereum-forks/serde", + "reth-provider/serde" ] test-utils = [ - "dep:reth-provider", - "reth-provider?/test-utils", "dep:tempfile", "reth-transaction-pool/test-utils", "reth-network-types/test-utils", @@ -137,7 +134,8 @@ test-utils = [ "reth-network/test-utils", "reth-network-p2p/test-utils", "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", + "reth-primitives-traits/test-utils", + "reth-provider/test-utils" ] [[bench]] diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 7d3f932b418..fb383b104a5 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -642,7 +642,7 @@ mod tests { use reth_chainspec::{Chain, MAINNET}; use reth_dns_discovery::tree::LinkEntry; use reth_primitives::ForkHash; - use reth_provider::test_utils::NoopProvider; + use reth_storage_api::noop::NoopProvider; use std::sync::Arc; fn builder() -> NetworkConfigBuilder { diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index fadeb1f6519..af5976ce5be 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -42,7 +42,7 @@ //! ### Configure and launch a standalone network //! //! 
The [`NetworkConfig`] is used to configure the network. -//! It requires an instance of [`BlockReader`](reth_provider::BlockReader). +//! It requires an instance of [`BlockReader`](reth_storage_api::BlockReader). //! //! ``` //! # async fn launch() { @@ -50,7 +50,7 @@ //! config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager, //! }; //! use reth_network_peers::mainnet_nodes; -//! use reth_provider::test_utils::NoopProvider; +//! use reth_storage_api::noop::NoopProvider; //! //! // This block provider implementation is used for testing purposes. //! let client = NoopProvider::default(); @@ -79,7 +79,7 @@ //! config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager, //! }; //! use reth_network_peers::mainnet_nodes; -//! use reth_provider::test_utils::NoopProvider; +//! use reth_storage_api::noop::NoopProvider; //! use reth_transaction_pool::TransactionPool; //! async fn launch(pool: Pool) { //! // This block provider implementation is used for testing purposes. diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 5e580df883f..fed3f54408b 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -296,7 +296,7 @@ impl NetworkManager { /// config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager, /// }; /// use reth_network_peers::mainnet_nodes; - /// use reth_provider::test_utils::NoopProvider; + /// use reth_storage_api::noop::NoopProvider; /// use reth_transaction_pool::TransactionPool; /// async fn launch(pool: Pool) { /// // This block provider implementation is used for testing purposes. 
diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 4dffadb2547..4bb82cf97c4 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -556,11 +556,13 @@ pub(crate) enum StateAction { #[cfg(test)] mod tests { - use std::{ - future::poll_fn, - sync::{atomic::AtomicU64, Arc}, + use crate::{ + discovery::Discovery, + fetch::StateFetcher, + peers::PeersManager, + state::{BlockNumReader, NetworkState}, + PeerRequest, }; - use alloy_consensus::Header; use alloy_primitives::B256; use reth_eth_wire::{BlockBodies, Capabilities, Capability, EthNetworkPrimitives, EthVersion}; @@ -568,18 +570,14 @@ mod tests { use reth_network_p2p::{bodies::client::BodiesClient, error::RequestError}; use reth_network_peers::PeerId; use reth_primitives::BlockBody; - use reth_provider::test_utils::NoopProvider; + use reth_storage_api::noop::NoopProvider; + use std::{ + future::poll_fn, + sync::{atomic::AtomicU64, Arc}, + }; use tokio::sync::mpsc; use tokio_stream::{wrappers::ReceiverStream, StreamExt}; - use crate::{ - discovery::Discovery, - fetch::StateFetcher, - peers::PeersManager, - state::{BlockNumReader, NetworkState}, - PeerRequest, - }; - /// Returns a testing instance of the [`NetworkState`]. 
fn state() -> NetworkState { let peers = PeersManager::default(); diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 7fd9f690fde..ddb49f33b89 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -10,7 +10,7 @@ use crate::{ }; use futures::{FutureExt, StreamExt}; use pin_project::pin_project; -use reth_chainspec::{Hardforks, MAINNET}; +use reth_chainspec::{ChainSpecProvider, Hardforks, MAINNET}; use reth_eth_wire::{ protocol::Protocol, DisconnectReason, EthNetworkPrimitives, HelloMessageWithProtocols, }; @@ -21,8 +21,9 @@ use reth_network_api::{ }; use reth_network_peers::PeerId; use reth_primitives::{PooledTransactionsElement, TransactionSigned}; -use reth_provider::{test_utils::NoopProvider, ChainSpecProvider}; -use reth_storage_api::{BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory}; +use reth_storage_api::{ + noop::NoopProvider, BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, +}; use reth_tasks::TokioTaskExecutor; use reth_tokio_util::EventStream; use reth_transaction_pool::{ diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index fefff2bac35..1046e5facf9 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1905,7 +1905,7 @@ mod tests { error::{RequestError, RequestResult}, sync::{NetworkSyncUpdater, SyncState}, }; - use reth_provider::test_utils::NoopProvider; + use reth_storage_api::noop::NoopProvider; use reth_transaction_pool::test_utils::{ testing_pool, MockTransaction, MockTransactionFactory, TestPool, }; diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index cc545b626a0..dfa4ff16046 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -1,7 +1,5 @@ //! 
Connection tests -use std::{net::SocketAddr, time::Duration}; - use alloy_node_bindings::Geth; use alloy_primitives::map::HashSet; use alloy_provider::{ext::AdminApi, ProviderBuilder}; @@ -24,9 +22,10 @@ use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, }; use reth_network_peers::{mainnet_nodes, NodeRecord, TrustedPeer}; -use reth_provider::test_utils::NoopProvider; +use reth_storage_api::noop::NoopProvider; use reth_transaction_pool::test_utils::testing_pool; use secp256k1::SecretKey; +use std::{net::SocketAddr, time::Duration}; use tokio::task; use url::Host; @@ -99,7 +98,7 @@ async fn test_already_connected() { let p1 = PeerConfig::default(); // initialize two peers with the same identifier - let p2 = PeerConfig::with_secret_key(client, secret_key); + let p2 = PeerConfig::with_secret_key(client.clone(), secret_key); let p3 = PeerConfig::with_secret_key(client, secret_key); net.extend_peer_with_config(vec![p1, p2, p3]).await.unwrap(); @@ -143,7 +142,7 @@ async fn test_get_peer() { let client = NoopProvider::default(); let p1 = PeerConfig::default(); - let p2 = PeerConfig::with_secret_key(client, secret_key); + let p2 = PeerConfig::with_secret_key(client.clone(), secret_key); let p3 = PeerConfig::with_secret_key(client, secret_key_1); net.extend_peer_with_config(vec![p1, p2, p3]).await.unwrap(); @@ -176,7 +175,7 @@ async fn test_get_peer_by_id() { let secret_key_1 = SecretKey::new(&mut rand::thread_rng()); let client = NoopProvider::default(); let p1 = PeerConfig::default(); - let p2 = PeerConfig::with_secret_key(client, secret_key); + let p2 = PeerConfig::with_secret_key(client.clone(), secret_key); let p3 = PeerConfig::with_secret_key(client, secret_key_1); net.extend_peer_with_config(vec![p1, p2, p3]).await.unwrap(); @@ -234,7 +233,7 @@ async fn test_connect_with_builder() { let client = NoopProvider::default(); let config = NetworkConfigBuilder::::new(secret_key) .discovery(discv4) - .build(client); + .build(client.clone()); let (handle, network, 
_, requests) = NetworkManager::new(config) .await .unwrap() @@ -272,7 +271,7 @@ async fn test_connect_to_trusted_peer() { let client = NoopProvider::default(); let config = NetworkConfigBuilder::::new(secret_key) .discovery(discv4) - .build(client); + .build(client.clone()); let transactions_manager_config = config.transactions_manager_config.clone(); let (handle, network, transactions, requests) = NetworkManager::new(config) .await diff --git a/crates/net/network/tests/it/session.rs b/crates/net/network/tests/it/session.rs index 71152c29bb8..53ab457eb0c 100644 --- a/crates/net/network/tests/it/session.rs +++ b/crates/net/network/tests/it/session.rs @@ -10,7 +10,7 @@ use reth_network_api::{ events::{PeerEvent, SessionInfo}, NetworkInfo, Peers, }; -use reth_provider::test_utils::NoopProvider; +use reth_storage_api::noop::NoopProvider; #[tokio::test(flavor = "multi_thread")] async fn test_session_established_with_highest_version() { diff --git a/crates/net/network/tests/it/startup.rs b/crates/net/network/tests/it/startup.rs index 862281ab1ff..43b6a29e21a 100644 --- a/crates/net/network/tests/it/startup.rs +++ b/crates/net/network/tests/it/startup.rs @@ -11,7 +11,7 @@ use reth_network::{ Discovery, NetworkConfigBuilder, NetworkManager, }; use reth_network_api::{NetworkInfo, PeersInfo}; -use reth_provider::test_utils::NoopProvider; +use reth_storage_api::noop::NoopProvider; use secp256k1::SecretKey; use tokio::net::TcpListener; diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 0a6341cc4b4..858c8e4c832 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -4,12 +4,12 @@ use crate::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, - StateProofProvider, 
StateProvider, StateRootProvider, StorageRootProvider, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, + StateProofProvider, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, + StorageRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, - BlockHashOrNumber, BlockId, + BlockHashOrNumber, BlockId, BlockNumberOrTag, }; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -23,7 +23,7 @@ use reth_primitives::{ use reth_primitives_traits::{Account, Bytecode, NodePrimitives, SealedHeader}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_errors::provider::ProviderResult; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, }; @@ -476,6 +476,58 @@ impl StateProvider for NoopProvider { } } +impl StateProviderFactory for NoopProvider { + fn latest(&self) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn state_by_block_number_or_tag( + &self, + number_or_tag: BlockNumberOrTag, + ) -> ProviderResult { + match number_or_tag { + BlockNumberOrTag::Latest => self.latest(), + BlockNumberOrTag::Finalized => { + // we can only get the finalized state by hash, not by num + let hash = + self.finalized_block_hash()?.ok_or(ProviderError::FinalizedBlockNotFound)?; + + // only look at historical state + self.history_by_block_hash(hash) + } + BlockNumberOrTag::Safe => { + // we can only get the safe state by hash, not by num + let hash = self.safe_block_hash()?.ok_or(ProviderError::SafeBlockNotFound)?; + + self.history_by_block_hash(hash) + } + BlockNumberOrTag::Earliest => self.history_by_block_number(0), + BlockNumberOrTag::Pending => self.pending(), + BlockNumberOrTag::Number(num) => self.history_by_block_number(num), + } + } + + fn history_by_block_number(&self, 
_block: BlockNumber) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn pending(&self) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn pending_state_by_hash(&self, _block_hash: B256) -> ProviderResult> { + Ok(Some(Box::new(self.clone()))) + } +} + // impl EvmEnvProvider for NoopProvider { // fn fill_env_at( // &self, From 634db30b6b4ef394314759863dc61d79f3fbb796 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Fri, 6 Dec 2024 20:28:40 +0700 Subject: [PATCH 910/970] perf(tx-pool): reuse write lock to insert txs batch (#12806) --- crates/transaction-pool/src/pool/mod.rs | 101 ++++++++++++------------ 1 file changed, 51 insertions(+), 50 deletions(-) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 044b192fe59..c13dca17de0 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -83,7 +83,7 @@ use crate::{ }; use alloy_primitives::{Address, TxHash, B256}; use best::BestTransactions; -use parking_lot::{Mutex, RwLock, RwLockReadGuard}; +use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; @@ -425,6 +425,7 @@ where /// come in through that function, either as a batch or `std::iter::once`. 
fn add_transaction( &self, + pool: &mut RwLockWriteGuard<'_, TxPool>, origin: TransactionOrigin, tx: TransactionValidationOutcome, ) -> PoolResult { @@ -458,7 +459,7 @@ where origin, }; - let added = self.pool.write().add_transaction(tx, balance, state_nonce)?; + let added = pool.add_transaction(tx, balance, state_nonce)?; let hash = *added.hash(); // transaction was successfully inserted into the pool @@ -521,33 +522,52 @@ where } /// Adds all transactions in the iterator to the pool, returning a list of results. + /// + /// Note: A large batch may lock the pool for a long time that blocks important operations + /// like updating the pool on canonical state changes. The caller should consider having + /// a max batch size to balance transaction insertions with other updates. pub fn add_transactions( &self, origin: TransactionOrigin, transactions: impl IntoIterator>, ) -> Vec> { - let mut added = - transactions.into_iter().map(|tx| self.add_transaction(origin, tx)).collect::>(); + // Add the transactions and enforce the pool size limits in one write lock + let (mut added, discarded) = { + let mut pool = self.pool.write(); + let added = transactions + .into_iter() + .map(|tx| self.add_transaction(&mut pool, origin, tx)) + .collect::>(); + + // Enforce the pool size limits if at least one transaction was added successfully + let discarded = if added.iter().any(Result::is_ok) { + pool.discard_worst() + } else { + Default::default() + }; - // If at least one transaction was added successfully, then we enforce the pool size limits. 
- let discarded = - if added.iter().any(Result::is_ok) { self.discard_worst() } else { Default::default() }; + (added, discarded) + }; - if discarded.is_empty() { - return added - } + if !discarded.is_empty() { + // Delete any blobs associated with discarded blob transactions + self.delete_discarded_blobs(discarded.iter()); - { - let mut listener = self.event_listener.write(); - discarded.iter().for_each(|tx| listener.discarded(tx)); - } + let discarded_hashes = + discarded.into_iter().map(|tx| *tx.hash()).collect::>(); + + { + let mut listener = self.event_listener.write(); + discarded_hashes.iter().for_each(|hash| listener.discarded(hash)); + } - // It may happen that a newly added transaction is immediately discarded, so we need to - // adjust the result here - for res in &mut added { - if let Ok(hash) = res { - if discarded.contains(hash) { - *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert)) + // A newly added transaction may be immediately discarded, so we need to + // adjust the result here + for res in &mut added { + if let Ok(hash) = res { + if discarded_hashes.contains(hash) { + *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert)) + } } } } @@ -883,20 +903,6 @@ where self.pool.read().is_exceeded() } - /// Enforces the size limits of pool and returns the discarded transactions if violated. - /// - /// If some of the transactions are blob transactions, they are also removed from the blob - /// store. 
- pub fn discard_worst(&self) -> HashSet { - let discarded = self.pool.write().discard_worst(); - - // delete any blobs associated with discarded blob transactions - self.delete_discarded_blobs(discarded.iter()); - - // then collect into tx hashes - discarded.into_iter().map(|tx| *tx.hash()).collect() - } - /// Inserts a blob transaction into the blob store fn insert_blob(&self, hash: TxHash, blob: BlobTransactionSidecar) { debug!(target: "txpool", "[{:?}] storing blob sidecar", hash); @@ -1305,23 +1311,18 @@ mod tests { } // Add the transaction to the pool with external origin and valid outcome. - test_pool - .add_transaction( - TransactionOrigin::External, - TransactionValidationOutcome::Valid { - balance: U256::from(1_000), - state_nonce: 0, - transaction: ValidTransaction::ValidWithSidecar { - transaction: tx, - sidecar: sidecar.clone(), - }, - propagate: true, + test_pool.add_transactions( + TransactionOrigin::External, + [TransactionValidationOutcome::Valid { + balance: U256::from(1_000), + state_nonce: 0, + transaction: ValidTransaction::ValidWithSidecar { + transaction: tx, + sidecar: sidecar.clone(), }, - ) - .unwrap(); - - // Evict the worst transactions from the pool. - test_pool.discard_worst(); + propagate: true, + }], + ); } // Assert that the size of the pool's blob component is equal to the maximum blob limit. 
From fdff4f18f251ce6ce56f03b31e4b31ddadf0b75a Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Fri, 6 Dec 2024 20:58:17 +0700 Subject: [PATCH 911/970] feat(`DbTx`): add `get_by_encoded_key` (#13171) --- crates/storage/db-api/src/mock.rs | 9 +++- crates/storage/db-api/src/transaction.rs | 11 +++- crates/storage/db/Cargo.toml | 5 ++ crates/storage/db/benches/get.rs | 52 +++++++++++++++++++ .../storage/db/src/implementation/mdbx/tx.rs | 9 +++- testing/ef-tests/src/models.rs | 9 ++-- 6 files changed, 88 insertions(+), 7 deletions(-) create mode 100644 crates/storage/db/benches/get.rs diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index e972821d8fe..5580727fdbe 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -7,7 +7,7 @@ use crate::{ ReverseWalker, Walker, }, database::Database, - table::{DupSort, Table, TableImporter}, + table::{DupSort, Encode, Table, TableImporter}, transaction::{DbTx, DbTxMut}, DatabaseError, }; @@ -49,6 +49,13 @@ impl DbTx for TxMock { Ok(None) } + fn get_by_encoded_key( + &self, + _key: &::Encoded, + ) -> Result, DatabaseError> { + Ok(None) + } + fn commit(self) -> Result { Ok(true) } diff --git a/crates/storage/db-api/src/transaction.rs b/crates/storage/db-api/src/transaction.rs index f39cf92fb61..6dc79670a65 100644 --- a/crates/storage/db-api/src/transaction.rs +++ b/crates/storage/db-api/src/transaction.rs @@ -1,6 +1,6 @@ use crate::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, - table::{DupSort, Table}, + table::{DupSort, Encode, Table}, DatabaseError, }; @@ -11,8 +11,15 @@ pub trait DbTx: Send + Sync { /// `DupCursor` type for this read-only transaction type DupCursor: DbDupCursorRO + DbCursorRO + Send + Sync; - /// Get value + /// Get value by an owned key fn get(&self, key: T::Key) -> Result, DatabaseError>; + /// Get value by a reference to the encoded key, especially useful for "raw" keys + /// 
that encode to themselves like Address and B256. Doesn't need to clone a + /// reference key like `get`. + fn get_by_encoded_key( + &self, + key: &::Encoded, + ) -> Result, DatabaseError>; /// Commit for read only transaction will consume and free transaction and allows /// freeing of memory pages fn commit(self) -> Result; diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 4a4eff47123..fd313a40ae5 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -129,3 +129,8 @@ harness = false name = "iai" required-features = ["test-utils"] harness = false + +[[bench]] +name = "get" +required-features = ["test-utils"] +harness = false diff --git a/crates/storage/db/benches/get.rs b/crates/storage/db/benches/get.rs new file mode 100644 index 00000000000..04eda02e05e --- /dev/null +++ b/crates/storage/db/benches/get.rs @@ -0,0 +1,52 @@ +#![allow(missing_docs)] + +use alloy_primitives::TxHash; +use criterion::{criterion_group, criterion_main, Criterion}; +use pprof::criterion::{Output, PProfProfiler}; +use reth_db::{test_utils::create_test_rw_db_with_path, Database, TransactionHashNumbers}; +use reth_db_api::transaction::DbTx; +use std::{fs, sync::Arc}; + +mod utils; +use utils::BENCH_DB_PATH; + +criterion_group! { + name = benches; + config = Criterion::default().with_profiler(PProfProfiler::new(1, Output::Flamegraph(None))); + targets = get +} +criterion_main!(benches); + +// Small benchmark showing that [get_by_encoded_key] is slightly faster than [get] +// for a reference key, as [get] requires copying or cloning the key first. +fn get(c: &mut Criterion) { + let mut group = c.benchmark_group("Get"); + + // Random keys to get + let mut keys = Vec::new(); + for _ in 0..10_000_000 { + let key = TxHash::random(); + keys.push(key); + } + + // We don't bother mock the DB to reduce noise from DB I/O, value decoding, etc. 
+ let _ = fs::remove_dir_all(BENCH_DB_PATH); + let db = Arc::try_unwrap(create_test_rw_db_with_path(BENCH_DB_PATH)).unwrap(); + let tx = db.tx().expect("tx"); + + group.bench_function("get", |b| { + b.iter(|| { + for key in &keys { + tx.get::(*key).unwrap(); + } + }) + }); + + group.bench_function("get_by_encoded_key", |b| { + b.iter(|| { + for key in &keys { + tx.get_by_encoded_key::(key).unwrap(); + } + }) + }); +} diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 2ff2789ea69..09be53a5ffb 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -283,8 +283,15 @@ impl DbTx for Tx { type DupCursor = Cursor; fn get(&self, key: T::Key) -> Result::Value>, DatabaseError> { + self.get_by_encoded_key::(&key.encode()) + } + + fn get_by_encoded_key( + &self, + key: &::Encoded, + ) -> Result, DatabaseError> { self.execute_with_operation_metric::(Operation::Get, None, |tx| { - tx.get(self.get_dbi::()?, key.encode().as_ref()) + tx.get(self.get_dbi::()?, key.as_ref()) .map_err(|e| DatabaseError::Read(e.into()))? .map(decode_one::) .transpose() diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 160b0ec1d0c..7f6c0cdae34 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -219,9 +219,12 @@ impl Account { /// /// In case of a mismatch, `Err(Error::Assertion)` is returned. 
pub fn assert_db(&self, address: Address, tx: &impl DbTx) -> Result<(), Error> { - let account = tx.get::(address)?.ok_or_else(|| { - Error::Assertion(format!("Expected account ({address}) is missing from DB: {self:?}")) - })?; + let account = + tx.get_by_encoded_key::(&address)?.ok_or_else(|| { + Error::Assertion(format!( + "Expected account ({address}) is missing from DB: {self:?}" + )) + })?; assert_equal(self.balance, account.balance, "Balance does not match")?; assert_equal(self.nonce.to(), account.nonce, "Nonce does not match")?; From cb3e9f8441272fe6e49a840dbaabe8d86175b8f8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 15:01:13 +0100 Subject: [PATCH 912/970] chore: remove some excessive allocs in hot path (#13176) --- crates/net/network/src/transactions/fetcher.rs | 11 ++--------- crates/net/network/src/transactions/mod.rs | 14 ++------------ 2 files changed, 4 insertions(+), 21 deletions(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 025ae36ea14..2fa900d416f 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -278,7 +278,6 @@ impl TransactionFetcher { + IntoIterator)>, ) -> RequestTxHashes { let mut acc_size_response = 0; - let hashes_from_announcement_len = hashes_from_announcement.len(); let mut hashes_from_announcement_iter = hashes_from_announcement.into_iter(); @@ -292,7 +291,7 @@ impl TransactionFetcher { acc_size_response = size; } - let mut surplus_hashes = RequestTxHashes::with_capacity(hashes_from_announcement_len - 1); + let mut surplus_hashes = RequestTxHashes::default(); // folds size based on expected response size and adds selected hashes to the request // list and the other hashes to the surplus list @@ -326,8 +325,6 @@ impl TransactionFetcher { } surplus_hashes.extend(hashes_from_announcement_iter.map(|(hash, _metadata)| hash)); - surplus_hashes.shrink_to_fit(); - 
hashes_to_request.shrink_to_fit(); surplus_hashes } @@ -432,8 +429,7 @@ impl TransactionFetcher { peers: &HashMap>, has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool, ) { - let init_capacity_req = approx_capacity_get_pooled_transactions_req_eth68(&self.info); - let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req); + let mut hashes_to_request = RequestTxHashes::default(); let is_session_active = |peer_id: &PeerId| peers.contains_key(peer_id); let mut search_durations = TxFetcherSearchDurations::default(); @@ -482,9 +478,6 @@ impl TransactionFetcher { search_durations.fill_request ); - // free unused memory - hashes_to_request.shrink_to_fit(); - self.update_pending_fetch_cache_search_metrics(search_durations); trace!(target: "net::tx", diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 1046e5facf9..83674c96c51 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -640,16 +640,8 @@ where return } - // load message version before announcement data type is destructed in packing - let msg_version = valid_announcement_data.msg_version(); - // - // demand recommended soft limit on response, however the peer may enforce an arbitrary - // limit on the response (2MB) - // - // request buffer is shrunk via call to pack request! 
- let init_capacity_req = - self.transaction_fetcher.approx_capacity_get_pooled_transactions_req(msg_version); - let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req); + let mut hashes_to_request = + RequestTxHashes::with_capacity(valid_announcement_data.len() / 4); let surplus_hashes = self.transaction_fetcher.pack_request(&mut hashes_to_request, valid_announcement_data); @@ -657,7 +649,6 @@ where trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), surplus_hashes=?*surplus_hashes, - %msg_version, %client, "some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes" ); @@ -668,7 +659,6 @@ where trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), hashes=?*hashes_to_request, - %msg_version, %client, "sending hashes in `GetPooledTransactions` request to peer's session" ); From e29b4eec482d6762a44df9782317f7e531867f6d Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 6 Dec 2024 14:02:03 +0000 Subject: [PATCH 913/970] fix(trie): delete self destructed accounts from sparse trie (#13168) --- crates/engine/tree/src/tree/root.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index bb936c28341..2d00feba50d 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -265,7 +265,7 @@ where trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); let destroyed = account.is_selfdestructed(); - let info = if account.is_empty() { None } else { Some(account.info.into()) }; + let info = if destroyed { None } else { Some(account.info.into()) }; hashed_state_update.accounts.insert(hashed_address, info); let mut changed_storage_iter = account From 806a1b1e88c6845a4b40f8021483735be03c56c3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 17:21:04 +0100 Subject: [PATCH 914/970] chore: use slice arg for tx decoding 
(#13181) --- crates/optimism/rpc/src/eth/transaction.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/transaction.rs | 2 +- crates/rpc/rpc-eth-types/src/utils.rs | 13 +++++++------ crates/rpc/rpc/src/eth/bundle.rs | 2 +- crates/rpc/rpc/src/eth/sim_bundle.rs | 5 ++--- crates/rpc/rpc/src/trace.rs | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index c1e0a730198..d455d8e897e 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -32,7 +32,7 @@ where /// /// Returns the hash of the transaction. async fn send_raw_transaction(&self, tx: Bytes) -> Result { - let recovered = recover_raw_transaction(tx.clone())?; + let recovered = recover_raw_transaction(&tx)?; let pool_transaction = ::Transaction::from_pooled(recovered); // On optimism, transactions are forwarded directly to the sequencer to be included in diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 968c49a1304..364ea27cc31 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -343,7 +343,7 @@ pub trait EthTransactions: LoadTransaction { tx: Bytes, ) -> impl Future> + Send { async move { - let recovered = recover_raw_transaction(tx)?; + let recovered = recover_raw_transaction(&tx)?; let pool_transaction = ::Transaction::from_pooled(recovered); diff --git a/crates/rpc/rpc-eth-types/src/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs index 64f159ea0e8..f12c819aea3 100644 --- a/crates/rpc/rpc-eth-types/src/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -1,22 +1,23 @@ //! 
Commonly used code snippets -use alloy_primitives::Bytes; +use super::{EthApiError, EthResult}; use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, RecoveredTx}; use reth_primitives_traits::SignedTransaction; use std::future::Future; -use super::{EthApiError, EthResult}; - /// Recovers a [`SignedTransaction`] from an enveloped encoded byte stream. /// +/// This is a helper function that returns the appropriate RPC-specific error if the input data is +/// malformed. +/// /// See [`alloy_eips::eip2718::Decodable2718::decode_2718`] -pub fn recover_raw_transaction(data: Bytes) -> EthResult> { +pub fn recover_raw_transaction(mut data: &[u8]) -> EthResult> { if data.is_empty() { return Err(EthApiError::EmptyRawTransactionData) } - let transaction = T::decode_2718(&mut data.as_ref()) - .map_err(|_| EthApiError::FailedToDecodeSignedTransaction)?; + let transaction = + T::decode_2718(&mut data).map_err(|_| EthApiError::FailedToDecodeSignedTransaction)?; transaction.try_into_ecrecovered().or(Err(EthApiError::InvalidTransactionSignature)) } diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 4866033c4bc..ba142651fd9 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -90,7 +90,7 @@ where let transactions = txs .into_iter() - .map(recover_raw_transaction::>) + .map(|tx| recover_raw_transaction::>(&tx)) .collect::, _>>()? 
.into_iter() .map(|tx| tx.to_components()) diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index f4e77b4fbe5..6cfeb0934f4 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -171,9 +171,8 @@ where while idx < body.len() { match &body[idx] { BundleItem::Tx { tx, can_revert } => { - let recovered_tx = - recover_raw_transaction::>(tx.clone()) - .map_err(EthApiError::from)?; + let recovered_tx = recover_raw_transaction::>(tx) + .map_err(EthApiError::from)?; let (tx, signer) = recovered_tx.to_components(); let tx: PoolConsensusTx = ::Transaction::pooled_into_consensus(tx); diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 44867f51378..5f1bbb7439d 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -117,7 +117,7 @@ where trace_types: HashSet, block_id: Option, ) -> Result { - let tx = recover_raw_transaction::>(tx)? + let tx = recover_raw_transaction::>(&tx)? 
.map_transaction(::Transaction::pooled_into_consensus); let (cfg, block, at) = self.eth_api().evm_env_at(block_id.unwrap_or_default()).await?; From 627ceae86b8c3d70eb272cac0b52ed9a5301d612 Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Fri, 6 Dec 2024 17:24:54 +0100 Subject: [PATCH 915/970] feat(db): initialize db with tables (#13130) Co-authored-by: Matthias Seitz --- crates/storage/db-api/src/table.rs | 9 +++++++ .../storage/db/src/implementation/mdbx/mod.rs | 19 ++++++++------ crates/storage/db/src/tables/mod.rs | 26 ++++++++++--------- 3 files changed, 34 insertions(+), 20 deletions(-) diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index acdc8efc78f..a4d3f87b40b 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -100,6 +100,15 @@ pub trait Table: Send + Sync + Debug + 'static { type Value: Value; } +/// Trait that provides object-safe access to the table's metadata. +pub trait TableInfo: Send + Sync + Debug + 'static { + /// The table's name. + fn name(&self) -> &'static str; + + /// Whether the table is a `DUPSORT` table. + fn is_dupsort(&self) -> bool; +} + /// Tuple with `T::Key` and `T::Value`. pub type TableRow = (::Key, ::Value); diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 8a6811b1539..8c3d3630889 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -3,9 +3,9 @@ use crate::{ lockfile::StorageLock, metrics::DatabaseEnvMetrics, - tables::{self, TableType, Tables}, + tables::{self, Tables}, utils::default_page_size, - DatabaseError, + DatabaseError, TableSet, }; use eyre::Context; use metrics::{gauge, Label}; @@ -444,15 +444,18 @@ impl DatabaseEnv { self } - /// Creates all the defined tables, if necessary. + /// Creates all the tables defined in [`Tables`], if necessary. 
pub fn create_tables(&self) -> Result<(), DatabaseError> { + self.create_tables_for::() + } + + /// Creates all the tables defined in the given [`TableSet`], if necessary. + pub fn create_tables_for(&self) -> Result<(), DatabaseError> { let tx = self.inner.begin_rw_txn().map_err(|e| DatabaseError::InitTx(e.into()))?; - for table in Tables::ALL { - let flags = match table.table_type() { - TableType::Table => DatabaseFlags::default(), - TableType::DupSort => DatabaseFlags::DUP_SORT, - }; + for table in TS::tables() { + let flags = + if table.is_dupsort() { DatabaseFlags::DUP_SORT } else { DatabaseFlags::default() }; tx.create_db(Some(table.name()), flags) .map_err(|e| DatabaseError::CreateTable(e.into()))?; diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 88cfdde44aa..961fd41e97c 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -29,7 +29,7 @@ use reth_db_api::{ AccountBeforeTx, ClientVersion, CompactU256, IntegerList, ShardedKey, StoredBlockBodyIndices, StoredBlockWithdrawals, }, - table::{Decode, DupSort, Encode, Table}, + table::{Decode, DupSort, Encode, Table, TableInfo}, }; use reth_primitives::{Receipt, StorageEntry, TransactionSigned}; use reth_primitives_traits::{Account, Bytecode}; @@ -101,10 +101,8 @@ pub trait TableViewer { /// General trait for defining the set of tables /// Used to initialize database pub trait TableSet { - /// Returns all the table names in the database. - fn table_names(&self) -> Vec<&'static str>; - /// Returns `true` if the table at the given index is a `DUPSORT` table. - fn is_dupsort(&self, idx: usize) -> bool; + /// Returns an iterator over the tables + fn tables() -> Box>>; } /// Defines all the tables in the database. @@ -252,15 +250,19 @@ macro_rules! 
tables { } } - impl TableSet for Tables { - fn table_names(&self) -> Vec<&'static str> { - //vec![$(table_names::$name,)*] - Self::ALL.iter().map(|t| t.name()).collect() + impl TableInfo for Tables { + fn name(&self) -> &'static str { + self.name() } - fn is_dupsort(&self, idx: usize) -> bool { - let table: Self = self.table_names()[idx].parse().expect("should be valid table name"); - table.is_dupsort() + fn is_dupsort(&self) -> bool { + self.is_dupsort() + } + } + + impl TableSet for Tables { + fn tables() -> Box>> { + Box::new(Self::ALL.iter().map(|table| Box::new(*table) as Box)) } } From c9c6eb5aaa07e42033da9e34b1279cc7b1fb4e5a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 18:08:12 +0100 Subject: [PATCH 916/970] chore: rm unused evm provider fn (#13184) --- crates/evm/src/provider.rs | 12 ------------ .../provider/src/providers/blockchain_provider.rs | 13 ------------- .../storage/provider/src/providers/consistent.rs | 15 --------------- .../provider/src/providers/database/mod.rs | 13 ------------- .../provider/src/providers/database/provider.rs | 15 --------------- crates/storage/provider/src/providers/mod.rs | 13 ------------- crates/storage/provider/src/test_utils/mock.rs | 13 ------------- crates/storage/provider/src/test_utils/noop.rs | 13 ------------- 8 files changed, 107 deletions(-) diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index ec2f1803da0..6ef4cefbb48 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -13,18 +13,6 @@ use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; /// usually stored on disk. #[auto_impl::auto_impl(&, Arc)] pub trait EvmEnvProvider: Send + Sync { - /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given - /// [BlockHashOrNumber]. 
- fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv

; - /// Fills the default [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the /// given block header. fn env_with_header( diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 521e1d959b3..68f1498eccb 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -493,19 +493,6 @@ impl StageCheckpointReader for BlockchainProvider2 { } impl EvmEnvProvider> for BlockchainProvider2 { - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - self.consistent_provider()?.fill_env_at(cfg, block_env, at, evm_config) - } - fn fill_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 9c7973b14eb..927a78fe19e 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1231,21 +1231,6 @@ impl StageCheckpointReader for ConsistentProvider { } impl EvmEnvProvider> for ConsistentProvider { - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - self.fill_env_with_header(cfg, block_env, &header, evm_config) - } - fn fill_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 39230e253ed..85b734ef661 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -590,19 +590,6 @@ impl StageCheckpointReader for ProviderFactory { } impl EvmEnvProvider> for ProviderFactory { - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - self.provider()?.fill_env_at(cfg, block_env, at, evm_config) - } - fn fill_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 9dddbb9c0a7..05e4ed4c0c0 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1638,21 +1638,6 @@ impl> Withdrawals impl EvmEnvProvider> for DatabaseProvider { - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - self.fill_env_with_header(cfg, block_env, &header, evm_config) - } - fn fill_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 44cd5554bee..b4a99541a89 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -591,19 +591,6 @@ impl StageCheckpointReader for BlockchainProvider { } impl EvmEnvProvider for BlockchainProvider { - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - self.database.provider()?.fill_env_at(cfg, block_env, at, evm_config) - } - fn fill_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 6815bbcb123..abe1096a1bc 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -717,19 +717,6 @@ impl StateProvider for MockEthProvider { } impl EvmEnvProvider for MockEthProvider { - fn fill_env_at( - &self, - _cfg: &mut CfgEnvWithHandlerCfg, - _block_env: &mut BlockEnv, - _at: BlockHashOrNumber, - _evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - Ok(()) - } - fn fill_env_with_header( &self, _cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index a33e4159be2..3846313b9f4 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -436,19 +436,6 @@ impl StateProvider for NoopProvider { } impl EvmEnvProvider for NoopProvider { - fn fill_env_at( - &self, - _cfg: &mut CfgEnvWithHandlerCfg, - _block_env: &mut BlockEnv, - _at: BlockHashOrNumber, - _evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - Ok(()) - } - fn fill_env_with_header( &self, _cfg: &mut CfgEnvWithHandlerCfg, From 55f931d0b9c6e188b441d80f8515a0a5ad680586 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 19:28:27 +0100 Subject: [PATCH 917/970] chore: introduce ethereum-primitives crate (#13185) --- .github/assets/check_rv32imac.sh | 1 + Cargo.lock | 4 ++++ Cargo.toml | 2 ++ crates/ethereum/primitives/Cargo.toml | 18 ++++++++++++++++++ crates/ethereum/primitives/src/lib.rs | 10 ++++++++++ 5 files changed, 35 insertions(+) create mode 100644 crates/ethereum/primitives/Cargo.toml create mode 100644 crates/ethereum/primitives/src/lib.rs diff --git a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh index 0556fa31dea..ab1151bfb0c 100755 --- a/.github/assets/check_rv32imac.sh +++ b/.github/assets/check_rv32imac.sh @@ -5,6 +5,7 @@ set +e # Disable immediate exit on error crates_to_check=( reth-codecs-derive reth-ethereum-forks + reth-ethereum-primitives reth-primitives-traits reth-optimism-forks # reth-evm diff --git a/Cargo.lock b/Cargo.lock index 37d05d1e0c4..cd893495bc2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7568,6 +7568,10 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-ethereum-primitives" +version = "1.1.2" + [[package]] name = "reth-etl" version = "1.1.2" diff --git a/Cargo.toml b/Cargo.toml index 4233750d076..08fa42e1046 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,6 +39,7 @@ members = [ "crates/ethereum/evm", "crates/ethereum/node", "crates/ethereum/payload/", + "crates/ethereum/primitives/", "crates/etl/", "crates/evm/", "crates/evm/execution-errors", @@ -341,6 +342,7 @@ reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-ethereum-forks = { path = "crates/ethereum-forks", default-features = false } reth-ethereum-payload-builder = { path = "crates/ethereum/payload" } +reth-ethereum-primitives = { path = 
"crates/ethereum/primitives", default-features = false } reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } diff --git a/crates/ethereum/primitives/Cargo.toml b/crates/ethereum/primitives/Cargo.toml new file mode 100644 index 00000000000..a016d7dd652 --- /dev/null +++ b/crates/ethereum/primitives/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "reth-ethereum-primitives" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Ethereum primitive types" + +[lints] +workspace = true + +[dependencies] + +[features] +default = ["std"] +std = [] \ No newline at end of file diff --git a/crates/ethereum/primitives/src/lib.rs b/crates/ethereum/primitives/src/lib.rs new file mode 100644 index 00000000000..78bb5d75f19 --- /dev/null +++ b/crates/ethereum/primitives/src/lib.rs @@ -0,0 +1,10 @@ +//! Standalone crate for ethereum-specific Reth primitive types. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(not(feature = "std"), no_std)] From cd13bd91cd8caa6b8c62fff904c0ce84e3fa455e Mon Sep 17 00:00:00 2001 From: Harsh Vardhan Roy <42067944+royvardhan@users.noreply.github.com> Date: Sat, 7 Dec 2024 00:42:44 +0530 Subject: [PATCH 918/970] feat: unify ReceiptWithBloom from Alloy (#13088) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 2 + crates/ethereum/consensus/src/validation.rs | 5 +- crates/net/eth-wire-types/src/receipts.rs | 10 +- crates/net/network/src/message.rs | 2 +- crates/optimism/consensus/Cargo.toml | 1 + crates/optimism/consensus/src/proof.rs | 29 +- crates/optimism/consensus/src/validation.rs | 3 +- crates/primitives/src/lib.rs | 6 +- crates/primitives/src/proofs.rs | 23 +- crates/primitives/src/receipt.rs | 549 +++++++------------- crates/rpc/rpc-eth-types/src/simulate.rs | 2 +- crates/trie/sparse/src/state.rs | 1 - crates/trie/trie/Cargo.toml | 2 + crates/trie/trie/benches/trie_root.rs | 22 +- 14 files changed, 236 insertions(+), 421 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd893495bc2..fb3910d40e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8351,6 +8351,7 @@ name = "reth-optimism-consensus" version = "1.1.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-trie", "reth-chainspec", @@ -9455,6 +9456,7 @@ name = "reth-trie" version = "1.1.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-trie", diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index f990ecc57d8..c339c8d25c6 100644 --- 
a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,3 +1,4 @@ +use alloy_consensus::{proofs::calculate_receipt_root, TxReceipt}; use alloy_eips::eip7685::Requests; use alloy_primitives::{Bloom, B256}; use reth_chainspec::EthereumHardforks; @@ -62,10 +63,10 @@ fn verify_receipts( ) -> Result<(), ConsensusError> { // Calculate receipts root. let receipts_with_bloom = receipts.iter().map(Receipt::with_bloom_ref).collect::>(); - let receipts_root = reth_primitives::proofs::calculate_receipt_root_ref(&receipts_with_bloom); + let receipts_root = calculate_receipt_root(&receipts_with_bloom); // Calculate header logs bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom()); compare_receipts_root_and_logs_bloom( receipts_root, diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index ca5e85a146f..2bad4287f2e 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -3,7 +3,7 @@ use alloy_primitives::B256; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::ReceiptWithBloom; +use reth_primitives::{Receipt, ReceiptWithBloom}; /// A request for transaction receipts from the given block hashes. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -23,7 +23,7 @@ pub struct GetReceipts( #[add_arbitrary_tests(rlp)] pub struct Receipts( /// Each receipt hash should correspond to a block hash in the request. 
- pub Vec>, + pub Vec>>, ); #[cfg(test)] @@ -37,7 +37,7 @@ mod tests { fn roundtrip_eip1559() { let receipts = Receipts(vec![vec![ReceiptWithBloom { receipt: Receipt { tx_type: TxType::Eip1559, ..Default::default() }, - bloom: Default::default(), + logs_bloom: Default::default(), }]]); let mut out = vec![]; @@ -108,7 +108,7 @@ mod tests { success: false, ..Default::default() }, - bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), }, ]]), }; @@ -145,7 +145,7 @@ mod tests { success: false, ..Default::default() }, - bloom: 
hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), }, ], ]), diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index e88ccb54c36..ff5093b6732 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -150,7 +150,7 @@ pub enum PeerResponseResult { /// Represents a result containing node data or an error. NodeData(RequestResult>), /// Represents a result containing receipts or an error. 
- Receipts(RequestResult>>), + Receipts(RequestResult>>>), } // === impl PeerResponseResult === diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index faece6eacf8..4f4868a454d 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -26,6 +26,7 @@ reth-optimism-chainspec.workspace = true reth-optimism-primitives = { workspace = true, features = ["serde"] } # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-trie.workspace = true diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs index 18e64a467ff..df0669568b3 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -1,14 +1,15 @@ //! Helper function for Receipt root calculation for Optimism hardforks. +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_trie::root::ordered_trie_root_with_encoder; use reth_chainspec::ChainSpec; use reth_optimism_forks::OpHardfork; -use reth_primitives::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; +use reth_primitives::{Receipt, ReceiptWithBloom}; /// Calculates the receipt root for a header. pub(crate) fn calculate_receipt_root_optimism( - receipts: &[ReceiptWithBloom], + receipts: &[ReceiptWithBloom], chain_spec: &ChainSpec, timestamp: u64, ) -> B256 { @@ -29,12 +30,10 @@ pub(crate) fn calculate_receipt_root_optimism( }) .collect::>(); - return ordered_trie_root_with_encoder(receipts.as_slice(), |r, buf| { - r.encode_inner(buf, false) - }) + return ordered_trie_root_with_encoder(receipts.as_slice(), |r, buf| r.encode_2718(buf)) } - ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) + ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_2718(buf)) } /// Calculates the receipt root for a header for the reference type of [Receipt]. 
@@ -63,12 +62,12 @@ pub fn calculate_receipt_root_no_memo_optimism( .collect::>(); return ordered_trie_root_with_encoder(&receipts, |r, buf| { - ReceiptWithBloomRef::from(r).encode_inner(buf, false) + r.with_bloom_ref().encode_2718(buf); }) } ordered_trie_root_with_encoder(receipts, |r, buf| { - ReceiptWithBloomRef::from(*r).encode_inner(buf, false) + r.with_bloom_ref().encode_2718(buf); }) } @@ -123,7 +122,7 @@ mod tests { deposit_nonce: Some(4012991u64), deposit_receipt_version: None, }, - bloom: Bloom(hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into()), + logs_bloom: Bloom(hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into()), }, // 0x2f433586bae30573c393adfa02bc81d2a1888a3d6c9869f473fb57245166bd9a ReceiptWithBloom { @@ -169,7 +168,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: 
Bloom(hex!("00001000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000010000").into()), + logs_bloom: Bloom(hex!("00001000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000010000").into()), }, // 0x6c33676e8f6077f46a62eabab70bc6d1b1b18a624b0739086d77093a1ecf8266 ReceiptWithBloom { @@ -211,7 +210,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: Bloom(hex!("00000000000000000000200000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000020000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000000000").into()), + logs_bloom: 
Bloom(hex!("00000000000000000000200000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000020000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000000000").into()), }, // 0x4d3ecbef04ba7ce7f5ab55be0c61978ca97c117d7da448ed9771d4ff0c720a3f ReceiptWithBloom { @@ -283,7 +282,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: Bloom(hex!("00200000000000000000000080000000000000000000000000040000100004000000000000000000000000100000000000000000000000000000100000000000000000000000000002000008000000200000000200000000020000000000000040000000000000000400000200000000000000000000000000000010000000000400000000010400000000000000000000000000002000c80000004080002000000000000000400200000000800000000000000000000000000000000000000000000002000000000000000000000000000000000100001000000000000000000000002000000000000000000000010000000000000000000000800000800000").into()), + logs_bloom: Bloom(hex!("00200000000000000000000080000000000000000000000000040000100004000000000000000000000000100000000000000000000000000000100000000000000000000000000002000008000000200000000200000000020000000000000040000000000000000400000200000000000000000000000000000010000000000400000000010400000000000000000000000000002000c80000004080002000000000000000400200000000800000000000000000000000000000000000000000000002000000000000000000000000000000000100001000000000000000000000002000000000000000000000010000000000000000000000800000800000").into()), }, // 0xf738af5eb00ba23dbc1be2dbce41dbc0180f0085b7fb46646e90bf737af90351 ReceiptWithBloom { @@ -325,7 +324,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: 
Bloom(hex!("00000000000000000000000000000000400000000000000000000000000000000000004000000000000001000000000000000002000000000100000000000000000000000000000000000008000000000000000000000000000000000000000004000000020000000000000000000800000000000000000000000010200100200008000002000000000000000000800000000000000000000002000000000000000000000000000000080000000000000000000000004000000000000000000000000002000000000000000000000000000000000000200000000000000020002000000000000000002000000000000000000000000000000000000000000000").into()), + logs_bloom: Bloom(hex!("00000000000000000000000000000000400000000000000000000000000000000000004000000000000001000000000000000002000000000100000000000000000000000000000000000008000000000000000000000000000000000000000004000000020000000000000000000800000000000000000000000010200100200008000002000000000000000000800000000000000000000002000000000000000000000000000000080000000000000000000000004000000000000000000000000002000000000000000000000000000000000000200000000000000020002000000000000000002000000000000000000000000000000000000000000000").into()), }, ]; let root = calculate_receipt_root_optimism(&receipts, BASE_SEPOLIA.as_ref(), case.1); @@ -339,7 +338,7 @@ mod tests { address: Address::ZERO, data: LogData::new_unchecked(vec![], Default::default()), }]; - let bloom = bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); + let logs_bloom = 
bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); let receipt = ReceiptWithBloom { receipt: Receipt { tx_type: TxType::Eip2930, @@ -349,7 +348,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom, + logs_bloom, }; let receipt = vec![receipt]; let root = calculate_receipt_root_optimism(&receipt, BASE_SEPOLIA.as_ref(), 0); diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index 3a76ec13854..5290603e7b8 100644 --- a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -1,4 +1,5 @@ use crate::proof::calculate_receipt_root_optimism; +use alloy_consensus::TxReceipt; use alloy_primitives::{Bloom, B256}; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; @@ -57,7 +58,7 @@ fn verify_receipts( calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); // Calculate header logs bloom. 
- let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom()); compare_receipts_root_and_logs_bloom( receipts_root, diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 2844c9397b8..edbc73a9362 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -40,9 +40,7 @@ pub use block::{ }; #[cfg(feature = "reth-codec")] pub use compression::*; -pub use receipt::{ - gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, -}; +pub use receipt::{gas_spent_by_transactions, Receipt, Receipts}; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, LogData, NodePrimitives, SealedHeader, StorageEntry, @@ -56,6 +54,8 @@ pub use transaction::{ TransactionSigned, TransactionSignedEcRecovered, TxType, }; +pub use alloy_consensus::ReceiptWithBloom; + // Re-exports pub use reth_ethereum_forks::*; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 2a1d5b6982b..4711da0934c 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,9 +1,12 @@ //! Helper function for calculating Merkle proofs and hashes. -use crate::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; +use crate::Receipt; +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_trie::root::ordered_trie_root_with_encoder; +pub use alloy_consensus::proofs::calculate_receipt_root; + /// Calculate a transaction root. /// /// `(rlp(index), encoded(tx))` pairs. @@ -18,23 +21,11 @@ pub use alloy_consensus::proofs::calculate_withdrawals_root; #[doc(inline)] pub use alloy_consensus::proofs::calculate_ommers_root; -/// Calculates the receipt root for a header. 
-pub fn calculate_receipt_root(receipts: &[ReceiptWithBloom]) -> B256 { - ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) -} - -/// Calculates the receipt root for a header. -pub fn calculate_receipt_root_ref(receipts: &[ReceiptWithBloomRef<'_>]) -> B256 { - ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) -} - /// Calculates the receipt root for a header for the reference type of [Receipt]. /// /// NOTE: Prefer [`calculate_receipt_root`] if you have log blooms memoized. pub fn calculate_receipt_root_no_memo(receipts: &[&Receipt]) -> B256 { - ordered_trie_root_with_encoder(receipts, |r, buf| { - ReceiptWithBloomRef::from(*r).encode_inner(buf, false) - }) + ordered_trie_root_with_encoder(receipts, |r, buf| r.with_bloom_ref().encode_2718(buf)) } #[cfg(test)] @@ -67,6 +58,8 @@ mod tests { #[cfg(not(feature = "optimism"))] #[test] fn check_receipt_root_optimism() { + use alloy_consensus::ReceiptWithBloom; + let logs = vec![Log { address: Address::ZERO, data: LogData::new_unchecked(vec![], Default::default()), @@ -79,7 +72,7 @@ mod tests { cumulative_gas_used: 102068, logs, }, - bloom, + logs_bloom: bloom, }; let receipt = vec![receipt]; let root = calculate_receipt_root(&receipt); diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 2e8e269e711..419c36c2080 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,15 +1,13 @@ use alloc::{vec, vec::Vec}; -use core::cmp::Ordering; use reth_primitives_traits::InMemorySize; use alloy_consensus::{ - constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, - Eip658Value, TxReceipt, Typed2718, + Eip2718EncodableReceipt, Eip658Value, ReceiptWithBloom, RlpDecodableReceipt, + RlpEncodableReceipt, TxReceipt, Typed2718, }; -use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Bloom, Log, B256}; -use alloy_rlp::{length_of_length, Decodable, Encodable, 
RlpDecodable, RlpEncodable}; -use bytes::{Buf, BufMut}; +use alloy_rlp::{Decodable, Encodable, Header, RlpDecodable, RlpEncodable}; +use bytes::BufMut; use derive_more::{DerefMut, From, IntoIterator}; use reth_primitives_traits::receipt::ReceiptExt; use serde::{Deserialize, Serialize}; @@ -61,15 +59,180 @@ impl Receipt { /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] container /// type. - pub fn with_bloom(self) -> ReceiptWithBloom { + pub fn with_bloom(self) -> ReceiptWithBloom { self.into() } - /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloomRef`] + /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] /// container type. - pub fn with_bloom_ref(&self) -> ReceiptWithBloomRef<'_> { + pub fn with_bloom_ref(&self) -> ReceiptWithBloom<&Self> { self.into() } + + /// Returns length of RLP-encoded receipt fields with the given [`Bloom`] without an RLP header. + pub fn rlp_encoded_fields_length(&self, bloom: &Bloom) -> usize { + let len = self.success.length() + + self.cumulative_gas_used.length() + + bloom.length() + + self.logs.length(); + + #[cfg(feature = "optimism")] + if self.tx_type == TxType::Deposit { + let mut len = len; + + if let Some(deposit_nonce) = self.deposit_nonce { + len += deposit_nonce.length(); + } + if let Some(deposit_receipt_version) = self.deposit_receipt_version { + len += deposit_receipt_version.length(); + } + + return len + } + + len + } + + /// RLP-encodes receipt fields with the given [`Bloom`] without an RLP header. 
+ pub fn rlp_encode_fields(&self, bloom: &Bloom, out: &mut dyn BufMut) { + self.success.encode(out); + self.cumulative_gas_used.encode(out); + bloom.encode(out); + self.logs.encode(out); + + #[cfg(feature = "optimism")] + if self.tx_type == TxType::Deposit { + if let Some(nonce) = self.deposit_nonce { + nonce.encode(out); + } + if let Some(version) = self.deposit_receipt_version { + version.encode(out); + } + } + } + + /// Returns RLP header for inner encoding. + pub fn rlp_header_inner(&self, bloom: &Bloom) -> Header { + Header { list: true, payload_length: self.rlp_encoded_fields_length(bloom) } + } + + fn decode_receipt_with_bloom( + buf: &mut &[u8], + tx_type: TxType, + ) -> alloy_rlp::Result> { + let b = &mut &**buf; + let rlp_head = alloy_rlp::Header::decode(b)?; + if !rlp_head.list { + return Err(alloy_rlp::Error::UnexpectedString) + } + let started_len = b.len(); + + let success = Decodable::decode(b)?; + let cumulative_gas_used = Decodable::decode(b)?; + let bloom = Decodable::decode(b)?; + let logs = Decodable::decode(b)?; + + let receipt = match tx_type { + #[cfg(feature = "optimism")] + TxType::Deposit => { + let remaining = |b: &[u8]| rlp_head.payload_length - (started_len - b.len()) > 0; + let deposit_nonce = remaining(b).then(|| Decodable::decode(b)).transpose()?; + let deposit_receipt_version = + remaining(b).then(|| Decodable::decode(b)).transpose()?; + + Self { + tx_type, + success, + cumulative_gas_used, + logs, + deposit_nonce, + deposit_receipt_version, + } + } + _ => Self { + tx_type, + success, + cumulative_gas_used, + logs, + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + }, + }; + + let this = ReceiptWithBloom { receipt, logs_bloom: bloom }; + let consumed = started_len - b.len(); + if consumed != rlp_head.payload_length { + return Err(alloy_rlp::Error::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + *buf = *b; + Ok(this) + } +} + 
+impl Eip2718EncodableReceipt for Receipt { + fn eip2718_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { + self.rlp_header_inner(bloom).length_with_payload() + + !matches!(self.tx_type, TxType::Legacy) as usize // account for type prefix + } + + fn eip2718_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { + if !matches!(self.tx_type, TxType::Legacy) { + out.put_u8(self.tx_type as u8); + } + self.rlp_header_inner(bloom).encode(out); + self.rlp_encode_fields(bloom, out); + } +} + +impl RlpEncodableReceipt for Receipt { + fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { + let mut len = self.eip2718_encoded_length_with_bloom(bloom); + if !matches!(self.tx_type, TxType::Legacy) { + len += Header { + list: false, + payload_length: self.eip2718_encoded_length_with_bloom(bloom), + } + .length(); + } + + len + } + + fn rlp_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { + if !matches!(self.tx_type, TxType::Legacy) { + Header { list: false, payload_length: self.eip2718_encoded_length_with_bloom(bloom) } + .encode(out); + } + self.eip2718_encode_with_bloom(bloom, out); + } +} + +impl RlpDecodableReceipt for Receipt { + fn rlp_decode_with_bloom(buf: &mut &[u8]) -> alloy_rlp::Result> { + let header_buf = &mut &**buf; + let header = Header::decode(header_buf)?; + + if header.list { + return Self::decode_receipt_with_bloom(buf, TxType::Legacy); + } + + *buf = *header_buf; + + let remaining = buf.len(); + let tx_type = TxType::decode(buf)?; + let this = Self::decode_receipt_with_bloom(buf, tx_type)?; + + if buf.len() + header.payload_length != remaining { + return Err(alloy_rlp::Error::UnexpectedLength); + } + + Ok(this) + } } impl TxReceipt for Receipt { @@ -183,51 +346,12 @@ impl FromIterator>> for Receipts { } } -impl From for ReceiptWithBloom { - fn from(receipt: Receipt) -> Self { - let bloom = receipt.bloom_slow(); - Self { receipt, bloom } - } -} - impl Default for Receipts { fn default() -> Self { Self { 
receipt_vec: Vec::new() } } } -/// [`Receipt`] with calculated bloom filter. -#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -pub struct ReceiptWithBloom { - /// Bloom filter build from logs. - pub bloom: Bloom, - /// Main receipt body - pub receipt: Receipt, -} - -impl ReceiptWithBloom { - /// Create new [`ReceiptWithBloom`] - pub const fn new(receipt: Receipt, bloom: Bloom) -> Self { - Self { receipt, bloom } - } - - /// Consume the structure, returning only the receipt - pub fn into_receipt(self) -> Receipt { - self.receipt - } - - /// Consume the structure, returning the receipt and the bloom filter - pub fn into_components(self) -> (Receipt, Bloom) { - (self.receipt, self.bloom) - } - - #[inline] - const fn as_encoder(&self) -> ReceiptWithBloomEncoder<'_> { - ReceiptWithBloomEncoder { receipt: &self.receipt, bloom: &self.bloom } - } -} - #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Receipt { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -260,317 +384,10 @@ impl<'a> arbitrary::Arbitrary<'a> for Receipt { } } -impl Encodable2718 for ReceiptWithBloom { - fn type_flag(&self) -> Option { - match self.receipt.tx_type { - TxType::Legacy => None, - tx_type => Some(tx_type as u8), - } - } - - fn encode_2718_len(&self) -> usize { - let encoder = self.as_encoder(); - match self.receipt.tx_type { - TxType::Legacy => encoder.receipt_length(), - _ => 1 + encoder.receipt_length(), // 1 byte for the type prefix - } - } - - /// Encodes the receipt into its "raw" format. - /// This format is also referred to as "binary" encoding. - /// - /// For legacy receipts, it encodes the RLP of the receipt into the buffer: - /// `rlp([status, cumulativeGasUsed, logsBloom, logs])` as per EIP-2718. 
- /// For EIP-2718 typed transactions, it encodes the type of the transaction followed by the rlp - /// of the receipt: - /// - EIP-1559, 2930 and 4844 transactions: `tx-type || rlp([status, cumulativeGasUsed, - /// logsBloom, logs])` - fn encode_2718(&self, out: &mut dyn BufMut) { - self.encode_inner(out, false) - } - - fn encoded_2718(&self) -> Vec { - let mut out = vec![]; - self.encode_2718(&mut out); - out - } -} - -impl ReceiptWithBloom { - /// Encode receipt with or without the header data. - pub fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { - self.as_encoder().encode_inner(out, with_header) - } - - /// Decodes the receipt payload - fn decode_receipt(buf: &mut &[u8], tx_type: TxType) -> alloy_rlp::Result { - let b = &mut &**buf; - let rlp_head = alloy_rlp::Header::decode(b)?; - if !rlp_head.list { - return Err(alloy_rlp::Error::UnexpectedString) - } - let started_len = b.len(); - - let success = alloy_rlp::Decodable::decode(b)?; - let cumulative_gas_used = alloy_rlp::Decodable::decode(b)?; - let bloom = Decodable::decode(b)?; - let logs = alloy_rlp::Decodable::decode(b)?; - - let receipt = match tx_type { - #[cfg(feature = "optimism")] - TxType::Deposit => { - let remaining = |b: &[u8]| rlp_head.payload_length - (started_len - b.len()) > 0; - let deposit_nonce = - remaining(b).then(|| alloy_rlp::Decodable::decode(b)).transpose()?; - let deposit_receipt_version = - remaining(b).then(|| alloy_rlp::Decodable::decode(b)).transpose()?; - - Receipt { - tx_type, - success, - cumulative_gas_used, - logs, - deposit_nonce, - deposit_receipt_version, - } - } - _ => Receipt { - tx_type, - success, - cumulative_gas_used, - logs, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - }; - - let this = Self { receipt, bloom }; - let consumed = started_len - b.len(); - if consumed != rlp_head.payload_length { - return Err(alloy_rlp::Error::ListLengthMismatch { - expected: 
rlp_head.payload_length, - got: consumed, - }) - } - *buf = *b; - Ok(this) - } -} - -impl Encodable for ReceiptWithBloom { - fn encode(&self, out: &mut dyn BufMut) { - self.encode_inner(out, true) - } - fn length(&self) -> usize { - self.as_encoder().length() - } -} - -impl Decodable for ReceiptWithBloom { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - // a receipt is either encoded as a string (non legacy) or a list (legacy). - // We should not consume the buffer if we are decoding a legacy receipt, so let's - // check if the first byte is between 0x80 and 0xbf. - let rlp_type = *buf - .first() - .ok_or(alloy_rlp::Error::Custom("cannot decode a receipt from empty bytes"))?; - - match rlp_type.cmp(&alloy_rlp::EMPTY_LIST_CODE) { - Ordering::Less => { - // strip out the string header - let _header = alloy_rlp::Header::decode(buf)?; - let receipt_type = *buf.first().ok_or(alloy_rlp::Error::Custom( - "typed receipt cannot be decoded from an empty slice", - ))?; - match receipt_type { - EIP2930_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip2930) - } - EIP1559_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip1559) - } - EIP4844_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip4844) - } - EIP7702_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip7702) - } - #[cfg(feature = "optimism")] - op_alloy_consensus::DEPOSIT_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Deposit) - } - _ => Err(alloy_rlp::Error::Custom("invalid receipt type")), - } - } - Ordering::Equal => { - Err(alloy_rlp::Error::Custom("an empty list is not a valid receipt encoding")) - } - Ordering::Greater => Self::decode_receipt(buf, TxType::Legacy), - } - } -} - -/// [`Receipt`] reference type with calculated bloom filter. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ReceiptWithBloomRef<'a> { - /// Bloom filter build from logs. 
- pub bloom: Bloom, - /// Main receipt body - pub receipt: &'a Receipt, -} - -impl<'a> ReceiptWithBloomRef<'a> { - /// Create new [`ReceiptWithBloomRef`] - pub const fn new(receipt: &'a Receipt, bloom: Bloom) -> Self { - Self { receipt, bloom } - } - - /// Encode receipt with or without the header data. - pub fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { - self.as_encoder().encode_inner(out, with_header) - } - - #[inline] - const fn as_encoder(&self) -> ReceiptWithBloomEncoder<'_> { - ReceiptWithBloomEncoder { receipt: self.receipt, bloom: &self.bloom } - } -} - -impl Encodable for ReceiptWithBloomRef<'_> { - fn encode(&self, out: &mut dyn BufMut) { - self.as_encoder().encode_inner(out, true) - } - fn length(&self) -> usize { - self.as_encoder().length() - } -} - -impl<'a> From<&'a Receipt> for ReceiptWithBloomRef<'a> { - fn from(receipt: &'a Receipt) -> Self { - let bloom = receipt.bloom_slow(); - ReceiptWithBloomRef { receipt, bloom } - } -} - -struct ReceiptWithBloomEncoder<'a> { - bloom: &'a Bloom, - receipt: &'a Receipt, -} - -impl ReceiptWithBloomEncoder<'_> { - /// Returns the rlp header for the receipt payload. - fn receipt_rlp_header(&self) -> alloy_rlp::Header { - let mut rlp_head = alloy_rlp::Header { list: true, payload_length: 0 }; - - rlp_head.payload_length += self.receipt.success.length(); - rlp_head.payload_length += self.receipt.cumulative_gas_used.length(); - rlp_head.payload_length += self.bloom.length(); - rlp_head.payload_length += self.receipt.logs.length(); - - #[cfg(feature = "optimism")] - if self.receipt.tx_type == TxType::Deposit { - if let Some(deposit_nonce) = self.receipt.deposit_nonce { - rlp_head.payload_length += deposit_nonce.length(); - } - if let Some(deposit_receipt_version) = self.receipt.deposit_receipt_version { - rlp_head.payload_length += deposit_receipt_version.length(); - } - } - - rlp_head - } - - /// Encodes the receipt data. 
- fn encode_fields(&self, out: &mut dyn BufMut) { - self.receipt_rlp_header().encode(out); - self.receipt.success.encode(out); - self.receipt.cumulative_gas_used.encode(out); - self.bloom.encode(out); - self.receipt.logs.encode(out); - #[cfg(feature = "optimism")] - if self.receipt.tx_type == TxType::Deposit { - if let Some(deposit_nonce) = self.receipt.deposit_nonce { - deposit_nonce.encode(out) - } - if let Some(deposit_receipt_version) = self.receipt.deposit_receipt_version { - deposit_receipt_version.encode(out) - } - } - } - - /// Encode receipt with or without the header data. - fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { - if matches!(self.receipt.tx_type, TxType::Legacy) { - self.encode_fields(out); - return - } - - let mut payload = Vec::new(); - self.encode_fields(&mut payload); - - if with_header { - let payload_length = payload.len() + 1; - let header = alloy_rlp::Header { list: false, payload_length }; - header.encode(out); - } - - match self.receipt.tx_type { - TxType::Legacy => unreachable!("legacy already handled"), - - TxType::Eip2930 => { - out.put_u8(EIP2930_TX_TYPE_ID); - } - TxType::Eip1559 => { - out.put_u8(EIP1559_TX_TYPE_ID); - } - TxType::Eip4844 => { - out.put_u8(EIP4844_TX_TYPE_ID); - } - TxType::Eip7702 => { - out.put_u8(EIP7702_TX_TYPE_ID); - } - #[cfg(feature = "optimism")] - TxType::Deposit => { - out.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); - } - } - out.put_slice(payload.as_ref()); - } - - /// Returns the length of the receipt data. 
- fn receipt_length(&self) -> usize { - let rlp_head = self.receipt_rlp_header(); - length_of_length(rlp_head.payload_length) + rlp_head.payload_length - } -} - -impl Encodable for ReceiptWithBloomEncoder<'_> { - fn encode(&self, out: &mut dyn BufMut) { - self.encode_inner(out, true) - } - fn length(&self) -> usize { - let mut payload_len = self.receipt_length(); - // account for eip-2718 type prefix and set the list - if !matches!(self.receipt.tx_type, TxType::Legacy) { - payload_len += 1; - // we include a string header for typed receipts, so include the length here - payload_len += length_of_length(payload_len); - } - - payload_len - } -} - #[cfg(test)] mod tests { use super::*; + use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{address, b256, bytes, hex_literal::hex, Bytes}; use reth_codecs::Compact; @@ -610,7 +427,7 @@ mod tests { #[cfg(feature = "optimism")] deposit_receipt_version: None, }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; receipt.encode(&mut data); @@ -644,7 +461,7 @@ mod tests { #[cfg(feature = "optimism")] deposit_receipt_version: None, }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); @@ -654,7 +471,7 @@ mod tests { #[cfg(feature = "optimism")] #[test] fn decode_deposit_receipt_regolith_roundtrip() { - let data = hex!("7ef9010c0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf"); + let data = 
hex!("b901107ef9010c0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf"); // Deposit Receipt (post-regolith) let expected = ReceiptWithBloom { @@ -666,21 +483,21 @@ mod tests { deposit_nonce: Some(4012991), deposit_receipt_version: None, }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); assert_eq!(receipt, expected); let mut buf = Vec::with_capacity(data.len()); - receipt.encode_inner(&mut buf, false); + receipt.encode(&mut buf); assert_eq!(buf, &data[..]); } #[cfg(feature = "optimism")] #[test] fn decode_deposit_receipt_canyon_roundtrip() { - let data = hex!("7ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01"); + let data = 
hex!("b901117ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01"); // Deposit Receipt (post-regolith) let expected = ReceiptWithBloom { @@ -692,14 +509,14 @@ mod tests { deposit_nonce: Some(4012991), deposit_receipt_version: Some(1), }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); assert_eq!(receipt, expected); let mut buf = Vec::with_capacity(data.len()); - expected.encode_inner(&mut buf, false); + expected.encode(&mut buf); assert_eq!(buf, &data[..]); } @@ -746,7 +563,7 @@ mod tests { #[cfg(feature = "optimism")] deposit_receipt_version: None, }, - bloom: Bloom::default(), + logs_bloom: Bloom::default(), }; let encoded = receipt.encoded_2718(); @@ -768,7 +585,7 @@ mod tests { #[cfg(feature = "optimism")] deposit_receipt_version: None, }, - bloom: Bloom::default(), + logs_bloom: Bloom::default(), }; let legacy_encoded = legacy_receipt.encoded_2718(); diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index a10b4afff9d..e5ccb47ba5c 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -217,7 +217,7 @@ pub fn build_block>( logs: call.logs.iter().map(|log| &log.inner).cloned().collect(), ..Default::default() } - .into(), + .with_bloom(), ); calls.push(call); diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index edaca5c1cfc..ec51df8982c 100644 --- 
a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -348,7 +348,6 @@ impl SparseStateTrie { }) } } - impl SparseStateTrie where F: BlindedProviderFactory, diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 011c95e6a92..cfce88fa020 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -23,6 +23,7 @@ reth-trie-common.workspace = true revm.workspace = true # alloy +alloy-eips.workspace = true alloy-rlp.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true @@ -63,6 +64,7 @@ serde = [ "alloy-primitives/serde", "alloy-consensus/serde", "alloy-trie/serde", + "alloy-eips/serde", "revm/serde", "reth-trie-common/serde" ] diff --git a/crates/trie/trie/benches/trie_root.rs b/crates/trie/trie/benches/trie_root.rs index 893e6e9e999..be6e4954579 100644 --- a/crates/trie/trie/benches/trie_root.rs +++ b/crates/trie/trie/benches/trie_root.rs @@ -3,7 +3,7 @@ use alloy_primitives::B256; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; -use reth_primitives::ReceiptWithBloom; +use reth_primitives::{Receipt, ReceiptWithBloom}; use reth_trie::triehash::KeccakHasher; /// Benchmarks different implementations of the root calculation. 
@@ -27,8 +27,8 @@ pub fn trie_root_benchmark(c: &mut Criterion) { } } -fn generate_test_data(size: usize) -> Vec { - prop::collection::vec(arb::(), size) +fn generate_test_data(size: usize) -> Vec> { + prop::collection::vec(arb::>(), size) .new_tree(&mut TestRunner::new(ProptestConfig::default())) .unwrap() .current() @@ -43,19 +43,19 @@ criterion_main!(benches); mod implementations { use super::*; + use alloy_eips::eip2718::Encodable2718; use alloy_rlp::Encodable; use alloy_trie::root::adjust_index_for_rlp; + use reth_primitives::Receipt; use reth_trie_common::{HashBuilder, Nibbles}; - pub fn trie_hash_ordered_trie_root(receipts: &[ReceiptWithBloom]) -> B256 { - triehash::ordered_trie_root::(receipts.iter().map(|receipt| { - let mut receipt_rlp = Vec::new(); - receipt.encode_inner(&mut receipt_rlp, false); - receipt_rlp - })) + pub fn trie_hash_ordered_trie_root(receipts: &[ReceiptWithBloom]) -> B256 { + triehash::ordered_trie_root::( + receipts.iter().map(|receipt_with_bloom| receipt_with_bloom.encoded_2718()), + ) } - pub fn hash_builder_root(receipts: &[ReceiptWithBloom]) -> B256 { + pub fn hash_builder_root(receipts: &[ReceiptWithBloom]) -> B256 { let mut index_buffer = Vec::new(); let mut value_buffer = Vec::new(); @@ -68,7 +68,7 @@ mod implementations { index.encode(&mut index_buffer); value_buffer.clear(); - receipts[index].encode_inner(&mut value_buffer, false); + receipts[index].encode_2718(&mut value_buffer); hb.add_leaf(Nibbles::unpack(&index_buffer), &value_buffer); } From 4f28d6c7a4ca95b20eedbd4ba374c7af5cd85232 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 21:07:21 +0100 Subject: [PATCH 919/970] chore: disable url default features (#13191) --- Cargo.toml | 2 +- crates/net/peers/Cargo.toml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 08fa42e1046..650be8337b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -527,7 +527,7 @@ syn = "2.0" thiserror = { version = "2.0.0", 
default-features = false } tracing = "0.1.0" tracing-appender = "0.2" -url = "2.3" +url = { version = "2.3", default-features = false } zstd = "0.13" byteorder = "1" diff --git a/crates/net/peers/Cargo.toml b/crates/net/peers/Cargo.toml index 8ca5faec93d..4cfc0aee3d6 100644 --- a/crates/net/peers/Cargo.toml +++ b/crates/net/peers/Cargo.toml @@ -41,7 +41,8 @@ std = [ "alloy-rlp/std", "secp256k1?/std", "serde_with/std", - "thiserror/std" + "thiserror/std", + "url/std" ] secp256k1 = ["dep:secp256k1", "enr/secp256k1"] net = ["dep:tokio", "tokio?/net"] From 53f72976186ff43a781abd810d5ba702bf3e5f4c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 20:57:01 +0100 Subject: [PATCH 920/970] chore: rm validate delegate (#13190) --- crates/primitives/src/transaction/sidecar.rs | 11 ----------- crates/rpc/rpc/src/eth/bundle.rs | 2 +- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 2cf04bc8e74..e244a53df77 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -39,17 +39,6 @@ impl BlobTransaction { } } - /// Verifies that the transaction's blob data, commitments, and proofs are all valid. - /// - /// See also [`alloy_consensus::TxEip4844::validate_blob`] - #[cfg(feature = "c-kzg")] - pub fn validate( - &self, - proof_settings: &c_kzg::KzgSettings, - ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { - self.tx().validate_blob(proof_settings) - } - /// Splits the [`BlobTransaction`] into its [`TransactionSigned`] and [`BlobTransactionSidecar`] /// components. 
pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) { diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index ba142651fd9..478d1de1c51 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -198,7 +198,7 @@ where // Verify that the given blob data, commitments, and proofs are all valid for // this transaction. if let PooledTransactionsElement::BlobTransaction(ref tx) = tx { - tx.validate(EnvKzgSettings::Default.get()).map_err(|e| { + tx.tx().validate_blob(EnvKzgSettings::Default.get()).map_err(|e| { Eth::Error::from_eth_err(EthApiError::InvalidParams(e.to_string())) })?; } From e615010cc66cd1ad9cee4917515c58a5748de508 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 6 Dec 2024 15:33:07 -0500 Subject: [PATCH 921/970] fix: don't use reserved word None in bug template (#13192) --- .github/ISSUE_TEMPLATE/bug.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index b3e50defe16..b01d4518f75 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -67,7 +67,7 @@ body: description: Were you running it in a container? 
multiple: true options: - - None + - Not running in a container - Docker - Kubernetes - LXC/LXD From c608679963e9abeebad050ff9715e23e99648ad8 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Sat, 7 Dec 2024 03:38:20 +0700 Subject: [PATCH 922/970] perf(`AllTransactions`-iter): do not clone all transactions by default (#13187) --- crates/transaction-pool/src/pool/mod.rs | 17 ++++++++++++++--- crates/transaction-pool/src/pool/txpool.rs | 6 +++--- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index c13dca17de0..d93b4a14d80 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -295,7 +295,7 @@ where /// Returns _all_ transactions in the pool. pub fn pooled_transactions(&self) -> Vec>> { - self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).collect() + self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).cloned().collect() } /// Returns only the first `max` transactions in the pool. @@ -303,7 +303,13 @@ where &self, max: usize, ) -> Vec>> { - self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).take(max).collect() + self.get_pool_data() + .all() + .transactions_iter() + .filter(|tx| tx.propagate) + .take(max) + .cloned() + .collect() } /// Converts the internally tracked transaction to the pooled format. 
@@ -857,7 +863,12 @@ where &self, origin: TransactionOrigin, ) -> Vec>> { - self.get_pool_data().all().transactions_iter().filter(|tx| tx.origin == origin).collect() + self.get_pool_data() + .all() + .transactions_iter() + .filter(|tx| tx.origin == origin) + .cloned() + .collect() } /// Returns all pending transactions filted by [`TransactionOrigin`] diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 7dd64da7364..1b330543cff 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1095,11 +1095,11 @@ impl AllTransactions { self.by_hash.keys().copied() } - /// Returns an iterator over all _unique_ hashes in the pool + /// Returns an iterator over all transactions in the pool pub(crate) fn transactions_iter( &self, - ) -> impl Iterator>> + '_ { - self.by_hash.values().cloned() + ) -> impl Iterator>> + '_ { + self.by_hash.values() } /// Returns if the transaction for the given hash is already included in this pool From a0326e4f86e43fa14eac0060fc5770cccdcc89b8 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Sat, 7 Dec 2024 03:35:30 +0700 Subject: [PATCH 923/970] perf: more `FxHashMap`s for `SenderId` key (#13188) --- crates/transaction-pool/src/pool/mod.rs | 10 +++------- crates/transaction-pool/src/pool/pending.rs | 9 +++++---- crates/transaction-pool/src/pool/txpool.rs | 6 +++--- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index d93b4a14d80..89c4d6d3465 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -89,12 +89,8 @@ use reth_execution_types::ChangedAccount; use alloy_eips::eip4844::BlobTransactionSidecar; use reth_primitives::RecoveredTx; -use std::{ - collections::{HashMap, HashSet}, - fmt, - sync::Arc, - time::Instant, -}; +use rustc_hash::FxHashMap; +use 
std::{collections::HashSet, fmt, sync::Arc, time::Instant}; use tokio::sync::mpsc; use tracing::{debug, trace, warn}; mod events; @@ -216,7 +212,7 @@ where fn changed_senders( &self, accs: impl Iterator, - ) -> HashMap { + ) -> FxHashMap { let mut identifiers = self.identifiers.write(); accs.into_iter() .map(|acc| { diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 89e673aad99..27706bd1754 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -6,9 +6,10 @@ use crate::{ }, Priority, SubPoolLimit, TransactionOrdering, ValidPoolTransaction, }; +use rustc_hash::FxHashMap; use std::{ cmp::Ordering, - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{hash_map::Entry, BTreeMap}, ops::Bound::Unbounded, sync::Arc, }; @@ -36,10 +37,10 @@ pub struct PendingPool { by_id: BTreeMap>, /// The highest nonce transactions for each sender - like the `independent` set, but the /// highest instead of lowest nonce. - highest_nonces: HashMap>, + highest_nonces: FxHashMap>, /// Independent transactions that can be included directly and don't require other /// transactions. - independent_transactions: HashMap>, + independent_transactions: FxHashMap>, /// Keeps track of the size of this pool. /// /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size). @@ -523,7 +524,7 @@ impl PendingPool { /// Returns a reference to the independent transactions in the pool #[cfg(test)] - pub(crate) const fn independent(&self) -> &HashMap> { + pub(crate) const fn independent(&self) -> &FxHashMap> { &self.independent_transactions } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 1b330543cff..11212e0aa3e 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -460,7 +460,7 @@ impl TxPool { /// Updates the transactions for the changed senders. 
pub(crate) fn update_accounts( &mut self, - changed_senders: HashMap, + changed_senders: FxHashMap, ) -> UpdateOutcome { // track changed accounts self.sender_info.extend(changed_senders.clone()); @@ -481,7 +481,7 @@ impl TxPool { &mut self, block_info: BlockInfo, mined_transactions: Vec, - changed_senders: HashMap, + changed_senders: FxHashMap, update_kind: PoolUpdateKind, ) -> OnNewCanonicalStateOutcome { // update block info @@ -1180,7 +1180,7 @@ impl AllTransactions { /// that got transaction included in the block. pub(crate) fn update( &mut self, - changed_accounts: HashMap, + changed_accounts: FxHashMap, ) -> Vec { // pre-allocate a few updates let mut updates = Vec::with_capacity(64); From 2183752f8d87d936ff3b2b982be0f22af63afa98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Fri, 6 Dec 2024 21:48:52 +0100 Subject: [PATCH 924/970] refactor(prune-types/prune): move PruneLimiter to `reth-prune` (#13182) --- crates/prune/prune/src/db_ext.rs | 2 +- crates/prune/prune/src/lib.rs | 2 ++ crates/prune/{types => prune}/src/limiter.rs | 25 ++++++++++++++++++ crates/prune/prune/src/pruner.rs | 4 +-- crates/prune/prune/src/segments/mod.rs | 6 ++--- crates/prune/prune/src/segments/receipts.rs | 10 +++---- .../prune/src/segments/static_file/headers.rs | 14 +++++----- .../src/segments/static_file/transactions.rs | 10 +++---- .../src/segments/user/account_history.rs | 11 ++++---- .../src/segments/user/receipts_by_logs.rs | 10 +++---- .../src/segments/user/sender_recovery.rs | 8 +++--- .../src/segments/user/storage_history.rs | 15 +++++------ .../src/segments/user/transaction_lookup.rs | 10 +++---- crates/prune/types/src/lib.rs | 2 -- crates/prune/types/src/pruner.rs | 26 +------------------ 15 files changed, 73 insertions(+), 82 deletions(-) rename crates/prune/{types => prune}/src/limiter.rs (94%) diff --git a/crates/prune/prune/src/db_ext.rs b/crates/prune/prune/src/db_ext.rs index a14127af20e..143cb5e2775 
100644 --- a/crates/prune/prune/src/db_ext.rs +++ b/crates/prune/prune/src/db_ext.rs @@ -1,12 +1,12 @@ use std::{fmt::Debug, ops::RangeBounds}; +use crate::PruneLimiter; use reth_db::{ cursor::{DbCursorRO, DbCursorRW, RangeWalker}, table::{Table, TableRow}, transaction::DbTxMut, DatabaseError, }; -use reth_prune_types::PruneLimiter; use tracing::debug; pub(crate) trait DbTxPruneExt: DbTxMut { diff --git a/crates/prune/prune/src/lib.rs b/crates/prune/prune/src/lib.rs index e6bcbe5e812..ef3ee0de2db 100644 --- a/crates/prune/prune/src/lib.rs +++ b/crates/prune/prune/src/lib.rs @@ -12,6 +12,7 @@ mod builder; mod db_ext; mod error; +mod limiter; mod metrics; mod pruner; pub mod segments; @@ -19,6 +20,7 @@ pub mod segments; use crate::metrics::Metrics; pub use builder::PrunerBuilder; pub use error::PrunerError; +pub use limiter::PruneLimiter; pub use pruner::{Pruner, PrunerResult, PrunerWithFactory, PrunerWithResult}; // Re-export prune types diff --git a/crates/prune/types/src/limiter.rs b/crates/prune/prune/src/limiter.rs similarity index 94% rename from crates/prune/types/src/limiter.rs rename to crates/prune/prune/src/limiter.rs index d555db25733..654eed04f28 100644 --- a/crates/prune/types/src/limiter.rs +++ b/crates/prune/prune/src/limiter.rs @@ -1,3 +1,4 @@ +use reth_prune_types::{PruneInterruptReason, PruneProgress}; use std::{ num::NonZeroUsize, time::{Duration, Instant}, @@ -119,6 +120,30 @@ impl PruneLimiter { pub fn is_limit_reached(&self) -> bool { self.is_deleted_entries_limit_reached() || self.is_time_limit_reached() } + + /// Creates new [`PruneInterruptReason`] based on the limiter's state. + pub fn interrupt_reason(&self) -> PruneInterruptReason { + if self.is_time_limit_reached() { + PruneInterruptReason::Timeout + } else if self.is_deleted_entries_limit_reached() { + PruneInterruptReason::DeletedEntriesLimitReached + } else { + PruneInterruptReason::Unknown + } + } + + /// Creates new [`PruneProgress`]. 
+ /// + /// If `done == true`, returns [`PruneProgress::Finished`], otherwise + /// [`PruneProgress::HasMoreData`] is returned with [`PruneInterruptReason`] according to the + /// limiter's state. + pub fn progress(&self, done: bool) -> PruneProgress { + if done { + PruneProgress::Finished + } else { + PruneProgress::HasMoreData(self.interrupt_reason()) + } + } } #[cfg(test)] diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs index 0ad149bb654..2344578bd08 100644 --- a/crates/prune/prune/src/pruner.rs +++ b/crates/prune/prune/src/pruner.rs @@ -2,14 +2,14 @@ use crate::{ segments::{PruneInput, Segment}, - Metrics, PrunerError, PrunerEvent, + Metrics, PruneLimiter, PrunerError, PrunerEvent, }; use alloy_primitives::BlockNumber; use reth_exex_types::FinishedExExHeight; use reth_provider::{ DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, }; -use reth_prune_types::{PruneLimiter, PruneProgress, PrunedSegmentInfo, PrunerOutput}; +use reth_prune_types::{PruneProgress, PrunedSegmentInfo, PrunerOutput}; use reth_tokio_util::{EventSender, EventStream}; use std::time::{Duration, Instant}; use tokio::sync::watch; diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index e828512fa82..ae18bcb3c6e 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -3,12 +3,10 @@ mod set; mod static_file; mod user; -use crate::PrunerError; +use crate::{PruneLimiter, PrunerError}; use alloy_primitives::{BlockNumber, TxNumber}; use reth_provider::{errors::provider::ProviderResult, BlockReader, PruneCheckpointWriter}; -use reth_prune_types::{ - PruneCheckpoint, PruneLimiter, PruneMode, PrunePurpose, PruneSegment, SegmentOutput, -}; +use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; pub use set::SegmentSet; pub use static_file::{ Headers as StaticFileHeaders, Receipts as StaticFileReceipts, diff --git 
a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index a365738a777..dbea32c47fe 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -12,9 +12,7 @@ use reth_provider::{ errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, }; -use reth_prune_types::{ - PruneCheckpoint, PruneProgress, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, -}; +use reth_prune_types::{PruneCheckpoint, PruneSegment, SegmentOutput, SegmentOutputCheckpoint}; use tracing::trace; pub(crate) fn prune( @@ -56,7 +54,7 @@ where // so we could finish pruning its receipts on the next run. .checked_sub(if done { 0 } else { 1 }); - let progress = PruneProgress::new(done, &limiter); + let progress = limiter.progress(done); Ok(SegmentOutput { progress, @@ -83,7 +81,7 @@ pub(crate) fn save_checkpoint( #[cfg(test)] mod tests { - use crate::segments::{PruneInput, SegmentOutput}; + use crate::segments::{PruneInput, PruneLimiter, SegmentOutput}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use assert_matches::assert_matches; use itertools::{ @@ -93,7 +91,7 @@ mod tests { use reth_db::tables; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, + PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{ diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index 5cd6f62643a..7d100f4e283 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ b/crates/prune/prune/src/segments/static_file/headers.rs @@ -3,7 +3,7 @@ use std::num::NonZeroUsize; use crate::{ db_ext::DbTxPruneExt, 
segments::{PruneInput, Segment}, - PrunerError, + PruneLimiter, PrunerError, }; use alloy_primitives::BlockNumber; use itertools::Itertools; @@ -14,8 +14,7 @@ use reth_db::{ }; use reth_provider::{providers::StaticFileProvider, DBProvider, StaticFileProviderFactory}; use reth_prune_types::{ - PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, - SegmentOutputCheckpoint, + PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; use reth_static_file_types::StaticFileSegment; use tracing::trace; @@ -92,7 +91,7 @@ impl> Segment Self { - if limiter.is_time_limit_reached() { - Self::Timeout - } else if limiter.is_deleted_entries_limit_reached() { - Self::DeletedEntriesLimitReached - } else { - Self::Unknown - } - } - /// Returns `true` if the reason is timeout. pub const fn is_timeout(&self) -> bool { matches!(self, Self::Timeout) @@ -124,19 +113,6 @@ impl PruneInterruptReason { } impl PruneProgress { - /// Creates new [`PruneProgress`]. - /// - /// If `done == true`, returns [`PruneProgress::Finished`], otherwise - /// [`PruneProgress::HasMoreData`] is returned with [`PruneInterruptReason`] according to the - /// passed limiter. - pub fn new(done: bool, limiter: &PruneLimiter) -> Self { - if done { - Self::Finished - } else { - Self::HasMoreData(PruneInterruptReason::new(limiter)) - } - } - /// Returns `true` if prune run is finished. 
pub const fn is_finished(&self) -> bool { matches!(self, Self::Finished) From e9915702fa226bdec497993ccee9fb4818cbbc18 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 22:08:22 +0100 Subject: [PATCH 925/970] perf: call increment once (#13193) --- crates/transaction-pool/src/pool/txpool.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 11212e0aa3e..5820b5f894a 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -489,13 +489,16 @@ impl TxPool { self.all_transactions.set_block_info(block_info); // Remove all transaction that were included in the block + let mut removed_txs_count = 0; for tx_hash in &mined_transactions { if self.prune_transaction_by_hash(tx_hash).is_some() { - // Update removed transactions metric - self.metrics.removed_transactions.increment(1); + removed_txs_count += 1; } } + // Update removed transactions metric + self.metrics.removed_transactions.increment(removed_txs_count); + let UpdateOutcome { promoted, discarded } = self.update_accounts(changed_senders); self.update_transaction_type_metrics(); From 552c6237a8267a05840ae8d99de5c164b8e75017 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 6 Dec 2024 16:35:51 -0500 Subject: [PATCH 926/970] feat: make BlockResponse generic over header (#13195) --- crates/net/downloaders/src/bodies/bodies.rs | 19 ++++++++++----- crates/net/downloaders/src/bodies/noop.rs | 2 +- crates/net/downloaders/src/bodies/queue.rs | 2 +- crates/net/downloaders/src/bodies/request.rs | 4 ++-- crates/net/p2p/src/bodies/downloader.rs | 2 +- crates/net/p2p/src/bodies/response.rs | 23 +++++++++++-------- crates/primitives-traits/src/header/sealed.rs | 2 +- crates/stages/stages/src/stages/bodies.rs | 2 +- 8 files changed, 33 insertions(+), 23 deletions(-) diff --git 
a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 1ee94929913..bdf2aca9c77 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -61,7 +61,7 @@ pub struct BodiesDownloader { /// Buffered responses buffered_responses: BinaryHeap>, /// Queued body responses that can be returned for insertion into the database. - queued_bodies: Vec>, + queued_bodies: Vec>, /// The bodies downloader metrics. metrics: BodyDownloaderMetrics, } @@ -193,7 +193,7 @@ where } /// Queues bodies and sets the latest queued block number - fn queue_bodies(&mut self, bodies: Vec>) { + fn queue_bodies(&mut self, bodies: Vec>) { self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number()); self.queued_bodies.extend(bodies); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); @@ -210,7 +210,10 @@ where } /// Adds a new response to the internal buffer - fn buffer_bodies_response(&mut self, response: Vec>) { + fn buffer_bodies_response( + &mut self, + response: Vec>, + ) { // take into account capacity let size = response.iter().map(BlockResponse::size).sum::() + response.capacity() * mem::size_of::>(); @@ -227,7 +230,9 @@ where } /// Returns a response if it's first block number matches the next expected. 
- fn try_next_buffered(&mut self) -> Option>> { + fn try_next_buffered( + &mut self, + ) -> Option>> { if let Some(next) = self.buffered_responses.peek() { let expected = self.next_expected_block_number(); let next_block_range = next.block_range(); @@ -253,7 +258,9 @@ where /// Returns the next batch of block bodies that can be returned if we have enough buffered /// bodies - fn try_split_next_batch(&mut self) -> Option>> { + fn try_split_next_batch( + &mut self, + ) -> Option>> { if self.queued_bodies.len() >= self.stream_batch_size { let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::>(); self.queued_bodies.shrink_to_fit(); @@ -436,7 +443,7 @@ where #[derive(Debug)] struct OrderedBodiesResponse { - resp: Vec>, + resp: Vec>, /// The total size of the response in bytes size: usize, } diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index 494a5f2ef2e..f311a242c20 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -21,7 +21,7 @@ impl BodyDownloader for NoopBodiesDownloader { } impl Stream for NoopBodiesDownloader { - type Item = Result>, DownloadError>; + type Item = Result>, DownloadError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index aa6ec9e4af0..5f1e8b059cf 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -80,7 +80,7 @@ impl Stream for BodiesRequestQueue where B: BodiesClient + 'static, { - type Item = DownloadResult>>; + type Item = DownloadResult>>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().inner.poll_next_unpin(cx) diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 40fa9c309ba..28cfdb61b7c 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ 
b/crates/net/downloaders/src/bodies/request.rs @@ -48,7 +48,7 @@ pub(crate) struct BodiesRequestFuture { // Headers to download. The collection is shrunk as responses are buffered. pending_headers: VecDeque, /// Internal buffer for all blocks - buffer: Vec>, + buffer: Vec>, fut: Option, /// Tracks how many bodies we requested in the last request. last_request_len: Option, @@ -217,7 +217,7 @@ impl Future for BodiesRequestFuture where B: BodiesClient + 'static, { - type Output = DownloadResult>>; + type Output = DownloadResult>>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index 7008c08e522..06f35fc9bd6 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -5,7 +5,7 @@ use futures::Stream; use std::{fmt::Debug, ops::RangeInclusive}; /// Body downloader return type. -pub type BodyDownloaderResult = DownloadResult>>; +pub type BodyDownloaderResult = DownloadResult>>; /// A downloader capable of fetching and yielding block bodies from block headers. 
/// diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 11aaab17a30..02534ea0963 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -1,19 +1,22 @@ use alloy_primitives::{BlockNumber, U256}; use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; -use reth_primitives_traits::InMemorySize; +use reth_primitives_traits::{BlockHeader, InMemorySize}; /// The block response #[derive(PartialEq, Eq, Debug, Clone)] -pub enum BlockResponse { +pub enum BlockResponse { /// Full block response (with transactions or ommers) - Full(SealedBlock), + Full(SealedBlock), /// The empty block response - Empty(SealedHeader), + Empty(SealedHeader), } -impl BlockResponse { +impl BlockResponse +where + H: BlockHeader, +{ /// Return the reference to the response header - pub const fn header(&self) -> &SealedHeader { + pub const fn header(&self) -> &SealedHeader { match self { Self::Full(block) => &block.header, Self::Empty(header) => header, @@ -22,14 +25,14 @@ impl BlockResponse { /// Return the block number pub fn block_number(&self) -> BlockNumber { - self.header().number + self.header().number() } /// Return the reference to the response header pub fn difficulty(&self) -> U256 { match self { - Self::Full(block) => block.difficulty, - Self::Empty(header) => header.difficulty, + Self::Full(block) => block.difficulty(), + Self::Empty(header) => header.difficulty(), } } @@ -42,7 +45,7 @@ impl BlockResponse { } } -impl InMemorySize for BlockResponse { +impl InMemorySize for BlockResponse { #[inline] fn size(&self) -> usize { match self { diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 1a5163e6ba3..e99b0e1c17f 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -67,7 +67,7 @@ impl SealedHeader { } } -impl InMemorySize for SealedHeader { +impl InMemorySize for SealedHeader { 
/// Calculates a heuristic for the in-memory size of the [`SealedHeader`]. #[inline] fn size(&self) -> usize { diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 83be3f36fcf..88a1b96e249 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -56,7 +56,7 @@ pub struct BodyStage { /// The body downloader. downloader: D, /// Block response buffer. - buffer: Option>>, + buffer: Option>>, } impl BodyStage { From 9167e454b5b3c07ea3147fcf99760ec37b0207ad Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 7 Dec 2024 07:28:50 +0400 Subject: [PATCH 927/970] refactor: simplify and relax some RPC bounds (#13202) --- crates/primitives-traits/src/receipt.rs | 9 +- crates/rpc/rpc-builder/src/eth.rs | 36 ++---- crates/rpc/rpc-builder/src/lib.rs | 88 ++++++-------- crates/rpc/rpc/src/debug.rs | 131 ++++++++++----------- crates/rpc/rpc/src/eth/filter.rs | 148 ++++++++++++------------ crates/rpc/rpc/src/eth/pubsub.rs | 122 ++++++++----------- crates/rpc/rpc/src/trace.rs | 48 +++----- crates/rpc/rpc/src/validation.rs | 47 ++++---- 8 files changed, 279 insertions(+), 350 deletions(-) diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 1c115981e3e..1b5d2b698c8 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -3,7 +3,9 @@ use alloc::vec::Vec; use core::fmt; -use alloy_consensus::{TxReceipt, Typed2718}; +use alloy_consensus::{ + Eip2718EncodableReceipt, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt, Typed2718, +}; use alloy_primitives::B256; use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; @@ -23,8 +25,9 @@ pub trait Receipt: + Default + fmt::Debug + TxReceipt - + alloy_rlp::Encodable - + alloy_rlp::Decodable + + RlpEncodableReceipt + + RlpDecodableReceipt + + Eip2718EncodableReceipt + Typed2718 + MaybeSerde + InMemorySize diff --git 
a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 453efb0ddb4..2a6744e7b18 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,6 +1,6 @@ use alloy_consensus::Header; use reth_evm::ConfigureEvm; -use reth_primitives::EthPrimitives; +use reth_primitives::NodePrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; use reth_rpc::{EthFilter, EthPubSub}; use reth_rpc_eth_api::EthApiTypes; @@ -15,38 +15,35 @@ pub type DynEthApiBuilder { +pub struct EthHandlers { /// Main `eth_` request handler pub api: EthApi, /// The async caching layer used by the eth handlers pub cache: EthStateCache, /// Polling based filter handler available on all transports - pub filter: EthFilter, + pub filter: EthFilter, /// Handler for subscriptions only available for transports that support it (ws, ipc) - pub pubsub: EthPubSub, + pub pubsub: EthPubSub, } -impl EthHandlers +impl EthHandlers where Provider: StateProviderFactory + BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ::Block, + Receipt = ::Receipt, > + EvmEnvProvider + Clone + Unpin + 'static, - Pool: Send + Sync + Clone + 'static, - Network: Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EthApi: EthApiTypes + 'static, { /// Returns a new instance with handlers for `eth` namespace. /// /// This will spawn all necessary tasks for the handlers. 
#[allow(clippy::too_many_arguments)] - pub fn bootstrap( + pub fn bootstrap( provider: Provider, pool: Pool, network: Network, @@ -92,22 +89,13 @@ where let api = eth_api_builder(&ctx); - let filter = EthFilter::new( - ctx.provider.clone(), - ctx.pool.clone(), - ctx.cache.clone(), - ctx.config.filter_config(), - Box::new(ctx.executor.clone()), - api.tx_resp_builder().clone(), - ); + let filter = + EthFilter::new(api.clone(), ctx.config.filter_config(), Box::new(ctx.executor.clone())); let pubsub = EthPubSub::with_spawner( - ctx.provider.clone(), - ctx.pool.clone(), + api.clone(), ctx.events.clone(), - ctx.network.clone(), Box::new(ctx.executor.clone()), - api.tx_resp_builder().clone(), ); Self { api, cache: ctx.cache, filter, pubsub } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index df25564486f..949e377afb1 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -204,11 +204,11 @@ use reth_consensus::FullConsensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives::{EthPrimitives, NodePrimitives}; +use reth_primitives::NodePrimitives; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, FullRpcProvider, HeaderProvider, ProviderBlock, ProviderHeader, - ProviderReceipt, ReceiptProvider, StateProviderFactory, + EvmEnvProvider, FullRpcProvider, ProviderBlock, ProviderHeader, ProviderReceipt, + ReceiptProvider, StateProviderFactory, }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, @@ -273,7 +273,7 @@ pub async fn launch, block_executor: BlockExecutor, - consensus: Arc, + consensus: Arc>, ) -> Result where Provider: FullRpcProvider< @@ -285,7 +285,7 @@ where Pool: TransactionPool::Transaction> + 'static, Network: NetworkInfo + 
Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, EthApi: FullEthApiServer< Provider: BlockReader< @@ -298,6 +298,8 @@ where Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + BlockBody = reth_primitives::BlockBody, >, >, { @@ -649,15 +651,17 @@ where Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + BlockBody = reth_primitives::BlockBody, >, >, - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -687,11 +691,9 @@ where >, >, Provider: BlockReader< - Block = ::Block, - Receipt = ::Receipt, - Header = ::Header, + Block = ::Block, + Receipt = ::Receipt, >, - Pool: TransactionPool::Transaction>, { let Self { provider, @@ -815,7 +817,6 @@ where Provider: BlockReader< Block = ::Block, Receipt = ::Receipt, - Header = ::Header, >, Pool: TransactionPool::Transaction>, { @@ -963,7 +964,7 @@ pub struct RpcRegistryInner< /// Holds the configuration for the RPC modules config: RpcModuleConfig, /// Holds a all `eth_` namespace handlers - eth: EthHandlers, + eth: EthHandlers, /// to put trace calls behind semaphore blocking_pool_guard: BlockingTaskGuard, /// Contains the [Methods] of a module @@ -977,16 +978,15 @@ impl where Provider: StateProviderFactory + BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ::Block, + Receipt = ::Receipt, > + EvmEnvProvider + Clone + Unpin + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, EthApi: EthApiTypes + 'static, BlockExecutor: BlockExecutorProvider, @@ -1057,7 +1057,7 @@ where } /// Returns a reference to the installed [`EthHandlers`]. 
- pub const fn eth_handlers(&self) -> &EthHandlers { + pub const fn eth_handlers(&self) -> &EthHandlers { &self.eth } @@ -1215,7 +1215,6 @@ where pub fn register_trace(&mut self) -> &mut Self where EthApi: TraceExt, - Provider: BlockReader::Block>, { let trace_api = self.trace_api(); self.modules.insert(RethRpcModule::Trace, trace_api.into_rpc().into()); @@ -1276,15 +1275,11 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn trace_api(&self) -> TraceApi + pub fn trace_api(&self) -> TraceApi where EthApi: TraceExt, { - TraceApi::new( - self.provider.clone(), - self.eth_api().clone(), - self.blocking_pool_guard.clone(), - ) + TraceApi::new(self.eth_api().clone(), self.blocking_pool_guard.clone()) } /// Instantiates [`EthBundle`] Api @@ -1305,14 +1300,13 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn debug_api(&self) -> DebugApi + pub fn debug_api(&self) -> DebugApi where EthApi: EthApiSpec + EthTransactions + TraceExt, BlockExecutor: BlockExecutorProvider>, { DebugApi::new( - self.provider.clone(), self.eth_api().clone(), self.blocking_pool_guard.clone(), self.block_executor.clone(), @@ -1340,7 +1334,7 @@ where /// Instantiates `ValidationApi` pub fn validation_api(&self) -> ValidationApi where - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { ValidationApi::new( self.provider.clone(), @@ -1355,30 +1349,27 @@ where impl RpcRegistryInner where - Provider: FullRpcProvider< - Block = ::Block, - Receipt = ::Receipt, - Header = ::Header, - > + AccountReader - + ChangeSetReader, - Pool: TransactionPool::Transaction> + 'static, + Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: 
CanonStateSubscriptions + Clone + 'static, EthApi: FullEthApiServer< Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ::Block, + Receipt = ::Receipt, + Header = ::BlockHeader, >, >, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< Block = reth_primitives::Block, + BlockHeader = reth_primitives::Header, + BlockBody = reth_primitives::BlockBody, Receipt = reth_primitives::Receipt, >, >, - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures the auth module that includes the /// * `engine_` namespace @@ -1468,7 +1459,6 @@ where .into() } RethRpcModule::Debug => DebugApi::new( - self.provider.clone(), eth_api.clone(), self.blocking_pool_guard.clone(), self.block_executor.clone(), @@ -1495,16 +1485,14 @@ where RethRpcModule::Net => { NetApi::new(self.network.clone(), eth_api.clone()).into_rpc().into() } - RethRpcModule::Trace => TraceApi::new( - self.provider.clone(), - eth_api.clone(), - self.blocking_pool_guard.clone(), - ) - .into_rpc() - .into(), + RethRpcModule::Trace => { + TraceApi::new(eth_api.clone(), self.blocking_pool_guard.clone()) + .into_rpc() + .into() + } RethRpcModule::Web3 => Web3Api::new(self.network.clone()).into_rpc().into(), RethRpcModule::Txpool => TxPoolApi::new( - self.pool.clone(), + self.eth.api.pool().clone(), self.eth.api.tx_resp_builder().clone(), ) .into_rpc() @@ -1524,7 +1512,7 @@ where .into() } RethRpcModule::Flashbots => ValidationApi::new( - self.provider.clone(), + eth_api.provider().clone(), Arc::new(self.consensus.clone()), self.block_executor.clone(), self.config.flashbots.clone(), diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 91236ca9de5..5e799dd69ca 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -19,16 +19,16 @@ use reth_evm::{ execute::{BlockExecutorProvider, Executor}, 
ConfigureEvmEnv, }; -use reth_primitives::{BlockExt, NodePrimitives, SealedBlockWithSenders}; +use reth_primitives::{BlockExt, NodePrimitives, ReceiptWithBloom, SealedBlockWithSenders}; use reth_primitives_traits::{Block as _, BlockBody, SignedTransaction}; use reth_provider::{ - BlockReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, ProviderBlock, - StateProofProvider, StateProviderFactory, TransactionVariant, + BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, ProviderBlock, + ReceiptProviderIdExt, StateProofProvider, TransactionVariant, }; use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ - helpers::{EthApiSpec, EthTransactions, TraceExt}, + helpers::{EthTransactions, TraceExt}, EthApiTypes, FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; @@ -47,22 +47,20 @@ use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `debug` API implementation. /// /// This type provides the functionality for handling `debug` related requests. -pub struct DebugApi { - inner: Arc>, +pub struct DebugApi { + inner: Arc>, } // === impl DebugApi === -impl DebugApi { +impl DebugApi { /// Create a new instance of the [`DebugApi`] pub fn new( - provider: Provider, eth: Eth, blocking_task_guard: BlockingTaskGuard, block_executor: BlockExecutor, ) -> Self { - let inner = - Arc::new(DebugApiInner { provider, eth_api: eth, blocking_task_guard, block_executor }); + let inner = Arc::new(DebugApiInner { eth_api: eth, blocking_task_guard, block_executor }); Self { inner } } @@ -72,15 +70,17 @@ impl DebugApi { } } +impl DebugApi { + /// Access the underlying provider. 
+ pub fn provider(&self) -> &Eth::Provider { + self.inner.eth_api.provider() + } +} + // === impl DebugApi === -impl DebugApi +impl DebugApi where - Provider: BlockReaderIdExt - + HeaderProvider - + ChainSpecProvider - + StateProviderFactory - + 'static, Eth: EthApiTypes + TraceExt + 'static, BlockExecutor: BlockExecutorProvider>>, @@ -164,34 +164,30 @@ where let (cfg, block_env) = self.eth_api().evm_env_for_raw_block(block.header()).await?; // Depending on EIP-2 we need to recover the transactions differently - let senders = if self - .inner - .provider - .chain_spec() - .is_homestead_active_at_block(block.header().number()) - { - block - .body() - .transactions() - .iter() - .map(|tx| { - tx.recover_signer() - .ok_or(EthApiError::InvalidTransactionSignature) - .map_err(Eth::Error::from_eth_err) - }) - .collect::, Eth::Error>>()? - } else { - block - .body() - .transactions() - .iter() - .map(|tx| { - tx.recover_signer_unchecked() - .ok_or(EthApiError::InvalidTransactionSignature) - .map_err(Eth::Error::from_eth_err) - }) - .collect::, Eth::Error>>()? - }; + let senders = + if self.provider().chain_spec().is_homestead_active_at_block(block.header().number()) { + block + .body() + .transactions() + .iter() + .map(|tx| { + tx.recover_signer() + .ok_or(EthApiError::InvalidTransactionSignature) + .map_err(Eth::Error::from_eth_err) + }) + .collect::, Eth::Error>>()? + } else { + block + .body() + .transactions() + .iter() + .map(|tx| { + tx.recover_signer_unchecked() + .ok_or(EthApiError::InvalidTransactionSignature) + .map_err(Eth::Error::from_eth_err) + }) + .collect::, Eth::Error>>()? + }; self.trace_block( Arc::new(block.with_senders_unchecked(senders).seal_slow()), @@ -209,8 +205,7 @@ where opts: GethDebugTracingOptions, ) -> Result, Eth::Error> { let block_hash = self - .inner - .provider + .provider() .block_hash_for_id(block_id) .map_err(Eth::Error::from_eth_err)? 
.ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -813,30 +808,25 @@ where } #[async_trait] -impl DebugApiServer for DebugApi +impl DebugApiServer for DebugApi where - Provider: BlockReaderIdExt - + HeaderProvider - + ChainSpecProvider - + StateProviderFactory - + 'static, - Eth: EthApiSpec + EthTransactions + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives::Provider as BlockReader>::Block>, - >, + Eth: EthApiTypes + EthTransactions + TraceExt + 'static, + BlockExecutor: + BlockExecutorProvider>>, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { let header = match block_id { - BlockId::Hash(hash) => self.inner.provider.header(&hash.into()).to_rpc_result()?, + BlockId::Hash(hash) => self.provider().header(&hash.into()).to_rpc_result()?, BlockId::Number(number_or_tag) => { let number = self - .inner - .provider + .provider() .convert_block_number(number_or_tag) .to_rpc_result()? - .ok_or_else(|| internal_rpc_err("Pending block not supported".to_string()))?; - self.inner.provider.header_by_number(number).to_rpc_result()? + .ok_or_else(|| { + internal_rpc_err("Pending block not supported".to_string()) + })?; + self.provider().header_by_number(number).to_rpc_result()? } }; @@ -851,8 +841,7 @@ where /// Handler for `debug_getRawBlock` async fn raw_block(&self, block_id: BlockId) -> RpcResult { let block = self - .inner - .provider + .provider() .block_by_id(block_id) .to_rpc_result()? .ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -874,8 +863,7 @@ where /// Returns the bytes of the transaction for the given hash. async fn raw_transactions(&self, block_id: BlockId) -> RpcResult> { let block = self - .inner - .provider + .provider() .block_with_senders_by_id(block_id, TransactionVariant::NoHash) .to_rpc_result()? 
.unwrap_or_default(); @@ -885,13 +873,12 @@ where /// Handler for `debug_getRawReceipts` async fn raw_receipts(&self, block_id: BlockId) -> RpcResult> { Ok(self - .inner - .provider + .provider() .receipts_by_block_id(block_id) .to_rpc_result()? .unwrap_or_default() .into_iter() - .map(|receipt| receipt.with_bloom().encoded_2718().into()) + .map(|receipt| ReceiptWithBloom::from(receipt).encoded_2718().into()) .collect()) } @@ -1201,21 +1188,19 @@ where } } -impl std::fmt::Debug for DebugApi { +impl std::fmt::Debug for DebugApi { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DebugApi").finish_non_exhaustive() } } -impl Clone for DebugApi { +impl Clone for DebugApi { fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -struct DebugApiInner { - /// The provider that can interact with the chain. - provider: Provider, +struct DebugApiInner { /// The implementation of `eth` API eth_api: Eth, // restrict the number of concurrent calls to blocking calls diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 8f50fefcb61..6441db70459 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -10,9 +10,13 @@ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; use reth_primitives::SealedBlockWithSenders; -use reth_provider::{BlockIdReader, BlockReader, ProviderBlock, ProviderError, ProviderReceipt}; +use reth_provider::{ + BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, HeaderProvider, ProviderBlock, + ProviderError, ProviderReceipt, +}; use reth_rpc_eth_api::{ - EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat, + EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcNodeCoreExt, RpcTransaction, + TransactionCompat, }; use reth_rpc_eth_types::{ logs_utils::{self, append_matching_block_logs, ProviderOrBlock}, @@ -40,27 +44,22 @@ use tracing::{error, 
trace}; const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb /// `Eth` filter RPC implementation. -pub struct EthFilter { +pub struct EthFilter { /// All nested fields bundled together - inner: Arc>>, - /// Assembles response data w.r.t. network. - tx_resp_builder: Eth::TransactionCompat, + inner: Arc>, } -impl Clone for EthFilter +impl Clone for EthFilter where Eth: EthApiTypes, - Provider: BlockReader, { fn clone(&self) -> Self { - Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } + Self { inner: self.inner.clone() } } } -impl EthFilter +impl EthFilter where - Provider: BlockReader + Send + Sync + 'static, - Pool: Send + Sync + 'static, Eth: EthApiTypes + 'static, { /// Creates a new, shareable instance. @@ -71,22 +70,13 @@ where /// See also [`EthFilterConfig`]. /// /// This also spawns a task that periodically clears stale filters. - pub fn new( - provider: Provider, - pool: Pool, - eth_cache: EthStateCache, - config: EthFilterConfig, - task_spawner: Box, - tx_resp_builder: Eth::TransactionCompat, - ) -> Self { + pub fn new(eth_api: Eth, config: EthFilterConfig, task_spawner: Box) -> Self { let EthFilterConfig { max_blocks_per_filter, max_logs_per_response, stale_filter_ttl } = config; let inner = EthFilterInner { - provider, + eth_api, active_filters: ActiveFilters::new(), - pool, id_provider: Arc::new(EthSubscriptionIdProvider::default()), - eth_cache, max_headers_range: MAX_HEADERS_RANGE, task_spawner, stale_filter_ttl, @@ -95,7 +85,7 @@ where max_logs_per_response: max_logs_per_response.unwrap_or(usize::MAX), }; - let eth_filter = Self { inner: Arc::new(inner), tx_resp_builder }; + let eth_filter = Self { inner: Arc::new(inner) }; let this = eth_filter.clone(); eth_filter.inner.task_spawner.spawn_critical( @@ -143,18 +133,26 @@ where } } -impl EthFilter +impl EthFilter where - Provider: BlockReader + BlockIdReader + 'static, - Pool: TransactionPool::Transaction> + 'static, - Eth: FullEthApiTypes, 
+ Eth: FullEthApiTypes + RpcNodeCoreExt, { + /// Access the underlying provider. + fn provider(&self) -> &Eth::Provider { + self.inner.eth_api.provider() + } + + /// Access the underlying pool. + fn pool(&self) -> &Eth::Pool { + self.inner.eth_api.pool() + } + /// Returns all the filter changes for the given id, if any pub async fn filter_changes( &self, id: FilterId, ) -> Result>, EthFilterError> { - let info = self.inner.provider.chain_info()?; + let info = self.provider().chain_info()?; let best_number = info.best_number; // start_block is the block from which we should start fetching changes, the next block from @@ -185,7 +183,7 @@ where // [start_block..best_block] let end_block = best_number + 1; let block_hashes = - self.inner.provider.canonical_hashes_range(start_block, end_block).map_err( + self.provider().canonical_hashes_range(start_block, end_block).map_err( |_| EthApiError::HeaderRangeNotFound(start_block.into(), end_block.into()), )?; Ok(FilterChanges::Hashes(block_hashes)) @@ -194,11 +192,11 @@ where let (from_block_number, to_block_number) = match filter.block_option { FilterBlockOption::Range { from_block, to_block } => { let from = from_block - .map(|num| self.inner.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? .flatten(); let to = to_block - .map(|num| self.inner.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? 
.flatten(); logs_utils::get_filter_block_range(from, to, start_block, info) @@ -242,12 +240,9 @@ where } #[async_trait] -impl EthFilterApiServer> - for EthFilter +impl EthFilterApiServer> for EthFilter where - Provider: BlockReader + BlockIdReader + 'static, - Pool: TransactionPool::Transaction> + 'static, - Eth: FullEthApiTypes + 'static, + Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, { /// Handler for `eth_newFilter` async fn new_filter(&self, filter: Filter) -> RpcResult { @@ -272,14 +267,16 @@ where let transaction_kind = match kind.unwrap_or_default() { PendingTransactionFilterKind::Hashes => { - let receiver = self.inner.pool.pending_transactions_listener(); + let receiver = self.pool().pending_transactions_listener(); let pending_txs_receiver = PendingTransactionsReceiver::new(receiver); FilterKind::PendingTransaction(PendingTransactionKind::Hashes(pending_txs_receiver)) } PendingTransactionFilterKind::Full => { - let stream = self.inner.pool.new_pending_pool_transactions_listener(); - let full_txs_receiver = - FullTransactionsReceiver::new(stream, self.tx_resp_builder.clone()); + let stream = self.pool().new_pending_pool_transactions_listener(); + let full_txs_receiver = FullTransactionsReceiver::new( + stream, + self.inner.eth_api.tx_resp_builder().clone(), + ); FilterKind::PendingTransaction(PendingTransactionKind::FullTransaction(Arc::new( full_txs_receiver, ))) @@ -332,10 +329,9 @@ where } } -impl std::fmt::Debug for EthFilter +impl std::fmt::Debug for EthFilter where Eth: EthApiTypes, - Provider: BlockReader, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthFilter").finish_non_exhaustive() @@ -344,21 +340,17 @@ where /// Container type `EthFilter` #[derive(Debug)] -struct EthFilterInner { - /// The transaction pool. - pool: Pool, - /// The provider that can interact with the chain. - provider: Provider, +struct EthFilterInner { + /// Inner `eth` API implementation. 
+ eth_api: Eth, /// All currently installed filters. - active_filters: ActiveFilters, + active_filters: ActiveFilters>, /// Provides ids to identify filters id_provider: Arc, /// Maximum number of blocks that could be scanned per filter max_blocks_per_filter: u64, /// Maximum number of logs that can be returned in a response max_logs_per_response: usize, - /// The async cache frontend for eth related data - eth_cache: EthStateCache, /// maximum number of headers to read at once for range filter max_headers_range: u64, /// The type that can spawn tasks. @@ -367,11 +359,22 @@ struct EthFilterInner { stale_filter_ttl: Duration, } -impl EthFilterInner +impl EthFilterInner where - Provider: BlockReader + BlockIdReader + 'static, - Pool: TransactionPool + 'static, + Eth: RpcNodeCoreExt + EthApiTypes, { + /// Access the underlying provider. + fn provider(&self) -> &Eth::Provider { + self.eth_api.provider() + } + + /// Access the underlying [`EthStateCache`]. + fn eth_cache( + &self, + ) -> &EthStateCache, ProviderReceipt> { + self.eth_api.cache() + } + /// Returns logs matching given filter object. async fn logs_for_filter(&self, filter: Filter) -> Result, EthFilterError> { match filter.block_option { @@ -379,7 +382,7 @@ where // for all matching logs in the block // get the block header with the hash let header = self - .provider + .provider() .header_by_hash_or_number(block_hash.into())? .ok_or_else(|| ProviderError::HeaderNotFound(block_hash.into()))?; @@ -390,7 +393,7 @@ where let (receipts, maybe_block) = self .receipts_and_maybe_block( &block_num_hash, - self.provider.chain_info()?.best_number, + self.provider().chain_info()?.best_number, ) .await? 
.ok_or(EthApiError::HeaderNotFound(block_hash.into()))?; @@ -399,8 +402,8 @@ where append_matching_block_logs( &mut all_logs, maybe_block - .map(|b| ProviderOrBlock::Block(b)) - .unwrap_or_else(|| ProviderOrBlock::Provider(&self.provider)), + .map(ProviderOrBlock::Block) + .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())), &FilteredParams::new(Some(filter)), block_num_hash, &receipts, @@ -412,16 +415,16 @@ where } FilterBlockOption::Range { from_block, to_block } => { // compute the range - let info = self.provider.chain_info()?; + let info = self.provider().chain_info()?; // we start at the most recent block if unset in filter let start_block = info.best_number; let from = from_block - .map(|num| self.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? .flatten(); let to = to_block - .map(|num| self.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? .flatten(); let (from_block_number, to_block_number) = @@ -433,8 +436,11 @@ where } /// Installs a new filter and returns the new identifier. 
- async fn install_filter(&self, kind: FilterKind) -> RpcResult { - let last_poll_block_number = self.provider.best_block_number().to_rpc_result()?; + async fn install_filter( + &self, + kind: FilterKind>, + ) -> RpcResult { + let last_poll_block_number = self.provider().best_block_number().to_rpc_result()?; let id = FilterId::from(self.id_provider.next_id()); let mut filters = self.active_filters.inner.lock().await; filters.insert( @@ -482,7 +488,7 @@ where for (from, to) in BlockRangeInclusiveIter::new(from_block..=to_block, self.max_headers_range) { - let headers = self.provider.headers_range(from..=to)?; + let headers = self.provider().headers_range(from..=to)?; for (idx, header) in headers.iter().enumerate() { // only if filter matches @@ -494,7 +500,7 @@ where let block_hash = match headers.get(idx + 1) { Some(parent) => parent.parent_hash(), None => self - .provider + .provider() .block_hash(header.number())? .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?, }; @@ -506,8 +512,8 @@ where append_matching_block_logs( &mut all_logs, maybe_block - .map(|block| ProviderOrBlock::Block(block)) - .unwrap_or_else(|| ProviderOrBlock::Provider(&self.provider)), + .map(ProviderOrBlock::Block) + .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())), &filter_params, num_hash, &receipts, @@ -540,20 +546,20 @@ where best_number: u64, ) -> Result< Option<( - Arc>>, - Option>>>, + Arc>>, + Option>>>, )>, EthFilterError, > { // The last 4 blocks are most likely cached, so we can just fetch them let cached_range = best_number.saturating_sub(4)..=best_number; let receipts_block = if cached_range.contains(&block_num_hash.number) { - self.eth_cache + self.eth_cache() .get_block_and_receipts(block_num_hash.hash) .await? 
.map(|(b, r)| (r, Some(b))) } else { - self.eth_cache.get_receipts(block_num_hash.hash).await?.map(|r| (r, None)) + self.eth_cache().get_receipts(block_num_hash.hash).await?.map(|r| (r, None)) }; Ok(receipts_block) } diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 58c62133730..596af187635 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -17,8 +17,10 @@ use jsonrpsee::{ }; use reth_network_api::NetworkInfo; use reth_primitives::NodePrimitives; -use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; -use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, TransactionCompat}; +use reth_provider::{BlockNumReader, CanonStateSubscriptions}; +use reth_rpc_eth_api::{ + pubsub::EthPubSubApiServer, EthApiTypes, RpcNodeCore, RpcTransaction, TransactionCompat, +}; use reth_rpc_eth_types::logs_utils; use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_rpc_types_compat::transaction::from_recovered; @@ -35,67 +37,47 @@ use tracing::error; /// /// This handles `eth_subscribe` RPC calls. #[derive(Clone)] -pub struct EthPubSub { +pub struct EthPubSub { /// All nested fields bundled together. - inner: Arc>, + inner: Arc>, /// The type that's used to spawn subscription tasks. subscription_task_spawner: Box, - tx_resp_builder: Eth, } // === impl EthPubSub === -impl EthPubSub { +impl EthPubSub { /// Creates a new, shareable instance. /// /// Subscription tasks are spawned via [`tokio::task::spawn`] - pub fn new( - provider: Provider, - pool: Pool, - chain_events: Events, - network: Network, - tx_resp_builder: Eth, - ) -> Self { - Self::with_spawner( - provider, - pool, - chain_events, - network, - Box::::default(), - tx_resp_builder, - ) + pub fn new(eth_api: Eth, chain_events: Events) -> Self { + Self::with_spawner(eth_api, chain_events, Box::::default()) } /// Creates a new, shareable instance. 
pub fn with_spawner( - provider: Provider, - pool: Pool, + eth_api: Eth, chain_events: Events, - network: Network, subscription_task_spawner: Box, - tx_resp_builder: Eth, ) -> Self { - let inner = EthPubSubInner { provider, pool, chain_events, network }; - Self { inner: Arc::new(inner), subscription_task_spawner, tx_resp_builder } + let inner = EthPubSubInner { eth_api, chain_events }; + Self { inner: Arc::new(inner), subscription_task_spawner } } } #[async_trait::async_trait] -impl EthPubSubApiServer - for EthPubSub +impl EthPubSubApiServer> for EthPubSub where - Provider: BlockReader + EvmEnvProvider + Clone + 'static, - Pool: TransactionPool + 'static, Events: CanonStateSubscriptions< Primitives: NodePrimitives< - SignedTx: Encodable2718, BlockHeader = reth_primitives::Header, Receipt = reth_primitives::Receipt, >, > + Clone + 'static, - Network: NetworkInfo + Clone + 'static, - Eth: TransactionCompat> + 'static, + Eth: RpcNodeCore + + EthApiTypes>> + + 'static, { /// Handler for `eth_subscribe` async fn subscribe( @@ -106,9 +88,8 @@ where ) -> jsonrpsee::core::SubscriptionResult { let sink = pending.accept().await?; let pubsub = self.inner.clone(); - let resp_builder = self.tx_resp_builder.clone(); self.subscription_task_spawner.spawn(Box::pin(async move { - let _ = handle_accepted(pubsub, sink, kind, params, resp_builder).await; + let _ = handle_accepted(pubsub, sink, kind, params).await; })); Ok(()) @@ -116,16 +97,13 @@ where } /// The actual handler for an accepted [`EthPubSub::subscribe`] call. 
-async fn handle_accepted( - pubsub: Arc>, +async fn handle_accepted( + pubsub: Arc>, accepted_sink: SubscriptionSink, kind: SubscriptionKind, params: Option, - tx_resp_builder: Eth, ) -> Result<(), ErrorObject<'static>> where - Provider: BlockReader + EvmEnvProvider + Clone + 'static, - Pool: TransactionPool + 'static, Events: CanonStateSubscriptions< Primitives: NodePrimitives< SignedTx: Encodable2718, @@ -134,8 +112,8 @@ where >, > + Clone + 'static, - Network: NetworkInfo + Clone + 'static, - Eth: TransactionCompat>, + Eth: RpcNodeCore + + EthApiTypes>>, { match kind { SubscriptionKind::NewHeads => { @@ -166,7 +144,7 @@ where let stream = pubsub.full_pending_transaction_stream().filter_map(|tx| { let tx_value = match from_recovered( tx.transaction.to_consensus(), - &tx_resp_builder, + pubsub.eth_api.tx_resp_builder(), ) { Ok(tx) => { Some(EthSubscriptionResult::FullTransaction(Box::new(tx))) @@ -204,7 +182,7 @@ where let mut canon_state = BroadcastStream::new(pubsub.chain_events.subscribe_to_canonical_state()); // get current sync status - let mut initial_sync_status = pubsub.network.is_syncing(); + let mut initial_sync_status = pubsub.eth_api.network().is_syncing(); let current_sub_res = pubsub.sync_status(initial_sync_status); // send the current status immediately @@ -215,7 +193,7 @@ where } while canon_state.next().await.is_some() { - let current_syncing = pubsub.network.is_syncing(); + let current_syncing = pubsub.eth_api.network().is_syncing(); // Only send a new response if the sync status has changed if current_syncing != initial_sync_status { // Update the sync status on each new block @@ -285,9 +263,7 @@ where } } -impl std::fmt::Debug - for EthPubSub -{ +impl std::fmt::Debug for EthPubSub { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthPubSub").finish_non_exhaustive() } @@ -295,28 +271,28 @@ impl std::fmt::Debug /// Container type `EthPubSub` #[derive(Clone)] -struct EthPubSubInner { - /// The transaction 
pool. - pool: Pool, - /// The provider that can interact with the chain. - provider: Provider, +struct EthPubSubInner { + /// The `eth` API. + eth_api: EthApi, /// A type that allows to create new event subscriptions. chain_events: Events, - /// The network. - network: Network, } // == impl EthPubSubInner === -impl EthPubSubInner +impl EthPubSubInner where - Provider: BlockReader + 'static, + Eth: RpcNodeCore, { /// Returns the current sync status for the `syncing` subscription fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult { if is_syncing { - let current_block = - self.provider.chain_info().map(|info| info.best_number).unwrap_or_default(); + let current_block = self + .eth_api + .provider() + .chain_info() + .map(|info| info.best_number) + .unwrap_or_default(); EthSubscriptionResult::SyncState(PubSubSyncStatus::Detailed(SyncStatusMetadata { syncing: true, starting_block: 0, @@ -329,35 +305,31 @@ where } } -impl EthPubSubInner +impl EthPubSubInner where - Pool: TransactionPool + 'static, + Eth: RpcNodeCore, { /// Returns a stream that yields all transaction hashes emitted by the txpool. fn pending_transaction_hashes_stream(&self) -> impl Stream { - ReceiverStream::new(self.pool.pending_transactions_listener()) + ReceiverStream::new(self.eth_api.pool().pending_transactions_listener()) } /// Returns a stream that yields all transactions emitted by the txpool. 
fn full_pending_transaction_stream( &self, - ) -> impl Stream::Transaction>> { - self.pool.new_pending_pool_transactions_listener() + ) -> impl Stream::Transaction>> { + self.eth_api.pool().new_pending_pool_transactions_listener() } } -impl EthPubSubInner +impl EthPubSubInner where - Provider: BlockReader + EvmEnvProvider + 'static, Events: CanonStateSubscriptions< - Primitives: NodePrimitives< - SignedTx: Encodable2718, - BlockHeader = reth_primitives::Header, - Receipt = reth_primitives::Receipt, - >, - > + 'static, - Network: NetworkInfo + 'static, - Pool: 'static, + Primitives: NodePrimitives< + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + >, { /// Returns a stream that yields all new RPC blocks. fn new_headers_stream(&self) -> impl Stream { diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 5f1bbb7439d..b164e3c19eb 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -20,10 +20,10 @@ use reth_consensus_common::calc::{ }; use reth_evm::ConfigureEvmEnv; use reth_primitives_traits::{BlockBody, BlockHeader}; -use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_provider::{BlockNumReader, BlockReader, ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; -use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError}; +use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError, RpcNodeCore}; use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; @@ -41,21 +41,16 @@ use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `trace` API implementation. /// /// This type provides the functionality for handling `trace` related requests. 
-pub struct TraceApi { - inner: Arc>, +pub struct TraceApi { + inner: Arc>, } // === impl TraceApi === -impl TraceApi { - /// The provider that can interact with the chain. - pub fn provider(&self) -> &Provider { - &self.inner.provider - } - +impl TraceApi { /// Create a new instance of the [`TraceApi`] - pub fn new(provider: Provider, eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { - let inner = Arc::new(TraceApiInner { provider, eth_api, blocking_task_guard }); + pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { + let inner = Arc::new(TraceApiInner { eth_api, blocking_task_guard }); Self { inner } } @@ -72,15 +67,17 @@ impl TraceApi { } } +impl TraceApi { + /// Access the underlying provider. + pub fn provider(&self) -> &Eth::Provider { + self.inner.eth_api.provider() + } +} + // === impl TraceApi === -impl TraceApi +impl TraceApi where - Provider: BlockReader::Block> - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + 'static, Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. @@ -576,13 +573,8 @@ where } #[async_trait] -impl TraceApiServer for TraceApi +impl TraceApiServer for TraceApi where - Provider: BlockReader::Block> - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + 'static, Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. @@ -704,20 +696,18 @@ where } } -impl std::fmt::Debug for TraceApi { +impl std::fmt::Debug for TraceApi { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TraceApi").finish_non_exhaustive() } } -impl Clone for TraceApi { +impl Clone for TraceApi { fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -struct TraceApiInner { - /// The provider that can interact with the chain. 
- provider: Provider, +struct TraceApiInner { /// Access to commonly used code of the `eth` namespace eth_api: Eth, // restrict the number of concurrent calls to `trace_*` diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index b72a5d35769..b13e99eb21c 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -1,4 +1,6 @@ -use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction, TxReceipt}; +use alloy_consensus::{ + BlobTransactionValidationError, BlockHeader, EnvKzgSettings, Transaction, TxReceipt, +}; use alloy_eips::{eip4844::kzg_to_versioned_hash, eip7685::RequestsOrHash}; use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, @@ -16,10 +18,10 @@ use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Block, GotExpected, NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{GotExpected, NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_primitives_traits::{Block as _, BlockBody}; use reth_provider::{ - AccountReader, BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, HeaderProvider, - StateProviderFactory, WithdrawalsProvider, + BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, StateProviderFactory, }; use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; use reth_rpc_api::BlockSubmissionValidationApiServer; @@ -32,7 +34,7 @@ use tokio::sync::{oneshot, RwLock}; /// The type that implements the `validation` rpc namespace trait #[derive(Clone, Debug, derive_more::Deref)] -pub struct ValidationApi { +pub struct ValidationApi { #[deref] inner: Arc>, } @@ -40,11 +42,12 @@ pub struct ValidationApi { impl ValidationApi where Provider: 
ChainSpecProvider, + E: BlockExecutorProvider, { /// Create a new instance of the [`ValidationApi`] pub fn new( provider: Provider, - consensus: Arc, + consensus: Arc>, executor_provider: E, config: ValidationApiConfig, task_spawner: Box, @@ -91,21 +94,18 @@ where Provider: BlockReaderIdExt
+ ChainSpecProvider + StateProviderFactory - + HeaderProvider - + AccountReader - + WithdrawalsProvider + 'static, E: BlockExecutorProvider< Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, + BlockHeader = Provider::Header, + BlockBody = reth_primitives::BlockBody, >, >, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders<::Block>, message: BidTrace, registered_gas_limit: u64, ) -> Result<(), ValidationApiError> { @@ -187,9 +187,9 @@ where let state_root = state_provider.state_root(state_provider.hashed_post_state(&output.state))?; - if state_root != block.state_root { + if state_root != block.header().state_root() { return Err(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.state_root }.into(), + GotExpected { got: state_root, expected: block.header().state_root() }.into(), ) .into()) } @@ -262,7 +262,7 @@ where /// to checking the latest block transaction. 
fn ensure_payment( &self, - block: &Block, + block: &::Block, output: &BlockExecutionOutput<::Receipt>, message: &BidTrace, ) -> Result<(), ValidationApiError> { @@ -279,7 +279,7 @@ where (U256::ZERO, U256::ZERO) }; - if let Some(withdrawals) = &block.body.withdrawals { + if let Some(withdrawals) = block.body().withdrawals() { for withdrawal in withdrawals { if withdrawal.address == message.proposer_fee_recipient { balance_before += withdrawal.amount_wei(); @@ -294,7 +294,7 @@ where let (receipt, tx) = output .receipts .last() - .zip(block.body.transactions.last()) + .zip(block.body().transactions().last()) .ok_or(ValidationApiError::ProposerPayment)?; if !receipt.status() { @@ -313,7 +313,7 @@ where return Err(ValidationApiError::ProposerPayment) } - if let Some(block_base_fee) = block.base_fee_per_gas { + if let Some(block_base_fee) = block.header().base_fee_per_gas() { if tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0 { return Err(ValidationApiError::ProposerPayment) } @@ -412,15 +412,12 @@ where Provider: BlockReaderIdExt
+ ChainSpecProvider + StateProviderFactory - + HeaderProvider - + AccountReader - + WithdrawalsProvider + Clone + 'static, E: BlockExecutorProvider< Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, + BlockHeader = Provider::Header, + BlockBody = reth_primitives::BlockBody, >, >, { @@ -476,11 +473,11 @@ where } #[derive(Debug)] -pub struct ValidationApiInner { +pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, /// Consensus implementation. - consensus: Arc, + consensus: Arc>, /// Execution payload validator. payload_validator: ExecutionPayloadValidator, /// Block executor factory. From 7e9d2c1a34fc85aa4cdaab39015c4f36f8cbcd2f Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 7 Dec 2024 00:00:51 -0500 Subject: [PATCH 928/970] chore: remove unused trie-common alloy-serde dep (#13201) --- crates/trie/common/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 73fce5f8e7b..4f6a927d434 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -57,7 +57,7 @@ serde_with.workspace = true [features] eip1186 = [ - "dep:alloy-rpc-types-eth", + "alloy-rpc-types-eth/serde", "dep:alloy-serde", ] serde = [ @@ -88,7 +88,7 @@ test-utils = [ arbitrary = [ "alloy-trie/arbitrary", "dep:arbitrary", - "alloy-serde/arbitrary", + "alloy-serde?/arbitrary", "reth-primitives-traits/arbitrary", "alloy-consensus/arbitrary", "alloy-primitives/arbitrary", From 52b8ff4b0c4e5bb560376571bc35e0268e9df587 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 7 Dec 2024 00:29:49 -0500 Subject: [PATCH 929/970] chore: fix `cargo check -p reth-stages --tests` (#13200) --- crates/stages/stages/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/stages/stages/Cargo.toml 
b/crates/stages/stages/Cargo.toml index f97214f4643..e7114eeb16a 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -70,6 +70,7 @@ reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true reth-revm.workspace = true reth-static-file.workspace = true +reth-stages-api = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } From 4d2c5767ec44004af023500dafb7ee9a1193cd72 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Sat, 7 Dec 2024 12:30:36 +0700 Subject: [PATCH 930/970] perf(txpool): remove more clones (#13189) --- crates/transaction-pool/src/pool/txpool.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 5820b5f894a..dd6da1d0fef 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -462,10 +462,12 @@ impl TxPool { &mut self, changed_senders: FxHashMap, ) -> UpdateOutcome { - // track changed accounts - self.sender_info.extend(changed_senders.clone()); // Apply the state changes to the total set of transactions which triggers sub-pool updates. - let updates = self.all_transactions.update(changed_senders); + let updates = self.all_transactions.update(&changed_senders); + + // track changed accounts + self.sender_info.extend(changed_senders); + // Process the sub-pool updates let update = self.process_updates(updates); // update the metrics after the update @@ -1183,7 +1185,7 @@ impl AllTransactions { /// that got transaction included in the block. 
pub(crate) fn update( &mut self, - changed_accounts: FxHashMap, + changed_accounts: &FxHashMap, ) -> Vec { // pre-allocate a few updates let mut updates = Vec::with_capacity(64); @@ -1240,7 +1242,7 @@ impl AllTransactions { } } - changed_balance = Some(info.balance); + changed_balance = Some(&info.balance); } // If there's a nonce gap, we can shortcircuit, because there's nothing to update yet. @@ -1291,7 +1293,7 @@ impl AllTransactions { // If the account changed in the block, check the balance. if let Some(changed_balance) = changed_balance { - if cumulative_cost > changed_balance { + if &cumulative_cost > changed_balance { // sender lacks sufficient funds to pay for this transaction tx.state.remove(TxState::ENOUGH_BALANCE); } else { From 6b35b059931bd94cb76f749a60db50ff4c8e1ab8 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 7 Dec 2024 09:30:56 +0400 Subject: [PATCH 931/970] feat: relax bounds for `EthPubSub` (#13203) Co-authored-by: Matthias Seitz --- crates/node/api/src/node.rs | 3 +- crates/node/builder/src/builder/states.rs | 9 +-- crates/node/builder/src/components/mod.rs | 5 +- crates/node/builder/src/rpc.rs | 41 ++++------- crates/optimism/node/src/node.rs | 6 +- crates/rpc/rpc-builder/src/lib.rs | 79 +++++++++------------- crates/rpc/rpc-eth-types/src/logs_utils.rs | 9 +-- crates/rpc/rpc/src/eth/pubsub.rs | 61 ++++------------- 8 files changed, 74 insertions(+), 139 deletions(-) diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index fc6366a2eb5..edb68a6589b 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -61,7 +61,8 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Network: FullNetwork; /// Builds new blocks. - type PayloadBuilder: PayloadBuilder + Clone; + type PayloadBuilder: PayloadBuilder::Engine> + + Clone; /// Returns the transaction pool of the node. 
fn pool(&self) -> &Self::Pool; diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 16b7d668ca3..fa12cc78b61 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -13,9 +13,7 @@ use crate::{ AddOns, FullNode, }; use reth_exex::ExExContext; -use reth_node_api::{ - FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB, PayloadBuilder, -}; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB}; use reth_node_core::node_config::NodeConfig; use reth_tasks::TaskExecutor; use std::{fmt, future::Future}; @@ -88,10 +86,7 @@ impl> FullNodeTypes for NodeAdapter type Provider = T::Provider; } -impl> FullNodeComponents for NodeAdapter -where - C::PayloadBuilder: PayloadBuilder, -{ +impl> FullNodeComponents for NodeAdapter { type Pool = C::Pool; type Evm = C::Evm; type Executor = C::Executor; diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index b643e2aa2a6..d62e74bda29 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -26,7 +26,7 @@ use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; -use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; +use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, PayloadBuilder, TxTy}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::{PoolTransaction, TransactionPool}; @@ -52,7 +52,8 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati type Network: FullNetwork; /// Builds new blocks. - type PayloadBuilder: Clone; + type PayloadBuilder: PayloadBuilder::Engine> + + Clone; /// Returns the transaction pool of the node. 
fn pool(&self) -> &Self::Pool; diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index a4010e52db3..e6c9ad23356 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -10,8 +10,8 @@ use std::{ use alloy_rpc_types::engine::ClientVersionV1; use futures::TryFutureExt; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, - PayloadBuilder, + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, NodeTypes, + NodeTypesWithEngine, }; use reth_node_core::{ node_config::NodeConfig, @@ -19,7 +19,7 @@ use reth_node_core::{ }; use reth_payload_builder::PayloadStore; use reth_primitives::EthPrimitives; -use reth_provider::{providers::ProviderNodeTypes, BlockReader}; +use reth_provider::providers::ProviderNodeTypes; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, EthApi, @@ -33,7 +33,6 @@ use reth_rpc_builder::{ use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; -use reth_transaction_pool::TransactionPool; use crate::EthApiBuilderCtx; @@ -404,18 +403,17 @@ where impl RpcAddOns where N: FullNodeComponents< - Types: ProviderNodeTypes, - PayloadBuilder: PayloadBuilder::Engine>, - Pool: TransactionPool::Transaction>, - >, - EthApi: EthApiTypes - + FullEthApiServer< - Provider: BlockReader< + Types: ProviderNodeTypes< + Primitives: NodePrimitives< Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + BlockHeader = reth_primitives::Header, + BlockBody = reth_primitives::BlockBody, >, - > + AddDevSigners + >, + >, + EthApi: EthApiTypes + + FullEthApiServer + + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, @@ -535,19 +533,10 @@ where impl NodeAddOns for RpcAddOns where - N: FullNodeComponents< - Types: ProviderNodeTypes, - PayloadBuilder: PayloadBuilder::Engine>, - Pool: 
TransactionPool::Transaction>, - >, + N: FullNodeComponents>, EthApi: EthApiTypes - + FullEthApiServer< - Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, - >, - > + AddDevSigners + + FullEthApiServer + + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index b7dcf2741c6..35e33ccd75a 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -12,9 +12,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_db::transaction::{DbTx, DbTxMut}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; -use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, PayloadBuilder, TxTy, -}; +use reth_node_api::{AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, TxTy}; use reth_node_builder::{ components::{ ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, @@ -241,7 +239,6 @@ impl NodeAddOns for OpAddOns where N: FullNodeComponents< Types: NodeTypes, - PayloadBuilder: PayloadBuilder::Engine>, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -287,7 +284,6 @@ impl RethRpcAddOns for OpAddOns where N: FullNodeComponents< Types: NodeTypes, - PayloadBuilder: PayloadBuilder::Engine>, >, OpEngineValidator: EngineValidator<::Engine>, { diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 949e377afb1..1220020504b 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -208,7 +208,7 @@ use reth_primitives::NodePrimitives; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullRpcProvider, ProviderBlock, ProviderHeader, ProviderReceipt, - ReceiptProvider, 
StateProviderFactory, + StateProviderFactory, }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, @@ -286,18 +286,19 @@ where Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm< + Header = ::BlockHeader, + Transaction = ::SignedTx, + >, EthApi: FullEthApiServer< Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ::Block, + Receipt = ::Receipt, + Header = ::BlockHeader, >, >, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, BlockHeader = reth_primitives::Header, BlockBody = reth_primitives::BlockBody, >, @@ -647,16 +648,21 @@ impl RpcModuleBuilder where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider< + Block = ::Block, + Receipt = ::Receipt, + > + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm< + Header = ::BlockHeader, + Transaction = ::SignedTx, + >, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, BlockHeader = reth_primitives::Header, BlockBody = reth_primitives::BlockBody, >, @@ -685,15 +691,11 @@ where EngineApi: EngineApiServer, EthApi: FullEthApiServer< Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ::Block, + Receipt = ::Receipt, + Header = ::BlockHeader, >, >, - Provider: BlockReader< - Block = ::Block, - Receipt = ::Receipt, - >, { let Self { provider, @@ -741,13 +743,16 @@ where /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; + /// use reth_primitives::TransactionSigned; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; /// use reth_rpc::EthApi; /// use reth_rpc_builder::RpcModuleBuilder; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::noop::NoopTransactionPool; /// - /// fn init + 'static>(evm: Evm) { + /// fn init + 'static>( + /// evm: Evm, + /// ) { /// let mut registry = RpcModuleBuilder::default() /// .with_provider(NoopProvider::default()) /// .with_pool(NoopTransactionPool::default()) @@ -769,11 +774,6 @@ where ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, - Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, - >, { let Self { provider, @@ -809,15 +809,11 @@ where where EthApi: FullEthApiServer< Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Receipt = ::Receipt, + Block = ::Block, + Header = ::BlockHeader, >, >, - Provider: BlockReader< - Block = ::Block, - Receipt = ::Receipt, - >, Pool: 
TransactionPool::Transaction>, { let mut modules = TransportRpcModules::default(); @@ -1155,8 +1151,7 @@ where RpcReceipt, RpcHeader, > + EthApiTypes, - BlockExecutor: - BlockExecutorProvider>, + BlockExecutor: BlockExecutorProvider, { /// Register Eth Namespace /// @@ -1190,17 +1185,8 @@ where /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn register_debug(&mut self) -> &mut Self where - EthApi: EthApiSpec - + EthTransactions< - Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - >, - > + TraceExt, - Provider: BlockReader< - Block = ::Block, - Receipt = ::Receipt, - >, + EthApi: EthApiSpec + EthTransactions + TraceExt, + BlockExecutor::Primitives: NodePrimitives>, { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); @@ -1303,8 +1289,7 @@ where pub fn debug_api(&self) -> DebugApi where EthApi: EthApiSpec + EthTransactions + TraceExt, - BlockExecutor: - BlockExecutorProvider>, + BlockExecutor::Primitives: NodePrimitives>, { DebugApi::new( self.eth_api().clone(), @@ -1363,10 +1348,8 @@ where >, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< - Block = reth_primitives::Block, BlockHeader = reth_primitives::Header, BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, >, >, Consensus: reth_consensus::FullConsensus + Clone + 'static, diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 6078d32e894..8b2dbaa5441 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -8,27 +8,28 @@ use alloy_primitives::TxHash; use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; -use reth_primitives::{Receipt, SealedBlockWithSenders}; +use reth_primitives::SealedBlockWithSenders; use reth_primitives_traits::{BlockBody, SignedTransaction}; use 
reth_storage_api::{BlockReader, ProviderBlock}; use std::sync::Arc; /// Returns all matching of a block's receipts when the transaction hashes are known. -pub fn matching_block_logs_with_tx_hashes<'a, I>( +pub fn matching_block_logs_with_tx_hashes<'a, I, R>( filter: &FilteredParams, block_num_hash: BlockNumHash, tx_hashes_and_receipts: I, removed: bool, ) -> Vec where - I: IntoIterator, + I: IntoIterator, + R: TxReceipt + 'a, { let mut all_logs = Vec::new(); // Tracks the index of a log in the entire block. let mut log_index: u64 = 0; // Iterate over transaction hashes and receipts and append matching logs. for (receipt_idx, (tx_hash, receipt)) in tx_hashes_and_receipts.into_iter().enumerate() { - for log in &receipt.logs { + for log in receipt.logs() { if log_matches_filter(block_num_hash, log, filter) { let log = Log { inner: log.clone(), diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 596af187635..fc02b0da067 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -2,13 +2,9 @@ use std::sync::Arc; -use alloy_eips::eip2718::Encodable2718; use alloy_primitives::TxHash; use alloy_rpc_types_eth::{ - pubsub::{ - Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult, - SyncStatusMetadata, - }, + pubsub::{Params, PubSubSyncStatus, SubscriptionKind, SyncStatusMetadata}, FilteredParams, Header, Log, }; use futures::StreamExt; @@ -68,13 +64,7 @@ impl EthPubSub { #[async_trait::async_trait] impl EthPubSubApiServer> for EthPubSub where - Events: CanonStateSubscriptions< - Primitives: NodePrimitives< - BlockHeader = reth_primitives::Header, - Receipt = reth_primitives::Receipt, - >, - > + Clone - + 'static, + Events: CanonStateSubscriptions + 'static, Eth: RpcNodeCore + EthApiTypes>> + 'static, @@ -104,23 +94,13 @@ async fn handle_accepted( params: Option, ) -> Result<(), ErrorObject<'static>> where - Events: CanonStateSubscriptions< - Primitives: NodePrimitives< - 
SignedTx: Encodable2718, - BlockHeader = reth_primitives::Header, - Receipt = reth_primitives::Receipt, - >, - > + Clone - + 'static, + Events: CanonStateSubscriptions + 'static, Eth: RpcNodeCore + EthApiTypes>>, { match kind { SubscriptionKind::NewHeads => { - let stream = pubsub - .new_headers_stream() - .map(|header| EthSubscriptionResult::<()>::Header(Box::new(header.into()))); - pipe_from_stream(accepted_sink, stream).await + pipe_from_stream(accepted_sink, pubsub.new_headers_stream()).await } SubscriptionKind::Logs => { // if no params are provided, used default filter params @@ -131,10 +111,7 @@ where } _ => FilteredParams::default(), }; - let stream = pubsub - .log_stream(filter) - .map(|log| EthSubscriptionResult::<()>::Log(Box::new(log))); - pipe_from_stream(accepted_sink, stream).await + pipe_from_stream(accepted_sink, pubsub.log_stream(filter)).await } SubscriptionKind::NewPendingTransactions => { if let Some(params) = params { @@ -146,9 +123,7 @@ where tx.transaction.to_consensus(), pubsub.eth_api.tx_resp_builder(), ) { - Ok(tx) => { - Some(EthSubscriptionResult::FullTransaction(Box::new(tx))) - } + Ok(tx) => Some(tx), Err(err) => { error!(target = "rpc", %err, @@ -172,10 +147,7 @@ where } } - let stream = pubsub - .pending_transaction_hashes_stream() - .map(EthSubscriptionResult::<()>::TransactionHash); - pipe_from_stream(accepted_sink, stream).await + pipe_from_stream(accepted_sink, pubsub.pending_transaction_hashes_stream()).await } SubscriptionKind::Syncing => { // get new block subscription @@ -285,7 +257,7 @@ where Eth: RpcNodeCore, { /// Returns the current sync status for the `syncing` subscription - fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult { + fn sync_status(&self, is_syncing: bool) -> PubSubSyncStatus { if is_syncing { let current_block = self .eth_api @@ -293,14 +265,14 @@ where .chain_info() .map(|info| info.best_number) .unwrap_or_default(); - 
EthSubscriptionResult::SyncState(PubSubSyncStatus::Detailed(SyncStatusMetadata { + PubSubSyncStatus::Detailed(SyncStatusMetadata { syncing: true, starting_block: 0, current_block, highest_block: Some(current_block), - })) + }) } else { - EthSubscriptionResult::SyncState(PubSubSyncStatus::Simple(false)) + PubSubSyncStatus::Simple(false) } } } @@ -324,15 +296,12 @@ where impl EthPubSubInner where - Events: CanonStateSubscriptions< - Primitives: NodePrimitives< - BlockHeader = reth_primitives::Header, - Receipt = reth_primitives::Receipt, - >, - >, + Events: CanonStateSubscriptions, { /// Returns a stream that yields all new RPC blocks. - fn new_headers_stream(&self) -> impl Stream { + fn new_headers_stream( + &self, + ) -> impl Stream::BlockHeader>> { self.chain_events.canonical_state_stream().flat_map(|new_chain| { let headers = new_chain.committed().headers().collect::>(); futures::stream::iter( From 4fa86c54840919ab8b9c4361169394eaff2759c3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 7 Dec 2024 00:22:19 -0600 Subject: [PATCH 932/970] Add placeholder `OpHardfork::Isthmus` (#13112) Co-authored-by: Matthias Seitz --- crates/optimism/chainspec/src/lib.rs | 11 ++++++++++- crates/optimism/evm/src/config.rs | 8 ++++++-- crates/optimism/hardforks/src/hardfork.rs | 7 ++++++- crates/optimism/hardforks/src/lib.rs | 12 +++++++++--- 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 0ee86bc7d24..907599fe2a2 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -36,7 +36,7 @@ use reth_chainspec::{ }; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; use reth_network_peers::NodeRecord; -use reth_optimism_forks::OpHardforks; +use reth_optimism_forks::{OpHardfork, OpHardforks}; #[cfg(feature = "std")] pub(crate) use std::sync::LazyLock; @@ -166,6 +166,13 @@ impl OpChainSpecBuilder { self } + /// Enable 
Isthmus at genesis + pub fn isthmus_activated(mut self) -> Self { + self = self.holocene_activated(); + self.inner = self.inner.with_fork(OpHardfork::Isthmus, ForkCondition::Timestamp(0)); + self + } + /// Build the resulting [`OpChainSpec`]. /// /// # Panics @@ -414,6 +421,7 @@ impl From for OpChainSpec { (OpHardfork::Fjord.boxed(), genesis_info.fjord_time), (OpHardfork::Granite.boxed(), genesis_info.granite_time), (OpHardfork::Holocene.boxed(), genesis_info.holocene_time), + (OpHardfork::Isthmus.boxed(), genesis_info.isthmus_time), ]; let mut time_hardforks = time_hardfork_opts @@ -1030,6 +1038,7 @@ mod tests { OpHardfork::Fjord.boxed(), OpHardfork::Granite.boxed(), OpHardfork::Holocene.boxed(), + // OpHardfork::Isthmus.boxed(), ]; assert!(expected_hardforks diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index 4a37860efc6..b32b0929424 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -12,7 +12,9 @@ pub fn revm_spec_by_timestamp_after_bedrock( chain_spec: &OpChainSpec, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) { + if chain_spec.fork(OpHardfork::Isthmus).active_at_timestamp(timestamp) { + todo!() + } else if chain_spec.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) { revm_primitives::HOLOCENE } else if chain_spec.fork(OpHardfork::Granite).active_at_timestamp(timestamp) { revm_primitives::GRANITE @@ -31,7 +33,9 @@ pub fn revm_spec_by_timestamp_after_bedrock( /// Map the latest active hardfork at the given block to a revm [`SpecId`](revm_primitives::SpecId). 
pub fn revm_spec(chain_spec: &OpChainSpec, block: &Head) -> revm_primitives::SpecId { - if chain_spec.fork(OpHardfork::Holocene).active_at_head(block) { + if chain_spec.fork(OpHardfork::Isthmus).active_at_head(block) { + todo!() + } else if chain_spec.fork(OpHardfork::Holocene).active_at_head(block) { revm_primitives::HOLOCENE } else if chain_spec.fork(OpHardfork::Granite).active_at_head(block) { revm_primitives::GRANITE diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 661816ae5fe..962d7bca4bc 100644 --- a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -33,6 +33,8 @@ hardfork!( Granite, /// Holocene: Holocene, + /// Isthmus: + Isthmus, } ); @@ -159,6 +161,7 @@ impl OpHardfork { Self::Fjord => Some(1716998400), Self::Granite => Some(1723478400), Self::Holocene => Some(1732633200), + Self::Isthmus => todo!(), }, ) } @@ -194,6 +197,7 @@ impl OpHardfork { Self::Fjord => Some(1720627201), Self::Granite => Some(1726070401), Self::Holocene => None, + Self::Isthmus => todo!(), }, ) } @@ -357,7 +361,7 @@ mod tests { #[test] fn check_op_hardfork_from_str() { let hardfork_str = - ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe", "hOlOcEnE"]; + ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe", "hOlOcEnE", "isthMUS"]; let expected_hardforks = [ OpHardfork::Bedrock, OpHardfork::Regolith, @@ -366,6 +370,7 @@ mod tests { OpHardfork::Fjord, OpHardfork::Granite, OpHardfork::Holocene, + OpHardfork::Isthmus, ]; let hardforks: Vec = diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index bf6ca98ce4e..36f42155e94 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -27,6 +27,12 @@ pub trait OpHardforks: EthereumHardforks { self.fork(OpHardfork::Bedrock).active_at_block(block_number) } + /// Returns `true` if [`Regolith`](OpHardfork::Regolith) is active at given block + /// 
timestamp. + fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) + } + /// Returns `true` if [`Canyon`](OpHardfork::Canyon) is active at given block timestamp. fn is_canyon_active_at_timestamp(&self, timestamp: u64) -> bool { self.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) @@ -53,9 +59,9 @@ pub trait OpHardforks: EthereumHardforks { self.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) } - /// Returns `true` if [`Regolith`](OpHardfork::Regolith) is active at given block + /// Returns `true` if [`Isthmus`](OpHardfork::Isthmus) is active at given block /// timestamp. - fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) + fn is_isthmus_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Isthmus).active_at_timestamp(timestamp) } } From 42a1ba3a824f19c4df7c7a25f7af4372b5862152 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 7 Dec 2024 06:26:01 -0500 Subject: [PATCH 933/970] chore: make zip_blocks generic over header (#13199) --- crates/net/downloaders/src/bodies/test_utils.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs index 781d1d93ba5..ca35c7449a0 100644 --- a/crates/net/downloaders/src/bodies/test_utils.rs +++ b/crates/net/downloaders/src/bodies/test_utils.rs @@ -10,10 +10,10 @@ use reth_network_p2p::bodies::response::BlockResponse; use reth_primitives::{Block, BlockBody, SealedBlock, SealedHeader}; use std::collections::HashMap; -pub(crate) fn zip_blocks<'a>( - headers: impl Iterator, - bodies: &mut HashMap, -) -> Vec { +pub(crate) fn zip_blocks<'a, H: Clone + BlockHeader + 'a, B>( + headers: impl Iterator>, + bodies: &mut HashMap, +) -> Vec> { headers .into_iter() .map(|header| { From 
410d361638170141c11fd91b6cf9ff399a54c48d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 7 Dec 2024 13:17:11 +0100 Subject: [PATCH 934/970] chore: move calculate tx root to blockbody trait (#13209) --- crates/consensus/common/src/validation.rs | 5 ++--- crates/primitives-traits/src/block/body.rs | 7 ++++++- crates/primitives/src/traits.rs | 9 --------- 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 2d681be449a..2c38fa2d6cd 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -7,9 +7,8 @@ use alloy_eips::{ }; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{ - BlockBody, BlockBodyTxExt, EthereumHardfork, GotExpected, SealedBlock, SealedHeader, -}; +use reth_primitives::{BlockBody, EthereumHardfork, GotExpected, SealedBlock, SealedHeader}; +use reth_primitives_traits::BlockBody as _; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index b0fe1e3d082..cec32999070 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -7,7 +7,7 @@ use crate::{ use alloc::{fmt, vec::Vec}; use alloy_consensus::Transaction; use alloy_eips::{eip2718::Encodable2718, eip4844::DATA_GAS_PER_BLOB, eip4895::Withdrawals}; -use alloy_primitives::Bytes; +use alloy_primitives::{Bytes, B256}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullBlockBody: BlockBody {} @@ -44,6 +44,11 @@ pub trait BlockBody: /// Consume the block body and return a [`Vec`] of transactions. fn into_transactions(self) -> Vec; + /// Calculate the transaction root for the block body. 
+ fn calculate_tx_root(&self) -> B256 { + alloy_consensus::proofs::calculate_transaction_root(self.transactions()) + } + /// Returns block withdrawals if any. fn withdrawals(&self) -> Option<&Withdrawals>; diff --git a/crates/primitives/src/traits.rs b/crates/primitives/src/traits.rs index 73eabd8ec98..3f009bba84b 100644 --- a/crates/primitives/src/traits.rs +++ b/crates/primitives/src/traits.rs @@ -3,7 +3,6 @@ use crate::{ BlockWithSenders, SealedBlock, }; use alloc::vec::Vec; -use alloy_eips::eip2718::Encodable2718; use reth_primitives_traits::{Block, BlockBody, SealedHeader, SignedTransaction}; use revm_primitives::{Address, B256}; @@ -91,14 +90,6 @@ impl BlockExt for T {} /// Extension trait for [`BlockBody`] adding helper methods operating with transactions. pub trait BlockBodyTxExt: BlockBody { - /// Calculate the transaction root for the block body. - fn calculate_tx_root(&self) -> B256 - where - Self::Transaction: Encodable2718, - { - crate::proofs::calculate_transaction_root(self.transactions()) - } - /// Recover signer addresses for all transactions in the block body. 
fn recover_signers(&self) -> Option> where From 828ddbaca43406c33fda762b9d310e6897cfd703 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sat, 7 Dec 2024 14:13:21 +0100 Subject: [PATCH 935/970] chore(engine): refactor code to transform EvmState into HashedPostState (#13207) --- crates/engine/tree/src/tree/root.rs | 89 ++++++++++++----------------- 1 file changed, 35 insertions(+), 54 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 2d00feba50d..dc0563ade50 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -181,6 +181,38 @@ impl Drop for StateHookSender { } } +fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostState { + let mut hashed_state = HashedPostState::default(); + + for (address, account) in update { + if account.is_touched() { + let hashed_address = keccak256(address); + trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); + + let destroyed = account.is_selfdestructed(); + let info = if destroyed { None } else { Some(account.info.into()) }; + hashed_state.accounts.insert(hashed_address, info); + + let mut changed_storage_iter = account + .storage + .into_iter() + .filter_map(|(slot, value)| { + value.is_changed().then(|| (keccak256(B256::from(slot)), value.present_value)) + }) + .peekable(); + + if destroyed || changed_storage_iter.peek().is_some() { + hashed_state.storages.insert( + hashed_address, + HashedStorage::from_iter(destroyed, changed_storage_iter), + ); + } + } + } + + hashed_state +} + /// Standalone task that receives a transaction state stream and updates relevant /// data structures to calculate state root. 
/// @@ -258,33 +290,7 @@ where proof_sequence_number: u64, state_root_message_sender: Sender, ) { - let mut hashed_state_update = HashedPostState::default(); - for (address, account) in update { - if account.is_touched() { - let hashed_address = keccak256(address); - trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); - - let destroyed = account.is_selfdestructed(); - let info = if destroyed { None } else { Some(account.info.into()) }; - hashed_state_update.accounts.insert(hashed_address, info); - - let mut changed_storage_iter = account - .storage - .into_iter() - .filter_map(|(slot, value)| { - value - .is_changed() - .then(|| (keccak256(B256::from(slot)), value.present_value)) - }) - .peekable(); - if destroyed || changed_storage_iter.peek().is_some() { - hashed_state_update.storages.insert( - hashed_address, - HashedStorage::from_iter(destroyed, changed_storage_iter), - ); - } - } - } + let hashed_state_update = evm_state_to_hashed_post_state(update); let proof_targets = get_proof_targets(&hashed_state_update, fetched_proof_targets); for (address, slots) in &proof_targets { @@ -696,34 +702,9 @@ mod tests { } for update in &state_updates { - for (address, account) in update { - let hashed_address = keccak256(*address); - - if account.is_touched() { - let destroyed = account.is_selfdestructed(); - hashed_state.accounts.insert( - hashed_address, - if destroyed || account.is_empty() { - None - } else { - Some(account.info.clone().into()) - }, - ); - - if destroyed || !account.storage.is_empty() { - let storage = account - .storage - .iter() - .filter(|&(_slot, value)| (!destroyed && value.is_changed())) - .map(|(slot, value)| { - (keccak256(B256::from(*slot)), value.present_value) - }); - hashed_state - .storages - .insert(hashed_address, HashedStorage::from_iter(destroyed, storage)); - } - } + hashed_state.extend(evm_state_to_hashed_post_state(update.clone())); + for (address, account) in update { let storage: HashMap = 
account .storage .iter() From abaeb35fd17acd9705fc8b23ed52c8f4f40368f4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 7 Dec 2024 14:29:42 +0100 Subject: [PATCH 936/970] chore: make reth-network-peers risc compatible (#13210) --- .github/assets/check_rv32imac.sh | 1 + Cargo.lock | 8 ++++---- Cargo.toml | 2 +- crates/net/peers/Cargo.toml | 9 +++++---- crates/net/peers/src/lib.rs | 3 ++- 5 files changed, 13 insertions(+), 10 deletions(-) diff --git a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh index ab1151bfb0c..075ffb6dc40 100755 --- a/.github/assets/check_rv32imac.sh +++ b/.github/assets/check_rv32imac.sh @@ -8,6 +8,7 @@ crates_to_check=( reth-ethereum-primitives reth-primitives-traits reth-optimism-forks + reth-network-peers # reth-evm # reth-primitives # reth-optimism-chainspec diff --git a/Cargo.lock b/Cargo.lock index fb3910d40e5..7ee2e9b6d47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -428,9 +428,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f" +checksum = "f542548a609dca89fcd72b3b9f355928cf844d4363c5eed9c5273a3dd225e097" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -439,9 +439,9 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" +checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 650be8337b5..142b00290b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -432,7 +432,7 @@ revm-primitives = { version = "14.0.0", default-features = false } alloy-chains = { version = "0.1.32", default-features = false } alloy-dyn-abi = "0.8.11" alloy-primitives = { 
version = "0.8.11", default-features = false } -alloy-rlp = { version = "0.3.4", default-features = false } +alloy-rlp = { version = "0.3.10", default-features = false } alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } diff --git a/crates/net/peers/Cargo.toml b/crates/net/peers/Cargo.toml index 4cfc0aee3d6..9e7ccc3084d 100644 --- a/crates/net/peers/Cargo.toml +++ b/crates/net/peers/Cargo.toml @@ -15,8 +15,8 @@ workspace = true # eth alloy-primitives = { workspace = true, features = ["rlp"] } -alloy-rlp = { workspace = true, features = ["derive"] } -enr.workspace = true +alloy-rlp = { workspace = true, features = ["derive", "core-net", "core-error"] } +enr = { workspace = true, optional = true } # crypto @@ -32,6 +32,7 @@ alloy-primitives = { workspace = true, features = ["rand"] } rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } serde_json.workspace = true +enr.workspace = true tokio = { workspace = true, features = ["net", "macros", "rt"] } [features] @@ -42,7 +43,7 @@ std = [ "secp256k1?/std", "serde_with/std", "thiserror/std", - "url/std" + "url/std", ] secp256k1 = ["dep:secp256k1", "enr/secp256k1"] -net = ["dep:tokio", "tokio?/net"] +net = ["std", "dep:tokio", "tokio?/net"] diff --git a/crates/net/peers/src/lib.rs b/crates/net/peers/src/lib.rs index 3e2777c2df8..a8bf51da2ee 100644 --- a/crates/net/peers/src/lib.rs +++ b/crates/net/peers/src/lib.rs @@ -64,6 +64,7 @@ use alloy_primitives::B512; use core::str::FromStr; // Re-export PeerId for ease of use. 
+#[cfg(feature = "secp256k1")] pub use enr::Enr; /// Alias for a peer identifier @@ -115,8 +116,8 @@ pub fn id2pk(id: PeerId) -> Result { pub enum AnyNode { /// An "enode:" peer with full ip NodeRecord(NodeRecord), - #[cfg(feature = "secp256k1")] /// An "enr:" peer + #[cfg(feature = "secp256k1")] Enr(Enr), /// An incomplete "enode" with only a peer id PeerId(PeerId), From 2846dd242e413f7102c96c39334d8a99f5b393a0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 7 Dec 2024 20:19:43 +0100 Subject: [PATCH 937/970] chore: flip tx conversion impl (#13208) --- crates/primitives/src/transaction/mod.rs | 28 ++++++++++++++++++ crates/primitives/src/transaction/pooled.rs | 32 ++------------------- crates/transaction-pool/src/traits.rs | 3 +- 3 files changed, 32 insertions(+), 31 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f7211489e93..670ee7f352e 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -846,6 +846,34 @@ impl TransactionSigned { &self.transaction } + /// Tries to convert a [`TransactionSigned`] into a [`PooledTransactionsElement`]. + /// + /// This function used as a helper to convert from a decoded p2p broadcast message to + /// [`PooledTransactionsElement`]. Since [`BlobTransaction`] is disallowed to be broadcasted on + /// p2p, return an err if `tx` is [`Transaction::Eip4844`]. + pub fn try_into_pooled(self) -> Result { + let hash = self.hash(); + match self { + Self { transaction: Transaction::Legacy(tx), signature, .. } => { + Ok(PooledTransactionsElement::Legacy(Signed::new_unchecked(tx, signature, hash))) + } + Self { transaction: Transaction::Eip2930(tx), signature, .. } => { + Ok(PooledTransactionsElement::Eip2930(Signed::new_unchecked(tx, signature, hash))) + } + Self { transaction: Transaction::Eip1559(tx), signature, .. 
} => { + Ok(PooledTransactionsElement::Eip1559(Signed::new_unchecked(tx, signature, hash))) + } + Self { transaction: Transaction::Eip7702(tx), signature, .. } => { + Ok(PooledTransactionsElement::Eip7702(Signed::new_unchecked(tx, signature, hash))) + } + // Not supported because missing blob sidecar + tx @ Self { transaction: Transaction::Eip4844(_), .. } => Err(tx), + #[cfg(feature = "optimism")] + // Not supported because deposit transactions are never pooled + tx @ Self { transaction: Transaction::Deposit(_), .. } => Err(tx), + } + } + /// Transaction hash. Used to identify transaction. pub fn hash(&self) -> TxHash { *self.tx_hash() diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 93a3c182322..eea10d44c9f 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -46,34 +46,6 @@ pub enum PooledTransactionsElement { } impl PooledTransactionsElement { - /// Tries to convert a [`TransactionSigned`] into a [`PooledTransactionsElement`]. - /// - /// This function used as a helper to convert from a decoded p2p broadcast message to - /// [`PooledTransactionsElement`]. Since [`BlobTransaction`] is disallowed to be broadcasted on - /// p2p, return an err if `tx` is [`Transaction::Eip4844`]. - pub fn try_from_broadcast(tx: TransactionSigned) -> Result { - let hash = tx.hash(); - match tx { - TransactionSigned { transaction: Transaction::Legacy(tx), signature, .. } => { - Ok(Self::Legacy(Signed::new_unchecked(tx, signature, hash))) - } - TransactionSigned { transaction: Transaction::Eip2930(tx), signature, .. } => { - Ok(Self::Eip2930(Signed::new_unchecked(tx, signature, hash))) - } - TransactionSigned { transaction: Transaction::Eip1559(tx), signature, .. } => { - Ok(Self::Eip1559(Signed::new_unchecked(tx, signature, hash))) - } - TransactionSigned { transaction: Transaction::Eip7702(tx), signature, .. 
} => { - Ok(Self::Eip7702(Signed::new_unchecked(tx, signature, hash))) - } - // Not supported because missing blob sidecar - tx @ TransactionSigned { transaction: Transaction::Eip4844(_), .. } => Err(tx), - #[cfg(feature = "optimism")] - // Not supported because deposit transactions are never pooled - tx @ TransactionSigned { transaction: Transaction::Deposit(_), .. } => Err(tx), - } - } - /// Converts from an EIP-4844 [`RecoveredTx`] to a /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. /// @@ -650,7 +622,7 @@ impl TryFrom for PooledTransactionsElement { type Error = TransactionConversionError; fn try_from(tx: TransactionSigned) -> Result { - Self::try_from_broadcast(tx).map_err(|_| TransactionConversionError::UnsupportedForP2P) + tx.try_into_pooled().map_err(|_| TransactionConversionError::UnsupportedForP2P) } } @@ -679,7 +651,7 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { // Attempt to create a `TransactionSigned` with arbitrary data. let tx_signed = TransactionSigned::arbitrary(u)?; // Attempt to create a `PooledTransactionsElement` with arbitrary data, handling the Result. 
- match Self::try_from_broadcast(tx_signed) { + match tx_signed.try_into_pooled() { Ok(tx) => Ok(tx), Err(tx) => { let (tx, sig, hash) = tx.into_parts(); diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 2859d71b9d1..a0d4d40983e 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1267,7 +1267,8 @@ impl PoolTransaction for EthPooledTransaction { tx: RecoveredTx, ) -> Result, Self::TryFromConsensusError> { let (tx, signer) = tx.to_components(); - let pooled = PooledTransactionsElement::try_from_broadcast(tx) + let pooled = tx + .try_into_pooled() .map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing)?; Ok(RecoveredTx::from_signed_transaction(pooled, signer)) } From 08b875f4f5bc5930557b188ef1f23b82020b17c5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 7 Dec 2024 22:15:32 +0100 Subject: [PATCH 938/970] chore: feature gate reth-codecs in trie-common (#13215) --- crates/stages/types/Cargo.toml | 1 + crates/trie/common/Cargo.toml | 16 ++++++++++++---- crates/trie/common/src/hash_builder/state.rs | 8 +++++--- crates/trie/common/src/nibbles.rs | 12 +++++++----- crates/trie/common/src/storage.rs | 4 ++-- crates/trie/common/src/subnode.rs | 8 +++++--- 6 files changed, 32 insertions(+), 17 deletions(-) diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index d8ab6355257..0243415942b 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -38,6 +38,7 @@ reth-codec = [ "dep:reth-codecs", "dep:bytes", "dep:modular-bitfield", + "reth-trie-common/reth-codec" ] test-utils = [ "dep:arbitrary", diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 4f6a927d434..eadbb3176b5 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -18,14 +18,14 @@ alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-trie.workspace = true alloy-consensus.workspace = true 
reth-primitives-traits.workspace = true -reth-codecs.workspace = true +reth-codecs = { workspace = true, optional = true } revm-primitives.workspace = true alloy-genesis.workspace = true alloy-rpc-types-eth = { workspace = true, optional = true } alloy-serde = { workspace = true, optional = true } -bytes.workspace = true +bytes = { workspace = true, optional = true } derive_more.workspace = true itertools.workspace = true nybbles = { workspace = true, features = ["rlp"] } @@ -42,8 +42,11 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-codecs.workspace = true + alloy-primitives = { workspace = true, features = ["getrandom"] } alloy-trie = { workspace = true, features = ["arbitrary", "serde"] } +bytes.workspace = true hash-db = "=0.15.2" plain_hasher = "0.2" arbitrary = { workspace = true, features = ["derive"] } @@ -62,7 +65,7 @@ eip1186 = [ ] serde = [ "dep:serde", - "bytes/serde", + "bytes?/serde", "nybbles/serde", "alloy-primitives/serde", "alloy-consensus/serde", @@ -70,7 +73,11 @@ serde = [ "alloy-rpc-types-eth?/serde", "revm-primitives/serde", "reth-primitives-traits/serde", - "reth-codecs/serde" + "reth-codecs?/serde" +] +reth-codec = [ + "dep:reth-codecs", + "dep:bytes", ] serde-bincode-compat = [ "serde", @@ -86,6 +93,7 @@ test-utils = [ "reth-codecs/test-utils", ] arbitrary = [ + "dep:reth-codecs", "alloy-trie/arbitrary", "dep:arbitrary", "alloy-serde?/arbitrary", diff --git a/crates/trie/common/src/hash_builder/state.rs b/crates/trie/common/src/hash_builder/state.rs index ec6b102d44e..4bf3bade398 100644 --- a/crates/trie/common/src/hash_builder/state.rs +++ b/crates/trie/common/src/hash_builder/state.rs @@ -1,8 +1,6 @@ use crate::TrieMask; use alloy_trie::{hash_builder::HashBuilderValue, nodes::RlpNode, HashBuilder}; -use bytes::Buf; use nybbles::Nibbles; -use reth_codecs::Compact; /// The hash builder state for storing in the database. 
/// Check the `reth-trie` crate for more info on hash builder. @@ -63,7 +61,8 @@ impl From for HashBuilderState { } } -impl Compact for HashBuilderState { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for HashBuilderState { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, @@ -106,6 +105,8 @@ impl Compact for HashBuilderState { } fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; + let (key, mut buf) = Vec::from_compact(buf, 0); let stack_len = buf.get_u16() as usize; @@ -150,6 +151,7 @@ impl Compact for HashBuilderState { #[cfg(test)] mod tests { use super::*; + use reth_codecs::Compact; #[test] fn hash_builder_state_regression() { diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index 2d4e34b3e3b..b1cc2f10c56 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -1,7 +1,4 @@ -use bytes::Buf; use derive_more::Deref; -use reth_codecs::Compact; - pub use nybbles::Nibbles; /// The representation of nibbles of the merkle trie stored in the database. 
@@ -45,7 +42,8 @@ impl core::borrow::Borrow<[u8]> for StoredNibbles { } } -impl Compact for StoredNibbles { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StoredNibbles { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, @@ -55,6 +53,8 @@ impl Compact for StoredNibbles { } fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) { + use bytes::Buf; + let nibbles = &buf[..len]; buf.advance(len); (Self(Nibbles::from_nibbles_unchecked(nibbles)), buf) @@ -88,7 +88,8 @@ impl From for Nibbles { } } -impl Compact for StoredNibblesSubKey { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StoredNibblesSubKey { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, @@ -114,6 +115,7 @@ impl Compact for StoredNibblesSubKey { mod tests { use super::*; use bytes::BytesMut; + use reth_codecs::Compact; #[test] fn test_stored_nibbles_from_nibbles() { diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs index cf2945d9101..3ebcc4e810e 100644 --- a/crates/trie/common/src/storage.rs +++ b/crates/trie/common/src/storage.rs @@ -1,5 +1,4 @@ use super::{BranchNodeCompact, StoredNibblesSubKey}; -use reth_codecs::Compact; /// Account storage trie node. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] @@ -14,7 +13,8 @@ pub struct StorageTrieEntry { // NOTE: Removing reth_codec and manually encode subkey // and compress second part of the value. 
If we have compression // over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey -impl Compact for StorageTrieEntry { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StorageTrieEntry { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, diff --git a/crates/trie/common/src/subnode.rs b/crates/trie/common/src/subnode.rs index c64b2317cf3..de65a788780 100644 --- a/crates/trie/common/src/subnode.rs +++ b/crates/trie/common/src/subnode.rs @@ -1,6 +1,4 @@ use super::BranchNodeCompact; -use bytes::Buf; -use reth_codecs::Compact; /// Walker sub node for storing intermediate state root calculation state in the database. #[derive(Debug, Clone, PartialEq, Eq, Default)] @@ -13,7 +11,8 @@ pub struct StoredSubNode { pub node: Option, } -impl Compact for StoredSubNode { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StoredSubNode { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, @@ -46,6 +45,8 @@ impl Compact for StoredSubNode { } fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; + let key_len = buf.get_u16() as usize; let key = Vec::from(&buf[..key_len]); buf.advance(key_len); @@ -69,6 +70,7 @@ mod tests { use super::*; use crate::TrieMask; use alloy_primitives::B256; + use reth_codecs::Compact; #[test] fn subnode_roundtrip() { From 73785ccf0517db5243040e5e70762f7b6aef831a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 8 Dec 2024 12:27:31 +0000 Subject: [PATCH 939/970] chore(deps): weekly `cargo update` (#13216) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 326 ++++++++++++++++++++++++++--------------------------- 1 file changed, 161 insertions(+), 165 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ee2e9b6d47..0dfce7a2809 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -100,15 +100,15 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.1.47" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18c5c520273946ecf715c0010b4e3503d7eba9893cd9ce6b7fff5654c4a3c470" +checksum = "a0161082e0edd9013d23083465cc04b20e44b7a15646d36ba7b0cdb7cd6fe18f" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d08234c0eece0e08602db5095a16dc942cad91967cccfcfc2c6a42c25563964f" +checksum = "fa60357dda9a3d0f738f18844bd6d0f4a5924cc5cf00bfad2ff1369897966123" dependencies = [ "alloy-consensus", "alloy-eips", @@ -170,7 +170,7 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -275,7 +275,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", ] @@ -301,14 +301,14 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] name = "alloy-network-primitives" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18abfc73ce48f074c8bc6e05c1f08ef0b1ddc9b04f191a821d0beb9470a42a29" +checksum = "c20219d1ad261da7a6331c16367214ee7ded41d001fabbbd656fbf71898b2773" dependencies = [ "alloy-consensus", "alloy-eips", @@ -329,7 +329,7 @@ dependencies = [ "rand 0.8.5", "serde_json", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", "url", ] @@ -351,7 
+351,7 @@ dependencies = [ "getrandom 0.2.15", "hashbrown 0.15.2", "hex-literal", - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "k256", "keccak-asm", @@ -400,7 +400,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", "url", @@ -512,9 +512,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abca110e59f760259e26d0c84912121468008aba48dd227af0f306cfd7bce9ae" +checksum = "200661999b6e235d9840be5d60a6e8ae2f0af9eb2a256dd378786744660e36ec" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -533,7 +533,7 @@ dependencies = [ "alloy-serde", "serde", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -614,7 +614,7 @@ dependencies = [ "alloy-serde", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -652,7 +652,7 @@ dependencies = [ "auto_impl", "elliptic-curve", "k256", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -670,7 +670,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -696,7 +696,7 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.6.0", + "indexmap 2.7.0", "proc-macro-error2", "proc-macro2", "quote", @@ -755,7 +755,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tower 0.5.1", "tracing", @@ -907,9 +907,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "aquamarine" @@ -1395,7 +1395,7 @@ dependencies = [ "bitflags 2.6.0", "boa_interner", "boa_macros", - 
"indexmap 2.6.0", + "indexmap 2.7.0", "num-bigint", "rustc-hash 2.1.0", ] @@ -1421,7 +1421,7 @@ dependencies = [ "fast-float", "hashbrown 0.14.5", "icu_normalizer", - "indexmap 2.6.0", + "indexmap 2.7.0", "intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1467,7 +1467,7 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.14.5", - "indexmap 2.6.0", + "indexmap 2.7.0", "once_cell", "phf", "rustc-hash 2.1.0", @@ -1692,9 +1692,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" +checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" dependencies = [ "jobserver", "libc", @@ -1793,9 +1793,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -1803,9 +1803,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstream", "anstyle", @@ -1827,9 +1827,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "coins-bip32" @@ -1975,9 +1975,9 @@ checksum = 
"c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_format" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c655d81ff1114fb0dcdea9225ea9f0cc712a6f8d189378e82bdf62a473a64b" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" dependencies = [ "const_format_proc_macros", "konst", @@ -1985,9 +1985,9 @@ dependencies = [ [[package]] name = "const_format_proc_macros" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" dependencies = [ "proc-macro2", "quote", @@ -2654,7 +2654,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "walkdir", ] @@ -2783,12 +2783,11 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfbba28f4f3f32d92c06a64f5bf6c4537b5d4e21f28c689bd2bbaecfea4e0d3e" +checksum = "036c84bd29bff35e29bbee3c8fc0e2fb95db12b6f2f3cae82a827fbc97256f3a" dependencies = [ "alloy-primitives", - "derivative", "ethereum_serde_utils", "itertools 0.13.0", "serde", @@ -2799,9 +2798,9 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d37845ba7c16bf4be8be4b5786f03a2ba5f2fda0d7f9e7cb2282f69cff420d7" +checksum = "9dc8e67e1f770f5aa4c2c2069aaaf9daee7ac21bed357a71b911b37a58966cfb" dependencies = [ "darling", "proc-macro2", @@ -2830,7 +2829,7 @@ dependencies = [ "reth-node-ethereum", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -2918,7 +2917,7 @@ dependencies = [ "reth-tracing", "reth-trie-db", "serde", - "thiserror 2.0.3", 
+ "thiserror 2.0.5", "tokio", ] @@ -3420,9 +3419,9 @@ checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" [[package]] name = "generator" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb949699c3e4df3a183b1d2142cb24277057055ed23c68ed58894f76c517223" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" dependencies = [ "cfg-if", "libc", @@ -3558,7 +3557,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -3719,9 +3718,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -4153,9 +4152,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "arbitrary", "equivalent", @@ -4182,7 +4181,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.6.0", + "indexmap 2.7.0", "is-terminal", "itoa", "log", @@ -4370,9 +4369,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a865e038f7f6ed956f788f0d7d60c541fff74c7bd74272c5d4cf15c63743e705" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ "once_cell", "wasm-bindgen", @@ -4911,7 +4910,7 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", - "indexmap 2.6.0", + "indexmap 2.7.0", "metrics", "metrics-util", "quanta", @@ -4943,7 +4942,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.2", - "indexmap 2.6.0", + "indexmap 2.7.0", "metrics", "ordered-float", "quanta", @@ -5110,9 +5109,9 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.2" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" dependencies = [ "core2", "unsigned-varint", @@ -5362,7 +5361,7 @@ dependencies = [ "derive_more", "serde", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -5377,7 +5376,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -5414,7 +5413,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-genesis", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", "unsigned-varint", ] @@ -5468,7 +5467,7 @@ dependencies = [ "op-alloy-protocol", "serde", "snap", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -5548,9 +5547,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arbitrary", "arrayvec", @@ -5559,20 +5558,19 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", - "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.7.0" +version = "3.6.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.90", + "syn 1.0.109", ] [[package]] @@ -5638,12 +5636,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 1.0.69", + "thiserror 2.0.5", "ucd-trie", ] @@ -6093,7 +6091,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -6112,7 +6110,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.3", + "thiserror 2.0.5", "tinyvec", "tracing", "web-time", @@ -6570,7 +6568,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "schnellru", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -6606,7 +6604,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tower 0.4.13", "tracing", @@ -6660,7 +6658,7 @@ dependencies = [ "reth-execution-errors", "reth-primitives", "reth-storage-errors", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -6816,7 +6814,7 @@ dependencies = [ "reth-fs-util", "secp256k1", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tikv-jemallocator", "tracy-client", ] @@ -6962,7 +6960,7 @@ dependencies = [ "sysinfo", "tempfile", "test-fuzz", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -7019,7 +7017,7 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", 
"tracing", ] @@ -7061,7 +7059,7 @@ dependencies = [ "schnellru", "secp256k1", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -7086,7 +7084,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -7112,7 +7110,7 @@ dependencies = [ "secp256k1", "serde", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -7151,7 +7149,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7228,7 +7226,7 @@ dependencies = [ "secp256k1", "sha2 0.10.8", "sha3", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7284,7 +7282,7 @@ dependencies = [ "reth-primitives-traits", "reth-trie", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", ] @@ -7311,7 +7309,7 @@ dependencies = [ "reth-prune", "reth-stages-api", "reth-tasks", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", ] @@ -7366,7 +7364,7 @@ dependencies = [ "reth-trie-parallel", "reth-trie-sparse", "revm-primitives", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -7412,7 +7410,7 @@ dependencies = [ "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -7445,7 +7443,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7474,7 +7472,7 @@ dependencies = [ "reth-primitives", "reth-primitives-traits", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -7540,7 +7538,7 @@ dependencies = [ "proptest-derive", "rustc-hash 2.1.0", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -7645,7 +7643,7 @@ dependencies = [ "reth-prune-types", "reth-storage-errors", "revm-primitives", - "thiserror 2.0.3", + "thiserror 
2.0.5", ] [[package]] @@ -7740,7 +7738,7 @@ dependencies = [ "reth-transaction-pool", "reth-trie-db", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", ] @@ -7767,7 +7765,7 @@ version = "1.1.2" dependencies = [ "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -7810,7 +7808,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7827,7 +7825,7 @@ dependencies = [ "criterion", "dashmap 6.1.0", "derive_more", - "indexmap 2.6.0", + "indexmap 2.7.0", "parking_lot", "pprof", "rand 0.8.5", @@ -7835,7 +7833,7 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", ] @@ -7874,7 +7872,7 @@ dependencies = [ "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -7934,7 +7932,7 @@ dependencies = [ "serial_test", "smallvec", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7959,7 +7957,7 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", ] @@ -7997,7 +7995,7 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "url", ] @@ -8028,7 +8026,7 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", "zstd", ] @@ -8162,7 +8160,7 @@ dependencies = [ "serde", "shellexpand", "strum", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "toml", "tracing", @@ -8492,7 +8490,7 @@ dependencies = [ "reth-transaction-pool", "revm", "sha2 0.10.8", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", ] @@ -8560,7 +8558,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -8625,7 +8623,7 @@ dependencies = [ "reth-primitives", 
"revm-primitives", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", ] @@ -8800,7 +8798,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.1.0", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -8821,7 +8819,7 @@ dependencies = [ "serde", "serde_json", "test-fuzz", - "thiserror 2.0.3", + "thiserror 2.0.5", "toml", ] @@ -8907,7 +8905,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tower 0.4.13", @@ -9001,7 +8999,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-util", "tower 0.4.13", @@ -9042,7 +9040,7 @@ dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -9127,7 +9125,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -9228,7 +9226,7 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -9255,7 +9253,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -9361,7 +9359,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", "tracing-futures", @@ -9445,7 +9443,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -9561,7 +9559,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -9587,7 +9585,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "smallvec", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -9607,9 +9605,9 @@ dependencies = [ [[package]] name = 
"revm-inspectors" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41bbeb6004cc4ed48d27756f0479011df91a6f5642a3abab9309eda5ce67c4ad" +checksum = "0b7f5f8a2deafb3c76f357bbf9e71b73bddb915c4994bbbe3208fbfbe8fc7f8e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9766,9 +9764,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81dc953b2244ddd5e7860cb0bb2a790494b898ef321d4aff8e260efab60cc88" +checksum = "395b0c39c00f9296f3937624c1fa4e0ee44f8c0e4b2c49408179ef381c6c2e6e" dependencies = [ "bytemuck", "byteorder", @@ -10226,7 +10224,7 @@ version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "memchr", "ryu", @@ -10286,7 +10284,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_derive", "serde_json", @@ -10530,9 +10528,9 @@ dependencies = [ [[package]] name = "soketto" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" dependencies = [ "base64 0.22.1", "bytes", @@ -10821,11 +10819,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "643caef17e3128658ff44d85923ef2d28af81bb71e0d67bbfe1d76f19a73e053" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.5", ] [[package]] @@ -10841,9 +10839,9 @@ dependencies = [ [[package]] name = 
"thiserror-impl" -version = "2.0.3" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "995d0bbc9995d1f19d28b7215a9352b0fc3cd3a2d2ec95c2cadc485cdedbcdde" dependencies = [ "proc-macro2", "quote", @@ -10902,9 +10900,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -10926,9 +10924,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -10980,9 +10978,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", @@ -11009,20 +11007,19 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ "rustls", - "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version 
= "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -11048,9 +11045,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -11088,7 +11085,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -11295,9 +11292,9 @@ dependencies = [ [[package]] name = "tracy-client" -version = "0.17.4" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "746b078c6a09ebfd5594609049e07116735c304671eaab06ce749854d23435bc" +checksum = "51e295eae54124872df35720dc3a5b1e827c7deee352b342ec7f7e626d0d0ef3" dependencies = [ "loom", "once_cell", @@ -11654,9 +11651,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d15e63b4482863c109d70a7b8706c1e364eb6ea449b201a76c5b89cedcec2d5c" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -11665,13 +11662,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d36ef12e3aaca16ddd3f67922bc63e48e953f126de60bd33ccc0101ef9998cd" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", "syn 2.0.90", @@ -11680,9 +11676,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.47" +version = "0.4.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dfaf8f50e5f293737ee323940c7d8b08a66a95a419223d9f41610ca08b0833d" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", @@ -11693,9 +11689,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705440e08b42d3e4b36de7d66c944be628d579796b8090bfa3471478a2260051" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -11703,9 +11699,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c9ae5a76e46f4deecd0f0255cc223cfa18dc9b261213b8aa0c7b36f61b3f1d" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", @@ -11716,9 +11712,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee99da9c5ba11bd675621338ef6fa52296b76b83305e9b6e5c77d4c286d6d49" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "wasm-streams" @@ -11749,9 +11745,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a98bc3c33f0fe7e59ad7cd041b89034fa82a7c2d4365ca538dda6cdaf513863c" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" dependencies = [ "js-sys", "wasm-bindgen", From 465692b5aff5718f10725077a9caf0a7b7c55297 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 10:57:50 +0100 Subject: [PATCH 940/970] test: add tracing test (#13221) --- crates/rpc/rpc-testing-util/tests/it/trace.rs | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index e67946f7b0a..47932bd7302 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -126,3 +126,47 @@ async fn debug_trace_block_entire_chain() { } println!("Traced all blocks in {:?}", now.elapsed()); } + +/// This is intended to be run locally against a running node. This traces all blocks for a given +/// chain. +/// +/// This is a noop of env var `RETH_RPC_TEST_NODE_URL` is not set. 
+#[tokio::test(flavor = "multi_thread")] +async fn debug_trace_block_opcodes_entire_chain() { + let opcodes7702 = ["EXTCODESIZE", "EXTCODECOPY", "EXTCODEHASH"]; + let url = parse_env_url("RETH_RPC_TEST_NODE_URL"); + if url.is_err() { + return + } + let url = url.unwrap(); + + let client = HttpClientBuilder::default().build(url).unwrap(); + let current_block: u64 = + >::block_number(&client) + .await + .unwrap() + .try_into() + .unwrap(); + let range = 0..=current_block; + println!("Tracing blocks {range:?} for opcodes"); + let mut stream = client.trace_block_opcode_gas_unordered(range, 2).enumerate(); + let now = Instant::now(); + while let Some((num, next)) = stream.next().await { + match next { + Ok((block_opcodes, block)) => { + for opcode in opcodes7702 { + if block_opcodes.contains(opcode) { + eprintln!("Found opcode {opcode}: in {block}"); + } + } + } + Err((err, block)) => { + eprintln!("Error tracing block {block:?}: {err}"); + } + }; + if num % 10000 == 0 { + println!("Traced {num} blocks"); + } + } + println!("Traced all blocks in {:?}", now.elapsed()); +} From f7a3476046a0fd5d8063a6a191c661c1cf0fc28c Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 9 Dec 2024 11:40:43 +0100 Subject: [PATCH 941/970] chore(engine): simplify StateRootTask creation and hook management (#13213) --- crates/engine/tree/src/tree/root.rs | 31 +++++++++++++++++------------ 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index dc0563ade50..ae22b036b65 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,6 +1,7 @@ //! State root task related functionality. 
use alloy_primitives::map::{HashMap, HashSet}; +use reth_evm::system_calls::OnStateHook; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, StateCommitmentProvider, @@ -20,7 +21,7 @@ use std::{ collections::BTreeMap, ops::Deref, sync::{ - mpsc::{self, Receiver, Sender}, + mpsc::{self, channel, Receiver, Sender}, Arc, }, time::{Duration, Instant}, @@ -249,11 +250,9 @@ where + 'static, { /// Creates a new state root task with the unified message channel - pub(crate) fn new( - config: StateRootConfig, - tx: Sender, - rx: Receiver, - ) -> Self { + pub(crate) fn new(config: StateRootConfig) -> Self { + let (tx, rx) = channel(); + Self { config, rx, @@ -279,6 +278,15 @@ where StateRootHandle::new(rx) } + /// Returns a state hook to be used to send state updates to this task. + pub(crate) fn state_hook(&self) -> impl OnStateHook { + let state_hook = StateHookSender::new(self.tx.clone()); + + move |state: &EvmState| { + let _ = state_hook.send(StateRootMessage::StateUpdate(state.clone())); + } + } + /// Handles state updates. /// /// Returns proof targets derived from the state update. 
@@ -670,7 +678,6 @@ mod tests { reth_tracing::init_test_tracing(); let factory = create_test_provider_factory(); - let (tx, rx) = std::sync::mpsc::channel(); let state_updates = create_mock_state_updates(10, 10); let mut hashed_state = HashedPostState::default(); @@ -721,16 +728,14 @@ mod tests { consistent_view: ConsistentDbView::new(factory, None), input: Arc::new(TrieInput::from_state(hashed_state)), }; - let task = StateRootTask::new(config, tx.clone(), rx); + let task = StateRootTask::new(config); + let mut state_hook = task.state_hook(); let handle = task.spawn(); - let state_hook_sender = StateHookSender::new(tx); for update in state_updates { - state_hook_sender - .send(StateRootMessage::StateUpdate(update)) - .expect("failed to send state"); + state_hook.on_state(&update); } - drop(state_hook_sender); + drop(state_hook); let (root_from_task, _) = handle.wait_for_result().expect("task failed"); let root_from_base = state_root(accumulated_state); From d68d7c8da0f13acfa356284c4ecf54047f964c25 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 9 Dec 2024 06:15:41 -0500 Subject: [PATCH 942/970] feat: bound NetworkPrimitives types by proper traits (#13196) --- crates/net/eth-wire-types/src/primitives.rs | 43 ++------------------- 1 file changed, 4 insertions(+), 39 deletions(-) diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index 78083e9e092..17f1943186a 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -1,7 +1,7 @@ //! Abstraction over primitive types in network messages. use alloy_rlp::{Decodable, Encodable}; -use reth_primitives_traits::{Block, BlockHeader, SignedTransaction}; +use reth_primitives_traits::{Block, BlockBody, BlockHeader, SignedTransaction}; use std::fmt::Debug; /// Abstraction over primitive types which might appear in network messages. 
See @@ -10,56 +10,21 @@ pub trait NetworkPrimitives: Send + Sync + Unpin + Clone + Debug + PartialEq + Eq + 'static { /// The block header type. - type BlockHeader: BlockHeader - + Encodable - + Decodable - + Send - + Sync - + Unpin - + Clone - + Debug - + PartialEq - + Eq - + 'static; + type BlockHeader: BlockHeader + 'static; /// The block body type. - type BlockBody: Encodable - + Decodable - + Send - + Sync - + Unpin - + Clone - + Debug - + PartialEq - + Eq - + 'static; + type BlockBody: BlockBody + 'static; /// Full block type. type Block: Block
+ Encodable + Decodable - + Send - + Sync - + Unpin - + Clone - + Debug - + PartialEq - + Eq + 'static; /// The transaction type which peers announce in `Transactions` messages. It is different from /// `PooledTransactions` to account for Ethereum case where EIP-4844 transactions are not being /// announced and can only be explicitly requested from peers. - type BroadcastedTransaction: Encodable - + Decodable - + Send - + Sync - + Unpin - + Clone - + Debug - + PartialEq - + Eq - + 'static; + type BroadcastedTransaction: SignedTransaction + 'static; /// The transaction type which peers return in `PooledTransactions` messages. type PooledTransaction: SignedTransaction + TryFrom + 'static; From 13302ca655d3d21422be683881a4248499f0f1d2 Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Mon, 9 Dec 2024 13:09:54 +0100 Subject: [PATCH 943/970] feat(db): make init_db function accepts a TableSet (#13222) --- crates/storage/db/src/mdbx.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/crates/storage/db/src/mdbx.rs b/crates/storage/db/src/mdbx.rs index d6947e10bd2..c0e11079f3a 100644 --- a/crates/storage/db/src/mdbx.rs +++ b/crates/storage/db/src/mdbx.rs @@ -1,6 +1,6 @@ //! Bindings for [MDBX](https://libmdbx.dqdkfa.ru/). -use crate::is_database_empty; +use crate::{is_database_empty, TableSet, Tables}; use eyre::Context; use std::path::Path; @@ -28,12 +28,21 @@ pub fn create_db>(path: P, args: DatabaseArguments) -> eyre::Resu Ok(DatabaseEnv::open(rpath, DatabaseEnvKind::RW, args)?) } -/// Opens up an existing database or creates a new one at the specified path. Creates tables if -/// necessary. Read/Write mode. +/// Opens up an existing database or creates a new one at the specified path. Creates tables defined +/// in [`Tables`] if necessary. Read/Write mode. 
pub fn init_db>(path: P, args: DatabaseArguments) -> eyre::Result { + init_db_for::(path, args) +} + +/// Opens up an existing database or creates a new one at the specified path. Creates tables defined +/// in the given [`TableSet`] if necessary. Read/Write mode. +pub fn init_db_for, TS: TableSet>( + path: P, + args: DatabaseArguments, +) -> eyre::Result { let client_version = args.client_version().clone(); let db = create_db(path, args)?; - db.create_tables()?; + db.create_tables_for::()?; db.record_client_version(client_version)?; Ok(db) } From 233f893a932c2ae1d19266fbdd90838c569609c6 Mon Sep 17 00:00:00 2001 From: greg <82421016+greged93@users.noreply.github.com> Date: Mon, 9 Dec 2024 15:14:45 +0100 Subject: [PATCH 944/970] fix: deny advisory RUSTSEC-2024-0421 (#13227) Signed-off-by: Gregory Edison --- Cargo.lock | 170 +++++++++++++++++---------------- crates/net/dns/Cargo.toml | 4 +- crates/net/dns/src/resolver.rs | 18 ++-- 3 files changed, 101 insertions(+), 91 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0dfce7a2809..fb49d13e872 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1112,6 +1112,17 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "async-sse" version = "5.1.0" @@ -3666,6 +3677,54 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" +[[package]] +name = "hickory-proto" +version = "0.25.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d063c0692ee669aa6d261988aa19ca5510f1cc40e4f211024f50c888499a35d7" +dependencies = [ + "async-recursion", + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", 
+ "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.8.5", + "serde", + "thiserror 2.0.5", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.25.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42bc352e4412fb657e795f79b4efcf2bd60b59ee5ca0187f3554194cd1107a27" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.8.5", + "resolv-conf", + "serde", + "smallvec", + "thiserror 2.0.5", + "tokio", + "tracing", +] + [[package]] name = "hkdf" version = "0.12.4" @@ -4053,16 +4112,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "1.0.3" @@ -4818,15 +4867,6 @@ dependencies = [ "hashbrown 0.15.2", ] -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "lz4_flex" version = "0.11.3" @@ -5071,6 +5111,26 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "moka" +version = "0.12.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cf62eb4dd975d2dde76432fb1075c49e3ee2331cf36f1f8fd4b66550d32b6f" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "once_cell", + "parking_lot", + "quanta", + "rustc_version 0.4.1", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "triomphe", + "uuid", +] + [[package]] name = 
"more-asserts" version = "0.3.1" @@ -7098,6 +7158,7 @@ dependencies = [ "alloy-rlp", "data-encoding", "enr", + "hickory-resolver", "linked_hash_set", "parking_lot", "rand 0.8.5", @@ -7114,7 +7175,6 @@ dependencies = [ "tokio", "tokio-stream", "tracing", - "trust-dns-resolver", ] [[package]] @@ -10725,6 +10785,12 @@ dependencies = [ "windows 0.57.0", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -11323,51 +11389,10 @@ dependencies = [ ] [[package]] -name = "trust-dns-proto" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.4.0", - "ipnet", - "once_cell", - "rand 0.8.5", - "smallvec", - "thiserror 1.0.69", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.23.2" +name = "triomphe" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lru-cache", - "once_cell", - "parking_lot", - "rand 0.8.5", - "resolv-conf", - "serde", - "smallvec", - "thiserror 1.0.69", - "tokio", - "tracing", - "trust-dns-proto", -] +checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" [[package]] name = "try-lock" @@ -11443,27 +11468,12 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" -[[package]] -name = "unicode-bidi" -version = "0.3.17" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" - [[package]] name = "unicode-ident" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" -[[package]] -name = "unicode-normalization" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - "tinyvec", -] - [[package]] name = "unicode-segmentation" version = "1.12.0" @@ -11528,7 +11538,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 1.0.3", + "idna", "percent-encoding", "serde", ] diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 2f71354a7dd..9e3e93d12f8 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -27,7 +27,7 @@ tokio = { workspace = true, features = ["io-util", "net", "time"] } tokio-stream.workspace = true # trust-dns -trust-dns-resolver = "0.23" +hickory-resolver = { version = "0.25.0-alpha.4" } # misc data-encoding = "2" @@ -58,6 +58,6 @@ serde = [ "parking_lot/serde", "rand/serde", "secp256k1/serde", - "trust-dns-resolver/serde", + "hickory-resolver/serde", "reth-ethereum-forks/serde" ] diff --git a/crates/net/dns/src/resolver.rs b/crates/net/dns/src/resolver.rs index 42c444f89a7..255f2ad4a10 100644 --- a/crates/net/dns/src/resolver.rs +++ b/crates/net/dns/src/resolver.rs @@ -1,10 +1,10 @@ //! 
Perform DNS lookups +use hickory_resolver::name_server::ConnectionProvider; +pub use hickory_resolver::{ResolveError, TokioResolver}; use parking_lot::RwLock; use std::{collections::HashMap, future::Future}; use tracing::trace; -pub use trust_dns_resolver::{error::ResolveError, TokioAsyncResolver}; -use trust_dns_resolver::{name_server::ConnectionProvider, AsyncResolver}; /// A type that can lookup DNS entries pub trait Resolver: Send + Sync + Unpin + 'static { @@ -12,7 +12,7 @@ pub trait Resolver: Send + Sync + Unpin + 'static { fn lookup_txt(&self, query: &str) -> impl Future> + Send; } -impl Resolver for AsyncResolver

{ +impl Resolver for hickory_resolver::Resolver

{ async fn lookup_txt(&self, query: &str) -> Option { // See: [AsyncResolver::txt_lookup] // > *hint* queries that end with a '.' are fully qualified names and are cheaper lookups @@ -33,7 +33,7 @@ impl Resolver for AsyncResolver

{ /// An asynchronous DNS resolver /// -/// See also [`TokioAsyncResolver`] +/// See also [`TokioResolver`] /// /// ``` /// # fn t() { @@ -43,16 +43,16 @@ impl Resolver for AsyncResolver

{ /// ``` /// /// Note: This [Resolver] can send multiple lookup attempts, See also -/// [`ResolverOpts`](trust_dns_resolver::config::ResolverOpts) which configures 2 attempts (1 retry) +/// [`ResolverOpts`](hickory_resolver::config::ResolverOpts) which configures 2 attempts (1 retry) /// by default. #[derive(Clone, Debug)] -pub struct DnsResolver(TokioAsyncResolver); +pub struct DnsResolver(TokioResolver); // === impl DnsResolver === impl DnsResolver { - /// Create a new resolver by wrapping the given [`AsyncResolver`] - pub const fn new(resolver: TokioAsyncResolver) -> Self { + /// Create a new resolver by wrapping the given [`TokioResolver`]. + pub const fn new(resolver: TokioResolver) -> Self { Self(resolver) } @@ -60,7 +60,7 @@ impl DnsResolver { /// /// This will use `/etc/resolv.conf` on Unix OSes and the registry on Windows. pub fn from_system_conf() -> Result { - TokioAsyncResolver::tokio_from_system_conf().map(Self::new) + TokioResolver::tokio_from_system_conf().map(Self::new) } } From 3e859058cdc42c7f380b3f5a61e6f40eec3d1741 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 15:59:18 +0100 Subject: [PATCH 945/970] chore: add default impls for withdrawals and ommers root (#13229) --- crates/primitives-traits/src/block/body.rs | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index cec32999070..14436ee01c5 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,7 +1,7 @@ //! Block body abstraction. use crate::{ - FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, MaybeSerdeBincodeCompat, + BlockHeader, FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, MaybeSerdeBincodeCompat, SignedTransaction, }; use alloc::{fmt, vec::Vec}; @@ -36,7 +36,7 @@ pub trait BlockBody: type Transaction: SignedTransaction; /// Ommer header type. 
- type OmmerHeader; + type OmmerHeader: BlockHeader; /// Returns reference to transactions in block. fn transactions(&self) -> &[Self::Transaction]; @@ -52,9 +52,25 @@ pub trait BlockBody: /// Returns block withdrawals if any. fn withdrawals(&self) -> Option<&Withdrawals>; + /// Calculate the withdrawals root for the block body. + /// + /// Returns `None` if there are no withdrawals in the block. + fn calculate_withdrawals_root(&self) -> Option { + self.withdrawals().map(|withdrawals| { + alloy_consensus::proofs::calculate_withdrawals_root(withdrawals.as_slice()) + }) + } + /// Returns block ommers if any. fn ommers(&self) -> Option<&[Self::OmmerHeader]>; + /// Calculate the ommers root for the block body. + /// + /// Returns `None` if there are no ommers in the block. + fn calculate_ommers_root(&self) -> Option { + self.ommers().map(alloy_consensus::proofs::calculate_ommers_root) + } + /// Calculates the total blob gas used by _all_ EIP-4844 transactions in the block. fn blob_gas_used(&self) -> u64 { // TODO(mattss): simplify after From 49d3d82b64a9a0f7dd2ee46dc7856e2d383b3fa5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 15:59:34 +0100 Subject: [PATCH 946/970] chore: use BlockHeader trait (#13224) --- crates/transaction-pool/src/validate/eth.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index e3b7af736cd..998de5ffb51 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -11,9 +11,12 @@ use crate::{ EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, }; -use alloy_consensus::constants::{ - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, +use alloy_consensus::{ + constants::{ + EIP1559_TX_TYPE_ID, 
EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, + }, + BlockHeader, }; use alloy_eips::eip4844::MAX_BLOBS_PER_BLOCK; use reth_chainspec::{ChainSpec, EthereumHardforks}; @@ -102,7 +105,7 @@ where } fn on_new_head_block(&self, new_tip_block: &SealedBlock) { - self.inner.on_new_head_block(new_tip_block) + self.inner.on_new_head_block(new_tip_block.header()) } } @@ -469,17 +472,17 @@ where } } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) { + fn on_new_head_block(&self, new_tip_block: &T) { // update all forks - if self.chain_spec.is_cancun_active_at_timestamp(new_tip_block.timestamp) { + if self.chain_spec.is_cancun_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.cancun.store(true, std::sync::atomic::Ordering::Relaxed); } - if self.chain_spec.is_shanghai_active_at_timestamp(new_tip_block.timestamp) { + if self.chain_spec.is_shanghai_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.shanghai.store(true, std::sync::atomic::Ordering::Relaxed); } - if self.chain_spec.is_prague_active_at_timestamp(new_tip_block.timestamp) { + if self.chain_spec.is_prague_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.prague.store(true, std::sync::atomic::Ordering::Relaxed); } } From 3bc7b00fb3e6cc449e93aa72eaf40d566f4d66fb Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 9 Dec 2024 16:03:50 +0100 Subject: [PATCH 947/970] ci: move deny to lint (#13230) --- .github/workflows/deny.yml | 27 --------------------------- .github/workflows/lint.yml | 4 ++++ 2 files changed, 4 insertions(+), 27 deletions(-) delete mode 100644 .github/workflows/deny.yml diff --git a/.github/workflows/deny.yml b/.github/workflows/deny.yml deleted file mode 100644 index f85484ca2ec..00000000000 --- a/.github/workflows/deny.yml +++ /dev/null @@ -1,27 +0,0 @@ -# Runs `cargo-deny` when modifying `Cargo.lock`. 
- -name: deny - -on: - push: - branches: [main] - paths: [Cargo.lock] - pull_request: - branches: [main] - paths: [Cargo.lock] - merge_group: - -env: - CARGO_TERM_COLOR: always - -concurrency: deny-${{ github.head_ref || github.run_id }} - -jobs: - deny: - name: deny - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: EmbarkStudios/cargo-deny-action@v2 - with: - command: check all diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 61ba54e9556..418fd4cc4e6 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -251,6 +251,9 @@ jobs: zepter --version time zepter run check + deny: + uses: ithacaxyz/ci/.github/workflows/deny.yml@main + lint-success: name: lint success runs-on: ubuntu-latest @@ -269,6 +272,7 @@ jobs: - no-test-deps - features - feature-propagation + - deny timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed From da5ffc24c229f8b0b459213b536ad8e7396c7348 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 9 Dec 2024 15:12:36 +0000 Subject: [PATCH 948/970] fix(trie): reveal extension child in sparse trie when updating a leaf (#13183) --- crates/trie/sparse/src/state.rs | 48 +++---- crates/trie/sparse/src/trie.rs | 216 +++++++++++++++++--------------- 2 files changed, 141 insertions(+), 123 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index ec51df8982c..6638632f0ad 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -273,30 +273,6 @@ impl SparseStateTrie { Ok(Some(root_node)) } - /// Update the account leaf node. - pub fn update_account_leaf( - &mut self, - path: Nibbles, - value: Vec, - ) -> SparseStateTrieResult<()> { - self.state.update_leaf(path, value)?; - Ok(()) - } - - /// Update the leaf node of a storage trie at the provided address. 
- pub fn update_storage_leaf( - &mut self, - address: B256, - slot: Nibbles, - value: Vec, - ) -> SparseStateTrieResult<()> { - if let Some(storage_trie) = self.storages.get_mut(&address) { - Ok(storage_trie.update_leaf(slot, value)?) - } else { - Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) - } - } - /// Wipe the storage trie at the provided address. pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> { if let Some(trie) = self.storages.get_mut(&address) { @@ -354,6 +330,30 @@ where SparseTrieError: From<::Error> + From<::Error>, { + /// Update the account leaf node. + pub fn update_account_leaf( + &mut self, + path: Nibbles, + value: Vec, + ) -> SparseStateTrieResult<()> { + self.state.update_leaf(path, value)?; + Ok(()) + } + + /// Update the leaf node of a storage trie at the provided address. + pub fn update_storage_leaf( + &mut self, + address: B256, + slot: Nibbles, + value: Vec, + ) -> SparseStateTrieResult<()> { + if let Some(storage_trie) = self.storages.get_mut(&address) { + Ok(storage_trie.update_leaf(slot, value)?) + } else { + Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) + } + } + /// Update or remove trie account based on new account info. This method will either recompute /// the storage root based on update storage trie or look it up from existing leaf value. /// diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 8fff0819bcb..e4d4ff701f3 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -104,13 +104,6 @@ impl

SparseTrie

{ Ok(self.as_revealed_mut().unwrap()) } - /// Update the leaf node. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { - let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; - revealed.update_leaf(path, value)?; - Ok(()) - } - /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. pub fn wipe(&mut self) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; @@ -134,6 +127,13 @@ where P: BlindedProvider, SparseTrieError: From, { + /// Update the leaf node. + pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.update_leaf(path, value)?; + Ok(()) + } + /// Remove the leaf node. pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; @@ -372,98 +372,6 @@ impl

RevealedSparseTrie

{ self.reveal_node(path, TrieNode::decode(&mut &child[..])?, None) } - /// Update the leaf node with provided value. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { - self.prefix_set.insert(path.clone()); - let existing = self.values.insert(path.clone(), value); - if existing.is_some() { - // trie structure unchanged, return immediately - return Ok(()) - } - - let mut current = Nibbles::default(); - while let Some(node) = self.nodes.get_mut(¤t) { - match node { - SparseNode::Empty => { - *node = SparseNode::new_leaf(path); - break - } - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { path: current, hash: *hash }) - } - SparseNode::Leaf { key: current_key, .. } => { - current.extend_from_slice_unchecked(current_key); - - // this leaf is being updated - if current == path { - unreachable!("we already checked leaf presence in the beginning"); - } - - // find the common prefix - let common = current.common_prefix_length(&path); - - // update existing node - let new_ext_key = current.slice(current.len() - current_key.len()..common); - *node = SparseNode::new_ext(new_ext_key); - - // create a branch node and corresponding leaves - self.nodes.insert( - current.slice(..common), - SparseNode::new_split_branch(current[common], path[common]), - ); - self.nodes.insert( - path.slice(..=common), - SparseNode::new_leaf(path.slice(common + 1..)), - ); - self.nodes.insert( - current.slice(..=common), - SparseNode::new_leaf(current.slice(common + 1..)), - ); - - break; - } - SparseNode::Extension { key, .. 
} => { - current.extend_from_slice(key); - if !path.starts_with(¤t) { - // find the common prefix - let common = current.common_prefix_length(&path); - - *key = current.slice(current.len() - key.len()..common); - - // create state mask for new branch node - // NOTE: this might overwrite the current extension node - let branch = SparseNode::new_split_branch(current[common], path[common]); - self.nodes.insert(current.slice(..common), branch); - - // create new leaf - let new_leaf = SparseNode::new_leaf(path.slice(common + 1..)); - self.nodes.insert(path.slice(..=common), new_leaf); - - // recreate extension to previous child if needed - let key = current.slice(common + 1..); - if !key.is_empty() { - self.nodes.insert(current.slice(..=common), SparseNode::new_ext(key)); - } - - break; - } - } - SparseNode::Branch { state_mask, .. } => { - let nibble = path[current.len()]; - current.push_unchecked(nibble); - if !state_mask.is_bit_set(nibble) { - state_mask.set_bit(nibble); - let new_leaf = SparseNode::new_leaf(path.slice(current.len()..)); - self.nodes.insert(current, new_leaf); - break; - } - } - }; - } - - Ok(()) - } - /// Traverse trie nodes down to the leaf node and collect all nodes along the path. fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult> { let mut current = Nibbles::default(); // Start traversal from the root @@ -866,6 +774,116 @@ where P: BlindedProvider, SparseTrieError: From, { + /// Update the leaf node with provided value. 
+ pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + self.prefix_set.insert(path.clone()); + let existing = self.values.insert(path.clone(), value); + if existing.is_some() { + // trie structure unchanged, return immediately + return Ok(()) + } + + let mut current = Nibbles::default(); + while let Some(node) = self.nodes.get_mut(¤t) { + match node { + SparseNode::Empty => { + *node = SparseNode::new_leaf(path); + break + } + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { path: current, hash: *hash }) + } + SparseNode::Leaf { key: current_key, .. } => { + current.extend_from_slice_unchecked(current_key); + + // this leaf is being updated + if current == path { + unreachable!("we already checked leaf presence in the beginning"); + } + + // find the common prefix + let common = current.common_prefix_length(&path); + + // update existing node + let new_ext_key = current.slice(current.len() - current_key.len()..common); + *node = SparseNode::new_ext(new_ext_key); + + // create a branch node and corresponding leaves + self.nodes.insert( + current.slice(..common), + SparseNode::new_split_branch(current[common], path[common]), + ); + self.nodes.insert( + path.slice(..=common), + SparseNode::new_leaf(path.slice(common + 1..)), + ); + self.nodes.insert( + current.slice(..=common), + SparseNode::new_leaf(current.slice(common + 1..)), + ); + + break; + } + SparseNode::Extension { key, .. } => { + current.extend_from_slice(key); + + if !path.starts_with(¤t) { + // find the common prefix + let common = current.common_prefix_length(&path); + *key = current.slice(current.len() - key.len()..common); + + // If branch node updates retention is enabled, we need to query the + // extension node child to later set the hash mask for a parent branch node + // correctly. 
+ if self.updates.is_some() { + // Check if the extension node child is a hash that needs to be revealed + if self.nodes.get(¤t).unwrap().is_hash() { + if let Some(node) = self.provider.blinded_node(current.clone())? { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!(target: "trie::sparse", ?current, ?decoded, "Revealing extension node child"); + // We'll never have to update the revealed child node, only + // remove or do nothing, so + // we can safely ignore the hash mask here and + // pass `None`. + self.reveal_node(current.clone(), decoded, None)?; + } + } + } + + // create state mask for new branch node + // NOTE: this might overwrite the current extension node + let branch = SparseNode::new_split_branch(current[common], path[common]); + self.nodes.insert(current.slice(..common), branch); + + // create new leaf + let new_leaf = SparseNode::new_leaf(path.slice(common + 1..)); + self.nodes.insert(path.slice(..=common), new_leaf); + + // recreate extension to previous child if needed + let key = current.slice(common + 1..); + if !key.is_empty() { + self.nodes.insert(current.slice(..=common), SparseNode::new_ext(key)); + } + + break; + } + } + SparseNode::Branch { state_mask, .. } => { + let nibble = path[current.len()]; + current.push_unchecked(nibble); + if !state_mask.is_bit_set(nibble) { + state_mask.set_bit(nibble); + let new_leaf = SparseNode::new_leaf(path.slice(current.len()..)); + self.nodes.insert(current, new_leaf); + break; + } + } + }; + } + + Ok(()) + } + /// Remove leaf node from the trie. 
pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { if self.values.remove(path).is_none() { From b5bbb8d751d48be15b4f5c13c965716be9c1eeda Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 9 Dec 2024 12:14:39 -0500 Subject: [PATCH 949/970] chore: rename HighestStaticFiles::min (#13235) --- crates/cli/commands/src/prune.rs | 3 ++- crates/cli/commands/src/stage/unwind.rs | 2 +- crates/stages/api/src/pipeline/mod.rs | 2 +- crates/static-file/types/src/lib.rs | 12 ++++++------ 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs index 37f0637b0a5..a5b186bc138 100644 --- a/crates/cli/commands/src/prune.rs +++ b/crates/cli/commands/src/prune.rs @@ -24,7 +24,8 @@ impl> PruneComma info!(target: "reth::cli", "Copying data from database to static files..."); let static_file_producer = StaticFileProducer::new(provider_factory.clone(), prune_config.segments.clone()); - let lowest_static_file_height = static_file_producer.lock().copy_to_static_files()?.min(); + let lowest_static_file_height = + static_file_producer.lock().copy_to_static_files()?.min_block_num(); info!(target: "reth::cli", ?lowest_static_file_height, "Copied data from database to static files"); // Delete data which has been copied to static files. 
diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index 2d29121d069..de535d65508 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -58,7 +58,7 @@ impl> Command let highest_static_file_block = provider_factory .static_file_provider() .get_highest_static_files() - .max() + .max_block_num() .filter(|highest_static_file_block| *highest_static_file_block > target); // Execute a pipeline unwind if the start of the range overlaps the existing static diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 39d26cd8808..ec57de8d11c 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -256,7 +256,7 @@ impl Pipeline { pub fn move_to_static_files(&self) -> RethResult<()> { // Copies data from database to static files let lowest_static_file_height = - self.static_file_producer.lock().copy_to_static_files()?.min(); + self.static_file_producer.lock().copy_to_static_files()?.min_block_num(); // Deletes data which has been copied to static files. if let Some(prune_tip) = lowest_static_file_height { diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 4fc9c545e7c..7a9980b3559 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -55,12 +55,12 @@ impl HighestStaticFiles { } /// Returns the minimum block of all segments. - pub fn min(&self) -> Option { + pub fn min_block_num(&self) -> Option { [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).min() } /// Returns the maximum block of all segments. 
- pub fn max(&self) -> Option { + pub fn max_block_num(&self) -> Option { [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).max() } } @@ -154,11 +154,11 @@ mod tests { HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: None }; // Minimum value among the available segments - assert_eq!(files.min(), Some(100)); + assert_eq!(files.min_block_num(), Some(100)); let empty_files = HighestStaticFiles::default(); // No values, should return None - assert_eq!(empty_files.min(), None); + assert_eq!(empty_files.min_block_num(), None); } #[test] @@ -167,11 +167,11 @@ mod tests { HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) }; // Maximum value among the available segments - assert_eq!(files.max(), Some(500)); + assert_eq!(files.max_block_num(), Some(500)); let empty_files = HighestStaticFiles::default(); // No values, should return None - assert_eq!(empty_files.max(), None); + assert_eq!(empty_files.max_block_num(), None); } #[test] From a3e90e18b648edc20e18bcb615b46f8ec35d12fe Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 9 Dec 2024 17:33:17 +0000 Subject: [PATCH 950/970] fix(trie): retain updates only for sparse branch nodes in the prefix set (#13234) --- crates/trie/sparse/src/trie.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index e4d4ff701f3..b4adc8c60a5 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -623,6 +623,7 @@ impl
<P: BlindedProvider> RevealedSparseTrie<P>
{ )); continue } + let retain_updates = self.updates.is_some() && prefix_set_contains(&path); buffers.branch_child_buf.clear(); // Walk children in a reverse order from `f` to `0`, so we pop the `0` first @@ -650,7 +651,7 @@ impl
<P: BlindedProvider> RevealedSparseTrie<P>
{ buffers.rlp_node_stack.pop().unwrap(); // Update the masks only if we need to retain trie updates - if self.updates.is_some() { + if retain_updates { // Set the trie mask let tree_mask_value = if node_type.store_in_db_trie() { // A branch or an extension node explicitly set the @@ -716,7 +717,7 @@ impl
<P: BlindedProvider> RevealedSparseTrie<P>
{ // Save a branch node update only if it's not a root node, and we need to // persist updates. let store_in_db_trie_value = if let Some(updates) = - self.updates.as_mut().filter(|_| !path.is_empty()) + self.updates.as_mut().filter(|_| retain_updates && !path.is_empty()) { let mut tree_mask_values = tree_mask_values.into_iter().rev(); let mut hash_mask_values = hash_mask_values.into_iter().rev(); From 3af2afe99528750dc7465433c4248fd1435bf3e0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 19:26:32 +0100 Subject: [PATCH 951/970] chore: relax more consensus functions (#13236) --- crates/consensus/common/Cargo.toml | 4 +- crates/consensus/common/src/validation.rs | 70 +++++++++++++---------- crates/ethereum/consensus/src/lib.rs | 2 +- crates/optimism/consensus/src/lib.rs | 2 +- crates/primitives/src/block.rs | 62 +++++++++++--------- 5 files changed, 79 insertions(+), 61 deletions(-) diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 272adbb9297..a9a0c69ae55 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-primitives.workspace = true reth-consensus.workspace = true +reth-primitives.workspace = true # ethereum alloy-primitives.workspace = true @@ -24,8 +24,8 @@ alloy-consensus.workspace = true alloy-eips.workspace = true [dev-dependencies] +alloy-consensus.workspace = true reth-storage-api.workspace = true rand.workspace = true mockall = "0.13" -alloy-consensus.workspace = true diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 2c38fa2d6cd..37b6138e5d4 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,14 +1,14 @@ //! Collection of methods for block validation. 
-use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ calc_next_block_base_fee, eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, }; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{BlockBody, EthereumHardfork, GotExpected, SealedBlock, SealedHeader}; -use reth_primitives_traits::BlockBody as _; +use reth_primitives::SealedBlock; +use reth_primitives_traits::{BlockBody, GotExpected, SealedHeader}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. @@ -43,11 +43,11 @@ pub fn validate_header_base_fee( /// /// [EIP-4895]: https://eips.ethereum.org/EIPS/eip-4895 #[inline] -pub fn validate_shanghai_withdrawals( +pub fn validate_shanghai_withdrawals( block: &SealedBlock, ) -> Result<(), ConsensusError> { let withdrawals = block.body.withdrawals().ok_or(ConsensusError::BodyWithdrawalsMissing)?; - let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); + let withdrawals_root = alloy_consensus::proofs::calculate_withdrawals_root(withdrawals); let header_withdrawals_root = block.withdrawals_root().ok_or(ConsensusError::WithdrawalsRootMissing)?; if withdrawals_root != *header_withdrawals_root { @@ -64,7 +64,7 @@ pub fn validate_shanghai_withdrawals( +pub fn validate_cancun_gas( block: &SealedBlock, ) -> Result<(), ConsensusError> { // Check that the blob gas used in the header matches the sum of the blob gas used by each @@ -87,28 +87,31 @@ pub fn validate_cancun_gas /// - ommer hash /// - transaction root /// - withdrawals root -pub fn validate_body_against_header( - body: &BlockBody, - header: &SealedHeader, -) -> Result<(), ConsensusError> { +pub fn validate_body_against_header(body: 
&B, header: &H) -> Result<(), ConsensusError> +where + B: BlockBody, + H: BlockHeader, +{ let ommers_hash = body.calculate_ommers_root(); - if header.ommers_hash != ommers_hash { + if Some(header.ommers_hash()) != ommers_hash { return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: header.ommers_hash }.into(), + GotExpected { + got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH), + expected: header.ommers_hash(), + } + .into(), )) } let tx_root = body.calculate_tx_root(); - if header.transactions_root != tx_root { + if header.transactions_root() != tx_root { return Err(ConsensusError::BodyTransactionRootDiff( - GotExpected { got: tx_root, expected: header.transactions_root }.into(), + GotExpected { got: tx_root, expected: header.transactions_root() }.into(), )) } - match (header.withdrawals_root, &body.withdrawals) { - (Some(header_withdrawals_root), Some(withdrawals)) => { - let withdrawals = withdrawals.as_slice(); - let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); + match (header.withdrawals_root(), body.calculate_withdrawals_root()) { + (Some(header_withdrawals_root), Some(withdrawals_root)) => { if withdrawals_root != header_withdrawals_root { return Err(ConsensusError::BodyWithdrawalsRootDiff( GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), @@ -130,15 +133,24 @@ pub fn validate_body_against_header( /// - Compares the transactions root in the block header to the block body /// - Pre-execution transaction validation /// - (Optionally) Compares the receipts root in the block header to the block body -pub fn validate_block_pre_execution( - block: &SealedBlock, +pub fn validate_block_pre_execution( + block: &SealedBlock, chain_spec: &ChainSpec, -) -> Result<(), ConsensusError> { +) -> Result<(), ConsensusError> +where + H: BlockHeader, + B: BlockBody, + ChainSpec: EthereumHardforks, +{ // Check ommers hash let ommers_hash = block.body.calculate_ommers_root(); 
- if block.header.ommers_hash != ommers_hash { + if Some(block.header.ommers_hash()) != ommers_hash { return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(), + GotExpected { + got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH), + expected: block.header.ommers_hash(), + } + .into(), )) } @@ -148,11 +160,11 @@ pub fn validate_block_pre_execution( } // EIP-4895: Beacon chain push withdrawals as operations - if chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { + if chain_spec.is_shanghai_active_at_timestamp(block.timestamp()) { validate_shanghai_withdrawals(block)?; } - if chain_spec.is_cancun_active_at_timestamp(block.timestamp) { + if chain_spec.is_cancun_active_at_timestamp(block.timestamp()) { validate_cancun_gas(block)?; } @@ -222,12 +234,12 @@ pub fn validate_header_extradata(header: &H) -> Result<(), Conse #[inline] pub fn validate_against_parent_hash_number( header: &H, - parent: &SealedHeader, + parent: &SealedHeader, ) -> Result<(), ConsensusError> { // Parent number is consistent. 
- if parent.number + 1 != header.number() { + if parent.number() + 1 != header.number() { return Err(ConsensusError::ParentBlockNumberMismatch { - parent_block_number: parent.number, + parent_block_number: parent.number(), block_number: header.number(), }) } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index ba737e56728..4d3ba628269 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -121,7 +121,7 @@ impl Consensu body: &BlockBody, header: &SealedHeader, ) -> Result<(), ConsensusError> { - validate_body_against_header(body, header) + validate_body_against_header(body, header.header()) } fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 6d457f42c90..d05ff9c9bd7 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -65,7 +65,7 @@ impl Consensus for OpBeaconConsensus { body: &BlockBody, header: &SealedHeader, ) -> Result<(), ConsensusError> { - validate_body_against_header(body, header) + validate_body_against_header(body, header.header()) } fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 799e3e7a4c9..b02456f6c48 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -296,6 +296,40 @@ impl SealedBlock { } } +impl SealedBlock +where + H: alloy_consensus::BlockHeader, + B: reth_primitives_traits::BlockBody, +{ + /// Ensures that the transaction root in the block header is valid. + /// + /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure + /// populated with each transaction in the transactions list portion of the block. 
+ /// + /// # Returns + /// + /// Returns `Ok(())` if the calculated transaction root matches the one stored in the header, + /// indicating that the transactions in the block are correctly represented in the trie. + /// + /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected` + /// error containing the calculated and expected roots. + pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> + where + B::Transaction: Encodable2718, + { + let calculated_root = self.body.calculate_tx_root(); + + if self.header.transactions_root() != calculated_root { + return Err(GotExpected { + got: calculated_root, + expected: self.header.transactions_root(), + }) + } + + Ok(()) + } +} + impl SealedBlock where H: reth_primitives_traits::BlockHeader, @@ -385,34 +419,6 @@ where Block::new(self.header.unseal(), self.body) } - /// Ensures that the transaction root in the block header is valid. - /// - /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure - /// populated with each transaction in the transactions list portion of the block. - /// - /// # Returns - /// - /// Returns `Ok(())` if the calculated transaction root matches the one stored in the header, - /// indicating that the transactions in the block are correctly represented in the trie. - /// - /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected` - /// error containing the calculated and expected roots. - pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> - where - B::Transaction: Encodable2718, - { - let calculated_root = self.body.calculate_tx_root(); - - if self.header.transactions_root() != calculated_root { - return Err(GotExpected { - got: calculated_root, - expected: self.header.transactions_root(), - }) - } - - Ok(()) - } - /// Returns a vector of encoded 2718 transactions. /// /// This is also known as `raw transactions`. 
From c7c84f2d3fc59cbec2288f6a097dc029c9fb0375 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 9 Dec 2024 23:08:49 +0400 Subject: [PATCH 952/970] feat: relax bounds for `eth_simulateV1` (#13232) --- Cargo.lock | 1 - crates/optimism/rpc/src/eth/call.rs | 6 +- crates/optimism/rpc/src/eth/pending_block.rs | 26 ++-- crates/optimism/rpc/src/eth/transaction.rs | 17 ++- crates/primitives-traits/src/block/mod.rs | 3 + crates/rpc/rpc-eth-api/Cargo.toml | 1 - crates/rpc/rpc-eth-api/src/core.rs | 8 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 44 +++--- crates/rpc/rpc-eth-api/src/helpers/mod.rs | 7 +- .../rpc-eth-api/src/helpers/pending_block.rs | 59 +++++--- .../rpc-eth-api/src/helpers/transaction.rs | 6 +- crates/rpc/rpc-eth-types/src/simulate.rs | 143 +++++------------- .../rpc/rpc-types-compat/src/transaction.rs | 4 + crates/rpc/rpc/src/eth/helpers/call.rs | 7 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 25 +-- crates/rpc/rpc/src/eth/helpers/types.rs | 15 ++ 16 files changed, 172 insertions(+), 200 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb49d13e872..58982032e30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9129,7 +9129,6 @@ dependencies = [ "reth-chainspec", "reth-errors", "reth-evm", - "reth-execution-types", "reth-network-api", "reth-node-api", "reth-primitives", diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index f7691756408..959d765e349 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -5,15 +5,15 @@ use alloy_rpc_types_eth::transaction::TransactionRequest; use reth_evm::ConfigureEvm; use reth_provider::ProviderHeader; use reth_rpc_eth_api::{ - helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, - FromEthApiError, IntoEthApiError, + helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking}, + FromEthApiError, FullEthApiTypes, IntoEthApiError, }; use 
reth_rpc_eth_types::{revm_utils::CallFees, RpcInvalidTransactionError}; use revm::primitives::{BlockEnv, OptimismFields, TxEnv}; impl EthCall for OpEthApi where - Self: EstimateCall + LoadPendingBlock, + Self: EstimateCall + LoadBlock + FullEthApiTypes, N: OpNodeCore, { } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 5c437de76a7..01c2264063e 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -21,7 +21,7 @@ use reth_rpc_eth_api::{ }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg, ExecutionResult, SpecId}; +use revm::primitives::{BlockEnv, ExecutionResult}; impl LoadPendingBlock for OpEthApi where @@ -82,23 +82,26 @@ where fn assemble_block( &self, - cfg: CfgEnvWithHandlerCfg, - block_env: BlockEnv, + block_env: &BlockEnv, parent_hash: B256, state_root: B256, transactions: Vec>, receipts: &[ProviderReceipt], ) -> reth_provider::ProviderBlock { let chain_spec = self.provider().chain_spec(); + let timestamp = block_env.timestamp.to::(); let transactions_root = calculate_transaction_root(&transactions); let receipts_root = calculate_receipt_root_no_memo_optimism( &receipts.iter().collect::>(), &chain_spec, - block_env.timestamp.to::(), + timestamp, ); let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs)); + let is_cancun = chain_spec.is_cancun_active_at_timestamp(timestamp); + let is_prague = chain_spec.is_prague_active_at_timestamp(timestamp); + let is_shanghai = chain_spec.is_shanghai_active_at_timestamp(timestamp); let header = Header { parent_hash, @@ -107,10 +110,9 @@ where state_root, transactions_root, receipts_root, - withdrawals_root: (cfg.handler_cfg.spec_id >= SpecId::SHANGHAI) - .then_some(EMPTY_WITHDRAWALS), + withdrawals_root: (is_shanghai).then_some(EMPTY_WITHDRAWALS), logs_bloom, - timestamp: 
block_env.timestamp.to::(), + timestamp, mix_hash: block_env.prevrandao.unwrap_or_default(), nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(block_env.basefee.to::()), @@ -118,15 +120,13 @@ where gas_limit: block_env.gas_limit.to::(), difficulty: U256::ZERO, gas_used: receipts.last().map(|r| r.cumulative_gas_used).unwrap_or_default(), - blob_gas_used: (cfg.handler_cfg.spec_id >= SpecId::CANCUN).then(|| { + blob_gas_used: is_cancun.then(|| { transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum::() }), excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), extra_data: Default::default(), - parent_beacon_block_root: (cfg.handler_cfg.spec_id >= SpecId::CANCUN) - .then_some(B256::ZERO), - requests_hash: (cfg.handler_cfg.spec_id >= SpecId::PRAGUE) - .then_some(EMPTY_REQUESTS_HASH), + parent_beacon_block_root: is_cancun.then_some(B256::ZERO), + requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH), target_blobs_per_block: None, }; @@ -139,7 +139,7 @@ where fn assemble_receipt( &self, - tx: &reth_primitives::RecoveredTx>, + tx: &ProviderTx, result: ExecutionResult, cumulative_gas_used: u64, ) -> reth_provider::ProviderReceipt { diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index d455d8e897e..468b46d97eb 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,7 +1,7 @@ //! Loads and formats OP transaction RPC response. 
use alloy_consensus::{Signed, Transaction as _}; -use alloy_primitives::{Bytes, Sealable, Sealed, B256}; +use alloy_primitives::{Bytes, PrimitiveSignature as Signature, Sealable, Sealed, B256}; use alloy_rpc_types_eth::TransactionInfo; use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; @@ -14,7 +14,7 @@ use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, TransactionCompat, }; -use reth_rpc_eth_types::utils::recover_raw_transaction; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, SequencerClient}; @@ -151,6 +151,19 @@ where }) } + fn build_simulate_v1_transaction( + &self, + request: alloy_rpc_types_eth::TransactionRequest, + ) -> Result { + let Ok(tx) = request.build_typed_tx() else { + return Err(OpEthApiError::Eth(EthApiError::TransactionConversionError)) + }; + + // Create an empty signature for the transaction. + let signature = Signature::new(Default::default(), Default::default(), false); + Ok(TransactionSigned::new_unhashed(tx.into(), signature)) + } + fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { let input = match &mut tx.inner.inner { OpTxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 53afc737768..1994075b922 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -24,6 +24,9 @@ impl FullBlock for T where { } +/// Helper trait to access [`BlockBody::Transaction`] given a [`Block`]. +pub type BlockTx = <::Body as BlockBody>::Transaction; + /// Abstraction of block data type. 
// todo: make sealable super-trait, depends on // todo: make with senders extension trait, so block can be impl by block type already containing diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 95ed98d8086..6f65b91d8f8 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -26,7 +26,6 @@ reth-rpc-types-compat.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-chainspec.workspace = true -reth-execution-types.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-api.workspace = true diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 810400c6f6e..c103835a801 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -13,7 +13,6 @@ use alloy_rpc_types_eth::{ }; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_provider::BlockReader; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; @@ -372,12 +371,7 @@ impl RpcHeader, > for T where - T: FullEthApi< - Provider: BlockReader< - Header = alloy_consensus::Header, - Transaction = reth_primitives::TransactionSigned, - >, - >, + T: FullEthApi, jsonrpsee_types::error::ErrorObject<'static>: From, { /// Handler for: `eth_protocolVersion` diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index f6d665121fc..e22fccc6726 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -20,9 +20,7 @@ use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_node_api::BlockBody; use reth_primitives_traits::SignedTransaction; -use reth_provider::{ - BlockIdReader, BlockReader, ChainSpecProvider, HeaderProvider, ProviderHeader, -}; +use reth_provider::{BlockIdReader, 
ChainSpecProvider, HeaderProvider, ProviderHeader}; use reth_revm::{ database::StateProviderDatabase, db::CacheDB, @@ -50,7 +48,7 @@ pub type SimulatedBlocksResult = Result>>, /// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in /// the `eth_` namespace. -pub trait EthCall: EstimateCall + Call + LoadPendingBlock { +pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthApiTypes { /// Estimate gas needed for execution of the `request` at the [`BlockId`]. fn estimate_gas_at( &self, @@ -70,15 +68,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { &self, payload: SimulatePayload, block: Option, - ) -> impl Future> + Send - where - Self: LoadBlock< - Provider: BlockReader< - Header = alloy_consensus::Header, - Transaction = reth_primitives::TransactionSigned, - >, - > + FullEthApiTypes, - { + ) -> impl Future> + Send { async move { if payload.block_state_calls.len() > self.max_simulate_blocks() as usize { return Err(EthApiError::InvalidParams("too many blocks.".to_string()).into()) @@ -171,9 +161,11 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { block_env.gas_limit.to(), cfg.chain_id, &mut db, + this.tx_resp_builder(), )?; let mut calls = calls.into_iter().peekable(); + let mut senders = Vec::with_capacity(transactions.len()); let mut results = Vec::with_capacity(calls.len()); while let Some(tx) = calls.next() { @@ -197,18 +189,27 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { db.commit(res.state); } - results.push((env.tx.caller, res.result)); + senders.push(env.tx.caller); + results.push(res.result); } + let (block, _) = this.assemble_block_and_receipts( + &block_env, + parent_hash, + // state root calculation is skipped for performance reasons + B256::ZERO, + transactions, + results.clone(), + ); + let block: SimulatedBlock> = - simulate::build_block( + simulate::build_simulated_block( + senders, results, - transactions, - &block_env, - parent_hash, 
total_difficulty, return_full_transactions, this.tx_resp_builder(), + block, )?; parent_hash = block.inner.header.hash; @@ -245,10 +246,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { bundle: Bundle, state_context: Option, mut state_override: Option, - ) -> impl Future, Self::Error>> + Send - where - Self: LoadBlock, - { + ) -> impl Future, Self::Error>> + Send { async move { let Bundle { transactions, block_override } = bundle; if transactions.is_empty() { @@ -608,7 +606,7 @@ pub trait Call: f: F, ) -> impl Future, Self::Error>> + Send where - Self: LoadBlock + LoadPendingBlock + LoadTransaction, + Self: LoadBlock + LoadTransaction, F: FnOnce(TransactionInfo, ResultAndState, StateCacheDb<'_>) -> Result + Send + 'static, diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index 174cb3bad04..27d23da74b2 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -42,12 +42,9 @@ pub use transaction::{EthTransactions, LoadTransaction}; use crate::FullEthApiTypes; /// Extension trait that bundles traits needed for tracing transactions. -pub trait TraceExt: - LoadTransaction + LoadBlock + LoadPendingBlock + SpawnBlocking + Trace + Call -{ -} +pub trait TraceExt: LoadTransaction + LoadBlock + SpawnBlocking + Trace + Call {} -impl TraceExt for T where T: LoadTransaction + LoadBlock + LoadPendingBlock + Trace + Call {} +impl TraceExt for T where T: LoadTransaction + LoadBlock + Trace + Call {} /// Helper trait to unify all `eth` rpc server building block traits, for simplicity. 
/// diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 2ca6c028c31..c6e0e0c5939 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -15,8 +15,7 @@ use reth_evm::{ state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes, }; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockExt, InvalidTransactionError, RecoveredTx, SealedBlockWithSenders}; +use reth_primitives::{BlockExt, InvalidTransactionError, SealedBlockWithSenders}; use reth_primitives_traits::receipt::ReceiptExt; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderBlock, ProviderError, @@ -199,7 +198,7 @@ pub trait LoadPendingBlock: /// Assembles a receipt for a transaction, based on its [`ExecutionResult`]. fn assemble_receipt( &self, - tx: &RecoveredTx>, + tx: &ProviderTx, result: ExecutionResult, cumulative_gas_used: u64, ) -> ProviderReceipt; @@ -207,14 +206,36 @@ pub trait LoadPendingBlock: /// Assembles a pending block. fn assemble_block( &self, - cfg: CfgEnvWithHandlerCfg, - block_env: BlockEnv, + block_env: &BlockEnv, parent_hash: revm_primitives::B256, state_root: revm_primitives::B256, transactions: Vec>, receipts: &[ProviderReceipt], ) -> ProviderBlock; + /// Helper to invoke both [`Self::assemble_block`] and [`Self::assemble_receipt`]. 
+ fn assemble_block_and_receipts( + &self, + block_env: &BlockEnv, + parent_hash: revm_primitives::B256, + state_root: revm_primitives::B256, + transactions: Vec>, + results: Vec, + ) -> (ProviderBlock, Vec>) { + let mut cumulative_gas_used = 0; + let mut receipts = Vec::with_capacity(results.len()); + + for (tx, outcome) in transactions.iter().zip(results) { + cumulative_gas_used += outcome.gas_used(); + receipts.push(self.assemble_receipt(tx, outcome, cumulative_gas_used)); + } + + let block = + self.assemble_block(block_env, parent_hash, state_root, transactions, &receipts); + + (block, receipts) + } + /// Builds a pending block using the configured provider and pool. /// /// If the origin is the actual pending block, the block is built with withdrawals. @@ -248,7 +269,6 @@ pub trait LoadPendingBlock: let mut sum_blob_gas_used = 0; let block_gas_limit: u64 = block_env.gas_limit.to::(); let base_fee = block_env.basefee.to::(); - let block_number = block_env.number.to::(); let mut executed_txs = Vec::new(); let mut senders = Vec::new(); @@ -266,7 +286,7 @@ pub trait LoadPendingBlock: .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, parent_hash) .map_err(|err| EthApiError::Internal(err.into()))?; - let mut receipts = Vec::new(); + let mut results = Vec::new(); while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this transaction @@ -374,13 +394,11 @@ pub trait LoadPendingBlock: // add gas used by the transaction to cumulative gas used, before creating the receipt cumulative_gas_used += gas_used; - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Some(self.assemble_receipt(&tx, result, cumulative_gas_used))); - // append transaction to the list of executed transactions let (tx, sender) = tx.to_components(); executed_txs.push(tx); senders.push(sender); + results.push(result); } // executes the withdrawals and commits them to the Database and BundleState. 
@@ -396,22 +414,19 @@ pub trait LoadPendingBlock: // merge all transitions into bundle state. db.merge_transitions(BundleRetention::PlainState); - let execution_outcome: ExecutionOutcome> = - ExecutionOutcome::new( - db.take_bundle(), - vec![receipts.clone()].into(), - block_number, - Vec::new(), - ); - let hashed_state = db.database.hashed_post_state(execution_outcome.state()); + let bundle_state = db.take_bundle(); + let hashed_state = db.database.hashed_post_state(&bundle_state); // calculate the state root let state_root = db.database.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; - // Convert Vec> to Vec - let receipts: Vec<_> = receipts.into_iter().flatten().collect(); - let block = - self.assemble_block(cfg, block_env, parent_hash, state_root, executed_txs, &receipts); + let (block, receipts) = self.assemble_block_and_receipts( + &block_env, + parent_hash, + state_root, + executed_txs, + results, + ); Ok((SealedBlockWithSenders { block: block.seal_slow(), senders }, receipts)) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 364ea27cc31..253aac91d8b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -25,9 +25,7 @@ use reth_rpc_types_compat::transaction::{from_recovered, from_recovered_with_blo use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use std::sync::Arc; -use super::{ - EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking, -}; +use super::{EthApiSpec, EthSigner, LoadBlock, LoadReceipt, LoadState, SpawnBlocking}; use crate::{ helpers::estimate::EstimateCall, FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, RpcTransaction, @@ -365,7 +363,7 @@ pub trait EthTransactions: LoadTransaction { mut request: TransactionRequest, ) -> impl Future> + Send where - Self: EthApiSpec + LoadBlock + 
LoadPendingBlock + EstimateCall, + Self: EthApiSpec + LoadBlock + EstimateCall, { async move { let from = match request.from { diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index e5ccb47ba5c..a6ea5c4b788 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,21 +1,18 @@ //! Utilities for serving `eth_simulateV1` -use alloy_consensus::{Transaction as _, TxType}; -use alloy_primitives::PrimitiveSignature as Signature; +use alloy_consensus::{BlockHeader, Transaction as _, TxType}; use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, transaction::TransactionRequest, - Block, BlockTransactionsKind, + Block, BlockTransactionsKind, Header, }; use jsonrpsee_types::ErrorObject; -use reth_primitives::{ - proofs::{calculate_receipt_root, calculate_transaction_root}, - BlockBody, BlockWithSenders, Receipt, TransactionSigned, -}; +use reth_primitives::BlockWithSenders; +use reth_primitives_traits::{block::BlockTx, BlockBody as _, SignedTransaction}; use reth_rpc_server_types::result::rpc_err; use reth_rpc_types_compat::{block::from_block, TransactionCompat}; use revm::Database; -use revm_primitives::{Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; +use revm_primitives::{Address, Bytes, ExecutionResult, TxKind, U256}; use crate::{ error::{api::FromEthApiError, ToRpcError}, @@ -49,17 +46,18 @@ impl ToRpcError for EthSimulateError { } /// Goes over the list of [`TransactionRequest`]s and populates missing fields trying to resolve -/// them into [`TransactionSigned`]. +/// them into primitive transactions. /// /// If validation is enabled, the function will return error if any of the transactions can't be /// built right away. 
-pub fn resolve_transactions( +pub fn resolve_transactions>( txs: &mut [TransactionRequest], validation: bool, block_gas_limit: u64, chain_id: u64, db: &mut DB, -) -> Result, EthApiError> + tx_resp_builder: &T, +) -> Result, EthApiError> where EthApiError: From, { @@ -125,49 +123,44 @@ where } } - let Ok(tx) = tx.clone().build_typed_tx() else { - return Err(EthApiError::TransactionConversionError) - }; - - // Create an empty signature for the transaction. - let signature = Signature::new(Default::default(), Default::default(), false); - let tx = TransactionSigned::new_unhashed(tx.into(), signature); - transactions.push(tx); + transactions.push( + tx_resp_builder + .build_simulate_v1_transaction(tx.clone()) + .map_err(|e| EthApiError::other(e.into()))?, + ); } Ok(transactions) } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. -pub fn build_block>( - results: Vec<(Address, ExecutionResult)>, - transactions: Vec, - block_env: &BlockEnv, - parent_hash: B256, +#[expect(clippy::type_complexity)] +pub fn build_simulated_block( + senders: Vec

, + results: Vec, total_difficulty: U256, full_transactions: bool, tx_resp_builder: &T, -) -> Result>, T::Error> { + block: B, +) -> Result>>, T::Error> +where + T: TransactionCompat, Error: FromEthApiError>, + B: reth_primitives_traits::Block, +{ let mut calls: Vec = Vec::with_capacity(results.len()); - let mut senders = Vec::with_capacity(results.len()); - let mut receipts = Vec::with_capacity(results.len()); let mut log_index = 0; - for (transaction_index, ((sender, result), tx)) in - results.into_iter().zip(transactions.iter()).enumerate() - { - senders.push(sender); - + for (index, (result, tx)) in results.iter().zip(block.body().transactions()).enumerate() { let call = match result { ExecutionResult::Halt { reason, gas_used } => { - let error = RpcInvalidTransactionError::halt(reason, tx.gas_limit()); + let error = RpcInvalidTransactionError::halt(*reason, tx.gas_limit()); SimCallResult { return_data: Bytes::new(), error: Some(SimulateError { code: error.error_code(), message: error.to_string(), }), - gas_used, + gas_used: *gas_used, logs: Vec::new(), status: false, } @@ -175,31 +168,31 @@ pub fn build_block>( ExecutionResult::Revert { output, gas_used } => { let error = RevertError::new(output.clone()); SimCallResult { - return_data: output, + return_data: output.clone(), error: Some(SimulateError { code: error.error_code(), message: error.to_string(), }), - gas_used, + gas_used: *gas_used, status: false, logs: Vec::new(), } } ExecutionResult::Success { output, gas_used, logs, .. 
} => SimCallResult { - return_data: output.into_data(), + return_data: output.clone().into_data(), error: None, - gas_used, + gas_used: *gas_used, logs: logs - .into_iter() + .iter() .map(|log| { log_index += 1; alloy_rpc_types_eth::Log { - inner: log, + inner: log.clone(), log_index: Some(log_index - 1), - transaction_index: Some(transaction_index as u64), - transaction_hash: Some(tx.hash()), - block_number: Some(block_env.number.to()), - block_timestamp: Some(block_env.timestamp.to()), + transaction_index: Some(index as u64), + transaction_hash: Some(*tx.tx_hash()), + block_number: Some(block.header().number()), + block_timestamp: Some(block.header().timestamp()), ..Default::default() } }) @@ -208,70 +201,10 @@ pub fn build_block>( }, }; - receipts.push( - #[allow(clippy::needless_update)] - Receipt { - tx_type: tx.tx_type(), - success: call.status, - cumulative_gas_used: call.gas_used + calls.iter().map(|c| c.gas_used).sum::(), - logs: call.logs.iter().map(|log| &log.inner).cloned().collect(), - ..Default::default() - } - .with_bloom(), - ); - calls.push(call); } - // TODO: uncomment once performance cost is acceptable - // - // let mut hashed_state = HashedPostState::default(); - // for (address, account) in &db.accounts { - // let hashed_address = keccak256(address); - // hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into())); - - // let storage = hashed_state - // .storages - // .entry(hashed_address) - // .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); - - // for (slot, value) in &account.storage { - // let slot = B256::from(*slot); - // let hashed_slot = keccak256(slot); - // storage.storage.insert(hashed_slot, *value); - // } - // } - - // let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; - let state_root = B256::ZERO; - - let header = alloy_consensus::Header { - beneficiary: block_env.coinbase, - difficulty: block_env.difficulty, - number: block_env.number.to(), - 
timestamp: block_env.timestamp.to(), - base_fee_per_gas: Some(block_env.basefee.to()), - gas_limit: block_env.gas_limit.to(), - gas_used: calls.iter().map(|c| c.gas_used).sum(), - blob_gas_used: Some(0), - parent_hash, - receipts_root: calculate_receipt_root(&receipts), - transactions_root: calculate_transaction_root(&transactions), - state_root, - logs_bloom: alloy_primitives::logs_bloom( - receipts.iter().flat_map(|r| r.receipt.logs.iter()), - ), - mix_hash: block_env.prevrandao.unwrap_or_default(), - ..Default::default() - }; - - let block = BlockWithSenders { - block: reth_primitives::Block { - header, - body: BlockBody { transactions, ..Default::default() }, - }, - senders, - }; + let block = BlockWithSenders { block, senders }; let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs index d6180ca1ee2..d3d1a71decc 100644 --- a/crates/rpc/rpc-types-compat/src/transaction.rs +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -57,6 +57,10 @@ pub trait TransactionCompat: tx_inf: TransactionInfo, ) -> Result; + /// Builds a fake transaction from a transaction request for inclusion into block built in + /// `eth_simulateV1`. + fn build_simulate_v1_transaction(&self, request: TransactionRequest) -> Result; + /// Truncates the input of a transaction to only the first 4 bytes. // todo: remove in favour of using constructor on `TransactionResponse` or similar // . 
diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index bddd2b1b8fc..2620165b907 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -4,13 +4,14 @@ use crate::EthApi; use alloy_consensus::Header; use reth_evm::ConfigureEvm; use reth_provider::{BlockReader, ProviderHeader}; -use reth_rpc_eth_api::helpers::{ - estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking, +use reth_rpc_eth_api::{ + helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, + FullEthApiTypes, }; impl EthCall for EthApi where - Self: EstimateCall + LoadPendingBlock, + Self: EstimateCall + LoadPendingBlock + FullEthApiTypes, Provider: BlockReader, { } diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 344f56da849..2af82ef511b 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -20,7 +20,7 @@ use reth_rpc_eth_api::{ }; use reth_rpc_eth_types::PendingBlock; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId, B256}; +use revm_primitives::{BlockEnv, B256}; use crate::EthApi; @@ -56,18 +56,24 @@ where fn assemble_block( &self, - cfg: CfgEnvWithHandlerCfg, - block_env: BlockEnv, + block_env: &BlockEnv, parent_hash: revm_primitives::B256, state_root: revm_primitives::B256, transactions: Vec>, receipts: &[ProviderReceipt], ) -> reth_provider::ProviderBlock { + let chain_spec = self.provider().chain_spec(); + let transactions_root = calculate_transaction_root(&transactions); let receipts_root = calculate_receipt_root_no_memo(&receipts.iter().collect::>()); let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs)); + let timestamp = block_env.timestamp.to::(); + let is_shanghai = chain_spec.is_shanghai_active_at_timestamp(timestamp); + let 
is_cancun = chain_spec.is_cancun_active_at_timestamp(timestamp); + let is_prague = chain_spec.is_prague_active_at_timestamp(timestamp); + let header = Header { parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, @@ -75,8 +81,7 @@ where state_root, transactions_root, receipts_root, - withdrawals_root: (cfg.handler_cfg.spec_id >= SpecId::SHANGHAI) - .then_some(EMPTY_WITHDRAWALS), + withdrawals_root: is_shanghai.then_some(EMPTY_WITHDRAWALS), logs_bloom, timestamp: block_env.timestamp.to::(), mix_hash: block_env.prevrandao.unwrap_or_default(), @@ -86,15 +91,13 @@ where gas_limit: block_env.gas_limit.to::(), difficulty: U256::ZERO, gas_used: receipts.last().map(|r| r.cumulative_gas_used).unwrap_or_default(), - blob_gas_used: (cfg.handler_cfg.spec_id >= SpecId::CANCUN).then(|| { + blob_gas_used: is_cancun.then(|| { transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum::() }), excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), extra_data: Default::default(), - parent_beacon_block_root: (cfg.handler_cfg.spec_id >= SpecId::CANCUN) - .then_some(B256::ZERO), - requests_hash: (cfg.handler_cfg.spec_id >= SpecId::PRAGUE) - .then_some(EMPTY_REQUESTS_HASH), + parent_beacon_block_root: is_cancun.then_some(B256::ZERO), + requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH), target_blobs_per_block: None, }; @@ -107,7 +110,7 @@ where fn assemble_receipt( &self, - tx: &reth_primitives::RecoveredTx>, + tx: &ProviderTx, result: revm_primitives::ExecutionResult, cumulative_gas_used: u64, ) -> reth_provider::ProviderReceipt { diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 79fb6fcc907..28c66967e2f 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -2,6 +2,8 @@ use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; use alloy_network::{Ethereum, Network}; +use alloy_primitives::PrimitiveSignature as Signature; +use 
alloy_rpc_types::TransactionRequest; use alloy_rpc_types_eth::{Transaction, TransactionInfo}; use reth_primitives::{RecoveredTx, TransactionSigned}; use reth_rpc_eth_api::EthApiTypes; @@ -84,6 +86,19 @@ where }) } + fn build_simulate_v1_transaction( + &self, + request: TransactionRequest, + ) -> Result { + let Ok(tx) = request.build_typed_tx() else { + return Err(EthApiError::TransactionConversionError) + }; + + // Create an empty signature for the transaction. + let signature = Signature::new(Default::default(), Default::default(), false); + Ok(TransactionSigned::new_unhashed(tx.into(), signature)) + } + fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { let input = match &mut tx.inner { TxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, From f4ae4399da0a6dfd8127e5ba1cae75413bc2ec5d Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 9 Dec 2024 19:41:00 +0000 Subject: [PATCH 953/970] perf(trie): use entry API in sparse trie (#13240) --- crates/trie/sparse/src/trie.rs | 122 +++++++++++++++++++++------------ 1 file changed, 78 insertions(+), 44 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index b4adc8c60a5..3cc0e8703c4 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,7 +1,7 @@ use crate::blinded::{BlindedProvider, DefaultBlindedProvider}; use alloy_primitives::{ hex, keccak256, - map::{HashMap, HashSet}, + map::{Entry, HashMap, HashSet}, B256, }; use alloy_rlp::Decodable; @@ -302,50 +302,80 @@ impl

RevealedSparseTrie

{ } } - match self.nodes.get(&path) { - // Blinded and non-existent nodes can be replaced. - Some(SparseNode::Hash(_)) | None => { - self.nodes.insert(path, SparseNode::new_branch(branch.state_mask)); - } - // Branch node already exists, or an extension node was placed where a - // branch node was before. - Some(SparseNode::Branch { .. } | SparseNode::Extension { .. }) => {} - // All other node types can't be handled. - Some(node @ (SparseNode::Empty | SparseNode::Leaf { .. })) => { - return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) + match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + // Blinded nodes can be replaced. + SparseNode::Hash(_) => { + entry.insert(SparseNode::new_branch(branch.state_mask)); + } + // Branch node already exists, or an extension node was placed where a + // branch node was before. + SparseNode::Branch { .. } | SparseNode::Extension { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(node.clone()), + }) + } + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::new_branch(branch.state_mask)); } } } - TrieNode::Extension(ext) => match self.nodes.get(&path) { - Some(SparseNode::Hash(_)) | None => { - let mut child_path = path.clone(); + TrieNode::Extension(ext) => match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + SparseNode::Hash(_) => { + let mut child_path = entry.key().clone(); + child_path.extend_from_slice_unchecked(&ext.key); + entry.insert(SparseNode::new_ext(ext.key)); + self.reveal_node_or_hash(child_path, &ext.child)?; + } + // Extension node already exists, or an extension node was placed where a branch + // node was before. + SparseNode::Extension { .. } | SparseNode::Branch { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | SparseNode::Leaf { .. 
}) => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(node.clone()), + }) + } + }, + Entry::Vacant(entry) => { + let mut child_path = entry.key().clone(); child_path.extend_from_slice_unchecked(&ext.key); + entry.insert(SparseNode::new_ext(ext.key)); self.reveal_node_or_hash(child_path, &ext.child)?; - self.nodes.insert(path, SparseNode::new_ext(ext.key)); - } - // Extension node already exists, or an extension node was placed where a branch - // node was before. - Some(SparseNode::Extension { .. } | SparseNode::Branch { .. }) => {} - // All other node types can't be handled. - Some(node @ (SparseNode::Empty | SparseNode::Leaf { .. })) => { - return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) } }, - TrieNode::Leaf(leaf) => match self.nodes.get(&path) { - Some(SparseNode::Hash(_)) | None => { - let mut full = path.clone(); + TrieNode::Leaf(leaf) => match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + SparseNode::Hash(_) => { + let mut full = entry.key().clone(); + full.extend_from_slice_unchecked(&leaf.key); + entry.insert(SparseNode::new_leaf(leaf.key)); + self.values.insert(full, leaf.value); + } + // Left node already exists. + SparseNode::Leaf { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | + SparseNode::Extension { .. } | + SparseNode::Branch { .. }) => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(node.clone()), + }) + } + }, + Entry::Vacant(entry) => { + let mut full = entry.key().clone(); full.extend_from_slice_unchecked(&leaf.key); + entry.insert(SparseNode::new_leaf(leaf.key)); self.values.insert(full, leaf.value); - self.nodes.insert(path, SparseNode::new_leaf(leaf.key)); } - // Left node already exists. - Some(SparseNode::Leaf { .. }) => {} - // All other node types can't be handled. - Some( - node @ (SparseNode::Empty | - SparseNode::Extension { .. } | - SparseNode::Branch { .. 
}), - ) => return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }), }, } @@ -355,16 +385,20 @@ impl

RevealedSparseTrie

{ fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { if child.len() == B256::len_bytes() + 1 { let hash = B256::from_slice(&child[1..]); - match self.nodes.get(&path) { - // Hash node with a different hash can't be handled. - Some(node @ SparseNode::Hash(previous_hash)) if previous_hash != &hash => { - return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) - } - None => { - self.nodes.insert(path, SparseNode::Hash(hash)); + match self.nodes.entry(path) { + Entry::Occupied(entry) => match entry.get() { + // Hash node with a different hash can't be handled. + SparseNode::Hash(previous_hash) if previous_hash != &hash => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(SparseNode::Hash(hash)), + }) + } + _ => {} + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::Hash(hash)); } - // All other node types mean that it has already been revealed. - Some(_) => {} } return Ok(()) } From 3c132958d1932fd3038ee27fd66cfe39d75a078d Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 9 Dec 2024 20:52:58 +0100 Subject: [PATCH 954/970] perf(engine): add StateRootTask bench (#13212) --- crates/engine/tree/Cargo.toml | 4 + crates/engine/tree/benches/state_root_task.rs | 166 ++++++++++++++++++ crates/engine/tree/src/tree/mod.rs | 2 +- crates/engine/tree/src/tree/root.rs | 16 +- 4 files changed, 179 insertions(+), 9 deletions(-) create mode 100644 crates/engine/tree/benches/state_root_task.rs diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 680b6933ebe..67cb72850ae 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -93,6 +93,10 @@ rand.workspace = true name = "channel_perf" harness = false +[[bench]] +name = "state_root_task" +harness = false + [features] test-utils = [ "reth-blockchain-tree/test-utils", diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs new 
file mode 100644 index 00000000000..391fd333d12 --- /dev/null +++ b/crates/engine/tree/benches/state_root_task.rs @@ -0,0 +1,166 @@ +//! Benchmark for `StateRootTask` complete workflow, including sending state +//! updates using the incoming messages sender and waiting for the final result. + +#![allow(missing_docs)] + +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use reth_engine_tree::tree::root::{StateRootConfig, StateRootTask}; +use reth_evm::system_calls::OnStateHook; +use reth_primitives::{Account as RethAccount, StorageEntry}; +use reth_provider::{ + providers::ConsistentDbView, + test_utils::{create_test_provider_factory, MockNodeTypesWithDB}, + HashingWriter, ProviderFactory, +}; +use reth_testing_utils::generators::{self, Rng}; +use reth_trie::TrieInput; +use revm_primitives::{ + Account as RevmAccount, AccountInfo, AccountStatus, Address, EvmState, EvmStorageSlot, HashMap, + B256, KECCAK_EMPTY, U256, +}; +use std::sync::Arc; + +#[derive(Debug, Clone)] +struct BenchParams { + num_accounts: usize, + updates_per_account: usize, + storage_slots_per_account: usize, +} + +fn create_bench_state_updates(params: &BenchParams) -> Vec { + let mut rng = generators::rng(); + let all_addresses: Vec

= (0..params.num_accounts).map(|_| rng.gen()).collect(); + let mut updates = Vec::new(); + + for _ in 0..params.updates_per_account { + let num_accounts_in_update = rng.gen_range(1..=params.num_accounts); + let mut state_update = EvmState::default(); + + let selected_addresses = &all_addresses[0..num_accounts_in_update]; + + for &address in selected_addresses { + let mut storage = HashMap::default(); + for _ in 0..params.storage_slots_per_account { + let slot = U256::from(rng.gen::()); + storage.insert( + slot, + EvmStorageSlot::new_changed(U256::ZERO, U256::from(rng.gen::())), + ); + } + + let account = RevmAccount { + info: AccountInfo { + balance: U256::from(rng.gen::()), + nonce: rng.gen::(), + code_hash: KECCAK_EMPTY, + code: Some(Default::default()), + }, + storage, + status: AccountStatus::Touched, + }; + + state_update.insert(address, account); + } + + updates.push(state_update); + } + + updates +} + +fn convert_revm_to_reth_account(revm_account: &RevmAccount) -> RethAccount { + RethAccount { + balance: revm_account.info.balance, + nonce: revm_account.info.nonce, + bytecode_hash: if revm_account.info.code_hash == KECCAK_EMPTY { + None + } else { + Some(revm_account.info.code_hash) + }, + } +} + +fn setup_provider( + factory: &ProviderFactory, + state_updates: &[EvmState], +) -> Result<(), Box> { + let provider_rw = factory.provider_rw()?; + + for update in state_updates { + let account_updates = update + .iter() + .map(|(address, account)| (*address, Some(convert_revm_to_reth_account(account)))); + provider_rw.insert_account_for_hashing(account_updates)?; + + let storage_updates = update.iter().map(|(address, account)| { + let storage_entries = account.storage.iter().map(|(slot, value)| StorageEntry { + key: B256::from(*slot), + value: value.present_value, + }); + (*address, storage_entries) + }); + provider_rw.insert_storage_for_hashing(storage_updates)?; + } + + provider_rw.commit()?; + Ok(()) +} + +fn bench_state_root(c: &mut Criterion) { + let mut group 
= c.benchmark_group("state_root"); + + let scenarios = vec![ + BenchParams { num_accounts: 100, updates_per_account: 5, storage_slots_per_account: 10 }, + BenchParams { num_accounts: 1000, updates_per_account: 10, storage_slots_per_account: 20 }, + ]; + + for params in scenarios { + group.bench_with_input( + BenchmarkId::new( + "state_root_task", + format!( + "accounts_{}_updates_{}_slots_{}", + params.num_accounts, + params.updates_per_account, + params.storage_slots_per_account + ), + ), + ¶ms, + |b, params| { + b.iter_with_setup( + || { + let factory = create_test_provider_factory(); + let state_updates = create_bench_state_updates(params); + setup_provider(&factory, &state_updates).expect("failed to setup provider"); + + let trie_input = Arc::new(TrieInput::from_state(Default::default())); + + let config = StateRootConfig { + consistent_view: ConsistentDbView::new(factory, None), + input: trie_input, + }; + + (config, state_updates) + }, + |(config, state_updates)| { + let task = StateRootTask::new(config); + let mut hook = task.state_hook(); + let handle = task.spawn(); + + for update in state_updates { + hook.on_state(&update) + } + drop(hook); + + black_box(handle.wait_for_result().expect("task failed")); + }, + ) + }, + ); + } + + group.finish(); +} + +criterion_group!(benches, bench_state_root); +criterion_main!(benches); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 763d5d990c5..5fc07abf7a2 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -76,7 +76,7 @@ pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::InvalidBlockHook; -mod root; +pub mod root; /// Keeps track of the state of the tree. 
/// diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index ae22b036b65..53a881387e7 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -37,7 +37,7 @@ pub(crate) type StateRootResult = Result<(B256, TrieUpdates), ParallelStateRootE /// Handle to a spawned state root task. #[derive(Debug)] #[allow(dead_code)] -pub(crate) struct StateRootHandle { +pub struct StateRootHandle { /// Channel for receiving the final result. rx: mpsc::Receiver, } @@ -50,14 +50,14 @@ impl StateRootHandle { } /// Waits for the state root calculation to complete. - pub(crate) fn wait_for_result(self) -> StateRootResult { + pub fn wait_for_result(self) -> StateRootResult { self.rx.recv().expect("state root task was dropped without sending result") } } /// Common configuration for state root tasks #[derive(Debug)] -pub(crate) struct StateRootConfig { +pub struct StateRootConfig { /// View over the state in the database. pub consistent_view: ConsistentDbView, /// Latest trie input. @@ -67,7 +67,7 @@ pub(crate) struct StateRootConfig { /// Messages used internally by the state root task #[derive(Debug)] #[allow(dead_code)] -pub(crate) enum StateRootMessage { +pub enum StateRootMessage { /// New state update from transaction execution StateUpdate(EvmState), /// Proof calculation completed for a specific state update @@ -223,7 +223,7 @@ fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostState { /// to the tree. /// Then it updates relevant leaves according to the result of the transaction. #[derive(Debug)] -pub(crate) struct StateRootTask { +pub struct StateRootTask { /// Task configuration. config: StateRootConfig, /// Receiver for state root related messages. 
@@ -250,7 +250,7 @@ where + 'static, { /// Creates a new state root task with the unified message channel - pub(crate) fn new(config: StateRootConfig) -> Self { + pub fn new(config: StateRootConfig) -> Self { let (tx, rx) = channel(); Self { @@ -264,7 +264,7 @@ where } /// Spawns the state root task and returns a handle to await its result. - pub(crate) fn spawn(self) -> StateRootHandle { + pub fn spawn(self) -> StateRootHandle { let (tx, rx) = mpsc::sync_channel(1); std::thread::Builder::new() .name("State Root Task".to_string()) @@ -279,7 +279,7 @@ where } /// Returns a state hook to be used to send state updates to this task. - pub(crate) fn state_hook(&self) -> impl OnStateHook { + pub fn state_hook(&self) -> impl OnStateHook { let state_hook = StateHookSender::new(self.tx.clone()); move |state: &EvmState| { From bf1688525e08cb40fe74e072a9f487ff973124b3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 21:36:07 +0100 Subject: [PATCH 955/970] chore: add blob_versioned_hashes_iter to block body (#13246) --- crates/primitives-traits/src/block/body.rs | 5 +++++ crates/primitives/src/block.rs | 21 ++++++--------------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 14436ee01c5..ed60796ce1b 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -81,6 +81,11 @@ pub trait BlockBody: .sum() } + /// Returns an iterator over all blob versioned hashes in the block body. + fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { + self.transactions().iter().filter_map(|tx| tx.blob_versioned_hashes()).flatten() + } + /// Returns an iterator over the encoded 2718 transactions. /// /// This is also known as `raw transactions`. 
diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index b02456f6c48..0ee6f860b58 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -263,12 +263,6 @@ impl SealedBlock { } impl SealedBlock { - /// Returns an iterator over all blob transactions of the block - #[inline] - pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { - self.body.blob_transactions_iter() - } - /// Returns whether or not the block contains any blob transactions. #[inline] pub fn has_blob_transactions(&self) -> bool { @@ -280,19 +274,16 @@ impl SealedBlock { pub fn has_eip7702_transactions(&self) -> bool { self.body.has_eip7702_transactions() } +} - /// Returns only the blob transactions, if any, from the block body. - #[inline] - pub fn blob_transactions(&self) -> Vec<&TransactionSigned> { - self.blob_transactions_iter().collect() - } - +impl SealedBlock +where + B: reth_primitives_traits::BlockBody, +{ /// Returns an iterator over all blob versioned hashes from the block body. 
#[inline] pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { - self.blob_transactions_iter() - .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) - .flatten() + self.body.blob_versioned_hashes_iter() } } From af5dc60867236d01fc07554ad08408d5fc894921 Mon Sep 17 00:00:00 2001 From: angel-ding-cb <141944320+angel-ding-cb@users.noreply.github.com> Date: Mon, 9 Dec 2024 12:43:10 -0800 Subject: [PATCH 956/970] Return a propoer error code for txpool is full error (#13245) --- crates/rpc/rpc-eth-types/src/error/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 187e2d943f7..aeea8ea5b89 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -676,6 +676,9 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(error: RpcPoolError) -> Self { match error { RpcPoolError::Invalid(err) => err.into(), + RpcPoolError::TxPoolOverflow => { + rpc_error_with_code(EthRpcErrorCode::TransactionRejected.code(), error.to_string()) + } error => internal_rpc_err(error.to_string()), } } From eb6080863bbc41e5e108de3b8f07ff52db33bc1e Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 01:48:29 +0400 Subject: [PATCH 957/970] feat(rpc): relax `VaidationApi` and `EngineApi` (#13241) --- Cargo.lock | 3 +- crates/engine/primitives/src/lib.rs | 37 ++++--- crates/ethereum/engine-primitives/src/lib.rs | 24 ++--- crates/ethereum/node/src/node.rs | 14 ++- crates/node/builder/src/rpc.rs | 34 ++++--- crates/optimism/node/src/engine.rs | 24 ++--- crates/optimism/node/src/node.rs | 30 ++++-- crates/rpc/rpc-builder/Cargo.toml | 2 - crates/rpc/rpc-builder/src/eth.rs | 3 +- crates/rpc/rpc-builder/src/lib.rs | 98 ++++++++++++------- crates/rpc/rpc-builder/tests/it/middleware.rs | 3 + crates/rpc/rpc-builder/tests/it/startup.rs | 9 +- crates/rpc/rpc-builder/tests/it/utils.rs | 21 +++- 
crates/rpc/rpc-engine-api/src/engine_api.rs | 24 ++--- crates/rpc/rpc-engine-api/tests/it/payload.rs | 2 +- .../rpc-types-compat/src/engine/payload.rs | 13 ++- crates/rpc/rpc/Cargo.toml | 2 +- crates/rpc/rpc/src/validation.rs | 65 ++++++------ examples/custom-engine-types/src/main.rs | 30 +++--- examples/rpc-db/src/main.rs | 12 ++- 20 files changed, 258 insertions(+), 192 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 58982032e30..eabf3db01a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8939,6 +8939,7 @@ dependencies = [ "reth-chainspec", "reth-consensus", "reth-consensus-common", + "reth-engine-primitives", "reth-errors", "reth-ethereum-consensus", "reth-evm", @@ -8946,7 +8947,6 @@ dependencies = [ "reth-network-api", "reth-network-peers", "reth-network-types", - "reth-payload-validator", "reth-primitives", "reth-primitives-traits", "reth-provider", @@ -9020,7 +9020,6 @@ dependencies = [ name = "reth-rpc-builder" version = "1.1.2" dependencies = [ - "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 89fb7459b7d..2bd642cfa20 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -10,6 +10,8 @@ mod error; +use core::fmt; + use alloy_consensus::BlockHeader; use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; pub use error::BeaconOnNewPayloadError; @@ -80,11 +82,28 @@ pub trait EngineTypes: + 'static; } -/// Type that validates the payloads processed by the engine. -pub trait EngineValidator: Clone + Send + Sync + Unpin + 'static { +/// Type that validates an [`ExecutionPayload`]. +pub trait PayloadValidator: fmt::Debug + Send + Sync + Unpin + 'static { /// The block type used by the engine. type Block: Block; + /// Ensures that the given payload does not violate any consensus rules that concern the block's + /// layout. 
+ /// + /// This function must convert the payload into the executable block and pre-validate its + /// fields. + /// + /// Implementers should ensure that the checks are done in the order that conforms with the + /// engine-API specification. + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError>; +} + +/// Type that validates the payloads processed by the engine. +pub trait EngineValidator: PayloadValidator { /// Validates the presence or exclusion of fork-specific fields based on the payload attributes /// and the message version. fn validate_version_specific_fields( @@ -100,20 +119,6 @@ pub trait EngineValidator: Clone + Send + Sync + Unpin + 'st attributes: &::PayloadAttributes, ) -> Result<(), EngineObjectValidationError>; - /// Ensures that the given payload does not violate any consensus rules that concern the block's - /// layout. - /// - /// This function must convert the payload into the executable block and pre-validate its - /// fields. - /// - /// Implementers should ensure that the checks are done in the order that conforms with the - /// engine-API specification. - fn ensure_well_formed_payload( - &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result, PayloadError>; - /// Validates the payload attributes with respect to the header. 
/// /// By default, this enforces that the payload attributes timestamp is greater than the diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index beefd54ca05..59c870f4d28 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -18,7 +18,7 @@ pub use alloy_rpc_types_engine::{ }; pub use payload::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_chainspec::ChainSpec; -use reth_engine_primitives::{EngineTypes, EngineValidator}; +use reth_engine_primitives::{EngineTypes, EngineValidator, PayloadValidator}; use reth_payload_primitives::{ validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, @@ -82,12 +82,22 @@ impl EthereumEngineValidator { } } +impl PayloadValidator for EthereumEngineValidator { + type Block = Block; + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result { + self.inner.ensure_well_formed_payload(payload, sidecar) + } +} + impl EngineValidator for EthereumEngineValidator where Types: EngineTypes, { - type Block = Block; - fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, @@ -103,12 +113,4 @@ where ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields(self.chain_spec(), version, attributes.into()) } - - fn ensure_well_formed_payload( - &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result { - self.inner.ensure_well_formed_payload(payload, sidecar) - } } diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index b2fc7e677ac..54707e69b26 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -6,14 +6,13 @@ use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGenera use reth_beacon_consensus::EthBeaconConsensus; use 
reth_chainspec::ChainSpec; use reth_ethereum_engine_primitives::{ - EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, EthereumEngineValidator, + EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, }; use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_network::{NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, HeaderTy, NodeTypesWithDB, - TxTy, + AddOnsContext, ConfigureEvm, FullNodeComponents, HeaderTy, NodeTypesWithDB, TxTy, }; use reth_node_builder::{ components::{ @@ -37,6 +36,8 @@ use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; +pub use reth_ethereum_engine_primitives::EthereumEngineValidator; + /// Type configuration for a regular Ethereum node. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -353,9 +354,12 @@ pub struct EthereumEngineValidatorBuilder; impl EngineValidatorBuilder for EthereumEngineValidatorBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + ChainSpec = ChainSpec, + Engine = EthEngineTypes, + Primitives = EthPrimitives, + >, Node: FullNodeComponents, - EthereumEngineValidator: EngineValidator, { type Validator = EthereumEngineValidator; diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index e6c9ad23356..c8e08078bb9 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -10,7 +10,7 @@ use std::{ use alloy_rpc_types::engine::ClientVersionV1; use futures::TryFutureExt; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, NodeTypes, + AddOnsContext, BlockTy, EngineValidator, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, }; use reth_node_core::{ @@ -33,6 +33,7 @@ use reth_rpc_builder::{ use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use 
reth_tracing::tracing::{debug, info}; +use std::sync::Arc; use crate::EthApiBuilderCtx; @@ -402,15 +403,7 @@ where impl RpcAddOns where - N: FullNodeComponents< - Types: ProviderNodeTypes< - Primitives: NodePrimitives< - Block = reth_primitives::Block, - BlockHeader = reth_primitives::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, - >, + N: FullNodeComponents, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners @@ -449,7 +442,7 @@ where Box::new(node.task_executor().clone()), client, EngineCapabilities::default(), - engine_validator, + engine_validator.clone(), ); info!(target: "reth::cli", "Engine API handler initialized"); @@ -466,7 +459,12 @@ where .with_evm_config(node.evm_config().clone()) .with_block_executor(node.block_executor().clone()) .with_consensus(node.consensus().clone()) - .build_with_auth_server(module_config, engine_api, eth_api_builder); + .build_with_auth_server( + module_config, + engine_api, + eth_api_builder, + Arc::new(engine_validator), + ); // in dev mode we generate 20 random dev-signer accounts if config.dev.dev { @@ -588,7 +586,8 @@ impl>> EthApi /// Helper trait that provides the validator for the engine API pub trait EngineValidatorAddOn: Send { /// The Validator type to use for the engine API. - type Validator: EngineValidator<::Engine>; + type Validator: EngineValidator<::Engine, Block = BlockTy> + + Clone; /// Creates the engine validator for an engine API based node. fn engine_validator( @@ -613,7 +612,8 @@ where /// A type that knows how to build the engine validator. pub trait EngineValidatorBuilder: Send + Sync + Clone { /// The consensus implementation to build. - type Validator: EngineValidator<::Engine>; + type Validator: EngineValidator<::Engine, Block = BlockTy> + + Clone; /// Creates the engine validator. 
fn build( @@ -625,8 +625,10 @@ pub trait EngineValidatorBuilder: Send + Sync + Clone impl EngineValidatorBuilder for F where Node: FullNodeComponents, - Validator: - EngineValidator<::Engine> + Clone + Unpin + 'static, + Validator: EngineValidator<::Engine, Block = BlockTy> + + Clone + + Unpin + + 'static, F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Sync + Clone, Fut: Future> + Send, { diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 063ac3617af..1db50b72ee8 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -12,7 +12,7 @@ use reth_node_api::{ EngineObjectValidationError, MessageValidationKind, PayloadOrAttributes, PayloadTypes, VersionSpecificValidationError, }, - validate_version_specific_fields, EngineTypes, EngineValidator, + validate_version_specific_fields, EngineTypes, EngineValidator, PayloadValidator, }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::{OpHardfork, OpHardforks}; @@ -77,12 +77,22 @@ impl OpEngineValidator { } } +impl PayloadValidator for OpEngineValidator { + type Block = Block; + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } +} + impl EngineValidator for OpEngineValidator where Types: EngineTypes, { - type Block = Block; - fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, @@ -136,14 +146,6 @@ where Ok(()) } - - fn ensure_well_formed_payload( - &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result, PayloadError> { - self.inner.ensure_well_formed_payload(payload, sidecar) - } } /// Validates the presence of the `withdrawals` field according to the payload timestamp. 
diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 35e33ccd75a..54ff36dabac 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -238,7 +238,12 @@ impl>> OpAddOn impl NodeAddOns for OpAddOns where N: FullNodeComponents< - Types: NodeTypes, + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Storage = OpStorage, + Engine = OpEngineTypes, + >, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -283,7 +288,12 @@ where impl RethRpcAddOns for OpAddOns where N: FullNodeComponents< - Types: NodeTypes, + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Storage = OpStorage, + Engine = OpEngineTypes, + >, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -296,8 +306,13 @@ where impl EngineValidatorAddOn for OpAddOns where - N: FullNodeComponents>, - OpEngineValidator: EngineValidator<::Engine>, + N: FullNodeComponents< + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Engine = OpEngineTypes, + >, + >, { type Validator = OpEngineValidator; @@ -674,9 +689,12 @@ pub struct OpEngineValidatorBuilder; impl EngineValidatorBuilder for OpEngineValidatorBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Engine = OpEngineTypes, + >, Node: FullNodeComponents, - OpEngineValidator: EngineValidator, { type Validator = OpEngineValidator; diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index a0712d617b6..7dbbe7608a7 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -31,8 +31,6 @@ reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true -alloy-consensus.workspace = true - # rpc/net jsonrpsee = { workspace = true, features = ["server"] } tower-http = { workspace = true, features = ["full"] } diff --git 
a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 2a6744e7b18..7339c7089e5 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,4 +1,3 @@ -use alloy_consensus::Header; use reth_evm::ConfigureEvm; use reth_primitives::NodePrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; @@ -62,7 +61,7 @@ where >, ) -> Self where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, Tasks: TaskSpawner + Clone + 'static, { let cache = EthStateCache::spawn_with(provider.clone(), config.cache, executor.clone()); diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 1220020504b..ce29b77f09d 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -16,10 +16,10 @@ //! Configure only an http server with a selection of [`RethRpcModule`]s //! //! ``` -//! use alloy_consensus::Header; +//! use reth_engine_primitives::PayloadValidator; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::TransactionSigned; +//! use reth_primitives::{Header, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ @@ -27,8 +27,18 @@ //! }; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::{PoolTransaction, TransactionPool}; +//! use std::sync::Arc; //! -//! pub async fn launch( +//! pub async fn launch< +//! Provider, +//! Pool, +//! Network, +//! Events, +//! EvmConfig, +//! BlockExecutor, +//! Consensus, +//! Validator, +//! >( //! provider: Provider, //! pool: Pool, //! network: Network, @@ -36,6 +46,7 @@ //! evm_config: EvmConfig, //! block_executor: BlockExecutor, //! consensus: Consensus, +//! validator: Validator, //! ) where //! Provider: FullRpcProvider< //! Transaction = TransactionSigned, @@ -53,6 +64,7 @@ //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::FullConsensus + Clone + 'static, +//! Validator: PayloadValidator, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -71,7 +83,7 @@ //! block_executor, //! consensus, //! ) -//! .build(transports, Box::new(EthApi::with_spawner)); +//! .build(transports, Box::new(EthApi::with_spawner), Arc::new(validator)); //! let handle = RpcServerConfig::default() //! .with_http(ServerBuilder::default()) //! .start(&transport_modules) @@ -83,11 +95,10 @@ //! //! //! ``` -//! use alloy_consensus::Header; -//! use reth_engine_primitives::EngineTypes; +//! use reth_engine_primitives::{EngineTypes, PayloadValidator}; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::TransactionSigned; +//! use reth_primitives::{Header, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -98,6 +109,7 @@ //! use reth_rpc_layer::JwtSecret; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::{PoolTransaction, TransactionPool}; +//! use std::sync::Arc; //! use tokio::try_join; //! //! pub async fn launch< @@ -110,6 +122,7 @@ //! EvmConfig, //! BlockExecutor, //! Consensus, +//! Validator, //! >( //! provider: Provider, //! pool: Pool, @@ -119,6 +132,7 @@ //! evm_config: EvmConfig, //! block_executor: BlockExecutor, //! consensus: Consensus, +//! validator: Validator, //! ) where //! Provider: FullRpcProvider< //! Transaction = TransactionSigned, @@ -138,6 +152,7 @@ //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::FullConsensus + Clone + 'static, +//! Validator: PayloadValidator, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -158,8 +173,12 @@ //! ); //! //! // configure the server modules -//! let (modules, auth_module, _registry) = -//! builder.build_with_auth_server(transports, engine_api, Box::new(EthApi::with_spawner)); +//! let (modules, auth_module, _registry) = builder.build_with_auth_server( +//! transports, +//! engine_api, +//! Box::new(EthApi::with_spawner), +//! Arc::new(validator), +//! ); //! //! // start the servers //! let auth_config = AuthServerConfig::builder(JwtSecret::random()).build(); @@ -187,7 +206,6 @@ use std::{ }; use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; -use alloy_consensus::Header; use error::{ConflictingModules, RpcError, ServerKind}; use eth::DynEthApiBuilder; use http::{header::AUTHORIZATION, HeaderMap}; @@ -201,7 +219,7 @@ use jsonrpsee::{ }; use reth_chainspec::EthereumHardforks; use reth_consensus::FullConsensus; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineTypes, PayloadValidator}; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_primitives::NodePrimitives; @@ -274,6 +292,7 @@ pub async fn launch, block_executor: BlockExecutor, consensus: Arc>, + payload_validator: Arc>, ) -> Result where Provider: FullRpcProvider< @@ -297,12 +316,7 @@ where Header = ::BlockHeader, >, >, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives< - BlockHeader = reth_primitives::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, + BlockExecutor: BlockExecutorProvider, { let module_config = module_config.into(); server_config @@ -318,7 +332,7 @@ where block_executor, consensus, ) - .build(module_config, eth), + 
.build(module_config, eth, payload_validator), ) .await } @@ -651,6 +665,7 @@ where Provider: FullRpcProvider< Block = ::Block, Receipt = ::Receipt, + Header = ::BlockHeader, > + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, @@ -661,12 +676,7 @@ where Header = ::BlockHeader, Transaction = ::SignedTx, >, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives< - BlockHeader = reth_primitives::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, + BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can @@ -681,6 +691,7 @@ where module_config: TransportRpcModuleConfig, engine: EngineApi, eth: DynEthApiBuilder, + payload_validator: Arc>, ) -> ( TransportRpcModules, AuthRpcModule, @@ -721,6 +732,7 @@ where evm_config, eth, block_executor, + payload_validator, ); let modules = registry.create_transport_rpc_modules(module_config); @@ -738,21 +750,24 @@ where /// # Example /// /// ```no_run - /// use alloy_consensus::Header; /// use reth_consensus::noop::NoopConsensus; + /// use reth_engine_primitives::PayloadValidator; /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; - /// use reth_primitives::TransactionSigned; + /// use reth_primitives::{Header, TransactionSigned}; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; /// use reth_rpc::EthApi; /// use reth_rpc_builder::RpcModuleBuilder; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::noop::NoopTransactionPool; + /// use std::sync::Arc; /// - /// fn init + 'static>( - /// evm: Evm, - /// ) { + /// fn init(evm: Evm, validator: Validator) + /// where + /// Evm: ConfigureEvm
+ 'static, + /// Validator: PayloadValidator + 'static, + /// { /// let mut registry = RpcModuleBuilder::default() /// .with_provider(NoopProvider::default()) /// .with_pool(NoopTransactionPool::default()) @@ -762,7 +777,7 @@ where /// .with_evm_config(evm) /// .with_block_executor(EthExecutorProvider::mainnet()) /// .with_consensus(NoopConsensus::default()) - /// .into_registry(Default::default(), Box::new(EthApi::with_spawner)); + /// .into_registry(Default::default(), Box::new(EthApi::with_spawner), Arc::new(validator)); /// /// let eth_api = registry.eth_api(); /// } @@ -771,6 +786,7 @@ where self, config: RpcModuleConfig, eth: DynEthApiBuilder, + payload_validator: Arc>, ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, @@ -796,6 +812,7 @@ where evm_config, eth, block_executor, + payload_validator, ) } @@ -805,6 +822,7 @@ where self, module_config: TransportRpcModuleConfig, eth: DynEthApiBuilder, + payload_validator: Arc>, ) -> TransportRpcModules<()> where EthApi: FullEthApiServer< @@ -843,6 +861,7 @@ where evm_config, eth, block_executor, + payload_validator, ); modules.config = module_config; @@ -957,6 +976,7 @@ pub struct RpcRegistryInner< events: Events, block_executor: BlockExecutor, consensus: Consensus, + payload_validator: Arc>, /// Holds the configuration for the RPC modules config: RpcModuleConfig, /// Holds a all `eth_` namespace handlers @@ -1008,9 +1028,10 @@ where EthApi, >, block_executor: BlockExecutor, + payload_validator: Arc>, ) -> Self where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, { let blocking_pool_guard = BlockingTaskGuard::new(config.eth.max_tracing_requests); @@ -1037,6 +1058,7 @@ where blocking_pool_guard, events, block_executor, + payload_validator, } } } @@ -1320,6 +1342,7 @@ where pub fn validation_api(&self) -> ValidationApi where Consensus: reth_consensus::FullConsensus + Clone + 'static, + Provider: BlockReader::Block>, { ValidationApi::new( self.provider.clone(), @@ -1327,6 +1350,7 @@ where self.block_executor.clone(), self.config.flashbots.clone(), Box::new(self.executor.clone()), + self.payload_validator.clone(), ) } } @@ -1334,7 +1358,9 @@ where impl RpcRegistryInner where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider::Block> + + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, @@ -1346,12 +1372,7 @@ where Header = ::BlockHeader, >, >, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives< - BlockHeader = reth_primitives::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, + BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures the auth module that includes the @@ -1500,6 +1521,7 @@ where self.block_executor.clone(), self.config.flashbots.clone(), Box::new(self.executor.clone()), + self.payload_validator.clone(), ) .into_rpc() .into(), diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index 96d818ed4f9..0e0bb80c08b 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -5,6 +5,8 @@ use jsonrpsee::{ types::Request, MethodResponse, }; +use reth_chainspec::MAINNET; +use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_rpc::EthApi; use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; use reth_rpc_eth_api::EthApiClient; @@ -63,6 +65,7 @@ async fn 
test_rpc_middleware() { let modules = builder.build( TransportRpcModuleConfig::set_http(RpcModuleSelection::All), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let mylayer = MyMiddlewareLayer::default(); diff --git a/crates/rpc/rpc-builder/tests/it/startup.rs b/crates/rpc/rpc-builder/tests/it/startup.rs index 9f6961fbba0..ac53b014956 100644 --- a/crates/rpc/rpc-builder/tests/it/startup.rs +++ b/crates/rpc/rpc-builder/tests/it/startup.rs @@ -1,7 +1,9 @@ //! Startup tests -use std::io; +use std::{io, sync::Arc}; +use reth_chainspec::MAINNET; +use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_rpc::EthApi; use reth_rpc_builder::{ error::{RpcError, ServerKind, WsHttpSamePortError}, @@ -30,6 +32,7 @@ async fn test_http_addr_in_use() { let server = builder.build( TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let result = RpcServerConfig::http(Default::default()).with_http_address(addr).start(&server).await; @@ -45,6 +48,7 @@ async fn test_ws_addr_in_use() { let server = builder.build( TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let result = RpcServerConfig::ws(Default::default()).with_ws_address(addr).start(&server).await; let err = result.unwrap_err(); @@ -66,6 +70,7 @@ async fn test_launch_same_port_different_modules() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) @@ -88,6 +93,7 @@ async fn test_launch_same_port_same_cors() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), + 
Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) @@ -108,6 +114,7 @@ async fn test_launch_same_port_different_cors() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 175992c0f14..be708dac5f8 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,4 +1,7 @@ -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::{ + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, +}; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_beacon_consensus::BeaconConsensusEngineHandle; @@ -61,8 +64,11 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { /// Launches a new server with http only with the given modules pub async fn launch_http(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = - builder.build(TransportRpcModuleConfig::set_http(modules), Box::new(EthApi::with_spawner)); + let server = builder.build( + TransportRpcModuleConfig::set_http(modules), + Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), + ); RpcServerConfig::http(Default::default()) .with_http_address(test_address()) .start(&server) @@ -73,8 +79,11 @@ pub async fn launch_http(modules: impl Into) -> RpcServerHan /// Launches a new server with ws only with the given modules pub async fn launch_ws(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = - builder.build(TransportRpcModuleConfig::set_ws(modules), Box::new(EthApi::with_spawner)); + let server = builder.build( + 
TransportRpcModuleConfig::set_ws(modules), + Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), + ); RpcServerConfig::ws(Default::default()) .with_ws_address(test_address()) .start(&server) @@ -89,6 +98,7 @@ pub async fn launch_http_ws(modules: impl Into) -> RpcServer let server = builder.build( TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); RpcServerConfig::ws(Default::default()) .with_ws_address(test_address()) @@ -107,6 +117,7 @@ pub async fn launch_http_ws_same_port(modules: impl Into) -> let server = builder.build( TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); RpcServerConfig::ws(Default::default()) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 8b57cb1f19e..2e80c105e7e 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -25,7 +25,7 @@ use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::{Block, EthereumHardfork}; +use reth_primitives::EthereumHardfork; use reth_rpc_api::EngineApiServer; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, @@ -80,11 +80,7 @@ struct EngineApiInner EngineApi where - Provider: HeaderProvider - + BlockReader - + StateProviderFactory - + EvmEnvProvider - + 'static, + Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, @@ -573,7 +569,7 @@ where f: F, ) -> EngineApiResult>> where - F: Fn(Block) -> R + Send + 'static, + F: Fn(Provider::Block) 
-> R + Send + 'static, R: Send + 'static, { let len = hashes.len() as u64; @@ -748,11 +744,7 @@ where impl EngineApiServer for EngineApi where - Provider: HeaderProvider - + BlockReader - + StateProviderFactory - + EvmEnvProvider - + 'static, + Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, @@ -1045,7 +1037,7 @@ mod tests { use reth_engine_primitives::BeaconEngineMessage; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_payload_builder::test_utils::spawn_test_payload_service; - use reth_primitives::SealedBlock; + use reth_primitives::{Block, SealedBlock}; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::payload::execution_payload_from_sealed_block; use reth_tasks::TokioTaskExecutor; @@ -1171,7 +1163,7 @@ mod tests { let expected = blocks .iter() .cloned() - .map(|b| Some(convert_to_payload_body_v1(b.unseal()))) + .map(|b| Some(convert_to_payload_body_v1(b.unseal::()))) .collect::>(); let res = api.get_payload_bodies_by_range_v1(start, count).await.unwrap(); @@ -1213,7 +1205,7 @@ mod tests { if first_missing_range.contains(&b.number) { None } else { - Some(convert_to_payload_body_v1(b.unseal())) + Some(convert_to_payload_body_v1(b.unseal::())) } }) .collect::>(); @@ -1232,7 +1224,7 @@ mod tests { { None } else { - Some(convert_to_payload_body_v1(b.unseal())) + Some(convert_to_payload_body_v1(b.unseal::())) } }) .collect::>(); diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 78b0351d4a5..363c816d240 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -38,7 +38,7 @@ fn payload_body_roundtrip() { 0..=99, BlockRangeParams { tx_count: 0..2, ..Default::default() }, ) { - let unsealed = block.clone().unseal(); + let unsealed = block.clone().unseal::(); 
let payload_body: ExecutionPayloadBodyV1 = convert_to_payload_body_v1(unsealed); assert_eq!( diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index f504d57addc..3be7835a35a 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -16,6 +16,7 @@ use reth_primitives::{ proofs::{self}, Block, BlockBody, BlockExt, SealedBlock, TransactionSigned, }; +use reth_primitives_traits::BlockBody as _; /// Converts [`ExecutionPayloadV1`] to [`Block`] pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result { @@ -320,15 +321,13 @@ pub fn validate_block_hash( } /// Converts [`Block`] to [`ExecutionPayloadBodyV1`] -pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { - let transactions = value.body.transactions.into_iter().map(|tx| { - let mut out = Vec::new(); - tx.encode_2718(&mut out); - out.into() - }); +pub fn convert_to_payload_body_v1( + value: impl reth_primitives_traits::Block, +) -> ExecutionPayloadBodyV1 { + let transactions = value.body().transactions().iter().map(|tx| tx.encoded_2718().into()); ExecutionPayloadBodyV1 { transactions: transactions.collect(), - withdrawals: value.body.withdrawals.map(Withdrawals::into_inner), + withdrawals: value.body().withdrawals().cloned().map(Withdrawals::into_inner), } } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 5efae46f006..14519860e76 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -18,6 +18,7 @@ reth-primitives = { workspace = true, features = ["secp256k1"] } reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true +reth-engine-primitives.workspace = true reth-errors.workspace = true reth-ethereum-consensus.workspace = true reth-provider.workspace = true @@ -35,7 +36,6 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true 
reth-network-types.workspace = true reth-consensus.workspace = true -reth-payload-validator.workspace = true # ethereum alloy-consensus.workspace = true diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index b13e99eb21c..a7042126cba 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -14,10 +14,10 @@ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; +use reth_engine_primitives::PayloadValidator; use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; use reth_evm::execute::{BlockExecutorProvider, Executor}; -use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{GotExpected, NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_primitives_traits::{Block as _, BlockBody}; use reth_provider::{ @@ -34,14 +34,13 @@ use tokio::sync::{oneshot, RwLock}; /// The type that implements the `validation` rpc namespace trait #[derive(Clone, Debug, derive_more::Deref)] -pub struct ValidationApi { +pub struct ValidationApi { #[deref] inner: Arc>, } impl ValidationApi where - Provider: ChainSpecProvider, E: BlockExecutorProvider, { /// Create a new instance of the [`ValidationApi`] @@ -51,10 +50,12 @@ where executor_provider: E, config: ValidationApiConfig, task_spawner: Box, + payload_validator: Arc< + dyn PayloadValidator::Block>, + >, ) -> Self { let ValidationApiConfig { disallow } = config; - let payload_validator = ExecutionPayloadValidator::new(provider.chain_spec()); let inner = Arc::new(ValidationApiInner { provider, consensus, @@ -91,16 +92,11 @@ where impl ValidationApi where - Provider: BlockReaderIdExt
+ Provider: BlockReaderIdExt
::BlockHeader> + ChainSpecProvider + StateProviderFactory + 'static, - E: BlockExecutorProvider< - Primitives: NodePrimitives< - BlockHeader = Provider::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, + E: BlockExecutorProvider, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( @@ -116,8 +112,8 @@ where self.consensus.validate_block_pre_execution(&block)?; if !self.disallow.is_empty() { - if self.disallow.contains(&block.beneficiary) { - return Err(ValidationApiError::Blacklist(block.beneficiary)) + if self.disallow.contains(&block.beneficiary()) { + return Err(ValidationApiError::Blacklist(block.beneficiary())) } if self.disallow.contains(&message.proposer_fee_recipient) { return Err(ValidationApiError::Blacklist(message.proposer_fee_recipient)) @@ -137,9 +133,9 @@ where let latest_header = self.provider.latest_header()?.ok_or_else(|| ValidationApiError::MissingLatestBlock)?; - if latest_header.hash() != block.header.parent_hash { + if latest_header.hash() != block.header.parent_hash() { return Err(ConsensusError::ParentHashMismatch( - GotExpected { got: block.header.parent_hash, expected: latest_header.hash() } + GotExpected { got: block.header.parent_hash(), expected: latest_header.hash() } .into(), ) .into()) @@ -200,7 +196,7 @@ where /// Ensures that fields of [`BidTrace`] match the fields of the [`SealedHeader`]. 
fn validate_message_against_header( &self, - header: &SealedHeader, + header: &SealedHeader<::BlockHeader>, message: &BidTrace, ) -> Result<(), ValidationApiError> { if header.hash() != message.block_hash { @@ -208,20 +204,20 @@ where got: message.block_hash, expected: header.hash(), })) - } else if header.parent_hash != message.parent_hash { + } else if header.parent_hash() != message.parent_hash { Err(ValidationApiError::ParentHashMismatch(GotExpected { got: message.parent_hash, - expected: header.parent_hash, + expected: header.parent_hash(), })) - } else if header.gas_limit != message.gas_limit { + } else if header.gas_limit() != message.gas_limit { Err(ValidationApiError::GasLimitMismatch(GotExpected { got: message.gas_limit, - expected: header.gas_limit, + expected: header.gas_limit(), })) - } else if header.gas_used != message.gas_used { + } else if header.gas_used() != message.gas_used { return Err(ValidationApiError::GasUsedMismatch(GotExpected { got: message.gas_used, - expected: header.gas_used, + expected: header.gas_used(), })) } else { Ok(()) @@ -235,20 +231,20 @@ where fn validate_gas_limit( &self, registered_gas_limit: u64, - parent_header: &SealedHeader, - header: &SealedHeader, + parent_header: &SealedHeader<::BlockHeader>, + header: &SealedHeader<::BlockHeader>, ) -> Result<(), ValidationApiError> { let max_gas_limit = - parent_header.gas_limit + parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR - 1; + parent_header.gas_limit() + parent_header.gas_limit() / GAS_LIMIT_BOUND_DIVISOR - 1; let min_gas_limit = - parent_header.gas_limit - parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR + 1; + parent_header.gas_limit() - parent_header.gas_limit() / GAS_LIMIT_BOUND_DIVISOR + 1; let best_gas_limit = std::cmp::max(min_gas_limit, std::cmp::min(max_gas_limit, registered_gas_limit)); - if best_gas_limit != header.gas_limit { + if best_gas_limit != header.gas_limit() { return Err(ValidationApiError::GasLimitMismatch(GotExpected { - got: header.gas_limit, + 
got: header.gas_limit(), expected: best_gas_limit, })) } @@ -409,17 +405,12 @@ where #[async_trait] impl BlockSubmissionValidationApiServer for ValidationApi where - Provider: BlockReaderIdExt
+ Provider: BlockReaderIdExt
::BlockHeader> + ChainSpecProvider + StateProviderFactory + Clone + 'static, - E: BlockExecutorProvider< - Primitives: NodePrimitives< - BlockHeader = Provider::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, + E: BlockExecutorProvider, { async fn validate_builder_submission_v1( &self, @@ -473,13 +464,13 @@ where } #[derive(Debug)] -pub struct ValidationApiInner { +pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, /// Consensus implementation. consensus: Arc>, /// Execution payload validator. - payload_validator: ExecutionPayloadValidator, + payload_validator: Arc::Block>>, /// Block executor factory. executor_provider: E, /// Set of disallowed addresses diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 1034effebf8..f30956d8f5c 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -55,7 +55,7 @@ use reth_chainspec::{Chain, ChainSpec, ChainSpecProvider}; use reth_node_api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes}, validate_version_specific_fields, AddOnsContext, EngineTypes, EngineValidator, - FullNodeComponents, PayloadAttributes, PayloadBuilderAttributes, + FullNodeComponents, PayloadAttributes, PayloadBuilderAttributes, PayloadValidator, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{ @@ -189,12 +189,22 @@ impl CustomEngineValidator { } } +impl PayloadValidator for CustomEngineValidator { + type Block = Block; + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } +} + impl EngineValidator for CustomEngineValidator where T: EngineTypes, { - type Block = Block; - fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, @@ -220,14 +230,6 @@ 
where Ok(()) } - fn ensure_well_formed_payload( - &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result, PayloadError> { - self.inner.ensure_well_formed_payload(payload, sidecar) - } - fn validate_payload_attributes_against_header( &self, _attr: &::PayloadAttributes, @@ -246,7 +248,11 @@ pub struct CustomEngineValidatorBuilder; impl EngineValidatorBuilder for CustomEngineValidatorBuilder where N: FullNodeComponents< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, >, { type Validator = CustomEngineValidator; diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 92ae86f00bb..cde891036e6 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -34,7 +34,9 @@ use reth::rpc::builder::{ // Configuring the network parts, ideally also wouldn't need to think about this. use myrpc_ext::{MyRpcExt, MyRpcExtApiServer}; use reth::{blockchain_tree::noop::NoopBlockchainTree, tasks::TokioTaskExecutor}; -use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; +use reth_node_ethereum::{ + node::EthereumEngineValidator, EthEvmConfig, EthExecutorProvider, EthereumNode, +}; use reth_provider::{test_utils::TestCanonStateSubscriptions, ChainSpecProvider}; // Custom rpc extension @@ -70,11 +72,15 @@ async fn main() -> eyre::Result<()> { .with_evm_config(EthEvmConfig::new(spec.clone())) .with_events(TestCanonStateSubscriptions::default()) .with_block_executor(EthExecutorProvider::ethereum(provider.chain_spec())) - .with_consensus(EthBeaconConsensus::new(spec)); + .with_consensus(EthBeaconConsensus::new(spec.clone())); // Pick which namespaces to expose. 
let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]); - let mut server = rpc_builder.build(config, Box::new(EthApi::with_spawner)); + let mut server = rpc_builder.build( + config, + Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(spec)), + ); // Add a custom rpc namespace let custom_rpc = MyRpcExt { provider }; From 386e4b3ebd6ac87528e32bc5789b8c35d2b5f9f0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 23:19:50 +0100 Subject: [PATCH 958/970] feat: extract zstd compressors (#13250) --- Cargo.lock | 9 +- Cargo.toml | 2 + crates/primitives/Cargo.toml | 7 +- crates/primitives/src/lib.rs | 4 - crates/primitives/src/receipt.rs | 4 +- crates/primitives/src/transaction/mod.rs | 8 +- crates/storage/zstd-compressors/Cargo.toml | 19 ++++ .../zstd-compressors}/receipt_dictionary.bin | Bin .../zstd-compressors/src/lib.rs} | 82 +++++++++++------- .../transaction_dictionary.bin | Bin 10 files changed, 90 insertions(+), 45 deletions(-) create mode 100644 crates/storage/zstd-compressors/Cargo.toml rename crates/{primitives/src/compression => storage/zstd-compressors}/receipt_dictionary.bin (100%) rename crates/{primitives/src/compression/mod.rs => storage/zstd-compressors/src/lib.rs} (62%) rename crates/{primitives/src/compression => storage/zstd-compressors}/transaction_dictionary.bin (100%) diff --git a/Cargo.lock b/Cargo.lock index eabf3db01a7..3de11a253bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8743,6 +8743,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-trie-common", + "reth-zstd-compressors", "revm-primitives", "rstest", "secp256k1", @@ -8750,7 +8751,6 @@ dependencies = [ "serde_json", "serde_with", "test-fuzz", - "zstd", ] [[package]] @@ -9646,6 +9646,13 @@ dependencies = [ "thiserror 2.0.5", ] +[[package]] +name = "reth-zstd-compressors" +version = "1.1.2" +dependencies = [ + "zstd", +] + [[package]] name = "revm" version = "18.0.0" diff --git a/Cargo.toml b/Cargo.toml 
index 142b00290b9..beef7d330dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,6 +118,7 @@ members = [ "crates/storage/nippy-jar/", "crates/storage/provider/", "crates/storage/storage-api/", + "crates/storage/zstd-compressors/", "crates/tasks/", "crates/tokio-util/", "crates/tracing/", @@ -422,6 +423,7 @@ reth-trie-common = { path = "crates/trie/common" } reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } reth-trie-sparse = { path = "crates/trie/sparse" } +reth-zstd-compressors = { path = "crates/storage/zstd-compressors", default-features = false } # revm revm = { version = "18.0.0", features = ["std"], default-features = false } diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 2f8f37bcd35..109b20ec2bc 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -18,6 +18,7 @@ reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true revm-primitives = { workspace = true, features = ["serde"] } reth-codecs = { workspace = true, optional = true } +reth-zstd-compressors = { workspace = true, optional = true } # ethereum alloy-consensus.workspace = true @@ -55,7 +56,6 @@ rand = { workspace = true, optional = true } rayon.workspace = true serde.workspace = true serde_with = { workspace = true, optional = true } -zstd = { workspace = true, features = ["experimental"], optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } @@ -108,11 +108,12 @@ std = [ "alloy-rlp/std", "reth-ethereum-forks/std", "bytes/std", - "derive_more/std" + "derive_more/std", + "reth-zstd-compressors?/std" ] reth-codec = [ "dep:reth-codecs", - "dep:zstd", + "dep:reth-zstd-compressors", "dep:modular-bitfield", "std", "reth-primitives-traits/reth-codec", ] diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index edbc73a9362..18fe1498b8a 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs 
@@ -27,8 +27,6 @@ pub use traits::*; #[cfg(feature = "alloy-compat")] mod alloy_compat; mod block; -#[cfg(feature = "reth-codec")] -mod compression; pub mod proofs; mod receipt; pub use reth_static_file_types as static_file; @@ -38,8 +36,6 @@ pub use block::{generate_valid_header, valid_header_strategy}; pub use block::{ Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockFor, SealedBlockWithSenders, }; -#[cfg(feature = "reth-codec")] -pub use compression::*; pub use receipt::{gas_spent_by_transactions, Receipt, Receipts}; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 419c36c2080..62c664e22a4 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -12,9 +12,9 @@ use derive_more::{DerefMut, From, IntoIterator}; use reth_primitives_traits::receipt::ReceiptExt; use serde::{Deserialize, Serialize}; -#[cfg(feature = "reth-codec")] -use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; use crate::TxType; +#[cfg(feature = "reth-codec")] +use reth_zstd_compressors::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; /// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). 
pub use reth_primitives_traits::receipt::gas_spent_by_transactions; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 670ee7f352e..b64cf094042 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1365,14 +1365,14 @@ impl reth_codecs::Compact for TransactionSigned { let tx_bits = if zstd_bit { let mut tmp = Vec::with_capacity(256); if cfg!(feature = "std") { - crate::compression::TRANSACTION_COMPRESSOR.with(|compressor| { + reth_zstd_compressors::TRANSACTION_COMPRESSOR.with(|compressor| { let mut compressor = compressor.borrow_mut(); let tx_bits = self.transaction.to_compact(&mut tmp); buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); tx_bits as u8 }) } else { - let mut compressor = crate::compression::create_tx_compressor(); + let mut compressor = reth_zstd_compressors::create_tx_compressor(); let tx_bits = self.transaction.to_compact(&mut tmp); buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); tx_bits as u8 @@ -1399,7 +1399,7 @@ impl reth_codecs::Compact for TransactionSigned { let zstd_bit = bitflags >> 3; let (transaction, buf) = if zstd_bit != 0 { if cfg!(feature = "std") { - crate::compression::TRANSACTION_DECOMPRESSOR.with(|decompressor| { + reth_zstd_compressors::TRANSACTION_DECOMPRESSOR.with(|decompressor| { let mut decompressor = decompressor.borrow_mut(); // TODO: enforce that zstd is only present at a "top" level type @@ -1411,7 +1411,7 @@ impl reth_codecs::Compact for TransactionSigned { (transaction, buf) }) } else { - let mut decompressor = crate::compression::create_tx_decompressor(); + let mut decompressor = reth_zstd_compressors::create_tx_decompressor(); let transaction_type = (bitflags & 0b110) >> 1; let (transaction, _) = Transaction::from_compact(decompressor.decompress(buf), transaction_type); diff --git a/crates/storage/zstd-compressors/Cargo.toml b/crates/storage/zstd-compressors/Cargo.toml new 
file mode 100644 index 00000000000..357684f32fc --- /dev/null +++ b/crates/storage/zstd-compressors/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "reth-zstd-compressors" +version.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true +description = "Commonly used zstd compressors." + +[lints] +workspace = true + +[dependencies] +zstd = { workspace = true, features = ["experimental"] } + +[features] +default = ["std"] +std = [] \ No newline at end of file diff --git a/crates/primitives/src/compression/receipt_dictionary.bin b/crates/storage/zstd-compressors/receipt_dictionary.bin similarity index 100% rename from crates/primitives/src/compression/receipt_dictionary.bin rename to crates/storage/zstd-compressors/receipt_dictionary.bin diff --git a/crates/primitives/src/compression/mod.rs b/crates/storage/zstd-compressors/src/lib.rs similarity index 62% rename from crates/primitives/src/compression/mod.rs rename to crates/storage/zstd-compressors/src/lib.rs index ecceafc2068..d5167120bc7 100644 --- a/crates/primitives/src/compression/mod.rs +++ b/crates/storage/zstd-compressors/src/lib.rs @@ -1,41 +1,61 @@ +//! Commonly used zstd [`Compressor`] and [`Decompressor`] for reth types. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +use crate::alloc::string::ToString; use alloc::vec::Vec; -use core::cell::RefCell; use zstd::bulk::{Compressor, Decompressor}; /// Compression/Decompression dictionary for `Receipt`. 
-pub static RECEIPT_DICTIONARY: &[u8] = include_bytes!("./receipt_dictionary.bin"); +pub static RECEIPT_DICTIONARY: &[u8] = include_bytes!("../receipt_dictionary.bin"); /// Compression/Decompression dictionary for `Transaction`. -pub static TRANSACTION_DICTIONARY: &[u8] = include_bytes!("./transaction_dictionary.bin"); +pub static TRANSACTION_DICTIONARY: &[u8] = include_bytes!("../transaction_dictionary.bin"); -// We use `thread_local` compressors and decompressors because dictionaries can be quite big, and -// zstd-rs recommends to use one context/compressor per thread #[cfg(feature = "std")] -std::thread_local! { - /// Thread Transaction compressor. - pub static TRANSACTION_COMPRESSOR: RefCell> = RefCell::new( - Compressor::with_dictionary(0, TRANSACTION_DICTIONARY) - .expect("failed to initialize transaction compressor"), - ); - - /// Thread Transaction decompressor. - pub static TRANSACTION_DECOMPRESSOR: RefCell = - RefCell::new(ReusableDecompressor::new( - Decompressor::with_dictionary(TRANSACTION_DICTIONARY) - .expect("failed to initialize transaction decompressor"), - )); - - /// Thread receipt compressor. - pub static RECEIPT_COMPRESSOR: RefCell> = RefCell::new( - Compressor::with_dictionary(0, RECEIPT_DICTIONARY) - .expect("failed to initialize receipt compressor"), - ); - - /// Thread receipt decompressor. - pub static RECEIPT_DECOMPRESSOR: RefCell = - RefCell::new(ReusableDecompressor::new( - Decompressor::with_dictionary(RECEIPT_DICTIONARY) - .expect("failed to initialize receipt decompressor"), - )); +pub use locals::*; +#[cfg(feature = "std")] +mod locals { + use super::*; + use core::cell::RefCell; + + // We use `thread_local` compressors and decompressors because dictionaries can be quite big, + // and zstd-rs recommends to use one context/compressor per thread + std::thread_local! { + /// Thread Transaction compressor. 
+ pub static TRANSACTION_COMPRESSOR: RefCell> = RefCell::new( + Compressor::with_dictionary(0, TRANSACTION_DICTIONARY) + .expect("failed to initialize transaction compressor"), + ); + + /// Thread Transaction decompressor. + pub static TRANSACTION_DECOMPRESSOR: RefCell = + RefCell::new(ReusableDecompressor::new( + Decompressor::with_dictionary(TRANSACTION_DICTIONARY) + .expect("failed to initialize transaction decompressor"), + )); + + /// Thread receipt compressor. + pub static RECEIPT_COMPRESSOR: RefCell> = RefCell::new( + Compressor::with_dictionary(0, RECEIPT_DICTIONARY) + .expect("failed to initialize receipt compressor"), + ); + + /// Thread receipt decompressor. + pub static RECEIPT_DECOMPRESSOR: RefCell = + RefCell::new(ReusableDecompressor::new( + Decompressor::with_dictionary(RECEIPT_DICTIONARY) + .expect("failed to initialize receipt decompressor"), + )); + } } /// Fn creates tx [`Compressor`] diff --git a/crates/primitives/src/compression/transaction_dictionary.bin b/crates/storage/zstd-compressors/transaction_dictionary.bin similarity index 100% rename from crates/primitives/src/compression/transaction_dictionary.bin rename to crates/storage/zstd-compressors/transaction_dictionary.bin From 980e62a5b8c047ec54d2872079481fff42a7c9db Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 04:17:34 +0400 Subject: [PATCH 959/970] chore: relax `ProviderFactory` setup (#13254) --- crates/cli/commands/src/common.rs | 6 ++--- .../cli/commands/src/stage/dump/execution.rs | 11 +++------ crates/cli/commands/src/stage/dump/merkle.rs | 2 +- crates/cli/commands/src/stage/unwind.rs | 2 +- crates/engine/local/src/service.rs | 4 ++-- crates/engine/service/src/service.rs | 4 ++-- crates/engine/tree/src/persistence.rs | 21 +++++++--------- crates/evm/src/noop.rs | 24 +++++++++---------- crates/net/downloaders/src/bodies/noop.rs | 13 +++++----- crates/net/downloaders/src/headers/noop.rs | 14 +++++------ crates/node/builder/src/launch/common.rs | 20 
++++------------ crates/node/builder/src/launch/engine.rs | 15 ++++-------- crates/node/builder/src/launch/exex.rs | 10 ++++---- 13 files changed, 61 insertions(+), 85 deletions(-) diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 174eeffa396..e206715fc01 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -2,10 +2,10 @@ use alloy_primitives::B256; use clap::Parser; -use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::EthChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_config::{config::EtlConfig, Config}; +use reth_consensus::noop::NoopConsensus; use reth_db::{init_db, open_db_read_only, DatabaseEnv}; use reth_db_common::init::init_genesis; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; @@ -151,10 +151,10 @@ impl EnvironmentArgs { .add_stages(DefaultStages::new( factory.clone(), tip_rx, - Arc::new(EthBeaconConsensus::new(self.chain.clone())), + Arc::new(NoopConsensus::default()), NoopHeaderDownloader::default(), NoopBodiesDownloader::default(), - NoopBlockExecutorProvider::default(), + NoopBlockExecutorProvider::::default(), config.stages.clone(), prune_modes.clone(), )) diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 73d2e8a9f8f..1460c6bb6f6 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -140,13 +140,7 @@ fn import_tables_with_range( /// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. 
fn unwind_and_copy< - N: ProviderNodeTypes< - Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - BlockHeader = reth_primitives::Header, - >, - >, + N: ProviderNodeTypes>, >( db_tool: &DbTool, from: u64, @@ -155,7 +149,8 @@ fn unwind_and_copy< ) -> eyre::Result<()> { let provider = db_tool.provider_factory.database_provider_rw()?; - let mut exec_stage = ExecutionStage::new_with_executor(NoopBlockExecutorProvider::default()); + let mut exec_stage = + ExecutionStage::new_with_executor(NoopBlockExecutorProvider::::default()); exec_stage.unwind( &provider, diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index 59a25c492aa..f0dbb1a1faf 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -112,7 +112,7 @@ fn unwind_and_copy< // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( - NoopBlockExecutorProvider::default(), // Not necessary for unwinding. + NoopBlockExecutorProvider::::default(), // Not necessary for unwinding. 
ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None, diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index de535d65508..cc5d719d270 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -120,7 +120,7 @@ impl> Command let (tip_tx, tip_rx) = watch::channel(B256::ZERO); // Unwinding does not require a valid executor - let executor = NoopBlockExecutorProvider::default(); + let executor = NoopBlockExecutorProvider::::default(); let builder = if self.offline { Pipeline::::builder().add_stages( diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 6ce588a8264..0bdc77dbe4b 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -27,7 +27,7 @@ use reth_engine_tree::{ EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineRequestHandler, FromEngine, RequestHandlerEvent, }, - persistence::{PersistenceHandle, PersistenceNodeTypes}, + persistence::PersistenceHandle, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; use reth_evm::execute::BlockExecutorProvider; @@ -59,7 +59,7 @@ where impl LocalEngineService where - N: EngineNodeTypes + PersistenceNodeTypes, + N: EngineNodeTypes, { /// Constructor for [`LocalEngineService`]. 
#[allow(clippy::too_many_arguments)] diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index d839fab2c0e..27de4a63605 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -8,7 +8,7 @@ use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, engine::{EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineHandler}, - persistence::{PersistenceHandle, PersistenceNodeTypes}, + persistence::PersistenceHandle, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; pub use reth_engine_tree::{ @@ -59,7 +59,7 @@ where impl EngineService where - N: EngineNodeTypes + PersistenceNodeTypes, + N: EngineNodeTypes, Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 2f0b20f02dc..c7ad4110086 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -1,4 +1,5 @@ use crate::metrics::PersistenceMetrics; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; @@ -17,11 +18,6 @@ use thiserror::Error; use tokio::sync::oneshot; use tracing::{debug, error}; -/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within -/// [`PersistenceService`]. -pub trait PersistenceNodeTypes: ProviderNodeTypes {} -impl PersistenceNodeTypes for T where T: ProviderNodeTypes {} - /// Writes parts of reth's in memory tree state to the database and static files. 
/// /// This is meant to be a spawned service that listens for various incoming persistence operations, @@ -32,7 +28,7 @@ impl PersistenceNodeTypes for T where T: ProviderNodeTypes where - N: PersistenceNodeTypes, + N: ProviderNodeTypes, { /// The provider factory to use provider: ProviderFactory, @@ -48,7 +44,7 @@ where impl PersistenceService where - N: PersistenceNodeTypes, + N: ProviderNodeTypes, { /// Create a new persistence service pub fn new( @@ -74,7 +70,7 @@ where impl PersistenceService where - N: PersistenceNodeTypes, + N: ProviderNodeTypes, { /// This is the main loop, that will listen to database events and perform the requested /// database actions @@ -148,9 +144,10 @@ where ) -> Result, PersistenceError> { debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.block.num_hash()), last=?blocks.last().map(|b| b.block.num_hash()), "Saving range of blocks"); let start_time = Instant::now(); - let last_block_hash_num = blocks - .last() - .map(|block| BlockNumHash { hash: block.block().hash(), number: block.block().number }); + let last_block_hash_num = blocks.last().map(|block| BlockNumHash { + hash: block.block().hash(), + number: block.block().header().number(), + }); if last_block_hash_num.is_some() { let provider_rw = self.provider.database_provider_rw()?; @@ -219,7 +216,7 @@ impl PersistenceHandle { sync_metrics_tx: MetricEventsSender, ) -> PersistenceHandle where - N: PersistenceNodeTypes, + N: ProviderNodeTypes, { // create the initial channels let (db_service_tx, db_service_rx) = std::sync::mpsc::channel(); diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 7b1063533da..816a4c83564 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -4,7 +4,7 @@ use alloy_primitives::BlockNumber; use core::fmt::Display; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockWithSenders, EthPrimitives, 
Receipt}; +use reth_primitives::{BlockWithSenders, NodePrimitives}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -20,10 +20,10 @@ const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; /// A [`BlockExecutorProvider`] implementation that does nothing. #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct NoopBlockExecutorProvider; +pub struct NoopBlockExecutorProvider

(core::marker::PhantomData

); -impl BlockExecutorProvider for NoopBlockExecutorProvider { - type Primitives = EthPrimitives; +impl BlockExecutorProvider for NoopBlockExecutorProvider

{ + type Primitives = P; type Executor + Display>> = Self; @@ -33,20 +33,20 @@ impl BlockExecutorProvider for NoopBlockExecutorProvider { where DB: Database + Display>, { - Self + Self::default() } fn batch_executor(&self, _: DB) -> Self::BatchExecutor where DB: Database + Display>, { - Self + Self::default() } } -impl Executor for NoopBlockExecutorProvider { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; +impl Executor for NoopBlockExecutorProvider

{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; type Error = BlockExecutionError; fn execute(self, _: Self::Input<'_>) -> Result { @@ -76,9 +76,9 @@ impl Executor for NoopBlockExecutorProvider { } } -impl BatchExecutor for NoopBlockExecutorProvider { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; +impl BatchExecutor for NoopBlockExecutorProvider

{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = ExecutionOutcome; type Error = BlockExecutionError; fn execute_and_verify_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> { diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index f311a242c20..dd3e6e9691b 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -4,24 +4,23 @@ use reth_network_p2p::{ bodies::{downloader::BodyDownloader, response::BlockResponse}, error::{DownloadError, DownloadResult}, }; -use reth_primitives::BlockBody; -use std::ops::RangeInclusive; +use std::{fmt::Debug, ops::RangeInclusive}; /// A [`BodyDownloader`] implementation that does nothing. #[derive(Debug, Default)] #[non_exhaustive] -pub struct NoopBodiesDownloader; +pub struct NoopBodiesDownloader(std::marker::PhantomData); -impl BodyDownloader for NoopBodiesDownloader { - type Body = BlockBody; +impl BodyDownloader for NoopBodiesDownloader { + type Body = B; fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> { Ok(()) } } -impl Stream for NoopBodiesDownloader { - type Item = Result>, DownloadError>; +impl Stream for NoopBodiesDownloader { + type Item = Result>, DownloadError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs index 58da7312387..e9dee56dd2e 100644 --- a/crates/net/downloaders/src/headers/noop.rs +++ b/crates/net/downloaders/src/headers/noop.rs @@ -1,28 +1,28 @@ -use alloy_consensus::Header; use futures::Stream; use reth_network_p2p::headers::{ downloader::{HeaderDownloader, SyncTarget}, error::HeadersDownloaderError, }; use reth_primitives::SealedHeader; +use std::fmt::Debug; /// A [`HeaderDownloader`] implementation that does nothing. 
#[derive(Debug, Default)] #[non_exhaustive] -pub struct NoopHeaderDownloader; +pub struct NoopHeaderDownloader(std::marker::PhantomData); -impl HeaderDownloader for NoopHeaderDownloader { - type Header = Header; +impl HeaderDownloader for NoopHeaderDownloader { + type Header = H; - fn update_local_head(&mut self, _: SealedHeader) {} + fn update_local_head(&mut self, _: SealedHeader) {} fn update_sync_target(&mut self, _: SyncTarget) {} fn set_batch_size(&mut self, _: usize) {} } -impl Stream for NoopHeaderDownloader { - type Item = Result, HeadersDownloaderError

>; +impl Stream for NoopHeaderDownloader { + type Item = Result>, HeadersDownloaderError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 104ecef9e80..62226cb0b1c 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -10,9 +10,9 @@ use crate::{ use alloy_primitives::{BlockNumber, B256}; use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; -use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; +use reth_consensus::noop::NoopConsensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitStorageError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; @@ -383,12 +383,7 @@ where pub async fn create_provider_factory(&self) -> eyre::Result> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, - BlockHeader = reth_primitives::Header, - >, + N::Primitives: FullNodePrimitives, { let factory = ProviderFactory::new( self.right().clone(), @@ -420,10 +415,10 @@ where .add_stages(DefaultStages::new( factory.clone(), tip_rx, - Arc::new(EthBeaconConsensus::new(self.chain_spec())), + Arc::new(NoopConsensus::default()), NoopHeaderDownloader::default(), NoopBodiesDownloader::default(), - NoopBlockExecutorProvider::default(), + NoopBlockExecutorProvider::::default(), self.toml_config().stages.clone(), self.prune_modes(), )) @@ -455,12 +450,7 @@ where ) -> eyre::Result, ProviderFactory>>> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, - BlockHeader = 
reth_primitives::Header, - >, + N::Primitives: FullNodePrimitives, { let factory = self.create_provider_factory().await?; let ctx = LaunchContextWith { diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 264de07048a..054def94e50 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -3,7 +3,7 @@ use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, StaticFileHook}, - BeaconConsensusEngineHandle, + BeaconConsensusEngineHandle, EngineNodeTypes, }; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider}; @@ -11,7 +11,6 @@ use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, - persistence::PersistenceNodeTypes, tree::TreeConfig, }; use reth_engine_util::EngineMessageStreamExt; @@ -28,8 +27,8 @@ use reth_node_core::{ primitives::Head, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_primitives::{EthPrimitives, EthereumHardforks}; -use reth_provider::providers::{BlockchainProvider2, ProviderNodeTypes}; +use reth_primitives::EthereumHardforks; +use reth_provider::providers::BlockchainProvider2; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; use reth_tracing::tracing::{debug, error, info}; @@ -70,17 +69,13 @@ impl EngineNodeLauncher { impl LaunchNode> for EngineNodeLauncher where - Types: - ProviderNodeTypes + NodeTypesWithEngine + PersistenceNodeTypes, + Types: EngineNodeTypes, T: FullNodeTypes>, CB: NodeComponentsBuilder, AO: RethRpcAddOns> + EngineValidatorAddOn< NodeAdapter, - Validator: EngineValidator< - ::Engine, - Block = BlockTy, - >, + Validator: EngineValidator>, >, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< diff --git 
a/crates/node/builder/src/launch/exex.rs b/crates/node/builder/src/launch/exex.rs index 0eef0d00576..0235dd929e2 100644 --- a/crates/node/builder/src/launch/exex.rs +++ b/crates/node/builder/src/launch/exex.rs @@ -10,7 +10,7 @@ use reth_exex::{ DEFAULT_EXEX_MANAGER_CAPACITY, }; use reth_node_api::{FullNodeComponents, NodeTypes}; -use reth_primitives::{EthPrimitives, Head}; +use reth_primitives::Head; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use tracing::Instrument; @@ -25,9 +25,7 @@ pub struct ExExLauncher { config_container: WithConfigs<::ChainSpec>, } -impl> + Clone> - ExExLauncher -{ +impl ExExLauncher { /// Create a new `ExExLauncher` with the given extensions. pub const fn new( head: Head, @@ -42,7 +40,9 @@ impl> + Cl /// /// Spawns all extensions and returns the handle to the exex manager if any extensions are /// installed. - pub async fn launch(self) -> eyre::Result> { + pub async fn launch( + self, + ) -> eyre::Result::Primitives>>> { let Self { head, extensions, components, config_container } = self; if extensions.is_empty() { From c9bd64018a0efddeed79911e5af8d28748f19e3a Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 9 Dec 2024 19:21:46 -0500 Subject: [PATCH 960/970] chore: use `BlockWithParent` for `StageError` (#13198) --- Cargo.lock | 1 + .../beacon/src/engine/invalid_headers.rs | 41 ++++++++++--------- crates/consensus/beacon/src/engine/mod.rs | 30 +++++++------- crates/engine/tree/src/tree/mod.rs | 18 ++++---- crates/primitives-traits/src/header/sealed.rs | 7 +++- crates/stages/api/Cargo.toml | 2 + crates/stages/api/src/error.rs | 22 +++++----- crates/stages/api/src/pipeline/ctrl.rs | 4 +- crates/stages/api/src/pipeline/mod.rs | 16 ++++---- crates/stages/stages/src/stages/execution.rs | 15 +++++-- crates/stages/stages/src/stages/headers.rs | 12 +++++- crates/stages/stages/src/stages/merkle.rs | 2 +- .../stages/src/stages/sender_recovery.rs | 2 +- 
crates/stages/stages/src/stages/utils.rs | 5 ++- testing/testing-utils/src/generators.rs | 15 ++++++- 15 files changed, 117 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3de11a253bd..08259a54aba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9293,6 +9293,7 @@ dependencies = [ name = "reth-stages-api" version = "1.1.2" dependencies = [ + "alloy-eips", "alloy-primitives", "aquamarine", "assert_matches", diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index 0a72129a627..2e2bc37a27e 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -1,12 +1,11 @@ -use alloy_consensus::Header; +use alloy_eips::eip1898::BlockWithParent; use alloy_primitives::B256; use reth_metrics::{ metrics::{Counter, Gauge}, Metrics, }; -use reth_primitives::SealedHeader; use schnellru::{ByLength, LruMap}; -use std::{fmt::Debug, sync::Arc}; +use std::fmt::Debug; use tracing::warn; /// The max hit counter for invalid headers in the cache before it is forcefully evicted. @@ -17,20 +16,20 @@ const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; /// Keeps track of invalid headers. #[derive(Debug)] -pub struct InvalidHeaderCache { +pub struct InvalidHeaderCache { /// This maps a header hash to a reference to its invalid ancestor. - headers: LruMap>, + headers: LruMap, /// Metrics for the cache. metrics: InvalidHeaderCacheMetrics, } -impl InvalidHeaderCache { +impl InvalidHeaderCache { /// Invalid header cache constructor. 
pub fn new(max_length: u32) -> Self { Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } } - fn insert_entry(&mut self, hash: B256, header: Arc) { + fn insert_entry(&mut self, hash: B256, header: BlockWithParent) { self.headers.insert(hash, HeaderEntry { header, hit_count: 0 }); } @@ -38,7 +37,7 @@ impl InvalidHeaderCache { /// /// If this is called, the hit count for the entry is incremented. /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub fn get(&mut self, hash: &B256) -> Option> { + pub fn get(&mut self, hash: &B256) -> Option { { let entry = self.headers.get(hash)?; entry.hit_count += 1; @@ -53,7 +52,11 @@ impl InvalidHeaderCache { } /// Inserts an invalid block into the cache, with a given invalid ancestor. - pub fn insert_with_invalid_ancestor(&mut self, header_hash: B256, invalid_ancestor: Arc) { + pub fn insert_with_invalid_ancestor( + &mut self, + header_hash: B256, + invalid_ancestor: BlockWithParent, + ) { if self.get(&header_hash).is_none() { warn!(target: "consensus::engine", hash=?header_hash, ?invalid_ancestor, "Bad block with existing invalid ancestor"); self.insert_entry(header_hash, invalid_ancestor); @@ -65,12 +68,10 @@ impl InvalidHeaderCache { } /// Inserts an invalid ancestor into the map. 
- pub fn insert(&mut self, invalid_ancestor: SealedHeader) { - if self.get(&invalid_ancestor.hash()).is_none() { - let hash = invalid_ancestor.hash(); - let header = invalid_ancestor.unseal(); - warn!(target: "consensus::engine", ?hash, ?header, "Bad block with hash"); - self.insert_entry(hash, Arc::new(header)); + pub fn insert(&mut self, invalid_ancestor: BlockWithParent) { + if self.get(&invalid_ancestor.block.hash).is_none() { + warn!(target: "consensus::engine", ?invalid_ancestor, "Bad block with hash"); + self.insert_entry(invalid_ancestor.block.hash, invalid_ancestor); // update metrics self.metrics.unique_inserts.increment(1); @@ -79,11 +80,11 @@ impl InvalidHeaderCache { } } -struct HeaderEntry { +struct HeaderEntry { /// Keeps track how many times this header has been hit. hit_count: u8, - /// The actually header entry - header: Arc, + /// The actual header entry + header: BlockWithParent, } /// Metrics for the invalid headers cache. @@ -103,13 +104,15 @@ struct InvalidHeaderCacheMetrics { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; + use reth_primitives::SealedHeader; #[test] fn test_hit_eviction() { let mut cache = InvalidHeaderCache::new(10); let header = Header::default(); let header = SealedHeader::seal(header); - cache.insert(header.clone()); + cache.insert(header.block_with_parent()); assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, 0); for hit in 1..INVALID_HEADER_HIT_EVICTION_THRESHOLD { diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f188e495be4..c41f9283db8 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -760,14 +760,14 @@ where // iterate over ancestors in the invalid cache // until we encounter the first valid ancestor let mut current_hash = parent_hash; - let mut current_header = self.invalid_headers.get(¤t_hash); - while let Some(header) = current_header { - current_hash = 
header.parent_hash; - current_header = self.invalid_headers.get(¤t_hash); + let mut current_block = self.invalid_headers.get(¤t_hash); + while let Some(block) = current_block { + current_hash = block.parent; + current_block = self.invalid_headers.get(¤t_hash); // If current_header is None, then the current_hash does not have an invalid // ancestor in the cache, check its presence in blockchain tree - if current_header.is_none() && + if current_block.is_none() && self.blockchain.find_block_by_hash(current_hash, BlockSource::Any)?.is_some() { return Ok(Some(current_hash)) @@ -806,13 +806,13 @@ where head: B256, ) -> ProviderResult> { // check if the check hash was previously marked as invalid - let Some(header) = self.invalid_headers.get(&check) else { return Ok(None) }; + let Some(block) = self.invalid_headers.get(&check) else { return Ok(None) }; // populate the latest valid hash field - let status = self.prepare_invalid_response(header.parent_hash)?; + let status = self.prepare_invalid_response(block.parent)?; // insert the head block into the invalid header cache - self.invalid_headers.insert_with_invalid_ancestor(head, header); + self.invalid_headers.insert_with_invalid_ancestor(head, block); Ok(Some(status)) } @@ -821,10 +821,10 @@ where /// to a forkchoice update. fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { // check if the head was previously marked as invalid - let Some(header) = self.invalid_headers.get(&head) else { return Ok(None) }; + let Some(block) = self.invalid_headers.get(&head) else { return Ok(None) }; // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) + Ok(Some(self.prepare_invalid_response(block.parent)?)) } /// Record latency metrics for one call to make a block canonical @@ -1454,7 +1454,7 @@ where fn on_pipeline_outcome(&mut self, ctrl: ControlFlow) -> RethResult<()> { // Pipeline unwound, memorize the invalid block and wait for CL for next sync target. 
if let ControlFlow::Unwind { bad_block, .. } = ctrl { - warn!(target: "consensus::engine", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); + warn!(target: "consensus::engine", invalid_num_hash=?bad_block.block, "Bad block detected in unwind"); // update the `invalid_headers` cache with the new invalid header self.invalid_headers.insert(*bad_block); return Ok(()) @@ -1673,7 +1673,7 @@ where self.latest_valid_hash_for_invalid_payload(block.parent_hash)? }; // keep track of the invalid header - self.invalid_headers.insert(block.header); + self.invalid_headers.insert(block.header.block_with_parent()); PayloadStatus::new( PayloadStatusEnum::Invalid { validation_error: error.to_string() }, latest_valid_hash, @@ -1782,7 +1782,7 @@ where let (block, err) = err.split(); warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), %err, "Marking block as invalid"); - self.invalid_headers.insert(block.header); + self.invalid_headers.insert(block.header.block_with_parent()); } } } @@ -2035,7 +2035,7 @@ mod tests { .await; assert_matches!( res.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(),PipelineError::Stage(StageError::ChannelClosed)) + Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) ); } @@ -2141,7 +2141,7 @@ mod tests { assert_matches!( rx.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(),PipelineError::Stage(StageError::ChannelClosed)) + Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) ); } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 5fc07abf7a2..ce9bddd90c1 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1328,7 +1328,7 @@ where // Pipeline unwound, memorize the invalid block and wait for 
CL for next sync target. if let ControlFlow::Unwind { bad_block, .. } = ctrl { - warn!(target: "engine::tree", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); + warn!(target: "engine::tree", invalid_block=?bad_block, "Bad block detected in unwind"); // update the `invalid_headers` cache with the new invalid header self.state.invalid_headers.insert(*bad_block); return Ok(()) @@ -1678,14 +1678,14 @@ where // iterate over ancestors in the invalid cache // until we encounter the first valid ancestor let mut current_hash = parent_hash; - let mut current_header = self.state.invalid_headers.get(¤t_hash); - while let Some(header) = current_header { - current_hash = header.parent_hash; - current_header = self.state.invalid_headers.get(¤t_hash); + let mut current_block = self.state.invalid_headers.get(¤t_hash); + while let Some(block_with_parent) = current_block { + current_hash = block_with_parent.parent; + current_block = self.state.invalid_headers.get(¤t_hash); // If current_header is None, then the current_hash does not have an invalid // ancestor in the cache, check its presence in blockchain tree - if current_header.is_none() && self.block_by_hash(current_hash)?.is_some() { + if current_block.is_none() && self.block_by_hash(current_hash)?.is_some() { return Ok(Some(current_hash)) } } @@ -1735,7 +1735,7 @@ where let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) }; // populate the latest valid hash field - let status = self.prepare_invalid_response(header.parent_hash)?; + let status = self.prepare_invalid_response(header.parent)?; // insert the head block into the invalid header cache self.state.invalid_headers.insert_with_invalid_ancestor(head, header); @@ -1749,7 +1749,7 @@ where // check if the head was previously marked as invalid let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; // populate the latest valid hash field - 
Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) + Ok(Some(self.prepare_invalid_response(header.parent)?)) } /// Validate if block is correct and satisfies all the consensus rules that concern the header @@ -2395,7 +2395,7 @@ where }; // keep track of the invalid header - self.state.invalid_headers.insert(block.header); + self.state.invalid_headers.insert(block.header.block_with_parent()); Ok(PayloadStatus::new( PayloadStatusEnum::Invalid { validation_error: validation_err.to_string() }, latest_valid_hash, diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index e99b0e1c17f..61b021a0879 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,7 +1,7 @@ use crate::InMemorySize; pub use alloy_consensus::Header; use alloy_consensus::Sealed; -use alloy_eips::BlockNumHash; +use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; use alloy_primitives::{keccak256, BlockHash, Sealable}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; @@ -65,6 +65,11 @@ impl SealedHeader { pub fn num_hash(&self) -> BlockNumHash { BlockNumHash::new(self.number(), self.hash) } + + /// Return a [`BlockWithParent`] for this header. 
+ pub fn block_with_parent(&self) -> BlockWithParent { + BlockWithParent { parent: self.parent_hash(), block: self.num_hash() } + } } impl InMemorySize for SealedHeader { diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index 88a8e3b96d1..ffa34afa71e 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -23,7 +23,9 @@ reth-errors.workspace = true reth-stages-types.workspace = true reth-static-file-types.workspace = true +# alloy alloy-primitives.workspace = true +alloy-eips.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs index 9a4ef35aaf2..b63dd20f77c 100644 --- a/crates/stages/api/src/error.rs +++ b/crates/stages/api/src/error.rs @@ -1,8 +1,8 @@ use crate::PipelineEvent; +use alloy_eips::eip1898::BlockWithParent; use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, DatabaseError, RethError}; use reth_network_p2p::error::DownloadError; -use reth_primitives_traits::SealedHeader; use reth_provider::ProviderError; use reth_prune::{PruneSegment, PruneSegmentError, PrunerError}; use reth_static_file_types::StaticFileSegment; @@ -34,10 +34,10 @@ impl BlockErrorKind { #[derive(Error, Debug)] pub enum StageError { /// The stage encountered an error related to a block. - #[error("stage encountered an error in block #{number}: {error}", number = block.number)] + #[error("stage encountered an error in block #{number}: {error}", number = block.block.number)] Block { /// The block that caused the error. - block: Box, + block: Box, /// The specific error type, either consensus or execution error. 
#[source] error: BlockErrorKind, @@ -48,16 +48,16 @@ pub enum StageError { "stage encountered inconsistent chain: \ downloaded header #{header_number} ({header_hash}) is detached from \ local head #{head_number} ({head_hash}): {error}", - header_number = header.number, - header_hash = header.hash(), - head_number = local_head.number, - head_hash = local_head.hash(), + header_number = header.block.number, + header_hash = header.block.hash, + head_number = local_head.block.number, + head_hash = local_head.block.hash, )] DetachedHead { /// The local head we attempted to attach to. - local_head: Box, + local_head: Box, /// The header we attempted to attach. - header: Box, + header: Box, /// The error that occurred when attempting to attach the header. #[source] error: Box, @@ -92,10 +92,10 @@ pub enum StageError { #[error("invalid download response: {0}")] Download(#[from] DownloadError), /// Database is ahead of static file data. - #[error("missing static file data for block number: {number}", number = block.number)] + #[error("missing static file data for block number: {number}", number = block.block.number)] MissingStaticFileData { /// Starting block with missing data. - block: Box, + block: Box, /// Static File segment segment: StaticFileSegment, }, diff --git a/crates/stages/api/src/pipeline/ctrl.rs b/crates/stages/api/src/pipeline/ctrl.rs index 16185755245..378385e97b7 100644 --- a/crates/stages/api/src/pipeline/ctrl.rs +++ b/crates/stages/api/src/pipeline/ctrl.rs @@ -1,5 +1,5 @@ +use alloy_eips::eip1898::BlockWithParent; use alloy_primitives::BlockNumber; -use reth_primitives_traits::SealedHeader; /// Determines the control flow during pipeline execution. /// @@ -11,7 +11,7 @@ pub enum ControlFlow { /// The block to unwind to. target: BlockNumber, /// The block that caused the unwind. - bad_block: Box, + bad_block: Box, }, /// The pipeline made progress. 
Continue { diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index ec57de8d11c..2cb98d44f93 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -223,7 +223,7 @@ impl Pipeline { } ControlFlow::Continue { block_number } => self.progress.update(block_number), ControlFlow::Unwind { target, bad_block } => { - self.unwind(target, Some(bad_block.number))?; + self.unwind(target, Some(bad_block.block.number))?; return Ok(ControlFlow::Unwind { target, bad_block }) } } @@ -505,7 +505,7 @@ fn on_stage_error( // We unwind because of a detached head. let unwind_to = - local_head.number.saturating_sub(BEACON_CONSENSUS_REORG_UNWIND_DEPTH).max(1); + local_head.block.number.saturating_sub(BEACON_CONSENSUS_REORG_UNWIND_DEPTH).max(1); Ok(Some(ControlFlow::Unwind { target: unwind_to, bad_block: local_head })) } else if let StageError::Block { block, error } = err { match error { @@ -513,7 +513,7 @@ fn on_stage_error( error!( target: "sync::pipeline", stage = %stage_id, - bad_block = %block.number, + bad_block = %block.block.number, "Stage encountered a validation error: {validation_error}" ); @@ -542,7 +542,7 @@ fn on_stage_error( error!( target: "sync::pipeline", stage = %stage_id, - bad_block = %block.number, + bad_block = %block.block.number, "Stage encountered an execution error: {execution_error}" ); @@ -560,12 +560,12 @@ fn on_stage_error( error!( target: "sync::pipeline", stage = %stage_id, - bad_block = %block.number, + bad_block = %block.block.number, segment = %segment, "Stage is missing static file data." 
); - Ok(Some(ControlFlow::Unwind { target: block.number - 1, bad_block: block })) + Ok(Some(ControlFlow::Unwind { target: block.block.number - 1, bad_block: block })) } else if err.is_fatal() { error!(target: "sync::pipeline", stage = %stage_id, "Stage encountered a fatal error: {err}"); Err(err.into()) @@ -603,7 +603,7 @@ mod tests { use reth_errors::ProviderError; use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB}; use reth_prune::PruneModes; - use reth_testing_utils::{generators, generators::random_header}; + use reth_testing_utils::generators::{self, random_block_with_parent}; use tokio_stream::StreamExt; #[test] @@ -975,7 +975,7 @@ mod tests { .add_stage( TestStage::new(StageId::Other("B")) .add_exec(Err(StageError::Block { - block: Box::new(random_header( + block: Box::new(random_block_with_parent( &mut generators::rng(), 5, Default::default(), diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index c8cc8908086..91afc33efaa 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,5 +1,6 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; use alloy_consensus::{BlockHeader, Header}; +use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_primitives::BlockNumber; use num_traits::Zero; use reth_config::config::ExecutionConfig; @@ -11,7 +12,7 @@ use reth_evm::{ }; use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; -use reth_primitives::{SealedHeader, StaticFileSegment}; +use reth_primitives::StaticFileSegment; use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimitives}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, @@ -359,9 +360,15 @@ where let execute_start = Instant::now(); self.metrics.metered_one((&block, td).into(), |input| { - executor.execute_and_verify_one(input).map_err(|error| 
StageError::Block { - block: Box::new(SealedHeader::seal(block.header().clone())), - error: BlockErrorKind::Execution(error), + executor.execute_and_verify_one(input).map_err(|error| { + let header = block.header(); + StageError::Block { + block: Box::new(BlockWithParent::new( + header.parent_hash(), + NumHash::new(header.number(), header.hash_slow()), + )), + error: BlockErrorKind::Execution(error), + } }) })?; diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 7b9b394b561..7ca9cae590b 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -1,4 +1,5 @@ use alloy_consensus::BlockHeader; +use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; @@ -143,7 +144,10 @@ where // Header validation self.consensus.validate_header_with_total_difficulty(&header, td).map_err(|error| { StageError::Block { - block: Box::new(SealedHeader::new(header.clone(), header_hash)), + block: Box::new(BlockWithParent::new( + header.parent_hash, + NumHash::new(header.number, header_hash), + )), error: BlockErrorKind::Validation(error), } })?; @@ -272,7 +276,11 @@ where } Some(Err(HeadersDownloaderError::DetachedHead { local_head, header, error })) => { error!(target: "sync::stages::headers", %error, "Cannot attach header to head"); - return Poll::Ready(Err(StageError::DetachedHead { local_head, header, error })) + return Poll::Ready(Err(StageError::DetachedHead { + local_head: Box::new(local_head.block_with_parent()), + header: Box::new(header.block_with_parent()), + error, + })) } None => return Poll::Ready(Err(StageError::ChannelClosed)), } diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 8095dfed904..ff4d37cf3f6 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ 
b/crates/stages/stages/src/stages/merkle.rs @@ -357,7 +357,7 @@ fn validate_state_root( error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( GotExpected { got, expected: expected.state_root }.into(), )), - block: Box::new(expected), + block: Box::new(expected.block_with_parent()), }) } } diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index d34a4b07921..b5506068f48 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -192,7 +192,7 @@ where })?; Err(StageError::Block { - block: Box::new(sealed_header), + block: Box::new(sealed_header.block_with_parent()), error: BlockErrorKind::Validation( ConsensusError::TransactionSignerRecoveryError, ), diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 34aaeee44be..c2a7c6ede02 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -279,5 +279,8 @@ where let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - Ok(StageError::MissingStaticFileData { block: missing_block, segment }) + Ok(StageError::MissingStaticFileData { + block: Box::new(missing_block.block_with_parent()), + segment, + }) } diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 9963b447e96..28ba171bdb3 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,7 +1,11 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. 
use alloy_consensus::{Header, Transaction as _, TxLegacy}; -use alloy_eips::eip4895::{Withdrawal, Withdrawals}; +use alloy_eips::{ + eip1898::BlockWithParent, + eip4895::{Withdrawal, Withdrawals}, + NumHash, +}; use alloy_primitives::{Address, BlockNumber, Bytes, TxKind, B256, U256}; pub use rand::Rng; use rand::{ @@ -95,6 +99,15 @@ pub fn random_header_range( headers } +/// Generate a random [`BlockWithParent`]. +pub fn random_block_with_parent( + rng: &mut R, + number: u64, + parent: Option, +) -> BlockWithParent { + BlockWithParent { parent: parent.unwrap_or_default(), block: NumHash::new(number, rng.gen()) } +} + /// Generate a random [`SealedHeader`]. /// /// The header is assumed to not be correct if validated. From 5ee776a2ee9fbecc35558f5dc0d31f8459af8658 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 06:04:11 +0400 Subject: [PATCH 961/970] chore: pass primiitves generic to `EngineApiTreeHandler` fields (#13256) --- crates/blockchain-tree/Cargo.toml | 1 + crates/blockchain-tree/src/block_buffer.rs | 34 +++++---- crates/engine/local/src/service.rs | 2 +- crates/engine/service/src/service.rs | 6 +- crates/engine/tree/src/engine.rs | 14 ++-- crates/engine/tree/src/tree/mod.rs | 88 +++++++++++----------- 6 files changed, 76 insertions(+), 69 deletions(-) diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 3fa6de2b402..07ecedf882f 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -31,6 +31,7 @@ reth-consensus.workspace = true reth-node-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 5d4ca2705cb..994ed82cfb9 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -1,6 +1,8 @@ use crate::metrics::BlockBufferMetrics; +use 
alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber}; use reth_network::cache::LruCache; +use reth_node_types::Block; use reth_primitives::SealedBlockWithSenders; use std::collections::{BTreeMap, HashMap, HashSet}; @@ -16,9 +18,9 @@ use std::collections::{BTreeMap, HashMap, HashSet}; /// Note: Buffer is limited by number of blocks that it can contain and eviction of the block /// is done by last recently used block. #[derive(Debug)] -pub struct BlockBuffer { +pub struct BlockBuffer { /// All blocks in the buffer stored by their block hash. - pub(crate) blocks: HashMap, + pub(crate) blocks: HashMap>, /// Map of any parent block hash (even the ones not currently in the buffer) /// to the buffered children. /// Allows connecting buffered blocks by parent. @@ -35,7 +37,7 @@ pub struct BlockBuffer { pub(crate) metrics: BlockBufferMetrics, } -impl BlockBuffer { +impl BlockBuffer { /// Create new buffer with max limit of blocks pub fn new(limit: u32) -> Self { Self { @@ -48,37 +50,37 @@ impl BlockBuffer { } /// Return reference to buffered blocks - pub const fn blocks(&self) -> &HashMap { + pub const fn blocks(&self) -> &HashMap> { &self.blocks } /// Return reference to the requested block. - pub fn block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { self.blocks.get(hash) } /// Return a reference to the lowest ancestor of the given block in the buffer. - pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { let mut current_block = self.blocks.get(hash)?; - while let Some(parent) = self.blocks.get(¤t_block.parent_hash) { + while let Some(parent) = self.blocks.get(¤t_block.parent_hash()) { current_block = parent; } Some(current_block) } /// Insert a correct block inside the buffer. 
- pub fn insert_block(&mut self, block: SealedBlockWithSenders) { + pub fn insert_block(&mut self, block: SealedBlockWithSenders) { let hash = block.hash(); - self.parent_to_child.entry(block.parent_hash).or_default().insert(hash); - self.earliest_blocks.entry(block.number).or_default().insert(hash); + self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash); + self.earliest_blocks.entry(block.number()).or_default().insert(hash); self.blocks.insert(hash, block); if let (_, Some(evicted_hash)) = self.lru.insert_and_get_evicted(hash) { // evict the block if limit is hit if let Some(evicted_block) = self.remove_block(&evicted_hash) { // evict the block if limit is hit - self.remove_from_parent(evicted_block.parent_hash, &evicted_hash); + self.remove_from_parent(evicted_block.parent_hash(), &evicted_hash); } } self.metrics.blocks.set(self.blocks.len() as f64); @@ -93,7 +95,7 @@ impl BlockBuffer { pub fn remove_block_with_children( &mut self, parent_hash: &BlockHash, - ) -> Vec { + ) -> Vec> { let removed = self .remove_block(parent_hash) .into_iter() @@ -152,16 +154,16 @@ impl BlockBuffer { /// This method will only remove the block if it's present inside `self.blocks`. /// The block might be missing from other collections, the method will only ensure that it has /// been removed. - fn remove_block(&mut self, hash: &BlockHash) -> Option { + fn remove_block(&mut self, hash: &BlockHash) -> Option> { let block = self.blocks.remove(hash)?; - self.remove_from_earliest_blocks(block.number, hash); - self.remove_from_parent(block.parent_hash, hash); + self.remove_from_earliest_blocks(block.number(), hash); + self.remove_from_parent(block.parent_hash(), hash); self.lru.remove(hash); Some(block) } /// Remove all children and their descendants for the given blocks and return them. 
- fn remove_children(&mut self, parent_hashes: Vec) -> Vec { + fn remove_children(&mut self, parent_hashes: Vec) -> Vec> { // remove all parent child connection and all the child children blocks that are connected // to the discarded parent blocks. let mut remove_parent_children = parent_hashes; diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 0bdc77dbe4b..b4dd47c43ab 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -52,7 +52,7 @@ where /// Processes requests. /// /// This type is responsible for processing incoming requests. - handler: EngineApiRequestHandler>, + handler: EngineApiRequestHandler>, /// Receiver for incoming requests (from the engine API endpoint) that need to be processed. incoming_requests: EngineMessageStream, } diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 27de4a63605..c6a87ea076f 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -17,7 +17,7 @@ pub use reth_engine_tree::{ }; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::EthBlockClient; -use reth_node_types::{BlockTy, NodeTypesWithEngine}; +use reth_node_types::{BlockTy, NodeTypes, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::EthPrimitives; use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; @@ -37,7 +37,9 @@ pub type EngineMessageStream = Pin = ChainOrchestrator< EngineHandler< - EngineApiRequestHandler::Engine>>, + EngineApiRequestHandler< + EngineApiRequest<::Engine, ::Primitives>, + >, EngineMessageStream<::Engine>, BasicBlockDownloader, >, diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 947d025e9ab..2f0415a1013 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -238,14 +238,14 @@ impl EngineApiKind { /// The request variants that the engine API handler can 
receive. #[derive(Debug)] -pub enum EngineApiRequest { +pub enum EngineApiRequest { /// A request received from the consensus engine. Beacon(BeaconEngineMessage), /// Request to insert an already executed block, e.g. via payload building. - InsertExecutedBlock(ExecutedBlock), + InsertExecutedBlock(ExecutedBlock), } -impl Display for EngineApiRequest { +impl Display for EngineApiRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Beacon(msg) => msg.fmt(f), @@ -256,14 +256,16 @@ impl Display for EngineApiRequest { } } -impl From> for EngineApiRequest { +impl From> for EngineApiRequest { fn from(msg: BeaconEngineMessage) -> Self { Self::Beacon(msg) } } -impl From> for FromEngine> { - fn from(req: EngineApiRequest) -> Self { +impl From> + for FromEngine> +{ + fn from(req: EngineApiRequest) -> Self { Self::Request(req) } } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index ce9bddd90c1..29c382c2885 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -36,8 +36,8 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ - Block, EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockWithSenders, - SealedHeader, + EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, }; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, @@ -53,7 +53,6 @@ use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, fmt::Debug, - marker::PhantomData, ops::Bound, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, @@ -129,7 +128,7 @@ impl TreeState { } /// Returns the block by hash. 
- fn block_by_hash(&self, hash: B256) -> Option>> { + fn block_by_hash(&self, hash: B256) -> Option>> { self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) } @@ -386,19 +385,19 @@ impl TreeState { /// /// This type is not shareable. #[derive(Debug)] -pub struct EngineApiTreeState { +pub struct EngineApiTreeState { /// Tracks the state of the blockchain tree. - tree_state: TreeState, + tree_state: TreeState, /// Tracks the forkchoice state updates received by the CL. forkchoice_state_tracker: ForkchoiceStateTracker, /// Buffer of detached blocks. - buffer: BlockBuffer, + buffer: BlockBuffer, /// Tracks the header of invalid payloads that were rejected by the engine because they're /// invalid. invalid_headers: InvalidHeaderCache, } -impl EngineApiTreeState { +impl EngineApiTreeState { fn new( block_buffer_limit: u32, max_invalid_header_cache_length: u32, @@ -474,10 +473,10 @@ where { provider: P, executor_provider: E, - consensus: Arc, + consensus: Arc>, payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. - state: EngineApiTreeState, + state: EngineApiTreeState, /// The half for sending messages to the engine. /// /// This is kept so that we can queue in messages to ourself that we can process later, for @@ -486,20 +485,20 @@ where /// them one by one so that we can handle incoming engine API in between and don't become /// unresponsive. This can happen during live sync transition where we're trying to close the /// gap (up to 3 epochs of blocks in the worst case). - incoming_tx: Sender>>, + incoming_tx: Sender>>, /// Incoming engine API requests. - incoming: Receiver>>, + incoming: Receiver>>, /// Outgoing events that are emitted to the handler. - outgoing: UnboundedSender, + outgoing: UnboundedSender>, /// Channels to the persistence layer. - persistence: PersistenceHandle, + persistence: PersistenceHandle, /// Tracks the state changes of the persistence task. 
persistence_state: PersistenceState, /// Flag indicating the state of the node's backfill synchronization process. backfill_sync_state: BackfillSyncState, /// Keeps track of the state of the canonical chain that isn't persisted yet. /// This is intended to be accessed from external sources, such as rpc. - canonical_in_memory_state: CanonicalInMemoryState, + canonical_in_memory_state: CanonicalInMemoryState, /// Handle to the payload builder that will receive payload attributes for valid forkchoice /// updates payload_builder: PayloadBuilderHandle, @@ -511,8 +510,6 @@ where invalid_block_hook: Box>, /// The engine API variant of this handler engine_kind: EngineApiKind, - /// Captures the types the engine operates on - _primtives: PhantomData, } impl std::fmt::Debug @@ -546,6 +543,7 @@ where N: NodePrimitives< Block = reth_primitives::Block, BlockHeader = reth_primitives::Header, + BlockBody = reth_primitives::BlockBody, Receipt = reth_primitives::Receipt, >, P: DatabaseProviderFactory @@ -562,16 +560,16 @@ where V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. 
- #[allow(clippy::too_many_arguments)] + #[expect(clippy::too_many_arguments)] pub fn new( provider: P, executor_provider: E, - consensus: Arc, + consensus: Arc>, payload_validator: V, - outgoing: UnboundedSender, - state: EngineApiTreeState, - canonical_in_memory_state: CanonicalInMemoryState, - persistence: PersistenceHandle, + outgoing: UnboundedSender>, + state: EngineApiTreeState, + canonical_in_memory_state: CanonicalInMemoryState, + persistence: PersistenceHandle, persistence_state: PersistenceState, payload_builder: PayloadBuilderHandle, config: TreeConfig, @@ -597,7 +595,6 @@ where incoming_tx, invalid_block_hook: Box::new(NoopInvalidBlockHook), engine_kind, - _primtives: Default::default(), } } @@ -611,19 +608,19 @@ where /// /// Returns the sender through which incoming requests can be sent to the task and the receiver /// end of a [`EngineApiEvent`] unbounded channel to receive events from the engine. - #[allow(clippy::too_many_arguments)] + #[expect(clippy::complexity)] pub fn spawn_new( provider: P, executor_provider: E, - consensus: Arc, + consensus: Arc>, payload_validator: V, - persistence: PersistenceHandle, + persistence: PersistenceHandle, payload_builder: PayloadBuilderHandle, - canonical_in_memory_state: CanonicalInMemoryState, + canonical_in_memory_state: CanonicalInMemoryState, config: TreeConfig, invalid_block_hook: Box>, kind: EngineApiKind, - ) -> (Sender>>, UnboundedReceiver) { + ) -> (Sender>>, UnboundedReceiver>) { let best_block_number = provider.best_block_number().unwrap_or(0); let header = provider.sealed_header(best_block_number).ok().flatten().unwrap_or_default(); @@ -661,7 +658,7 @@ where } /// Returns a new [`Sender`] to send messages to this type. - pub fn sender(&self) -> Sender>> { + pub fn sender(&self) -> Sender>> { self.incoming_tx.clone() } @@ -859,7 +856,7 @@ where /// /// Note: This does not update the tracked state and instead returns the new chain based on the /// given head. 
- fn on_new_head(&self, new_head: B256) -> ProviderResult> { + fn on_new_head(&self, new_head: B256) -> ProviderResult>> { // get the executed new head block let Some(new_head_block) = self.state.tree_state.blocks_by_hash.get(&new_head) else { return Ok(None) @@ -1133,7 +1130,7 @@ where /// Returns an error if the engine channel is disconnected. fn try_recv_engine_message( &self, - ) -> Result>>, RecvError> { + ) -> Result>>, RecvError> { if self.persistence_state.in_progress() { // try to receive the next request with a timeout to not block indefinitely match self.incoming.recv_timeout(std::time::Duration::from_millis(500)) { @@ -1211,7 +1208,7 @@ where /// Handles a message from the engine. fn on_engine_message( &mut self, - msg: FromEngine>, + msg: FromEngine>, ) -> Result<(), InsertBlockFatalError> { match msg { FromEngine::Event(event) => match event { @@ -1452,7 +1449,7 @@ where } /// Emits an outgoing event to the engine. - fn emit_event(&mut self, event: impl Into) { + fn emit_event(&mut self, event: impl Into>) { let event = event.into(); if event.is_backfill_action() { @@ -1496,7 +1493,7 @@ where /// Returns a batch of consecutive canonical blocks to persist in the range /// `(last_persisted_number .. canonical_head - threshold]` . The expected /// order is oldest -> newest. - fn get_canonical_blocks_to_persist(&self) -> Vec { + fn get_canonical_blocks_to_persist(&self) -> Vec> { let mut blocks_to_persist = Vec::new(); let mut current_hash = self.state.tree_state.canonical_block_hash(); let last_persisted_number = self.persistence_state.last_persisted_block.number; @@ -1549,7 +1546,7 @@ where /// has in memory. /// /// For finalized blocks, this will return `None`. 
- fn executed_block_by_hash(&self, hash: B256) -> ProviderResult> { + fn executed_block_by_hash(&self, hash: B256) -> ProviderResult>> { trace!(target: "engine::tree", ?hash, "Fetching executed block by hash"); // check memory first let block = self.state.tree_state.executed_block_by_hash(hash).cloned(); @@ -1595,7 +1592,7 @@ where } /// Return block from database or in-memory state by hash. - fn block_by_hash(&self, hash: B256) -> ProviderResult> { + fn block_by_hash(&self, hash: B256) -> ProviderResult> { // check database first let mut block = self.provider.block_by_hash(hash)?; if block.is_none() { @@ -1754,7 +1751,10 @@ where /// Validate if block is correct and satisfies all the consensus rules that concern the header /// and block body itself. - fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { + fn validate_block( + &self, + block: &SealedBlockWithSenders, + ) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) { error!( target: "engine::tree", @@ -1951,7 +1951,7 @@ where /// If either of these are true, then this returns the height of the first block. Otherwise, /// this returns [`None`]. This should be used to check whether or not we should be sending a /// remove command to the persistence task. - fn find_disk_reorg(&self, chain_update: &NewCanonicalChain) -> Option { + fn find_disk_reorg(&self, chain_update: &NewCanonicalChain) -> Option { let NewCanonicalChain::Reorg { new, old: _ } = chain_update else { return None }; let BlockNumHash { number: new_num, hash: new_hash } = @@ -1978,7 +1978,7 @@ where /// Invoked when we the canonical chain has been updated. /// /// This is invoked on a valid forkchoice update, or if we can make the target block canonical. 
- fn on_canonical_chain_update(&mut self, chain_update: NewCanonicalChain) { + fn on_canonical_chain_update(&mut self, chain_update: NewCanonicalChain) { trace!(target: "engine::tree", new_blocks = %chain_update.new_block_count(), reorged_blocks = %chain_update.reorged_block_count(), "applying new chain update"); let start = Instant::now(); @@ -2030,7 +2030,7 @@ where } /// This reinserts any blocks in the new chain that do not already exist in the tree - fn reinsert_reorged_blocks(&mut self, new_chain: Vec) { + fn reinsert_reorged_blocks(&mut self, new_chain: Vec>) { for block in new_chain { if self.state.tree_state.executed_block_by_hash(block.block.hash()).is_none() { trace!(target: "engine::tree", num=?block.block.number, hash=?block.block.hash(), "Reinserting block into tree state"); @@ -2296,7 +2296,7 @@ where self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root"); - let executed: ExecutedBlock = ExecutedBlock { + let executed: ExecutedBlock = ExecutedBlock { block: sealed_block.clone(), senders: Arc::new(block.senders), execution_output: Arc::new(ExecutionOutcome::from((output, block_number))), @@ -2636,7 +2636,7 @@ mod tests { use reth_engine_primitives::ForkchoiceStatus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::test_utils::MockExecutorProvider; - use reth_primitives::{BlockExt, EthPrimitives}; + use reth_primitives::{Block, BlockExt, EthPrimitives}; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::{block_to_payload_v1, payload::block_to_payload_v3}; use reth_trie::updates::TrieUpdates; @@ -2708,7 +2708,7 @@ mod tests { EthEngineTypes, EthereumEngineValidator, >, - to_tree_tx: Sender>>, + to_tree_tx: Sender>>, from_tree_rx: UnboundedReceiver, blocks: Vec, action_rx: Receiver, From b3752cd2e8c94ad447b01cd5514793858a051adc Mon Sep 17 
00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 9 Dec 2024 22:38:26 -0500 Subject: [PATCH 962/970] feat: remove default types from headers stage (#13258) Co-authored-by: Arsenii Kulikov --- crates/stages/stages/src/stages/headers.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 7ca9cae590b..2a104d7eb6b 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -4,7 +4,7 @@ use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; use reth_consensus::HeaderValidator; -use reth_db::{tables, transaction::DbTx, RawKey, RawTable, RawValue}; +use reth_db::{table::Value, tables, transaction::DbTx, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, transaction::DbTxMut, @@ -13,7 +13,7 @@ use reth_db_api::{ use reth_etl::Collector; use reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}; use reth_primitives::{NodePrimitives, SealedHeader, StaticFileSegment}; -use reth_primitives_traits::serde_bincode_compat; +use reth_primitives_traits::{serde_bincode_compat, FullBlockHeader}; use reth_provider::{ providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, StaticFileProviderFactory, @@ -93,11 +93,9 @@ where /// database table. fn write_headers

<P>(&mut self, provider: &P) -> Result<BlockNumber, StageError> where - P: DBProvider<Tx: DbTxMut> - + StaticFileProviderFactory< - Primitives: NodePrimitives<BlockHeader = Header>, - >, + P: DBProvider<Tx: DbTxMut> + StaticFileProviderFactory, Downloader: HeaderDownloader<Header = <P::Primitives as NodePrimitives>
::BlockHeader>, + <P::Primitives as NodePrimitives>::BlockHeader: Value + FullBlockHeader, { let total_headers = self.header_collector.len(); @@ -145,8 +143,8 @@ self.consensus.validate_header_with_total_difficulty(&header, td).map_err(|error| { StageError::Block { block: Box::new(BlockWithParent::new( - header.parent_hash, - NumHash::new(header.number, header_hash), + header.parent_hash(), + NumHash::new(header.number(), header_hash), )), error: BlockErrorKind::Validation(error), } @@ -203,9 +201,9 @@ impl<Provider, P, D> Stage<Provider> for HeaderStage<P, D> where Provider: DBProvider<Tx: DbTxMut> + StaticFileProviderFactory, - Provider::Primitives: NodePrimitives<BlockHeader = Header>, P: HeaderSyncGapProvider<Header = <Provider::Primitives as NodePrimitives>
::BlockHeader>, D: HeaderDownloader<Header = <Provider::Primitives as NodePrimitives>
::BlockHeader>, + ::BlockHeader: FullBlockHeader + Value, { /// Return the id of the stage fn id(&self) -> StageId { From d856c8e5bce7b099448a1dad756f4b41f4a6b70e Mon Sep 17 00:00:00 2001 From: Miguel Oliveira Date: Tue, 10 Dec 2024 05:39:43 -0300 Subject: [PATCH 963/970] feat(engine): add error logging in `state_hook` (#13252) Co-authored-by: Federico Gimenez --- crates/engine/tree/src/tree/root.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 53a881387e7..e2ed6aa1470 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -283,7 +283,9 @@ where let state_hook = StateHookSender::new(self.tx.clone()); move |state: &EvmState| { - let _ = state_hook.send(StateRootMessage::StateUpdate(state.clone())); + if let Err(error) = state_hook.send(StateRootMessage::StateUpdate(state.clone())) { + error!(target: "engine::root", ?error, "Failed to send state update"); + } } } From da99986ea2e9914943715cc635bc9209f179d492 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 13:06:39 +0400 Subject: [PATCH 964/970] feat: relax bounds for `EngineApiTreeHandler` (#13257) Co-authored-by: Matthias Seitz --- Cargo.lock | 3 + crates/blockchain-tree-api/Cargo.toml | 2 + crates/blockchain-tree-api/src/error.rs | 52 +++---- crates/consensus/beacon/src/engine/event.rs | 6 +- crates/engine/local/src/service.rs | 2 +- crates/engine/service/src/service.rs | 1 + crates/engine/tree/Cargo.toml | 2 + crates/engine/tree/src/engine.rs | 36 ++--- crates/engine/tree/src/tree/mod.rs | 151 +++++++++++--------- 9 files changed, 139 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08259a54aba..3bcc998cf21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6712,11 +6712,13 @@ dependencies = [ name = "reth-blockchain-tree-api" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "reth-consensus", 
"reth-execution-errors", "reth-primitives", + "reth-primitives-traits", "reth-storage-errors", "thiserror 2.0.5", ] @@ -7408,6 +7410,7 @@ dependencies = [ "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-prune-types", diff --git a/crates/blockchain-tree-api/Cargo.toml b/crates/blockchain-tree-api/Cargo.toml index b1c01f85938..83ae378090b 100644 --- a/crates/blockchain-tree-api/Cargo.toml +++ b/crates/blockchain-tree-api/Cargo.toml @@ -14,9 +14,11 @@ workspace = true reth-consensus.workspace = true reth-execution-errors.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-errors.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true diff --git a/crates/blockchain-tree-api/src/error.rs b/crates/blockchain-tree-api/src/error.rs index 4dd42c889a3..92866b4d4da 100644 --- a/crates/blockchain-tree-api/src/error.rs +++ b/crates/blockchain-tree-api/src/error.rs @@ -1,11 +1,13 @@ //! Error handling for the blockchain tree +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::ConsensusError; use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, }; -use reth_primitives::SealedBlock; +use reth_primitives::{SealedBlock, SealedBlockFor}; +use reth_primitives_traits::{Block, BlockBody}; pub use reth_storage_errors::provider::ProviderError; /// Various error cases that can occur when a block violates tree assumptions. 
@@ -210,48 +212,48 @@ impl InsertBlockErrorData { } } -struct InsertBlockErrorDataTwo { - block: SealedBlock, +struct InsertBlockErrorDataTwo { + block: SealedBlockFor, kind: InsertBlockErrorKindTwo, } -impl std::fmt::Display for InsertBlockErrorDataTwo { +impl std::fmt::Display for InsertBlockErrorDataTwo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "Failed to insert block (hash={}, number={}, parent_hash={}): {}", self.block.hash(), - self.block.number, - self.block.parent_hash, + self.block.number(), + self.block.parent_hash(), self.kind ) } } -impl std::fmt::Debug for InsertBlockErrorDataTwo { +impl std::fmt::Debug for InsertBlockErrorDataTwo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("InsertBlockError") .field("error", &self.kind) .field("hash", &self.block.hash()) - .field("number", &self.block.number) - .field("parent_hash", &self.block.parent_hash) - .field("num_txs", &self.block.body.transactions.len()) + .field("number", &self.block.number()) + .field("parent_hash", &self.block.parent_hash()) + .field("num_txs", &self.block.body.transactions().len()) .finish_non_exhaustive() } } -impl core::error::Error for InsertBlockErrorDataTwo { +impl core::error::Error for InsertBlockErrorDataTwo { fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { Some(&self.kind) } } -impl InsertBlockErrorDataTwo { - const fn new(block: SealedBlock, kind: InsertBlockErrorKindTwo) -> Self { +impl InsertBlockErrorDataTwo { + const fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { Self { block, kind } } - fn boxed(block: SealedBlock, kind: InsertBlockErrorKindTwo) -> Box { + fn boxed(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Box { Box::new(Self::new(block, kind)) } } @@ -259,36 +261,36 @@ impl InsertBlockErrorDataTwo { /// Error thrown when inserting a block failed because the block is considered invalid. 
#[derive(thiserror::Error)] #[error(transparent)] -pub struct InsertBlockErrorTwo { - inner: Box, +pub struct InsertBlockErrorTwo { + inner: Box>, } // === impl InsertBlockErrorTwo === -impl InsertBlockErrorTwo { +impl InsertBlockErrorTwo { /// Create a new `InsertInvalidBlockErrorTwo` - pub fn new(block: SealedBlock, kind: InsertBlockErrorKindTwo) -> Self { + pub fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { Self { inner: InsertBlockErrorDataTwo::boxed(block, kind) } } /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn consensus_error(error: ConsensusError, block: SealedBlock) -> Self { + pub fn consensus_error(error: ConsensusError, block: SealedBlockFor) -> Self { Self::new(block, InsertBlockErrorKindTwo::Consensus(error)) } /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn sender_recovery_error(block: SealedBlock) -> Self { + pub fn sender_recovery_error(block: SealedBlockFor) -> Self { Self::new(block, InsertBlockErrorKindTwo::SenderRecovery) } /// Create a new `InsertInvalidBlockError` from an execution error - pub fn execution_error(error: BlockExecutionError, block: SealedBlock) -> Self { + pub fn execution_error(error: BlockExecutionError, block: SealedBlockFor) -> Self { Self::new(block, InsertBlockErrorKindTwo::Execution(error)) } /// Consumes the error and returns the block that resulted in the error #[inline] - pub fn into_block(self) -> SealedBlock { + pub fn into_block(self) -> SealedBlockFor { self.inner.block } @@ -300,19 +302,19 @@ impl InsertBlockErrorTwo { /// Returns the block that resulted in the error #[inline] - pub const fn block(&self) -> &SealedBlock { + pub const fn block(&self) -> &SealedBlockFor { &self.inner.block } /// Consumes the type and returns the block and error kind. 
#[inline] - pub fn split(self) -> (SealedBlock, InsertBlockErrorKindTwo) { + pub fn split(self) -> (SealedBlockFor, InsertBlockErrorKindTwo) { let inner = *self.inner; (inner.block, inner.kind) } } -impl std::fmt::Debug for InsertBlockErrorTwo { +impl std::fmt::Debug for InsertBlockErrorTwo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.inner, f) } diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index b503e1e102a..acf056b3ff4 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -2,7 +2,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; use reth_engine_primitives::ForkchoiceStatus; -use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlock, SealedHeader}; +use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlockFor, SealedHeader}; use std::{ fmt::{Display, Formatter, Result}, sync::Arc, @@ -15,9 +15,9 @@ pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. - ForkBlockAdded(Arc>, Duration), + ForkBlockAdded(Arc>, Duration), /// A block was added to the canonical chain, and the elapsed time validating the block - CanonicalBlockAdded(Arc>, Duration), + CanonicalBlockAdded(Arc>, Duration), /// A canonical chain was committed, and the elapsed time committing the data CanonicalChainCommitted(Box>, Duration), /// The consensus engine is involved in live sync, and has specific progress diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index b4dd47c43ab..57fdc0c254e 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -52,7 +52,7 @@ where /// Processes requests. 
/// /// This type is responsible for processing incoming requests. - handler: EngineApiRequestHandler>, + handler: EngineApiRequestHandler, N::Primitives>, /// Receiver for incoming requests (from the engine API endpoint) that need to be processed. incoming_requests: EngineMessageStream, } diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index c6a87ea076f..5dfe4184257 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -39,6 +39,7 @@ type EngineServiceType = ChainOrchestrator< EngineHandler< EngineApiRequestHandler< EngineApiRequest<::Engine, ::Primitives>, + ::Primitives, >, EngineMessageStream<::Engine>, BasicBlockDownloader, diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 67cb72850ae..f428c8771cb 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -26,6 +26,7 @@ reth-payload-builder-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-revm.workspace = true @@ -108,6 +109,7 @@ test-utils = [ "reth-network-p2p/test-utils", "reth-payload-builder/test-utils", "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", "reth-provider/test-utils", "reth-prune-types", "reth-prune-types?/test-utils", diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 2f0415a1013..9fa0a8c1d21 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -11,6 +11,7 @@ use reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chain_state::ExecutedBlock; use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; +use reth_primitives_traits::Block; use std::{ collections::HashSet, fmt::Display, @@ -66,7 +67,7 @@ 
impl EngineHandler { impl ChainHandler for EngineHandler where - T: EngineRequestHandler, + T: EngineRequestHandler, S: Stream + Send + Sync + Unpin + 'static, ::Item: Into, D: BlockDownloader, @@ -139,9 +140,11 @@ pub trait EngineRequestHandler: Send + Sync { type Event: Send; /// The request type this handler can process. type Request; + /// Type of the block sent in [`FromEngine::DownloadedBlocks`] variant. + type Block: Block; /// Informs the handler about an event from the [`EngineHandler`]. - fn on_event(&mut self, event: FromEngine); + fn on_event(&mut self, event: FromEngine); /// Advances the handler. fn poll(&mut self, cx: &mut Context<'_>) -> Poll>; @@ -167,31 +170,32 @@ pub trait EngineRequestHandler: Send + Sync { /// In case required blocks are missing, the handler will request them from the network, by emitting /// a download request upstream. #[derive(Debug)] -pub struct EngineApiRequestHandler { +pub struct EngineApiRequestHandler { /// channel to send messages to the tree to execute the payload. - to_tree: Sender>, + to_tree: Sender>, /// channel to receive messages from the tree. - from_tree: UnboundedReceiver, + from_tree: UnboundedReceiver>, } -impl EngineApiRequestHandler { +impl EngineApiRequestHandler { /// Creates a new `EngineApiRequestHandler`. 
pub const fn new( - to_tree: Sender>, - from_tree: UnboundedReceiver, + to_tree: Sender>, + from_tree: UnboundedReceiver>, ) -> Self { Self { to_tree, from_tree } } } -impl EngineRequestHandler for EngineApiRequestHandler +impl EngineRequestHandler for EngineApiRequestHandler where Request: Send, { - type Event = BeaconConsensusEngineEvent; + type Event = BeaconConsensusEngineEvent; type Request = Request; + type Block = N::Block; - fn on_event(&mut self, event: FromEngine) { + fn on_event(&mut self, event: FromEngine) { // delegate to the tree let _ = self.to_tree.send(event); } @@ -263,7 +267,7 @@ impl From> for EngineA } impl From> - for FromEngine> + for FromEngine, N::Block> { fn from(req: EngineApiRequest) -> Self { Self::Request(req) @@ -297,16 +301,16 @@ impl From> for EngineApiEvent { +pub enum FromEngine { /// Event from the top level orchestrator. Event(FromOrchestrator), /// Request from the engine. Request(Req), /// Downloaded blocks from the network. - DownloadedBlocks(Vec), + DownloadedBlocks(Vec>), } -impl Display for FromEngine { +impl Display for FromEngine { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Event(ev) => write!(f, "Event({ev:?})"), @@ -318,7 +322,7 @@ impl Display for FromEngine { } } -impl From for FromEngine { +impl From for FromEngine { fn from(event: FromOrchestrator) -> Self { Self::Event(event) } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 29c382c2885..234a96a47d0 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -5,7 +5,7 @@ use crate::{ persistence::PersistenceHandle, tree::metrics::EngineApiMetrics, }; -use alloy_consensus::{BlockHeader, Header}; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -36,9 +36,10 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use 
reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ - EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockFor, - SealedBlockWithSenders, SealedHeader, + EthPrimitives, GotExpected, NodePrimitives, SealedBlockFor, SealedBlockWithSenders, + SealedHeader, }; +use reth_primitives_traits::Block; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, StateCommitmentProvider, StateProviderBox, @@ -485,9 +486,9 @@ where /// them one by one so that we can handle incoming engine API in between and don't become /// unresponsive. This can happen during live sync transition where we're trying to close the /// gap (up to 3 epochs of blocks in the worst case). - incoming_tx: Sender>>, + incoming_tx: Sender, N::Block>>, /// Incoming engine API requests. - incoming: Receiver>>, + incoming: Receiver, N::Block>>, /// Outgoing events that are emitted to the handler. outgoing: UnboundedSender>, /// Channels to the persistence layer. @@ -540,24 +541,20 @@ where impl EngineApiTreeHandler where - N: NodePrimitives< - Block = reth_primitives::Block, - BlockHeader = reth_primitives::Header, - BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, - >, + N: NodePrimitives, P: DatabaseProviderFactory + BlockReader + StateProviderFactory - + StateReader + + StateReader + StateCommitmentProvider + HashedPostStateProvider + Clone + 'static, -

::Provider: BlockReader, +

::Provider: + BlockReader, E: BlockExecutorProvider, T: EngineTypes, - V: EngineValidator, + V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. #[expect(clippy::too_many_arguments)] @@ -620,7 +617,8 @@ where config: TreeConfig, invalid_block_hook: Box>, kind: EngineApiKind, - ) -> (Sender>>, UnboundedReceiver>) { + ) -> (Sender, N::Block>>, UnboundedReceiver>) + { let best_block_number = provider.best_block_number().unwrap_or(0); let header = provider.sealed_header(best_block_number).ok().flatten().unwrap_or_default(); @@ -658,7 +656,7 @@ where } /// Returns a new [`Sender`] to send messages to this type. - pub fn sender(&self) -> Sender>> { + pub fn sender(&self) -> Sender, N::Block>> { self.incoming_tx.clone() } @@ -698,7 +696,7 @@ where /// block request processing isn't blocked for a long time. fn on_downloaded( &mut self, - mut blocks: Vec, + mut blocks: Vec>, ) -> Result, InsertBlockFatalError> { if blocks.is_empty() { // nothing to execute @@ -797,7 +795,7 @@ where let block_hash = block.hash(); let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); if lowest_buffered_ancestor == block_hash { - lowest_buffered_ancestor = block.parent_hash; + lowest_buffered_ancestor = block.parent_hash(); } // now check the block itself @@ -862,11 +860,11 @@ where return Ok(None) }; - let new_head_number = new_head_block.block.number; + let new_head_number = new_head_block.block.number(); let mut current_canonical_number = self.state.tree_state.current_canonical_head.number; let mut new_chain = vec![new_head_block.clone()]; - let mut current_hash = new_head_block.block.parent_hash; + let mut current_hash = new_head_block.block.parent_hash(); let mut current_number = new_head_number - 1; // Walk back the new chain until we reach a block we know about @@ -875,7 +873,7 @@ where // that are _above_ the current canonical head. while current_number > current_canonical_number { if let Some(block) = self.executed_block_by_hash(current_hash)? 
{ - current_hash = block.block.parent_hash; + current_hash = block.block.parent_hash(); current_number -= 1; new_chain.push(block); } else { @@ -904,7 +902,7 @@ where while current_canonical_number > current_number { if let Some(block) = self.executed_block_by_hash(old_hash)? { old_chain.push(block.clone()); - old_hash = block.block.header.parent_hash; + old_hash = block.block.header.parent_hash(); current_canonical_number -= 1; } else { // This shouldn't happen as we're walking back the canonical chain @@ -920,7 +918,7 @@ where // a common ancestor (fork block) is reached. while old_hash != current_hash { if let Some(block) = self.executed_block_by_hash(old_hash)? { - old_hash = block.block.header.parent_hash; + old_hash = block.block.header.parent_hash(); old_chain.push(block); } else { // This shouldn't happen as we're walking back the canonical chain @@ -929,7 +927,7 @@ where } if let Some(block) = self.executed_block_by_hash(current_hash)? { - current_hash = block.block.parent_hash; + current_hash = block.block.parent_hash(); new_chain.push(block); } else { // This shouldn't happen as we've already walked this path @@ -958,10 +956,10 @@ where return Ok(false) } // We already passed the canonical head - if current_block.number <= canonical_head.number { + if current_block.number() <= canonical_head.number { break } - current_hash = current_block.parent_hash; + current_hash = current_block.parent_hash(); } // verify that the given hash is not already part of canonical chain stored in memory @@ -1040,7 +1038,7 @@ where // to return an error ProviderError::HeaderNotFound(state.head_block_hash.into()) })?; - let updated = self.process_payload_attributes(attr, &tip, state, version); + let updated = self.process_payload_attributes(attr, tip.header(), state, version); return Ok(TreeOutcome::new(updated)) } @@ -1069,13 +1067,13 @@ where // 3. 
check if the head is already part of the canonical chain if let Ok(Some(canonical_header)) = self.find_canonical_header(state.head_block_hash) { - debug!(target: "engine::tree", head = canonical_header.number, "fcu head block is already canonical"); + debug!(target: "engine::tree", head = canonical_header.number(), "fcu head block is already canonical"); // For OpStack the proposers are allowed to reorg their own chain at will, so we need to // always trigger a new payload job if requested. if self.engine_kind.is_opstack() { if let Some(attr) = attrs { - debug!(target: "engine::tree", head = canonical_header.number, "handling payload attributes for canonical head"); + debug!(target: "engine::tree", head = canonical_header.number(), "handling payload attributes for canonical head"); let updated = self.process_payload_attributes(attr, &canonical_header, state, version); return Ok(TreeOutcome::new(updated)) @@ -1128,9 +1126,10 @@ where /// received in time. /// /// Returns an error if the engine channel is disconnected. + #[expect(clippy::type_complexity)] fn try_recv_engine_message( &self, - ) -> Result>>, RecvError> { + ) -> Result, N::Block>>, RecvError> { if self.persistence_state.in_progress() { // try to receive the next request with a timeout to not block indefinitely match self.incoming.recv_timeout(std::time::Duration::from_millis(500)) { @@ -1208,7 +1207,7 @@ where /// Handles a message from the engine. fn on_engine_message( &mut self, - msg: FromEngine>, + msg: FromEngine, N::Block>, ) -> Result<(), InsertBlockFatalError> { match msg { FromEngine::Event(event) => match event { @@ -1384,7 +1383,7 @@ where .state .buffer .block(&sync_target_state.finalized_block_hash) - .map(|block| block.number); + .map(|block| block.number()); // The block number that the backfill finished at - if the progress or newest // finalized is None then we can't check the distance anyways. 
@@ -1505,15 +1504,15 @@ where debug!(target: "engine::tree", ?last_persisted_number, ?canonical_head_number, ?target_number, ?current_hash, "Returning canonical blocks to persist"); while let Some(block) = self.state.tree_state.blocks_by_hash.get(¤t_hash) { - if block.block.number <= last_persisted_number { + if block.block.number() <= last_persisted_number { break; } - if block.block.number <= target_number { + if block.block.number() <= target_number { blocks_to_persist.push(block.clone()); } - current_hash = block.block.parent_hash; + current_hash = block.block.parent_hash(); } // reverse the order so that the oldest block comes first @@ -1579,7 +1578,10 @@ where } /// Return sealed block from database or in-memory state by hash. - fn sealed_header_by_hash(&self, hash: B256) -> ProviderResult> { + fn sealed_header_by_hash( + &self, + hash: B256, + ) -> ProviderResult>> { // check memory first let block = self.state.tree_state.block_by_hash(hash).map(|block| block.as_ref().clone().header); @@ -1649,7 +1651,7 @@ where self.state .buffer .lowest_ancestor(&hash) - .map(|block| block.parent_hash) + .map(|block| block.parent_hash()) .unwrap_or_else(|| hash) } @@ -1696,7 +1698,7 @@ where // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal // PoW block, which we need to identify by looking at the parent's block difficulty if let Some(parent) = self.block_by_hash(parent_hash)? { - if !parent.is_zero_difficulty() { + if !parent.header().difficulty().is_zero() { parent_hash = B256::ZERO; } } @@ -1823,8 +1825,8 @@ where /// Returns an error if sender recovery failed or inserting into the buffer failed. 
fn buffer_block_without_senders( &mut self, - block: SealedBlock, - ) -> Result<(), InsertBlockErrorTwo> { + block: SealedBlockFor, + ) -> Result<(), InsertBlockErrorTwo> { match block.try_seal_with_senders() { Ok(block) => self.buffer_block(block), Err(block) => Err(InsertBlockErrorTwo::sender_recovery_error(block)), @@ -1832,7 +1834,10 @@ where } /// Pre-validates the block and inserts it into the buffer. - fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockErrorTwo> { + fn buffer_block( + &mut self, + block: SealedBlockWithSenders, + ) -> Result<(), InsertBlockErrorTwo> { if let Err(err) = self.validate_block(&block) { return Err(InsertBlockErrorTwo::consensus_error(err, block.block)) } @@ -1886,7 +1891,7 @@ where // if we have buffered the finalized block, we should check how far // we're off exceeds_backfill_threshold = - self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number); + self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number()); } // If this is invoked after we downloaded a block we can check if this block is the @@ -2011,7 +2016,7 @@ where self.canonical_in_memory_state.set_canonical_head(tip.clone()); // Update metrics based on new tip - self.metrics.tree.canonical_chain_height.set(tip.number as f64); + self.metrics.tree.canonical_chain_height.set(tip.number() as f64); // sends an event to all active listeners about the new canonical chain self.canonical_in_memory_state.notify_canon_state(notification); @@ -2033,7 +2038,7 @@ where fn reinsert_reorged_blocks(&mut self, new_chain: Vec>) { for block in new_chain { if self.state.tree_state.executed_block_by_hash(block.block.hash()).is_none() { - trace!(target: "engine::tree", num=?block.block.number, hash=?block.block.hash(), "Reinserting block into tree state"); + trace!(target: "engine::tree", num=?block.block.number(), hash=?block.block.hash(), "Reinserting block into tree state"); 
self.state.tree_state.insert_executed(block); } } @@ -2086,10 +2091,10 @@ where /// Returns an event with the appropriate action to take, such as: /// - download more missing blocks /// - try to canonicalize the target if the `block` is the tracked target (head) block. - #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number,), target = "engine::tree")] + #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number(),), target = "engine::tree")] fn on_downloaded_block( &mut self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, ) -> Result, InsertBlockFatalError> { let block_num_hash = block.num_hash(); let lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_num_hash.hash); @@ -2147,8 +2152,8 @@ where fn insert_block_without_senders( &mut self, - block: SealedBlock, - ) -> Result { + block: SealedBlockFor, + ) -> Result> { match block.try_seal_with_senders() { Ok(block) => self.insert_block(block), Err(block) => Err(InsertBlockErrorTwo::sender_recovery_error(block)), @@ -2157,17 +2162,17 @@ where fn insert_block( &mut self, - block: SealedBlockWithSenders, - ) -> Result { + block: SealedBlockWithSenders, + ) -> Result> { self.insert_block_inner(block.clone()) .map_err(|kind| InsertBlockErrorTwo::new(block.block, kind)) } fn insert_block_inner( &mut self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, ) -> Result { - debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash, state_root = ?block.state_root, "Inserting new block into tree"); + debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash(), state_root = ?block.state_root(), "Inserting new block into tree"); if self.block_by_hash(block.hash())?.is_some() { return Ok(InsertPayloadOk2::AlreadySeen(BlockStatus2::Valid)) @@ -2179,14 +2184,14 @@ where // validate block consensus rules self.validate_block(&block)?; - 
trace!(target: "engine::tree", block=?block.num_hash(), parent=?block.parent_hash, "Fetching block state provider"); - let Some(state_provider) = self.state_provider(block.parent_hash)? else { + trace!(target: "engine::tree", block=?block.num_hash(), parent=?block.parent_hash(), "Fetching block state provider"); + let Some(state_provider) = self.state_provider(block.parent_hash())? else { // we don't have the state required to execute this block, buffering it and find the // missing parent block let missing_ancestor = self .state .buffer - .lowest_ancestor(&block.parent_hash) + .lowest_ancestor(&block.parent_hash()) .map(|block| block.parent_num_hash()) .unwrap_or_else(|| block.parent_num_hash()); @@ -2199,9 +2204,9 @@ where }; // now validate against the parent - let parent_block = self.sealed_header_by_hash(block.parent_hash)?.ok_or_else(|| { + let parent_block = self.sealed_header_by_hash(block.parent_hash())?.ok_or_else(|| { InsertBlockErrorKindTwo::Provider(ProviderError::HeaderNotFound( - block.parent_hash.into(), + block.parent_hash().into(), )) })?; if let Err(e) = self.consensus.validate_header_against_parent(&block, &parent_block) { @@ -2212,7 +2217,7 @@ where trace!(target: "engine::tree", block=?block.num_hash(), "Executing block"); let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); - let block_number = block.number; + let block_number = block.number(); let block_hash = block.hash(); let sealed_block = Arc::new(block.block.clone()); let block = block.unseal(); @@ -2260,7 +2265,7 @@ where let persistence_in_progress = self.persistence_state.in_progress(); if !persistence_in_progress { state_root_result = match self - .compute_state_root_parallel(block.parent_hash, &hashed_state) + .compute_state_root_parallel(block.header().parent_hash(), &hashed_state) { Ok((state_root, trie_output)) => Some((state_root, trie_output)), Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { @@ -2278,7 
+2283,7 @@ where state_provider.state_root_with_updates(hashed_state.clone())? }; - if state_root != block.state_root { + if state_root != block.header().state_root() { // call post-block hook self.invalid_block_hook.on_invalid_block( &parent_block, @@ -2287,7 +2292,7 @@ where Some((&trie_output, state_root)), ); return Err(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.state_root }.into(), + GotExpected { got: state_root, expected: block.header().state_root() }.into(), ) .into()) } @@ -2304,7 +2309,7 @@ where trie: Arc::new(trie_output), }; - if self.state.tree_state.canonical_block_hash() == executed.block().parent_hash { + if self.state.tree_state.canonical_block_hash() == executed.block().parent_hash() { debug!(target: "engine::tree", pending = ?executed.block().num_hash() ,"updating pending block"); // if the parent is the canonical head, we can insert the block as the pending block self.canonical_in_memory_state.set_pending_block(executed.clone()); @@ -2375,7 +2380,7 @@ where /// Returns the proper payload status response if the block is invalid. fn on_insert_block_error( &mut self, - error: InsertBlockErrorTwo, + error: InsertBlockErrorTwo, ) -> Result { let (block, error) = error.split(); @@ -2386,12 +2391,12 @@ where // If the error was due to an invalid payload, the payload is added to the // invalid headers cache and `Ok` with [PayloadStatusEnum::Invalid] is // returned. - warn!(target: "engine::tree", invalid_hash=?block.hash(), invalid_number=?block.number, %validation_err, "Invalid block error on new payload"); + warn!(target: "engine::tree", invalid_hash=?block.hash(), invalid_number=?block.number(), %validation_err, "Invalid block error on new payload"); let latest_valid_hash = if validation_err.is_block_pre_merge() { // zero hash must be returned if block is pre-merge Some(B256::ZERO) } else { - self.latest_valid_hash_for_invalid_payload(block.parent_hash)? 
+ self.latest_valid_hash_for_invalid_payload(block.parent_hash())? }; // keep track of the invalid header @@ -2403,7 +2408,10 @@ where } /// Attempts to find the header for the given block hash if it is canonical. - pub fn find_canonical_header(&self, hash: B256) -> Result, ProviderError> { + pub fn find_canonical_header( + &self, + hash: B256, + ) -> Result>, ProviderError> { let mut canonical = self.canonical_in_memory_state.header_by_hash(hash); if canonical.is_none() { @@ -2434,7 +2442,7 @@ where { // we're also persisting the finalized block on disk so we can reload it on // restart this is required by optimism which queries the finalized block: - let _ = self.persistence.save_finalized_block_number(finalized.number); + let _ = self.persistence.save_finalized_block_number(finalized.number()); self.canonical_in_memory_state.set_finalized(finalized); } } @@ -2462,7 +2470,7 @@ where if Some(safe.num_hash()) != self.canonical_in_memory_state.get_safe_num_hash() { // we're also persisting the safe block on disk so we can reload it on // restart this is required by optimism which queries the safe block: - let _ = self.persistence.save_safe_block_number(safe.number); + let _ = self.persistence.save_safe_block_number(safe.number()); self.canonical_in_memory_state.set_safe(safe); } } @@ -2537,7 +2545,7 @@ where fn process_payload_attributes( &self, attrs: T::PayloadAttributes, - head: &Header, + head: &N::BlockHeader, state: ForkchoiceState, version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { @@ -2626,6 +2634,7 @@ pub enum AdvancePersistenceError { mod tests { use super::*; use crate::persistence::PersistenceAction; + use alloy_consensus::Header; use alloy_primitives::Bytes; use alloy_rlp::Decodable; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar}; @@ -2708,7 +2717,7 @@ mod tests { EthEngineTypes, EthereumEngineValidator, >, - to_tree_tx: Sender>>, + to_tree_tx: Sender, Block>>, from_tree_rx: UnboundedReceiver, blocks: Vec, 
action_rx: Receiver, @@ -2843,7 +2852,7 @@ mod tests { fn insert_block( &mut self, block: SealedBlockWithSenders, - ) -> Result { + ) -> Result> { let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); self.extend_execution_outcome([execution_outcome]); self.tree.provider.add_state_root(block.state_root); From 15470b43502da858ccef99af203e8408b1422849 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 10 Dec 2024 04:29:51 -0500 Subject: [PATCH 965/970] feat: make bodies downloader generic over header (#13259) --- crates/net/downloaders/src/bodies/bodies.rs | 93 ++++++++++---------- crates/net/downloaders/src/bodies/noop.rs | 14 ++- crates/net/downloaders/src/bodies/queue.rs | 21 +++-- crates/net/downloaders/src/bodies/request.rs | 22 ++--- crates/net/downloaders/src/bodies/task.rs | 21 +++-- crates/net/p2p/src/bodies/downloader.rs | 7 +- crates/net/p2p/src/bodies/response.rs | 5 +- crates/node/builder/src/setup.rs | 2 +- crates/stages/stages/src/stages/bodies.rs | 17 ++-- crates/stages/stages/src/stages/utils.rs | 2 +- 10 files changed, 112 insertions(+), 92 deletions(-) diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index bdf2aca9c77..54026070ec8 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -35,11 +35,11 @@ use tracing::info; /// All blocks in a batch are fetched at the same time. #[must_use = "Stream does nothing unless polled"] #[derive(Debug)] -pub struct BodiesDownloader { +pub struct BodiesDownloader { /// The bodies client client: Arc, /// The consensus client - consensus: Arc>, + consensus: Arc>, /// The database handle provider: Provider, /// The maximum number of non-empty blocks per one request @@ -57,11 +57,11 @@ pub struct BodiesDownloader { /// The latest block number returned. 
latest_queued_block_number: Option, /// Requests in progress - in_progress_queue: BodiesRequestQueue, + in_progress_queue: BodiesRequestQueue, /// Buffered responses - buffered_responses: BinaryHeap>, + buffered_responses: BinaryHeap>, /// Queued body responses that can be returned for insertion into the database. - queued_bodies: Vec>, + queued_bodies: Vec>, /// The bodies downloader metrics. metrics: BodyDownloaderMetrics, } @@ -69,7 +69,7 @@ pub struct BodiesDownloader { impl BodiesDownloader where B: BodiesClient + 'static, - Provider: HeaderProvider + Unpin + 'static, + Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. fn next_headers_request(&self) -> DownloadResult>>> { @@ -193,14 +193,16 @@ where } /// Queues bodies and sets the latest queued block number - fn queue_bodies(&mut self, bodies: Vec>) { + fn queue_bodies(&mut self, bodies: Vec>) { self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number()); self.queued_bodies.extend(bodies); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); } /// Removes the next response from the buffer. - fn pop_buffered_response(&mut self) -> Option> { + fn pop_buffered_response( + &mut self, + ) -> Option> { let resp = self.buffered_responses.pop()?; self.metrics.buffered_responses.decrement(1.); self.buffered_blocks_size_bytes -= resp.size(); @@ -210,13 +212,10 @@ where } /// Adds a new response to the internal buffer - fn buffer_bodies_response( - &mut self, - response: Vec>, - ) { + fn buffer_bodies_response(&mut self, response: Vec>) { // take into account capacity let size = response.iter().map(BlockResponse::size).sum::() + - response.capacity() * mem::size_of::>(); + response.capacity() * mem::size_of::>(); let response = OrderedBodiesResponse { resp: response, size }; let response_len = response.len(); @@ -230,9 +229,7 @@ where } /// Returns a response if it's first block number matches the next expected. 
- fn try_next_buffered( - &mut self, - ) -> Option>> { + fn try_next_buffered(&mut self) -> Option>> { if let Some(next) = self.buffered_responses.peek() { let expected = self.next_expected_block_number(); let next_block_range = next.block_range(); @@ -258,9 +255,7 @@ where /// Returns the next batch of block bodies that can be returned if we have enough buffered /// bodies - fn try_split_next_batch( - &mut self, - ) -> Option>> { + fn try_split_next_batch(&mut self) -> Option>> { if self.queued_bodies.len() >= self.stream_batch_size { let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::>(); self.queued_bodies.shrink_to_fit(); @@ -292,12 +287,17 @@ where Self: BodyDownloader + 'static, { /// Spawns the downloader task via [`tokio::task::spawn`] - pub fn into_task(self) -> TaskDownloader<::Body> { + pub fn into_task( + self, + ) -> TaskDownloader<::Header, ::Body> { self.into_task_with(&TokioTaskExecutor::default()) } /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given spawner. - pub fn into_task_with(self, spawner: &S) -> TaskDownloader<::Body> + pub fn into_task_with( + self, + spawner: &S, + ) -> TaskDownloader<::Header, ::Body> where S: TaskSpawner, { @@ -308,8 +308,9 @@ where impl BodyDownloader for BodiesDownloader where B: BodiesClient + 'static, - Provider: HeaderProvider

+ Unpin + 'static, + Provider: HeaderProvider + Unpin + 'static, { + type Header = Provider::Header; type Body = B::Body; /// Set a new download range (exclusive). @@ -358,9 +359,9 @@ where impl Stream for BodiesDownloader where B: BodiesClient + 'static, - Provider: HeaderProvider
+ Unpin + 'static, + Provider: HeaderProvider + Unpin + 'static, { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -442,13 +443,28 @@ where } #[derive(Debug)] -struct OrderedBodiesResponse { - resp: Vec>, +struct OrderedBodiesResponse { + resp: Vec>, /// The total size of the response in bytes size: usize, } -impl OrderedBodiesResponse { +impl OrderedBodiesResponse { + #[inline] + fn len(&self) -> usize { + self.resp.len() + } + + /// Returns the size of the response in bytes + /// + /// See [`BlockResponse::size`] + #[inline] + const fn size(&self) -> usize { + self.size + } +} + +impl OrderedBodiesResponse { /// Returns the block number of the first element /// /// # Panics @@ -464,36 +480,23 @@ impl OrderedBodiesResponse { fn block_range(&self) -> RangeInclusive { self.first_block_number()..=self.resp.last().expect("is not empty").block_number() } - - #[inline] - fn len(&self) -> usize { - self.resp.len() - } - - /// Returns the size of the response in bytes - /// - /// See [`BlockResponse::size`] - #[inline] - const fn size(&self) -> usize { - self.size - } } -impl PartialEq for OrderedBodiesResponse { +impl PartialEq for OrderedBodiesResponse { fn eq(&self, other: &Self) -> bool { self.first_block_number() == other.first_block_number() } } -impl Eq for OrderedBodiesResponse {} +impl Eq for OrderedBodiesResponse {} -impl PartialOrd for OrderedBodiesResponse { +impl PartialOrd for OrderedBodiesResponse { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedBodiesResponse { +impl Ord for OrderedBodiesResponse { fn cmp(&self, other: &Self) -> Ordering { self.first_block_number().cmp(&other.first_block_number()).reverse() } @@ -573,7 +576,7 @@ impl BodiesDownloaderBuilder { pub fn build( self, client: B, - consensus: Arc>, + consensus: Arc>, provider: Provider, ) -> BodiesDownloader where diff --git 
a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index dd3e6e9691b..b7a9431a4d7 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -9,18 +9,24 @@ use std::{fmt::Debug, ops::RangeInclusive}; /// A [`BodyDownloader`] implementation that does nothing. #[derive(Debug, Default)] #[non_exhaustive] -pub struct NoopBodiesDownloader(std::marker::PhantomData); +pub struct NoopBodiesDownloader { + _header: std::marker::PhantomData, + _body: std::marker::PhantomData, +} -impl BodyDownloader for NoopBodiesDownloader { +impl + BodyDownloader for NoopBodiesDownloader +{ type Body = B; + type Header = H; fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> { Ok(()) } } -impl Stream for NoopBodiesDownloader { - type Item = Result>, DownloadError>; +impl Stream for NoopBodiesDownloader { + type Item = Result>, DownloadError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index 5f1e8b059cf..ed8c425e611 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -1,5 +1,6 @@ use super::request::BodiesRequestFuture; use crate::metrics::BodyDownloaderMetrics; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use futures::{stream::FuturesUnordered, Stream}; use futures_util::StreamExt; @@ -19,18 +20,19 @@ use std::{ /// The wrapper around [`FuturesUnordered`] that keeps information /// about the blocks currently being requested. #[derive(Debug)] -pub(crate) struct BodiesRequestQueue { +pub(crate) struct BodiesRequestQueue { /// Inner body request queue. - inner: FuturesUnordered>, + inner: FuturesUnordered>, /// The downloader metrics. metrics: BodyDownloaderMetrics, /// Last requested block number. 
pub(crate) last_requested_block_number: Option, } -impl BodiesRequestQueue +impl BodiesRequestQueue where B: BodiesClient + 'static, + H: BlockHeader, { /// Create new instance of request queue. pub(crate) fn new(metrics: BodyDownloaderMetrics) -> Self { @@ -58,15 +60,15 @@ where pub(crate) fn push_new_request( &mut self, client: Arc, - consensus: Arc>, - request: Vec, + consensus: Arc>, + request: Vec>, ) { // Set last max requested block number self.last_requested_block_number = request .last() .map(|last| match self.last_requested_block_number { - Some(num) => last.number.max(num), - None => last.number, + Some(num) => last.number().max(num), + None => last.number(), }) .or(self.last_requested_block_number); // Create request and push into the queue. @@ -76,11 +78,12 @@ where } } -impl Stream for BodiesRequestQueue +impl Stream for BodiesRequestQueue where + H: BlockHeader + Send + Sync + Unpin + 'static, B: BodiesClient + 'static, { - type Item = DownloadResult>>; + type Item = DownloadResult>>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().inner.poll_next_unpin(cx) diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 28cfdb61b7c..92f46fa6fdd 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -38,30 +38,31 @@ use std::{ /// All errors regarding the response cause the peer to get penalized, meaning that adversaries /// that try to give us bodies that do not match the requested order are going to be penalized /// and eventually disconnected. -pub(crate) struct BodiesRequestFuture { +pub(crate) struct BodiesRequestFuture { client: Arc, - consensus: Arc>, + consensus: Arc>, metrics: BodyDownloaderMetrics, /// Metrics for individual responses. This can be used to observe how the size (in bytes) of /// responses change while bodies are being downloaded. response_metrics: ResponseMetrics, // Headers to download. 
The collection is shrunk as responses are buffered. - pending_headers: VecDeque, + pending_headers: VecDeque>, /// Internal buffer for all blocks - buffer: Vec>, + buffer: Vec>, fut: Option, /// Tracks how many bodies we requested in the last request. last_request_len: Option, } -impl BodiesRequestFuture +impl BodiesRequestFuture where B: BodiesClient + 'static, + H: BlockHeader, { /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( client: Arc, - consensus: Arc>, + consensus: Arc>, metrics: BodyDownloaderMetrics, ) -> Self { Self { @@ -76,7 +77,7 @@ where } } - pub(crate) fn with_headers(mut self, headers: Vec) -> Self { + pub(crate) fn with_headers(mut self, headers: Vec>) -> Self { self.buffer.reserve_exact(headers.len()); self.pending_headers = VecDeque::from(headers); // Submit the request only if there are any headers to download. @@ -192,7 +193,7 @@ where if let Err(error) = self.consensus.validate_block_pre_execution(&block) { // Body is invalid, put the header back and return an error let hash = block.hash(); - let number = block.number; + let number = block.number(); self.pending_headers.push_front(block.header); return Err(DownloadError::BodyValidation { hash, @@ -213,11 +214,12 @@ where } } -impl Future for BodiesRequestFuture +impl Future for BodiesRequestFuture where + H: BlockHeader + Unpin + Send + Sync + 'static, B: BodiesClient + 'static, { - type Output = DownloadResult>>; + type Output = DownloadResult>>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 89af9813e3c..9377be78676 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -24,15 +24,15 @@ pub const BODIES_TASK_BUFFER_SIZE: usize = 4; /// A [BodyDownloader] that drives a spawned [BodyDownloader] on a spawned task. 
#[derive(Debug)] #[pin_project] -pub struct TaskDownloader { +pub struct TaskDownloader { #[pin] - from_downloader: ReceiverStream>, + from_downloader: ReceiverStream>, to_downloader: UnboundedSender>, } // === impl TaskDownloader === -impl TaskDownloader { +impl TaskDownloader { /// Spawns the given `downloader` via [`tokio::task::spawn`] returns a [`TaskDownloader`] that's /// connected to that task. /// @@ -64,7 +64,7 @@ impl TaskDownloader { /// ``` pub fn spawn(downloader: T) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader
+ 'static, { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } @@ -73,7 +73,7 @@ impl TaskDownloader { /// that's connected to that task. pub fn spawn_with(downloader: T, spawner: &S) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader
+ 'static, S: TaskSpawner, { let (bodies_tx, bodies_rx) = mpsc::channel(BODIES_TASK_BUFFER_SIZE); @@ -91,7 +91,10 @@ impl TaskDownloader { } } -impl BodyDownloader for TaskDownloader { +impl + BodyDownloader for TaskDownloader +{ + type Header = H; type Body = B; fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()> { @@ -100,8 +103,8 @@ impl BodyDownloader for TaskDownloader } } -impl Stream for TaskDownloader { - type Item = BodyDownloaderResult; +impl Stream for TaskDownloader { + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().from_downloader.poll_next(cx) @@ -111,7 +114,7 @@ impl Stream for TaskDownloader { /// A [`BodyDownloader`] that runs on its own task struct SpawnedDownloader { updates: UnboundedReceiverStream>, - bodies_tx: PollSender>, + bodies_tx: PollSender>, downloader: T, } diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index 06f35fc9bd6..b80a308d8a1 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -5,7 +5,7 @@ use futures::Stream; use std::{fmt::Debug, ops::RangeInclusive}; /// Body downloader return type. -pub type BodyDownloaderResult = DownloadResult>>; +pub type BodyDownloaderResult = DownloadResult>>; /// A downloader capable of fetching and yielding block bodies from block headers. /// @@ -13,8 +13,11 @@ pub type BodyDownloaderResult = DownloadResult> + Unpin + Send + Sync + Stream> + Unpin { + /// The type of header that can be returned in a blck + type Header: Debug + Send + Sync + Unpin + 'static; + /// The type of the body that is being downloaded. 
type Body: Debug + Send + Sync + Unpin + 'static; diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 02534ea0963..1b415246f54 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -1,10 +1,11 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, U256}; use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; -use reth_primitives_traits::{BlockHeader, InMemorySize}; +use reth_primitives_traits::InMemorySize; /// The block response #[derive(PartialEq, Eq, Debug, Clone)] -pub enum BlockResponse { +pub enum BlockResponse { /// Full block response (with transactions or ommers) Full(SealedBlock), /// The empty block response diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 0a0e4f10dbc..6dff28bd39b 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -87,7 +87,7 @@ pub fn build_pipeline( where N: ProviderNodeTypes, H: HeaderDownloader
> + 'static, - B: BodyDownloader> + 'static, + B: BodyDownloader
, Body = BodyTy> + 'static, Executor: BlockExecutorProvider, N::Primitives: NodePrimitives, { diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 88a1b96e249..0f311b1bc9e 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -5,7 +5,7 @@ use reth_db::{tables, transaction::DbTx}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTxMut}; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; -use reth_primitives_traits::{Block, BlockBody}; +use reth_primitives_traits::{Block, BlockBody, BlockHeader}; use reth_provider::{ providers::StaticFileWriter, BlockReader, BlockWriter, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, StorageLocation, @@ -56,7 +56,7 @@ pub struct BodyStage { /// The body downloader. downloader: D, /// Block response buffer. - buffer: Option>>, + buffer: Option>>, } impl BodyStage { @@ -72,9 +72,7 @@ impl BodyStage { unwind_block: Option, ) -> Result<(), StageError> where - Provider: DBProvider - + BlockReader
- + StaticFileProviderFactory, + Provider: DBProvider + BlockReader + StaticFileProviderFactory, { // Get id for the next tx_num of zero if there are no transactions. let next_tx_num = provider @@ -151,9 +149,9 @@ where Provider: DBProvider + StaticFileProviderFactory + StatsReader - + BlockReader
- + BlockWriter>, - D: BodyDownloader>, + + BlockReader + + BlockWriter>, + D: BodyDownloader>, { /// Return the id of the stage fn id(&self) -> StageId { @@ -764,6 +762,7 @@ mod tests { } impl BodyDownloader for TestBodyDownloader { + type Header = Header; type Body = BlockBody; fn set_download_range( @@ -786,7 +785,7 @@ mod tests { } impl Stream for TestBodyDownloader { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index c2a7c6ede02..169d556348b 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -258,7 +258,7 @@ pub(crate) fn missing_static_data_error( segment: StaticFileSegment, ) -> Result where - Provider: BlockReader
+ StaticFileProviderFactory, + Provider: BlockReader + StaticFileProviderFactory, { let mut last_block = static_file_provider.get_highest_static_file_block(segment).unwrap_or_default(); From fdc9b3188a058288e9d25d69f94831cc0441a8bb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 10 Dec 2024 11:07:15 +0100 Subject: [PATCH 966/970] chore: use trait fn for blob gas (#13261) --- crates/primitives-traits/src/block/body.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index ed60796ce1b..20f1cb9c159 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -6,7 +6,7 @@ use crate::{ }; use alloc::{fmt, vec::Vec}; use alloy_consensus::Transaction; -use alloy_eips::{eip2718::Encodable2718, eip4844::DATA_GAS_PER_BLOB, eip4895::Withdrawals}; +use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; use alloy_primitives::{Bytes, B256}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. @@ -73,12 +73,7 @@ pub trait BlockBody: /// Calculates the total blob gas used by _all_ EIP-4844 transactions in the block. fn blob_gas_used(&self) -> u64 { - // TODO(mattss): simplify after - self.transactions() - .iter() - .filter_map(|tx| tx.blob_versioned_hashes()) - .map(|hashes| hashes.len() as u64 * DATA_GAS_PER_BLOB) - .sum() + self.transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() } /// Returns an iterator over all blob versioned hashes in the block body. 
From 36c0142e583668b7dd58428002ad5d1773d8c4a6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 10 Dec 2024 12:10:47 +0100 Subject: [PATCH 967/970] chore: release 1.1.3 (#13262) --- Cargo.lock | 244 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 123 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3bcc998cf21..92b089a0eaf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2647,7 +2647,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5532,7 +5532,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.1.2" +version = "1.1.3" dependencies = [ "clap", "reth-cli-util", @@ -6479,7 +6479,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6552,7 +6552,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6580,7 +6580,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6636,7 +6636,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -6672,7 +6672,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6710,7 +6710,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6725,7 +6725,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6754,7 +6754,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = 
"1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -6775,7 +6775,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-genesis", "clap", @@ -6788,7 +6788,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.1.2" +version = "1.1.3" dependencies = [ "ahash", "alloy-consensus", @@ -6856,7 +6856,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.1.2" +version = "1.1.3" dependencies = [ "reth-tasks", "tokio", @@ -6865,7 +6865,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6883,7 +6883,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6906,7 +6906,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.1.2" +version = "1.1.3" dependencies = [ "convert_case", "proc-macro2", @@ -6917,7 +6917,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "eyre", @@ -6933,7 +6933,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6946,7 +6946,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6963,7 +6963,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6986,7 +6986,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7027,7 +7027,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ 
"alloy-consensus", "alloy-genesis", @@ -7056,7 +7056,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7085,7 +7085,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7102,7 +7102,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7129,7 +7129,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7153,7 +7153,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7181,7 +7181,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7220,7 +7220,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7268,7 +7268,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.1.2" +version = "1.1.3" dependencies = [ "aes", "alloy-primitives", @@ -7298,7 +7298,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7330,7 +7330,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7350,7 +7350,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.1.2" +version = "1.1.3" dependencies = [ "futures", "pin-project", @@ -7378,7 +7378,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.1.2" +version = "1.1.3" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -7434,7 +7434,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7466,7 +7466,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.1.2" +version = "1.1.3" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7478,7 +7478,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-eips", @@ -7515,7 +7515,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7540,7 +7540,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.1.2" +version = "1.1.3" dependencies = [ "clap", "eyre", @@ -7551,7 +7551,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7566,7 +7566,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7586,7 +7586,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7606,7 +7606,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7631,11 +7631,11 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.1.2" +version = "1.1.3" [[package]] name = "reth-etl" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "rayon", @@ -7645,7 +7645,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7673,7 +7673,7 @@ dependencies = [ [[package]] 
name = "reth-evm-ethereum" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7696,7 +7696,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7711,7 +7711,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7731,7 +7731,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7774,7 +7774,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "eyre", @@ -7807,7 +7807,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7824,7 +7824,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "serde", "serde_json", @@ -7833,7 +7833,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7859,7 +7859,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.1.2" +version = "1.1.3" dependencies = [ "async-trait", "bytes", @@ -7881,7 +7881,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.1.2" +version = "1.1.3" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -7902,7 +7902,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.1.2" +version = "1.1.3" dependencies = [ "bindgen", "cc", @@ -7910,7 +7910,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.1.2" +version = "1.1.3" dependencies = [ "futures", "metrics", @@ -7921,14 +7921,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.1.2" +version = "1.1.3" dependencies 
= [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.1.2" +version = "1.1.3" dependencies = [ "futures-util", "if-addrs", @@ -7942,7 +7942,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8005,7 +8005,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -8027,7 +8027,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8049,7 +8049,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8065,7 +8065,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -8078,7 +8078,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.1.2" +version = "1.1.3" dependencies = [ "anyhow", "bincode", @@ -8096,7 +8096,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8116,7 +8116,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8181,7 +8181,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8232,7 +8232,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-contract", @@ -8281,7 +8281,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8304,7 +8304,7 @@ 
dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.1.2" +version = "1.1.3" dependencies = [ "eyre", "http", @@ -8327,7 +8327,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "reth-chainspec", "reth-db-api", @@ -8338,7 +8338,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8358,7 +8358,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8409,7 +8409,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8428,7 +8428,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8458,7 +8458,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8469,7 +8469,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8523,7 +8523,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8559,7 +8559,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8583,7 +8583,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8628,7 +8628,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.1.2" +version = "1.1.3" dependencies = [ "reth-codecs", "reth-db-api", @@ 
-8639,7 +8639,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8661,7 +8661,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-rpc-types-engine", "async-trait", @@ -8674,7 +8674,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8692,7 +8692,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8701,7 +8701,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8711,7 +8711,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8758,7 +8758,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8786,7 +8786,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8836,7 +8836,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8868,7 +8868,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "arbitrary", @@ -8888,7 +8888,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8906,7 +8906,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.1.2" +version = "1.1.3" dependencies = [ 
"alloy-consensus", "alloy-dyn-abi", @@ -8978,7 +8978,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -9002,7 +9002,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9021,7 +9021,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9071,7 +9071,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9109,7 +9109,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -9152,7 +9152,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9194,7 +9194,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-rpc-types-engine", "http", @@ -9211,7 +9211,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9226,7 +9226,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9243,7 +9243,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9294,7 +9294,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9323,7 +9323,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.1.2" +version = "1.1.3" dependencies 
= [ "alloy-primitives", "arbitrary", @@ -9340,7 +9340,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "assert_matches", @@ -9364,7 +9364,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "clap", @@ -9375,7 +9375,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9399,7 +9399,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9412,7 +9412,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.1.2" +version = "1.1.3" dependencies = [ "auto_impl", "dyn-clone", @@ -9429,7 +9429,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9443,7 +9443,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "tokio", "tokio-stream", @@ -9452,7 +9452,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.1.2" +version = "1.1.3" dependencies = [ "clap", "eyre", @@ -9466,7 +9466,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9513,7 +9513,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9542,7 +9542,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9572,7 +9572,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9601,7 
+9601,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9628,7 +9628,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9652,7 +9652,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.1.2" +version = "1.1.3" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index beef7d330dd..ab2fba7b99e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.1.2" +version = "1.1.3" edition = "2021" rust-version = "1.82" license = "MIT OR Apache-2.0" From d97449dae495a2243000078fa30b3b164ef5891b Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 17:50:16 +0400 Subject: [PATCH 968/970] fix: bounds for BundleApi (#13267) --- crates/node/builder/src/rpc.rs | 12 +++++++++--- crates/optimism/node/src/node.rs | 2 ++ crates/rpc/rpc-builder/src/lib.rs | 28 ++++++++++++++++++++-------- crates/rpc/rpc/src/eth/bundle.rs | 8 ++++++-- 4 files changed, 37 insertions(+), 13 deletions(-) diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index c8e08078bb9..32123b194e6 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -18,7 +18,7 @@ use reth_node_core::{ version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::PayloadStore; -use reth_primitives::EthPrimitives; +use reth_primitives::{EthPrimitives, PooledTransactionsElement}; use reth_provider::providers::ProviderNodeTypes; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, @@ -33,6 +33,7 @@ use reth_rpc_builder::{ use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::sync::Arc; use 
crate::EthApiBuilderCtx; @@ -403,7 +404,9 @@ where impl RpcAddOns where - N: FullNodeComponents, + N: FullNodeComponents< + Pool: TransactionPool>, + >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners @@ -531,7 +534,10 @@ where impl NodeAddOns for RpcAddOns where - N: FullNodeComponents>, + N: FullNodeComponents< + Types: ProviderNodeTypes, + Pool: TransactionPool>, + >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 54ff36dabac..e9e7e23bc9c 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -244,6 +244,7 @@ where Storage = OpStorage, Engine = OpEngineTypes, >, + Pool: TransactionPool>, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -294,6 +295,7 @@ where Storage = OpStorage, Engine = OpEngineTypes, >, + Pool: TransactionPool>, >, OpEngineValidator: EngineValidator<::Engine>, { diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ce29b77f09d..877e8089786 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -19,7 +19,7 @@ //! use reth_engine_primitives::PayloadValidator; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::{Header, TransactionSigned}; +//! use reth_primitives::{Header, PooledTransactionsElement, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ @@ -55,8 +55,12 @@ //! Header = reth_primitives::Header, //! > + AccountReader //! + ChangeSetReader, -//! Pool: TransactionPool> -//! + Unpin +//! Pool: TransactionPool< +//! Transaction: PoolTransaction< +//! Consensus = TransactionSigned, +//! Pooled = PooledTransactionsElement, +//! >, +//! > + Unpin //! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! 
Events: @@ -98,7 +102,7 @@ //! use reth_engine_primitives::{EngineTypes, PayloadValidator}; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::{Header, TransactionSigned}; +//! use reth_primitives::{Header, PooledTransactionsElement, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -141,8 +145,12 @@ //! Header = reth_primitives::Header, //! > + AccountReader //! + ChangeSetReader, -//! Pool: TransactionPool> -//! + Unpin +//! Pool: TransactionPool< +//! Transaction: PoolTransaction< +//! Consensus = TransactionSigned, +//! Pooled = PooledTransactionsElement, +//! >, +//! > + Unpin //! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: @@ -222,7 +230,7 @@ use reth_consensus::FullConsensus; use reth_engine_primitives::{EngineTypes, PayloadValidator}; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives::NodePrimitives; +use reth_primitives::{NodePrimitives, PooledTransactionsElement}; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullRpcProvider, ProviderBlock, ProviderHeader, ProviderReceipt, @@ -240,7 +248,7 @@ use reth_rpc_eth_api::{ use reth_rpc_eth_types::{EthConfig, EthStateCache, EthSubscriptionIdProvider}; use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; -use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; +use reth_transaction_pool::{noop::NoopTransactionPool, PoolTransaction, TransactionPool}; use serde::{Deserialize, Serialize}; use tower::Layer; use tower_http::cors::CorsLayer; @@ -315,6 +323,7 @@ where 
Receipt = ::Receipt, Header = ::BlockHeader, >, + Pool: TransactionPool>, >, BlockExecutor: BlockExecutorProvider, { @@ -706,6 +715,7 @@ where Receipt = ::Receipt, Header = ::BlockHeader, >, + Pool: TransactionPool>, >, { let Self { @@ -831,6 +841,7 @@ where Block = ::Block, Header = ::BlockHeader, >, + Pool: TransactionPool>, >, Pool: TransactionPool::Transaction>, { @@ -1371,6 +1382,7 @@ where Receipt = ::Receipt, Header = ::BlockHeader, >, + Pool: TransactionPool>, >, BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::FullConsensus + Clone + 'static, diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 478d1de1c51..b12e021335e 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -285,10 +285,14 @@ where #[async_trait::async_trait] impl EthCallBundleApiServer for EthBundle where - Eth: EthTransactions + LoadPendingBlock + Call + 'static, + Eth: EthTransactions< + Pool: TransactionPool>, + > + LoadPendingBlock + + Call + + 'static, { async fn call_bundle(&self, request: EthCallBundle) -> RpcResult { - Self::call_bundle(self, request).await.map_err(Into::into) + self.call_bundle(request).await.map_err(Into::into) } } From 3ea2522baf18a2033bb1884f9331699b4e1fd6a9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 11 Dec 2024 09:53:48 +0100 Subject: [PATCH 969/970] fix: fromstr impl for miner variant (#13289) --- crates/rpc/rpc-server-types/src/module.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index 3eb34b34a7f..7dc92993611 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -320,6 +320,7 @@ impl FromStr for RethRpcModule { "reth" => Self::Reth, "ots" => Self::Ots, "flashbots" => Self::Flashbots, + "miner" => Self::Miner, _ => return Err(ParseError::VariantNotFound), }) } From 15fac0873e91ea29ab2e605bfba17bedcd7a6084 Mon Sep 17 
00:00:00 2001 From: Matthias Seitz Date: Wed, 11 Dec 2024 11:08:39 +0100 Subject: [PATCH 970/970] chore: bump version 1.1.4 (#13291) --- Cargo.lock | 244 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 123 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 92b089a0eaf..fb25ea0121e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2647,7 +2647,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5532,7 +5532,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.1.3" +version = "1.1.4" dependencies = [ "clap", "reth-cli-util", @@ -6479,7 +6479,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6552,7 +6552,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6580,7 +6580,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6636,7 +6636,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -6672,7 +6672,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6710,7 +6710,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6725,7 +6725,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6754,7 +6754,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.1.3" +version = "1.1.4" dependencies = [ 
"alloy-chains", "alloy-consensus", @@ -6775,7 +6775,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-genesis", "clap", @@ -6788,7 +6788,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.1.3" +version = "1.1.4" dependencies = [ "ahash", "alloy-consensus", @@ -6856,7 +6856,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.1.3" +version = "1.1.4" dependencies = [ "reth-tasks", "tokio", @@ -6865,7 +6865,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6883,7 +6883,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6906,7 +6906,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.1.3" +version = "1.1.4" dependencies = [ "convert_case", "proc-macro2", @@ -6917,7 +6917,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "eyre", @@ -6933,7 +6933,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6946,7 +6946,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6963,7 +6963,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6986,7 +6986,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7027,7 +7027,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7056,7 +7056,7 @@ 
dependencies = [ [[package]] name = "reth-db-common" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7085,7 +7085,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7102,7 +7102,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7129,7 +7129,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7153,7 +7153,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7181,7 +7181,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7220,7 +7220,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7268,7 +7268,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.1.3" +version = "1.1.4" dependencies = [ "aes", "alloy-primitives", @@ -7298,7 +7298,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7330,7 +7330,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7350,7 +7350,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.1.3" +version = "1.1.4" dependencies = [ "futures", "pin-project", @@ -7378,7 +7378,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7434,7 +7434,7 @@ dependencies = [ 
[[package]] name = "reth-engine-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7466,7 +7466,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.1.3" +version = "1.1.4" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7478,7 +7478,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-eips", @@ -7515,7 +7515,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7540,7 +7540,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.1.3" +version = "1.1.4" dependencies = [ "clap", "eyre", @@ -7551,7 +7551,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7566,7 +7566,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7586,7 +7586,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7606,7 +7606,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7631,11 +7631,11 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.1.3" +version = "1.1.4" [[package]] name = "reth-etl" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "rayon", @@ -7645,7 +7645,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7673,7 +7673,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.1.3" +version = "1.1.4" 
dependencies = [ "alloy-consensus", "alloy-eips", @@ -7696,7 +7696,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7711,7 +7711,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7731,7 +7731,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7774,7 +7774,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "eyre", @@ -7807,7 +7807,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7824,7 +7824,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "serde", "serde_json", @@ -7833,7 +7833,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7859,7 +7859,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.1.3" +version = "1.1.4" dependencies = [ "async-trait", "bytes", @@ -7881,7 +7881,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.1.3" +version = "1.1.4" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -7902,7 +7902,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.1.3" +version = "1.1.4" dependencies = [ "bindgen", "cc", @@ -7910,7 +7910,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.1.3" +version = "1.1.4" dependencies = [ "futures", "metrics", @@ -7921,14 +7921,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" 
-version = "1.1.3" +version = "1.1.4" dependencies = [ "futures-util", "if-addrs", @@ -7942,7 +7942,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8005,7 +8005,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -8027,7 +8027,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8049,7 +8049,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8065,7 +8065,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -8078,7 +8078,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.1.3" +version = "1.1.4" dependencies = [ "anyhow", "bincode", @@ -8096,7 +8096,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8116,7 +8116,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8181,7 +8181,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8232,7 +8232,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-contract", @@ -8281,7 +8281,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8304,7 +8304,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" 
-version = "1.1.3" +version = "1.1.4" dependencies = [ "eyre", "http", @@ -8327,7 +8327,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "reth-chainspec", "reth-db-api", @@ -8338,7 +8338,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8358,7 +8358,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8409,7 +8409,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8428,7 +8428,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8458,7 +8458,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8469,7 +8469,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8523,7 +8523,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8559,7 +8559,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8583,7 +8583,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8628,7 +8628,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.1.3" +version = "1.1.4" dependencies = [ "reth-codecs", "reth-db-api", @@ -8639,7 +8639,7 @@ dependencies = [ [[package]] name = 
"reth-payload-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8661,7 +8661,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-rpc-types-engine", "async-trait", @@ -8674,7 +8674,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8692,7 +8692,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8701,7 +8701,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8711,7 +8711,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8758,7 +8758,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8786,7 +8786,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8836,7 +8836,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8868,7 +8868,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "arbitrary", @@ -8888,7 +8888,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8906,7 +8906,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8978,7 +8978,7 @@ dependencies 
= [ [[package]] name = "reth-rpc-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -9002,7 +9002,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9021,7 +9021,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9071,7 +9071,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9109,7 +9109,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -9152,7 +9152,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9194,7 +9194,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-rpc-types-engine", "http", @@ -9211,7 +9211,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9226,7 +9226,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9243,7 +9243,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9294,7 +9294,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9323,7 +9323,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "arbitrary", @@ -9340,7 +9340,7 @@ 
dependencies = [ [[package]] name = "reth-static-file" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "assert_matches", @@ -9364,7 +9364,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "clap", @@ -9375,7 +9375,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9399,7 +9399,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9412,7 +9412,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.1.3" +version = "1.1.4" dependencies = [ "auto_impl", "dyn-clone", @@ -9429,7 +9429,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9443,7 +9443,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "tokio", "tokio-stream", @@ -9452,7 +9452,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.1.3" +version = "1.1.4" dependencies = [ "clap", "eyre", @@ -9466,7 +9466,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9513,7 +9513,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9542,7 +9542,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9572,7 +9572,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9601,7 +9601,7 @@ dependencies = [ [[package]] name = 
"reth-trie-parallel" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9628,7 +9628,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9652,7 +9652,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.1.3" +version = "1.1.4" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index ab2fba7b99e..4ee8fac0e74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.1.3" +version = "1.1.4" edition = "2021" rust-version = "1.82" license = "MIT OR Apache-2.0"