1 change: 1 addition & 0 deletions .config/zepter.yaml
@@ -19,6 +19,7 @@ workflows:

"--left-side-outside-workspace=ignore",
# Auxiliary flags:
"--ignore-missing-propagate=reth-evm-ethereum/test-utils:grevm/test-utils",
"--offline",
"--locked",
"--show-path",
22 changes: 11 additions & 11 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion crates/chain-state/src/in_memory.rs
@@ -573,7 +573,7 @@ pub struct BlockState<N: NodePrimitives = EthPrimitives> {
/// The executed block that determines the state after this block has been executed.
block: ExecutedBlockWithTrieUpdates<N>,
/// The block's parent block if it exists.
parent: Option<Arc<BlockState<N>>>,
parent: Option<Arc<Self>>,
}

impl<N: NodePrimitives> BlockState<N> {
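This hunk is a small style cleanup: inside a type's own definition, `Self` names the type being defined, so `Option<Arc<Self>>` is exactly the same type as `Option<Arc<BlockState<N>>>`, only shorter and robust to renames. A minimal illustration of the pattern (the type below is made up for the example, not taken from reth):

```rust
use std::sync::Arc;

// Illustrative only: `Self` in the field type refers to `Chain<N>` itself,
// i.e. this field has the same type as `parent: Option<Arc<Chain<N>>>`.
// Allowed in type definitions since Rust 1.32.
struct Chain<N> {
    value: N,
    parent: Option<Arc<Self>>,
}
```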
2 changes: 1 addition & 1 deletion crates/cli/util/src/sigsegv_handler.rs
@@ -126,7 +126,7 @@ pub fn install() {
libc::sigaltstack(&raw const alt_stack, ptr::null_mut());

let mut sa: libc::sigaction = mem::zeroed();
sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
sa.sa_sigaction = print_stack_trace as *const () as libc::sighandler_t;
sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
libc::sigemptyset(&raw mut sa.sa_mask);
libc::sigaction(libc::SIGSEGV, &raw const sa, ptr::null_mut());
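The new two-step cast turns the handler function into the integer that `libc::sigaction` expects by going through a raw pointer first (`fn -> *const () -> sighandler_t`), which makes the function-pointer-to-integer conversion explicit and avoids lints against casting functions straight to numeric types. A self-contained sketch of the same pattern, assuming the `libc` crate and a Unix target; the handler body here is illustrative, not reth's actual stack-trace printer:

```rust
use std::mem;

extern "C" fn on_segv(_sig: libc::c_int) {
    // A real handler should only do async-signal-safe work here.
    unsafe { libc::_exit(1) };
}

fn install_handler() {
    unsafe {
        let mut sa: libc::sigaction = mem::zeroed();
        // fn item -> raw pointer -> pointer-sized integer expected by sigaction.
        sa.sa_sigaction = on_segv as *const () as libc::sighandler_t;
        sa.sa_flags = libc::SA_RESETHAND;
        libc::sigemptyset(&mut sa.sa_mask);
        libc::sigaction(libc::SIGSEGV, &sa, std::ptr::null_mut());
    }
}
```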
2 changes: 1 addition & 1 deletion crates/e2e-test-utils/src/setup_import.rs
@@ -273,7 +273,7 @@ mod tests {
use super::*;
use crate::test_rlp_utils::{create_fcu_json, generate_test_blocks, write_blocks_to_rlp};
use reth_chainspec::{ChainSpecBuilder, MAINNET};
use reth_db::mdbx::DatabaseArguments;
use reth_db::DatabaseArguments;
use reth_payload_builder::EthPayloadBuilderAttributes;
use reth_primitives::SealedBlock;
use reth_provider::{
6 changes: 3 additions & 3 deletions crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs
@@ -510,7 +510,7 @@ where
Box::pin(async move {
let mut accepted_check: bool = false;

let mut latest_block = env
let latest_block = env
.current_block_info()
.ok_or_else(|| eyre::eyre!("No latest block information available"))?;

Expand Down Expand Up @@ -605,8 +605,8 @@ where
rpc_latest_header.hash;

// update local copy for any further usage in this scope
latest_block.hash = rpc_latest_header.hash;
latest_block.number = rpc_latest_header.inner.number;
// latest_block.hash = rpc_latest_header.hash;
// latest_block.number = rpc_latest_header.inner.number;
}
}

3 changes: 0 additions & 3 deletions crates/e2e-test-utils/tests/e2e-testsuite/main.rs
@@ -338,9 +338,6 @@ async fn test_testsuite_multinode_block_production() -> Result<()> {
.with_action(MakeCanonical::new())
.with_action(CaptureBlockOnNode::new("node0_tip", 0))
.with_action(CompareNodeChainTips::expect_same(0, 1))
// node 0 already has the state and can continue producing blocks
.with_action(ProduceBlocks::<EthEngineTypes>::new(2))
.with_action(MakeCanonical::new())
.with_action(CaptureBlockOnNode::new("node0_tip_2", 0))
// verify both nodes remain in sync
.with_action(CompareNodeChainTips::expect_same(0, 1));
2 changes: 1 addition & 1 deletion crates/engine/tree/Cargo.toml
@@ -141,7 +141,7 @@ test-utils = [
"reth-node-ethereum/test-utils",
"reth-evm-ethereum/test-utils",
]
failpoints = ["fail/failpoints"]
failpoints = ["fail/failpoints", "reth-db/failpoints"]

[dependencies.fail]
version = "0.5"
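Enabling `failpoints` on this crate now also enables `reth-db/failpoints`, so fail points compiled into the database crate are active in the same build as the ones used in persistence below. As a rough sketch of the underlying `fail` crate pattern (names are illustrative; reth wraps this in its own `set_fail_point!` macro, and the macro is a no-op unless the `failpoints` feature is enabled):

```rust
// Illustrative sketch of the `fail` crate usage, not reth's actual macro.
fn commit_block() -> Result<(), String> {
    // With failpoints enabled, tests can force this point to return early;
    // otherwise it compiles away.
    fail::fail_point!("persistence::after_state_commit", |_| {
        Err("injected failure before commit".to_string())
    });
    Ok(())
}

#[cfg(test)]
mod tests {
    #[test]
    fn injected_failure() {
        fail::cfg("persistence::after_state_commit", "return").unwrap();
        assert!(super::commit_block().is_err());
        fail::remove("persistence::after_state_commit");
    }
}
```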
134 changes: 79 additions & 55 deletions crates/engine/tree/src/persistence.rs
@@ -14,7 +14,7 @@ use reth_provider::{
providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, BlockWriter,
ChainStateBlockWriter, DatabaseProviderFactory, HistoryWriter, ProviderFactory,
StageCheckpointWriter, StateWriter, StaticFileProviderFactory, StaticFileWriter,
StorageLocation, TrieWriterV2, PERSIST_BLOCK_CACHE,
StorageLocation, TrieWriter, TrieWriterV2, PERSIST_BLOCK_CACHE,
};
use reth_prune::{PrunerError, PrunerOutput, PrunerWithFactory};
use reth_stages_api::{MetricEvent, MetricEventsSender, StageCheckpoint, StageId};
@@ -151,6 +151,39 @@ where
Ok(new_tip_hash.map(|hash| BlockNumHash { hash, number: new_tip_num }))
}

fn get_checkpoint<TX: DbTx>(
tx: &TX,
stage_id: StageId,
check_next: Option<u64>,
) -> Result<StageCheckpoint, ProviderError> {
let ck = tx
.get::<tables::StageCheckpoints>(stage_id.to_string())
.map_err(ProviderError::Database)
.map(Option::unwrap_or_default)?;
if let Some(next) = check_next {
if next == 0 {
// for test
assert_eq!(ck.block_number, 0);
} else {
assert_eq!(
ck.block_number + 1,
next,
"Stage {stage_id}'s checkpoint is inconsistent"
);
}
}
Ok(ck)
}

fn update_checkpoint<TX: DbTxMut>(
tx: &TX,
stage_id: StageId,
checkpoint: StageCheckpoint,
) -> Result<(), ProviderError> {
tx.put::<tables::StageCheckpoints>(stage_id.to_string(), checkpoint)
.map_err(ProviderError::Database)
}

fn on_save_blocks(
&self,
blocks: Vec<ExecutedBlockWithTrieUpdates<N::Primitives>>,
@@ -172,11 +205,12 @@

for ExecutedBlockWithTrieUpdates {
block: ExecutedBlock { recovered_block, execution_output, hashed_state },
trie: _,
trie,
triev2,
} in blocks
{
let block_number = recovered_block.number();
let block_hash = recovered_block.hash();
let inner_provider = &self.provider;
info!(target: "persistence::save_block", block_number = block_number, "Write block updates into DB");

@@ -193,12 +227,11 @@
let start = Instant::now();
let provider_rw = inner_provider.database_provider_rw()?;
let static_file_provider = inner_provider.static_file_provider();
let ck = provider_rw
.tx_ref()
.get::<tables::StageCheckpoints>(StageId::Execution.to_string())
.map_err(ProviderError::Database)?
.unwrap_or_default();
assert_eq!(ck.block_number + 1, block_number);
let ck = Self::get_checkpoint(
provider_rw.tx_ref(),
StageId::Execution,
Some(block_number),
)?;
let body_indices = provider_rw.insert_block(
Arc::unwrap_or_clone(recovered_block),
StorageLocation::Both,
@@ -212,13 +245,11 @@
StorageLocation::StaticFiles,
Some(vec![body_indices]),
)?;
provider_rw
.tx_ref()
.put::<tables::StageCheckpoints>(
StageId::Execution.to_string(),
StageCheckpoint { block_number, ..ck },
)
.map_err(ProviderError::Database)?;
Self::update_checkpoint(
provider_rw.tx_ref(),
StageId::Execution,
StageCheckpoint { block_number, ..ck },
)?;
static_file_provider.commit()?;
provider_rw.commit()?;
set_fail_point!("persistence::after_state_commit");
@@ -227,24 +258,21 @@

let start = Instant::now();
let provider_rw = inner_provider.database_provider_rw()?;
let ck = provider_rw
.tx_ref()
.get::<tables::StageCheckpoints>(StageId::AccountHashing.to_string())
.map_err(ProviderError::Database)?
.unwrap_or_default();
assert_eq!(ck.block_number + 1, block_number);
let ck = Self::get_checkpoint(
provider_rw.tx_ref(),
StageId::AccountHashing,
Some(block_number),
)?;
// insert hashes and intermediate merkle nodes
provider_rw.write_hashed_state(
&Arc::unwrap_or_clone(hashed_state).into_sorted(),
)?;
set_fail_point!("persistence::after_hashed_state");
provider_rw
.tx_ref()
.put::<tables::StageCheckpoints>(
StageId::AccountHashing.to_string(),
StageCheckpoint { block_number, ..ck },
)
.map_err(ProviderError::Database)?;
Self::update_checkpoint(
provider_rw.tx_ref(),
StageId::AccountHashing,
StageCheckpoint { block_number, ..ck },
)?;
provider_rw.commit()?;
set_fail_point!("persistence::after_hashed_state_commit");
metrics::histogram!(
@@ -256,23 +284,18 @@
if !get_gravity_config().validator_node_only {
let start = Instant::now();
let provider_rw = inner_provider.database_provider_rw()?;
let ck = provider_rw
.tx_ref()
.get::<tables::StageCheckpoints>(
StageId::IndexAccountHistory.to_string(),
)
.map_err(ProviderError::Database)?
.unwrap_or_default();
assert_eq!(ck.block_number + 1, block_number);
let ck = Self::get_checkpoint(
provider_rw.tx_ref(),
StageId::IndexAccountHistory,
Some(block_number),
)?;
provider_rw.update_history_indices(block_number..=block_number)?;
set_fail_point!("persistence::after_history_indices");
provider_rw
.tx_ref()
.put::<tables::StageCheckpoints>(
StageId::IndexAccountHistory.to_string(),
StageCheckpoint { block_number, ..ck },
)
.map_err(ProviderError::Database)?;
Self::update_checkpoint(
provider_rw.tx_ref(),
StageId::IndexAccountHistory,
StageCheckpoint { block_number, ..ck },
)?;
provider_rw.commit()?;
set_fail_point!("persistence::after_history_commit");
metrics::histogram!(
@@ -286,28 +309,29 @@
let trie_handle = scope.spawn(|| -> Result<(), PersistenceError> {
let start = Instant::now();
let provider_rw = inner_provider.database_provider_rw()?;
let ck = provider_rw
.tx_ref()
.get::<tables::StageCheckpoints>(StageId::MerkleExecute.to_string())
.map_err(ProviderError::Database)?
.unwrap_or_default();
let ck = Self::get_checkpoint(
provider_rw.tx_ref(),
StageId::MerkleExecute,
None,
)?;
if ck.block_number + 1 != block_number {
info!(target: "persistence::trie_update",
checkpoint = ck.block_number,
block_number = block_number,
"Detected interrupted trie update, but trie has idempotency");
}
provider_rw.write_trie_updates(
trie.as_ref().ok_or(ProviderError::MissingTrieUpdates(block_hash))?,
)?;
provider_rw
.write_trie_updatesv2(triev2.as_ref())
.map_err(ProviderError::Database)?;
set_fail_point!("persistence::after_trie_update");
provider_rw
.tx_ref()
.put::<tables::StageCheckpoints>(
StageId::MerkleExecute.to_string(),
StageCheckpoint { block_number, ..ck },
)
.map_err(ProviderError::Database)?;
Self::update_checkpoint(
provider_rw.tx_ref(),
StageId::MerkleExecute,
StageCheckpoint { block_number, ..ck },
)?;
provider_rw.commit()?;
set_fail_point!("persistence::after_trie_commit");
metrics::histogram!(
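The refactor in this file replaces four copies of the raw `tx_ref().get/put::<tables::StageCheckpoints>()` sequence with the new `get_checkpoint`/`update_checkpoint` helpers, so every write phase (execution, account hashing, history indices, trie) follows the same bracket: read the stage checkpoint, optionally assert it is exactly one block behind the block being persisted, do the writes, advance the checkpoint, commit. A condensed, self-contained model of that bracket; the types below are stand-ins for reth's transaction and checkpoint types, not the real ones:

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, Default)]
struct StageCheckpoint {
    block_number: u64,
}

#[derive(Default)]
struct MockTx {
    checkpoints: HashMap<&'static str, StageCheckpoint>,
}

impl MockTx {
    // Mirrors `get_checkpoint`: read-or-default, then optionally assert the
    // stage is exactly one block behind the block being persisted.
    fn get_checkpoint(&self, stage: &'static str, check_next: Option<u64>) -> StageCheckpoint {
        let ck = self.checkpoints.get(stage).copied().unwrap_or_default();
        if let Some(next) = check_next {
            if next == 0 {
                assert_eq!(ck.block_number, 0);
            } else {
                assert_eq!(ck.block_number + 1, next, "Stage {stage}'s checkpoint is inconsistent");
            }
        }
        ck
    }

    // Mirrors `update_checkpoint`: advance the stage to the persisted block.
    fn update_checkpoint(&mut self, stage: &'static str, ck: StageCheckpoint) {
        self.checkpoints.insert(stage, ck);
    }
}

fn main() {
    let mut tx = MockTx::default();
    let block_number = 1;
    // Read/check, write, advance -- the per-phase bracket in on_save_blocks.
    let ck = tx.get_checkpoint("Execution", Some(block_number));
    // ... block and state writes would happen here ...
    tx.update_checkpoint("Execution", StageCheckpoint { block_number, ..ck });
}
```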