From 34b956035996aab1a4c3f1cff04daeb8f2fb3250 Mon Sep 17 00:00:00 2001 From: 2020xibao <2020xibao@gmail.com> Date: Fri, 28 Feb 2025 12:24:19 +0800 Subject: [PATCH 01/30] feat: adjust second to millisecond in opbnb inner workflow --- op-batcher/batcher/channel_manager.go | 2 ++ op-node/rollup/derive/attributes.go | 23 ++++++------- op-node/rollup/derive/attributes_queue.go | 2 +- op-node/rollup/derive/batch_queue.go | 14 ++++---- op-node/rollup/derive/batch_queue_test.go | 16 ++++----- op-node/rollup/derive/batches.go | 36 ++++++++++----------- op-node/rollup/derive/channel_out.go | 13 ++++++-- op-node/rollup/derive/l1_block_info.go | 10 +++--- op-node/rollup/derive/l1_block_info_test.go | 4 +-- op-node/rollup/derive/l2block_util.go | 3 ++ op-node/rollup/derive/payload_util.go | 7 ++-- op-node/rollup/derive/singular_batch.go | 2 +- op-node/rollup/derive/span_batch.go | 8 ++--- op-node/rollup/derive/span_batch_test.go | 2 +- op-node/rollup/driver/sequencer.go | 12 +++---- op-node/rollup/driver/sequencer_test.go | 2 +- op-node/rollup/driver/state.go | 2 +- op-node/rollup/types.go | 3 +- op-node/service.go | 11 +++++++ op-service/eth/block_info.go | 11 +++++++ op-service/eth/id.go | 13 +++++++- op-service/sources/types.go | 5 +++ op-service/testutils/l1info.go | 4 +++ 23 files changed, 132 insertions(+), 73 deletions(-) diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index acbdfd55ae..d2e9d86a02 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/holiman/uint256" ) var ErrReorg = errors.New("block does not extend existing chain") @@ -367,6 +368,7 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo Number: block.NumberU64(), ParentHash: block.ParentHash(), Time: block.Time(), + MilliPartTime: 
uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), // adapts millisecond part L1Origin: eth.BlockID{Hash: l1info.BlockHash, Number: l1info.Number}, SequenceNumber: l1info.SequenceNumber, } diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index 933a946c13..2dc32e09cf 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/bsc" @@ -107,7 +108,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex // Calculate bsc block base fee var l1BaseFee *big.Int - if ba.rollupCfg.IsSnow(l2Parent.Time + ba.rollupCfg.BlockTime) { + if ba.rollupCfg.IsSnow((l2Parent.MillisecondTimestamp() + ba.rollupCfg.BlockTime) / 1000) { l1BaseFee, err = SnowL1GasPrice(ctx, ba, epoch) if err != nil { return nil, err @@ -124,21 +125,21 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex l1Info = bsc.NewBlockInfoBSCWrapper(l1Info, l1BaseFee) // Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2 - nextL2Time := l2Parent.Time + ba.rollupCfg.BlockTime - if nextL2Time < l1Info.Time() { + nextL2MilliTime := l2Parent.MillisecondTimestamp() + ba.rollupCfg.BlockTime + if nextL2MilliTime < l1Info.MilliTime() { return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d", - l2Parent, nextL2Time, eth.ToBlockID(l1Info), l1Info.Time())) + l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MilliTime())) } var upgradeTxs []hexutil.Bytes - if ba.rollupCfg.IsEcotoneActivationBlock(nextL2Time) { + if ba.rollupCfg.IsEcotoneActivationBlock(nextL2MilliTime / 1000) { 
upgradeTxs, err = EcotoneNetworkUpgradeTransactions() if err != nil { return nil, NewCriticalError(fmt.Errorf("failed to build ecotone network upgrade txs: %w", err)) } } - if ba.rollupCfg.IsFjordActivationBlock(nextL2Time) { + if ba.rollupCfg.IsFjordActivationBlock(nextL2MilliTime / 1000) { fjord, err := FjordNetworkUpgradeTransactions() if err != nil { return nil, NewCriticalError(fmt.Errorf("failed to build fjord network upgrade txs: %w", err)) @@ -146,7 +147,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex upgradeTxs = append(upgradeTxs, fjord...) } - l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2Time) + l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2MilliTime) if err != nil { return nil, NewCriticalError(fmt.Errorf("failed to create l1InfoTx: %w", err)) } @@ -157,12 +158,12 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex txs = append(txs, upgradeTxs...) 
var withdrawals *types.Withdrawals - if ba.rollupCfg.IsCanyon(nextL2Time) { + if ba.rollupCfg.IsCanyon(nextL2MilliTime / 1000) { withdrawals = &types.Withdrawals{} } var parentBeaconRoot *common.Hash - if ba.rollupCfg.IsEcotone(nextL2Time) { + if ba.rollupCfg.IsEcotone(nextL2MilliTime / 1000) { parentBeaconRoot = l1Info.ParentBeaconRoot() if parentBeaconRoot == nil { // default to zero hash if there is no beacon-block-root available parentBeaconRoot = new(common.Hash) @@ -170,8 +171,8 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex } return ð.PayloadAttributes{ - Timestamp: hexutil.Uint64(nextL2Time), - PrevRandao: eth.Bytes32(l1Info.MixDigest()), + Timestamp: hexutil.Uint64(nextL2MilliTime / 1000), // second part + PrevRandao: uint256.NewInt(nextL2MilliTime % 1000).Bytes32(), // millisecond part SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr, Transactions: txs, NoTxPool: true, diff --git a/op-node/rollup/derive/attributes_queue.go b/op-node/rollup/derive/attributes_queue.go index f9266112b9..0361ea707c 100644 --- a/op-node/rollup/derive/attributes_queue.go +++ b/op-node/rollup/derive/attributes_queue.go @@ -88,7 +88,7 @@ func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *Sing return nil, NewResetError(fmt.Errorf("valid batch has bad parent hash %s, expected %s", batch.ParentHash, l2SafeHead.Hash)) } // sanity check timestamp - if expected := l2SafeHead.Time + aq.config.BlockTime; expected != batch.Timestamp { + if expected := l2SafeHead.MillisecondTimestamp() + aq.config.BlockTime; expected != batch.Timestamp { return nil, NewResetError(fmt.Errorf("valid batch has bad timestamp %d, expected %d", batch.Timestamp, expected)) } fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second) diff --git a/op-node/rollup/derive/batch_queue.go b/op-node/rollup/derive/batch_queue.go index 3dbfe20d30..fb796c0af3 100644 --- a/op-node/rollup/derive/batch_queue.go +++ b/op-node/rollup/derive/batch_queue.go @@ 
-96,7 +96,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, parent eth.L2BlockRef) (*Si if len(bq.nextSpan) > 0 { // There are cached singular batches derived from the span batch. // Check if the next cached batch matches the given parent block. - if bq.nextSpan[0].Timestamp == parent.Time+bq.config.BlockTime { + if bq.nextSpan[0].Timestamp == parent.MillisecondTimestamp()+bq.config.BlockTime { // Pop first one and return. nextBatch := bq.popNextBatch(parent) // len(bq.nextSpan) == 0 means it's the last batch of the span. @@ -257,7 +257,7 @@ func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, paren // Find the first-seen batch that matches all validity conditions. // We may not have sufficient information to proceed filtering, and then we stop. // There may be none: in that case we force-create an empty batch - nextTimestamp := parent.Time + bq.config.BlockTime + nextMilliTimestamp := parent.MillisecondTimestamp() + bq.config.BlockTime var nextBatch *BatchWithL1InclusionBlock // Go over all batches, in order of inclusion, and find the first batch we can accept. @@ -304,7 +304,7 @@ batchLoop: firstOfEpoch := epoch.Number == parent.L1Origin.Number+1 bq.log.Trace("Potentially generating an empty batch", - "expiryEpoch", expiryEpoch, "forceEmptyBatches", forceEmptyBatches, "nextTimestamp", nextTimestamp, + "expiryEpoch", expiryEpoch, "forceEmptyBatches", forceEmptyBatches, "next_ms_timestamp", nextMilliTimestamp, "epoch_time", epoch.Time, "len_l1_blocks", len(bq.l1Blocks), "firstOfEpoch", firstOfEpoch) if !forceEmptyBatches { @@ -321,20 +321,20 @@ batchLoop: // Fill with empty L2 blocks of the same epoch until we meet the time of the next L1 origin, // to preserve that L2 time >= L1 time. If this is the first block of the epoch, always generate a // batch to ensure that we at least have one batch per epoch. 
- if nextTimestamp < nextEpoch.Time || firstOfEpoch { - bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextTimestamp) + if nextMilliTimestamp < nextEpoch.MilliTimestamp() || firstOfEpoch { + bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextMilliTimestamp) return &SingularBatch{ ParentHash: parent.Hash, EpochNum: rollup.Epoch(epoch.Number), EpochHash: epoch.Hash, - Timestamp: nextTimestamp, + Timestamp: nextMilliTimestamp, Transactions: nil, }, nil } // At this point we have auto generated every batch for the current epoch // that we can, so we can advance to the next epoch. - bq.log.Trace("Advancing internal L1 blocks", "next_timestamp", nextTimestamp, "next_epoch_time", nextEpoch.Time) + bq.log.Trace("Advancing internal L1 blocks", "next_ms_timestamp", nextMilliTimestamp, "next_epoch_ms_time", nextEpoch.MilliTimestamp()) bq.l1Blocks = bq.l1Blocks[1:] return nil, io.EOF } diff --git a/op-node/rollup/derive/batch_queue_test.go b/op-node/rollup/derive/batch_queue_test.go index 6712ae15a6..b0d7f76a02 100644 --- a/op-node/rollup/derive/batch_queue_test.go +++ b/op-node/rollup/derive/batch_queue_test.go @@ -242,7 +242,7 @@ func BatchQueueEager(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(batchType), @@ -251,12 +251,12 @@ func BatchQueueEager(t *testing.T, batchType int) { // expected output of BatchQueue.NextBatch() expectedOutputBatches := []*SingularBatch{ - b(cfg.L2ChainID, 12, l1[0]), - b(cfg.L2ChainID, 14, l1[0]), - b(cfg.L2ChainID, 16, l1[0]), - b(cfg.L2ChainID, 18, l1[0]), - b(cfg.L2ChainID, 20, l1[0]), - b(cfg.L2ChainID, 22, l1[0]), + b(cfg.L2ChainID, 12000, l1[0]), + b(cfg.L2ChainID, 14000, l1[0]), + b(cfg.L2ChainID, 16000, l1[0]), + b(cfg.L2ChainID, 18000, l1[0]), + b(cfg.L2ChainID, 20000, l1[0]), + b(cfg.L2ChainID, 22000, l1[0]), nil, } // expected error of BatchQueue.NextBatch() @@ -295,7 
+295,7 @@ func BatchQueueEager(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime + safeHead.Time += cfg.BlockTime / 1000 safeHead.Hash = mockHash(b.Timestamp, 2) safeHead.L1Origin = b.Epoch() } diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index bde2280745..db65a1b0f5 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -67,13 +67,13 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo } epoch := l1Blocks[0] - nextTimestamp := l2SafeHead.Time + cfg.BlockTime - if batch.Timestamp > nextTimestamp { - log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextTimestamp) + nextMilliTimestamp := l2SafeHead.MillisecondTimestamp() + cfg.BlockTime + if batch.Timestamp > nextMilliTimestamp { + log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextMilliTimestamp) return BatchFuture } - if batch.Timestamp < nextTimestamp { - log.Warn("dropping batch with old timestamp", "min_timestamp", nextTimestamp) + if batch.Timestamp < nextMilliTimestamp { + log.Warn("dropping batch with old timestamp", "min_timestamp", nextMilliTimestamp) return BatchDrop } @@ -118,8 +118,8 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo return BatchDrop } - if batch.Timestamp < batchOrigin.Time { - log.Warn("batch timestamp is less than L1 origin timestamp", "l2_timestamp", batch.Timestamp, "l1_timestamp", batchOrigin.Time, "origin", batchOrigin.ID()) + if batch.Timestamp < batchOrigin.MilliTimestamp() { + log.Warn("batch timestamp is less than L1 origin timestamp", "l2_ms_timestamp", batch.Timestamp, "l1_ms_timestamp", batchOrigin.MilliTimestamp(), "origin", batchOrigin.ID()) return BatchDrop } @@ -194,13 +194,13 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log 
log.Logger, l1B return BatchDrop } - nextTimestamp := l2SafeHead.Time + cfg.BlockTime + nextMilliTimestamp := l2SafeHead.MillisecondTimestamp() + cfg.BlockTime - if batch.GetTimestamp() > nextTimestamp { - log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextTimestamp) + if batch.GetTimestamp() > nextMilliTimestamp { + log.Trace("received out-of-order batch for future processing after next batch", "next_ms_timestamp", nextMilliTimestamp) return BatchFuture } - if batch.GetBlockTimestamp(batch.GetBlockCount()-1) < nextTimestamp { + if batch.GetBlockTimestamp(batch.GetBlockCount()-1) < nextMilliTimestamp { log.Warn("span batch has no new blocks after safe head") return BatchDrop } @@ -209,17 +209,17 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B // if the span batch does not overlap the current safe chain, parentBLock should be l2SafeHead. parentNum := l2SafeHead.Number parentBlock := l2SafeHead - if batch.GetTimestamp() < nextTimestamp { - if batch.GetTimestamp() > l2SafeHead.Time { + if batch.GetTimestamp() < nextMilliTimestamp { + if batch.GetTimestamp() > l2SafeHead.MillisecondTimestamp() { // batch timestamp cannot be between safe head and next timestamp log.Warn("batch has misaligned timestamp, block time is too short") return BatchDrop } - if (l2SafeHead.Time-batch.GetTimestamp())%cfg.BlockTime != 0 { + if (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())%cfg.BlockTime != 0 { log.Warn("batch has misaligned timestamp, not overlapped exactly") return BatchDrop } - parentNum = l2SafeHead.Number - (l2SafeHead.Time-batch.GetTimestamp())/cfg.BlockTime - 1 + parentNum = l2SafeHead.Number - (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())/cfg.BlockTime - 1 var err error parentBlock, err = l2Fetcher.L2BlockRefByNumber(ctx, parentNum) if err != nil { @@ -290,7 +290,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B } } blockTimestamp := 
batch.GetBlockTimestamp(i) - if blockTimestamp < l1Origin.Time { + if blockTimestamp < l1Origin.MilliTimestamp() { log.Warn("block timestamp is less than L1 origin timestamp", "l2_timestamp", blockTimestamp, "l1_timestamp", l1Origin.Time, "origin", l1Origin.ID()) return BatchDrop } @@ -307,7 +307,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B log.Info("without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid") return BatchUndecided } - if blockTimestamp >= l1Blocks[originIdx+1].Time { // check if the next L1 origin could have been adopted + if blockTimestamp >= l1Blocks[originIdx+1].MilliTimestamp() { // check if the next L1 origin could have been adopted log.Info("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid") return BatchDrop } else { @@ -335,7 +335,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B } // Check overlapped blocks - if batch.GetTimestamp() < nextTimestamp { + if batch.GetTimestamp() < nextMilliTimestamp { for i := uint64(0); i < l2SafeHead.Number-parentNum; i++ { safeBlockNum := parentNum + i + 1 safeBlockPayload, err := l2Fetcher.PayloadByNumber(ctx, safeBlockNum) diff --git a/op-node/rollup/derive/channel_out.go b/op-node/rollup/derive/channel_out.go index 2142796bab..33d9e4318b 100644 --- a/op-node/rollup/derive/channel_out.go +++ b/op-node/rollup/derive/channel_out.go @@ -8,9 +8,11 @@ import ( "io" "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" + "github.com/holiman/uint256" ) var ( @@ -234,16 +236,23 @@ func BlockToSingularBatch(rollupCfg *rollup.Config, block *types.Block) (*Singul if l1InfoTx.Type() != types.DepositTxType { return nil, nil, ErrNotDepositTx } - l1Info, err 
:= L1BlockInfoFromBytes(rollupCfg, block.Time(), l1InfoTx.Data()) + l1Info, err := L1BlockInfoFromBytes(rollupCfg, block.Time() /*second timestamp for fork*/, l1InfoTx.Data()) if err != nil { return nil, l1Info, fmt.Errorf("could not parse the L1 Info deposit: %w", err) } + milliPart := uint64(0) + if block.MixDigest() != (common.Hash{}) { + milliPart = uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64() + } + + milliTimestamp := block.Time()*1000 + milliPart + return &SingularBatch{ ParentHash: block.ParentHash(), EpochNum: rollup.Epoch(l1Info.Number), EpochHash: l1Info.BlockHash, - Timestamp: block.Time(), + Timestamp: milliTimestamp, // has changed to milli timestamp Transactions: opaqueTxs, }, l1Info, nil } diff --git a/op-node/rollup/derive/l1_block_info.go b/op-node/rollup/derive/l1_block_info.go index 93c529750e..7a665d3ba4 100644 --- a/op-node/rollup/derive/l1_block_info.go +++ b/op-node/rollup/derive/l1_block_info.go @@ -260,7 +260,7 @@ func L1BlockInfoFromBytes(rollupCfg *rollup.Config, l2BlockTime uint64, data []b // L1InfoDeposit creates a L1 Info deposit transaction based on the L1 block, // and the L2 block-height difference with the start of the epoch. 
-func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, block eth.BlockInfo, l2BlockTime uint64) (*types.DepositTx, error) { +func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, block eth.BlockInfo, nextL2MilliTime uint64) (*types.DepositTx, error) { l1BlockInfo := L1BlockInfo{ Number: block.NumberU64(), Time: block.Time(), @@ -270,7 +270,7 @@ func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber BatcherAddr: sysCfg.BatcherAddr, } var data []byte - if isEcotoneButNotFirstBlock(rollupCfg, l2BlockTime) { + if isEcotoneButNotFirstBlock(rollupCfg, nextL2MilliTime/1000) { l1BlockInfo.BlobBaseFee = block.BlobBaseFee() if l1BlockInfo.BlobBaseFee == nil { // The L2 spec states to use the MIN_BLOB_GASPRICE from EIP-4844 if not yet active on L1. @@ -314,7 +314,7 @@ func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber Data: data, } // With the regolith fork we disable the IsSystemTx functionality, and allocate real gas - if rollupCfg.IsRegolith(l2BlockTime) { + if rollupCfg.IsRegolith(nextL2MilliTime / 1000) { out.IsSystemTransaction = false out.Gas = RegolithSystemTxGas } @@ -322,8 +322,8 @@ func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber } // L1InfoDepositBytes returns a serialized L1-info attributes transaction. 
-func L1InfoDepositBytes(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, l1Info eth.BlockInfo, l2BlockTime uint64) ([]byte, error) { - dep, err := L1InfoDeposit(rollupCfg, sysCfg, seqNumber, l1Info, l2BlockTime) +func L1InfoDepositBytes(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, l1Info eth.BlockInfo, nextL2MilliTime uint64) ([]byte, error) { + dep, err := L1InfoDeposit(rollupCfg, sysCfg, seqNumber, l1Info, nextL2MilliTime) if err != nil { return nil, fmt.Errorf("failed to create L1 info tx: %w", err) } diff --git a/op-node/rollup/derive/l1_block_info_test.go b/op-node/rollup/derive/l1_block_info_test.go index e5c9253ce1..81b550250c 100644 --- a/op-node/rollup/derive/l1_block_info_test.go +++ b/op-node/rollup/derive/l1_block_info_test.go @@ -139,9 +139,9 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg := rollup.Config{ RegolithTime: &zero, EcotoneTime: &zero, - BlockTime: 2, + BlockTime: 2000, } - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 2) + depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 2000) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) diff --git a/op-node/rollup/derive/l2block_util.go b/op-node/rollup/derive/l2block_util.go index 5946247c7d..b1e6c4713b 100644 --- a/op-node/rollup/derive/l2block_util.go +++ b/op-node/rollup/derive/l2block_util.go @@ -5,6 +5,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -18,6 +19,7 @@ import ( type L2BlockRefSource interface { Hash() common.Hash ParentHash() common.Hash + MixDigest() common.Hash // millisecond part NumberU64() uint64 Time() uint64 Transactions() types.Transactions @@ -59,6 +61,7 @@ func 
L2BlockToBlockRef(rollupCfg *rollup.Config, block L2BlockRefSource) (eth.L2 Number: number, ParentHash: block.ParentHash(), Time: block.Time(), + MilliPartTime: uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), L1Origin: l1Origin, SequenceNumber: sequenceNumber, }, nil diff --git a/op-node/rollup/derive/payload_util.go b/op-node/rollup/derive/payload_util.go index 06a3a5a7f7..5328d9f612 100644 --- a/op-node/rollup/derive/payload_util.go +++ b/op-node/rollup/derive/payload_util.go @@ -4,10 +4,10 @@ import ( "encoding/binary" "fmt" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" ) // PayloadToBlockRef extracts the essential L2BlockRef information from an execution payload, @@ -33,7 +33,7 @@ func PayloadToBlockRef(rollupCfg *rollup.Config, payload *eth.ExecutionPayload) if tx.Type() != types.DepositTxType { return eth.L2BlockRef{}, fmt.Errorf("first payload tx has unexpected tx type: %d", tx.Type()) } - info, err := L1BlockInfoFromBytes(rollupCfg, uint64(payload.Timestamp), tx.Data()) + info, err := L1BlockInfoFromBytes(rollupCfg, uint64(payload.Timestamp) /* second timestamp for fork*/, tx.Data()) if err != nil { return eth.L2BlockRef{}, fmt.Errorf("failed to parse L1 info deposit tx from L2 block: %w", err) } @@ -46,6 +46,7 @@ func PayloadToBlockRef(rollupCfg *rollup.Config, payload *eth.ExecutionPayload) Number: uint64(payload.BlockNumber), ParentHash: payload.ParentHash, Time: uint64(payload.Timestamp), + MilliPartTime: uint256.NewInt(0).SetBytes32(payload.PrevRandao[:]).Uint64(), // adapts millisecond part L1Origin: l1Origin, SequenceNumber: sequenceNumber, }, nil diff --git a/op-node/rollup/derive/singular_batch.go b/op-node/rollup/derive/singular_batch.go index fdb867efbe..4d2a033cc9 100644 --- a/op-node/rollup/derive/singular_batch.go +++ 
b/op-node/rollup/derive/singular_batch.go @@ -23,7 +23,7 @@ type SingularBatch struct { ParentHash common.Hash // parent L2 block hash EpochNum rollup.Epoch // aka l1 num EpochHash common.Hash // l1 block hash - Timestamp uint64 + Timestamp uint64 // millisecond Transactions []hexutil.Bytes } diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index aa95b3838a..8370661bd8 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -32,7 +32,7 @@ var ErrTooBigSpanBatchSize = errors.New("span batch size limit reached") var ErrEmptySpanBatch = errors.New("span-batch must not be empty") type spanBatchPrefix struct { - relTimestamp uint64 // Relative timestamp of the first block + relTimestamp uint64 // Relative timestamp of the first block, millisecond l1OriginNum uint64 // L1 origin number parentCheck [20]byte // First 20 bytes of the first block's parent hash l1OriginCheck [20]byte // First 20 bytes of the last block's L1 origin hash @@ -340,7 +340,7 @@ func (b *RawSpanBatch) encode(w io.Writer) error { // derive converts RawSpanBatch into SpanBatch, which has a list of SpanBatchElement. // We need chain config constants to derive values for making payload attributes. 
-func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { +func (b *RawSpanBatch) derive(milliBlockInterval, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { if b.blockCount == 0 { return nil, ErrEmptySpanBatch } @@ -368,7 +368,7 @@ func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.I txIdx := 0 for i := 0; i < int(b.blockCount); i++ { batch := SpanBatchElement{} - batch.Timestamp = genesisTimestamp + b.relTimestamp + blockTime*uint64(i) + batch.Timestamp = genesisTimestamp*1000 + b.relTimestamp + milliBlockInterval*uint64(i) batch.EpochNum = rollup.Epoch(blockOriginNums[i]) for j := 0; j < int(b.blockTxCounts[i]); j++ { batch.Transactions = append(batch.Transactions, fullTxs[txIdx]) @@ -402,7 +402,7 @@ type SpanBatchElement struct { func singularBatchToElement(singularBatch *SingularBatch) *SpanBatchElement { return &SpanBatchElement{ EpochNum: singularBatch.EpochNum, - Timestamp: singularBatch.Timestamp, + Timestamp: singularBatch.Timestamp, // ms Transactions: singularBatch.Transactions, } } diff --git a/op-node/rollup/derive/span_batch_test.go b/op-node/rollup/derive/span_batch_test.go index 4c02c46b2d..6f9fe1e475 100644 --- a/op-node/rollup/derive/span_batch_test.go +++ b/op-node/rollup/derive/span_batch_test.go @@ -337,7 +337,7 @@ func TestSpanBatchDerive(t *testing.T) { rng := rand.New(rand.NewSource(0xbab0bab0)) chainID := new(big.Int).SetUint64(rng.Uint64()) - l2BlockTime := uint64(2) + l2BlockTime := uint64(2000) for originChangedBit := 0; originChangedBit < 2; originChangedBit++ { singularBatches := RandomValidConsecutiveSingularBatches(rng, chainID) diff --git a/op-node/rollup/driver/sequencer.go b/op-node/rollup/driver/sequencer.go index 7ad227d6cf..100c2d2ee1 100644 --- a/op-node/rollup/driver/sequencer.go +++ b/op-node/rollup/driver/sequencer.go @@ -154,7 +154,7 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration { if safe { d.log.Warn("delaying 
sequencing to not interrupt safe-head changes", "onto", buildingOnto, "onto_time", buildingOnto.Time) // approximates the worst-case time it takes to build a block, to reattempt sequencing after. - return time.Second * time.Duration(d.rollupCfg.BlockTime) + return time.Millisecond * time.Duration(d.rollupCfg.BlockTime) } head := d.engine.UnsafeL2Head() @@ -166,8 +166,8 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration { return delay } - blockTime := time.Duration(d.rollupCfg.BlockTime) * time.Second - payloadTime := time.Unix(int64(head.Time+d.rollupCfg.BlockTime), 0) + blockTime := time.Duration(d.rollupCfg.BlockTime) * time.Millisecond + payloadTime := time.UnixMilli(int64(head.Time + d.rollupCfg.BlockTime)) remainingTime := payloadTime.Sub(now) // If we started building a block already, and if that work is still consistent, @@ -226,7 +226,7 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As if safe { d.log.Warn("avoiding sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time) // approximates the worst-case time it takes to build a block, to reattempt sequencing after. 
- d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime)) + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.BlockTime)) return nil, nil } envelope, err := d.CompleteBuildingBlock(ctx, agossip, sequencerConductor) @@ -236,7 +236,7 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As } else if errors.Is(err, derive.ErrReset) { d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) d.metrics.RecordSequencerReset() - d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime)) // hold off from sequencing for a full block + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.BlockTime)) // hold off from sequencing for a full block d.CancelBuildingBlock(ctx) return nil, err } else if errors.Is(err, derive.ErrTemporary) { @@ -265,7 +265,7 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As } else if errors.Is(err, derive.ErrReset) { d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) d.metrics.RecordSequencerReset() - d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime)) // hold off from sequencing for a full block + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.BlockTime)) // hold off from sequencing for a full block return nil, err } else if errors.Is(err, derive.ErrTemporary) { d.log.Error("sequencer temporarily failed to start building new block", "err", err) diff --git a/op-node/rollup/driver/sequencer_test.go b/op-node/rollup/driver/sequencer_test.go index c9dd1d3ff7..9e67563b62 100644 --- a/op-node/rollup/driver/sequencer_test.go +++ b/op-node/rollup/driver/sequencer_test.go @@ -184,7 +184,7 @@ func TestSequencerChaosMonkey(t *testing.T) { L2Time: l1Time + 300, // L2 may start with a relative old L1 origin and will have to catch it up SystemConfig: eth.SystemConfig{}, }, - 
BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 30, } // keep track of the L1 timestamps we mock because sometimes we only have the L1 hash/num handy diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index baf95e6ca8..004c70ff42 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -269,7 +269,7 @@ func (s *Driver) eventLoop() { // Create a ticker to check if there is a gap in the engine queue. Whenever // there is, we send requests to sync source to retrieve the missing payloads. - syncCheckInterval := time.Duration(s.config.BlockTime) * time.Second * 2 + syncCheckInterval := time.Duration(s.config.BlockTime) * time.Millisecond * 2 altSyncTicker := time.NewTicker(syncCheckInterval) defer altSyncTicker.Stop() lastUnsafeL2 := s.engineController.UnsafeL2Head() diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 3d3aa20759..57be9273b5 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -72,7 +72,8 @@ type PlasmaConfig struct { type Config struct { // Genesis anchor point of the rollup Genesis Genesis `json:"genesis"` - // Seconds per L2 block + // BlockTime is the interval configuration of L2 block; + // which supports the new millisecond unit and is compatible with the legacy second unit. BlockTime uint64 `json:"block_time"` // Sequencer batches may not be more than MaxSequencerDrift seconds after // the L1 timestamp of the sequencing window end. diff --git a/op-node/service.go b/op-node/service.go index 134a916ee0..fa54cf0bca 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -43,6 +43,17 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { rollupConfig.ProtocolVersionsAddress = common.Address{} } + { + if rollupConfig.BlockTime >= 1 && rollupConfig.BlockTime <= 3 { + // Convert legacy second-level timestamp to millisecond timestamp, + // This is a compatibility behavior. 
+ rollupConfig.BlockTime = rollupConfig.BlockTime * 1000 + } else if rollupConfig.BlockTime%50 != 0 && rollupConfig.BlockTime > 750 { + return nil, fmt.Errorf("block time is invalid, block_time: %v", rollupConfig.BlockTime) + } + // rollupConfig.BlockTime is millisecond block interval + } + configPersistence := NewConfigPersistence(ctx) driverConfig := NewDriverConfig(ctx) diff --git a/op-service/eth/block_info.go b/op-service/eth/block_info.go index 268c6d934b..f712beeeda 100644 --- a/op-service/eth/block_info.go +++ b/op-service/eth/block_info.go @@ -16,6 +16,7 @@ type BlockInfo interface { Root() common.Hash // state-root NumberU64() uint64 Time() uint64 + MilliTime() uint64 // MixDigest field, reused for randomness after The Merge (Bellatrix hardfork) MixDigest() common.Hash BaseFee() *big.Int @@ -72,6 +73,11 @@ func (b blockInfo) ParentBeaconRoot() *common.Hash { return b.Block.BeaconRoot() } +func (b blockInfo) MilliTime() uint64 { + // TODO: adapt L1 timestamp + return b.Block.Time() * 1000 +} + func BlockToInfo(b *types.Block) BlockInfo { return blockInfo{b} } @@ -102,6 +108,11 @@ func (h headerBlockInfo) Time() uint64 { return h.Header.Time } +func (h headerBlockInfo) MilliTime() uint64 { + // TODO: adapt L1 timestamp + return h.Header.Time * 1000 +} + func (h headerBlockInfo) MixDigest() common.Hash { return h.Header.MixDigest } diff --git a/op-service/eth/id.go b/op-service/eth/id.go index 7beeabfe32..e221508e31 100644 --- a/op-service/eth/id.go +++ b/op-service/eth/id.go @@ -34,11 +34,16 @@ type L2BlockRef struct { Hash common.Hash `json:"hash"` Number uint64 `json:"number"` ParentHash common.Hash `json:"parentHash"` - Time uint64 `json:"timestamp"` + Time uint64 `json:"timestamp"` // second timestamp + MilliPartTime uint64 `json:"milliparttime"` // support millisecond L1Origin BlockID `json:"l1origin"` SequenceNumber uint64 `json:"sequenceNumber"` // distance to first block of epoch } +func (id L2BlockRef) MillisecondTimestamp() uint64 { + return 
id.Time*1000 + id.MilliPartTime +} + func (id L2BlockRef) String() string { return fmt.Sprintf("%s:%d", id.Hash.String(), id.Number) } @@ -54,6 +59,12 @@ type L1BlockRef struct { Number uint64 `json:"number"` ParentHash common.Hash `json:"parentHash"` Time uint64 `json:"timestamp"` + // TODO: +} + +func (id L1BlockRef) MilliTimestamp() uint64 { + // TODO: adapt L1 + return id.Time * 1000 } func (id L1BlockRef) String() string { diff --git a/op-service/sources/types.go b/op-service/sources/types.go index afb994bd7c..b57beb7db8 100644 --- a/op-service/sources/types.go +++ b/op-service/sources/types.go @@ -63,6 +63,11 @@ func (h headerInfo) Time() uint64 { return h.Header.Time } +func (h headerInfo) MilliTime() uint64 { + // TODO: adapt L1 timestamp + return h.Header.Time * 1000 +} + func (h headerInfo) MixDigest() common.Hash { return h.Header.MixDigest } diff --git a/op-service/testutils/l1info.go b/op-service/testutils/l1info.go index 8f04b71fed..dcc45604df 100644 --- a/op-service/testutils/l1info.go +++ b/op-service/testutils/l1info.go @@ -56,6 +56,10 @@ func (l *MockBlockInfo) Time() uint64 { return l.InfoTime } +func (l *MockBlockInfo) MilliTime() uint64 { + return l.InfoTime * 1000 +} + func (l *MockBlockInfo) MixDigest() common.Hash { return l.InfoMixDigest } From 3b0be5f5433a873dbbae13b5f4a95fe6c8a57126 Mon Sep 17 00:00:00 2001 From: 2020xibao <2020xibao@gmail.com> Date: Fri, 28 Feb 2025 15:44:38 +0800 Subject: [PATCH 02/30] fix: make ut succeed --- op-node/rollup/chain_spec_test.go | 2 +- op-node/rollup/derive/batch_queue_test.go | 78 ++++---- op-node/rollup/derive/batch_test.go | 4 +- op-node/rollup/derive/batches.go | 8 +- op-node/rollup/derive/batches_test.go | 179 +++++++++--------- op-node/rollup/derive/engine_queue.go | 4 +- op-node/rollup/derive/l1_block_info_test.go | 3 +- op-node/rollup/derive/span_batch.go | 4 +- op-node/rollup/derive/span_batch_test.go | 10 +- op-node/rollup/driver/origin_selector.go | 8 +- 
op-node/rollup/driver/origin_selector_test.go | 14 +- op-node/rollup/driver/sequencer.go | 4 +- op-node/rollup/driver/sequencer_test.go | 14 +- op-node/rollup/types.go | 24 +-- op-service/eth/types.go | 5 + 15 files changed, 186 insertions(+), 175 deletions(-) diff --git a/op-node/rollup/chain_spec_test.go b/op-node/rollup/chain_spec_test.go index b6547835cd..b006bdf704 100644 --- a/op-node/rollup/chain_spec_test.go +++ b/op-node/rollup/chain_spec_test.go @@ -33,7 +33,7 @@ var testConfig = Config{ GasLimit: 30_000_000, }, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 3600, ChannelTimeout: 300, diff --git a/op-node/rollup/derive/batch_queue_test.go b/op-node/rollup/derive/batch_queue_test.go index b0d7f76a02..7a390e5709 100644 --- a/op-node/rollup/derive/batch_queue_test.go +++ b/op-node/rollup/derive/batch_queue_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "errors" + "fmt" "io" "math/big" "math/rand" @@ -55,7 +56,7 @@ func b(chainId *big.Int, timestamp uint64, epoch eth.L1BlockRef) *SingularBatch txData, _ := tx.MarshalBinary() return &SingularBatch{ ParentHash: mockHash(timestamp-2, 2), - Timestamp: timestamp, + Timestamp: timestamp * 1000, EpochNum: rollup.Epoch(epoch.Number), EpochHash: epoch.Hash, Transactions: []hexutil.Bytes{txData}, @@ -101,10 +102,10 @@ func singularBatchToPayload(t *testing.T, batch *SingularBatch, blockNumber uint txs = append(txs, batch.Transactions...) 
return eth.ExecutionPayloadEnvelope{ ExecutionPayload: ð.ExecutionPayload{ - BlockHash: mockHash(batch.Timestamp, 2), + BlockHash: mockHash(batch.Timestamp/1000, 2), ParentHash: batch.ParentHash, BlockNumber: hexutil.Uint64(blockNumber), - Timestamp: hexutil.Uint64(batch.Timestamp), + Timestamp: hexutil.Uint64(batch.Timestamp / 1000), Transactions: txs, }, } @@ -112,7 +113,7 @@ func singularBatchToPayload(t *testing.T, batch *SingularBatch, blockNumber uint func singularBatchToBlockRef(t *testing.T, batch *SingularBatch, blockNumber uint64) eth.L2BlockRef { return eth.L2BlockRef{ - Hash: mockHash(batch.Timestamp, 2), + Hash: mockHash(batch.Timestamp/1000, 2), Number: blockNumber, ParentHash: batch.ParentHash, Time: batch.Timestamp, @@ -251,12 +252,12 @@ func BatchQueueEager(t *testing.T, batchType int) { // expected output of BatchQueue.NextBatch() expectedOutputBatches := []*SingularBatch{ - b(cfg.L2ChainID, 12000, l1[0]), - b(cfg.L2ChainID, 14000, l1[0]), - b(cfg.L2ChainID, 16000, l1[0]), - b(cfg.L2ChainID, 18000, l1[0]), - b(cfg.L2ChainID, 20000, l1[0]), - b(cfg.L2ChainID, 22000, l1[0]), + b(cfg.L2ChainID, 12, l1[0]), + b(cfg.L2ChainID, 14, l1[0]), + b(cfg.L2ChainID, 16, l1[0]), + b(cfg.L2ChainID, 18, l1[0]), + b(cfg.L2ChainID, 20, l1[0]), + b(cfg.L2ChainID, 22, l1[0]), nil, } // expected error of BatchQueue.NextBatch() @@ -289,6 +290,8 @@ func BatchQueueEager(t *testing.T, batchType int) { for i := 0; i < len(expectedOutputBatches); i++ { b, _, e := bq.NextBatch(context.Background(), safeHead) + log.Info("DEBUG: ", ", i=", i, ", b=", b, ", safe_head=", safeHead) + fmt.Printf("DEBUG: i=%v, b=%v, safehead=%v\n", i, b, safeHead) require.ErrorIs(t, e, expectedOutputErrors[i]) if b == nil { require.Nil(t, expectedOutputBatches[i]) @@ -296,7 +299,7 @@ func BatchQueueEager(t *testing.T, batchType int) { require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 safeHead.Time += cfg.BlockTime / 1000 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = 
mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -320,7 +323,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 2, DeltaTime: getDeltaTime(batchType), @@ -373,7 +376,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 safeHead.Time += 2 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -396,7 +399,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { b, _, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) require.NotNil(t, b) - require.Equal(t, safeHead.Time+2, b.Timestamp) + require.Equal(t, (safeHead.Time+2)*1000, b.Timestamp) require.Equal(t, rollup.Epoch(1), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 @@ -412,7 +415,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { require.Nil(t, e) require.NotNil(t, b) require.Equal(t, rollup.Epoch(2), b.EpochNum) - require.Equal(t, safeHead.Time+2, b.Timestamp) + require.Equal(t, (safeHead.Time+2)*1000, b.Timestamp) safeHead.Number += 1 safeHead.Time += 2 safeHead.Hash = mockHash(b.Timestamp, 2) @@ -432,6 +435,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { Number: 0, ParentHash: common.Hash{}, Time: 10, + MilliPartTime: 0, L1Origin: l1[0].ID(), SequenceNumber: 0, } @@ -439,7 +443,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 2, DeltaTime: getDeltaTime(batchType), @@ -497,22 +501,22 @@ func BatchQueueMissing(t *testing.T, batchType int) { // Check for a generated batch at t = 12 b, _, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) - require.Equal(t, b.Timestamp, uint64(12)) + 
require.Equal(t, b.Timestamp, uint64(12000)) require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = mockHash(b.Timestamp/1000, 2) // Check for generated batch at t = 14 b, _, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) - require.Equal(t, b.Timestamp, uint64(14)) + require.Equal(t, b.Timestamp, uint64(14000)) require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = mockHash(b.Timestamp/1000, 2) // Check for the inputted batch at t = 16 b, _, e = bq.NextBatch(context.Background(), safeHead) @@ -521,7 +525,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = mockHash(b.Timestamp/1000, 2) // Advance the origin. 
At this point the batch with timestamp 18 will be created input.origin = l1[3] @@ -533,7 +537,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { require.Equal(t, e, io.EOF) b, _, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) - require.Equal(t, b.Timestamp, uint64(18)) + require.Equal(t, b.Timestamp, uint64(18000)) require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(1), b.EpochNum) } @@ -556,7 +560,7 @@ func BatchQueueAdvancedEpoch(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(batchType), @@ -619,8 +623,8 @@ func BatchQueueAdvancedEpoch(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Time += cfg.BlockTime / 1000 + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -643,7 +647,7 @@ func BatchQueueShuffle(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(batchType), @@ -718,8 +722,8 @@ func BatchQueueShuffle(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Time += cfg.BlockTime / 1000 + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -741,7 +745,7 @@ func TestBatchQueueOverlappingSpanBatch(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(SpanBatchType), @@ -821,8 +825,8 @@ func TestBatchQueueOverlappingSpanBatch(t *testing.T) { } else { require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - 
safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Time += cfg.BlockTime / 1000 + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -846,7 +850,7 @@ func TestBatchQueueComplex(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(SpanBatchType), @@ -939,8 +943,8 @@ func TestBatchQueueComplex(t *testing.T) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Time += cfg.BlockTime / 1000 + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -964,7 +968,7 @@ func TestBatchQueueResetSpan(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(SpanBatchType), @@ -997,8 +1001,8 @@ func TestBatchQueueResetSpan(t *testing.T) { // This NextBatch() will return the second singular batch. 
safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - safeHead.Hash = mockHash(nextBatch.Timestamp, 2) + safeHead.Time += cfg.BlockTime / 1000 + safeHead.Hash = mockHash(nextBatch.Timestamp/1000, 2) safeHead.L1Origin = nextBatch.Epoch() nextBatch, _, err = bq.NextBatch(context.Background(), safeHead) require.NoError(t, err) diff --git a/op-node/rollup/derive/batch_test.go b/op-node/rollup/derive/batch_test.go index 3dc554a593..b7b63ea443 100644 --- a/op-node/rollup/derive/batch_test.go +++ b/op-node/rollup/derive/batch_test.go @@ -78,7 +78,7 @@ func RandomRawSpanBatch(rng *rand.Rand, chainId *big.Int) *RawSpanBatch { func RandomValidConsecutiveSingularBatches(rng *rand.Rand, chainID *big.Int) []*SingularBatch { blockCount := 2 + rng.Intn(128) - l2BlockTime := uint64(2) + l2BlockTime := uint64(2) * 1000 // ms var singularBatches []*SingularBatch for i := 0; i < blockCount; i++ { @@ -87,7 +87,7 @@ func RandomValidConsecutiveSingularBatches(rng *rand.Rand, chainID *big.Int) []* } l1BlockNum := rng.Uint64() // make sure oldest timestamp is large enough - singularBatches[0].Timestamp += 256 + singularBatches[0].Timestamp += 256 * 1000 // ms for i := 0; i < blockCount; i++ { originChangedBit := rng.Intn(2) if originChangedBit == 1 { diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index db65a1b0f5..584943eb33 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -125,7 +125,7 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo spec := rollup.NewChainSpec(cfg) // Check if we ran out of sequencer time drift - if max := batchOrigin.Time + spec.MaxSequencerDrift(batchOrigin.Time); batch.Timestamp > max { + if max := (batchOrigin.Time + spec.MaxSequencerDrift(batchOrigin.Time)) * 1000; batch.Timestamp > max { if len(batch.Transactions) == 0 { // If the sequencer is co-operating by producing an empty batch, // then allow the batch if it was the right thing to do to maintain the 
L2 time >= L1 time invariant. @@ -136,7 +136,7 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo return BatchUndecided } nextOrigin := l1Blocks[1] - if batch.Timestamp >= nextOrigin.Time { // check if the next L1 origin could have been adopted + if batch.Timestamp >= nextOrigin.MilliTimestamp() { // check if the next L1 origin could have been adopted log.Info("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid") return BatchDrop } else { @@ -272,7 +272,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B originAdvanced := startEpochNum == parentBlock.L1Origin.Number+1 for i := 0; i < batch.GetBlockCount(); i++ { - if batch.GetBlockTimestamp(i) <= l2SafeHead.Time { + if batch.GetBlockTimestamp(i) <= l2SafeHead.MillisecondTimestamp() { continue } var l1Origin eth.L1BlockRef @@ -297,7 +297,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B spec := rollup.NewChainSpec(cfg) // Check if we ran out of sequencer time drift - if max := l1Origin.Time + spec.MaxSequencerDrift(l1Origin.Time); blockTimestamp > max { + if max := (l1Origin.Time + spec.MaxSequencerDrift(l1Origin.Time)) * 1000; blockTimestamp > max { if len(batch.GetBlockTransactions(i)) == 0 { // If the sequencer is co-operating by producing an empty batch, // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant. 
diff --git a/op-node/rollup/derive/batches_test.go b/op-node/rollup/derive/batches_test.go index 125fc0f02e..b3eb4309c3 100644 --- a/op-node/rollup/derive/batches_test.go +++ b/op-node/rollup/derive/batches_test.go @@ -63,7 +63,7 @@ func TestValidBatch(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 31, // a genesis time that itself does not align to make it more interesting }, - BlockTime: defaultBlockTime, + BlockTime: defaultBlockTime * 1000, SeqWindowSize: 4, MaxSequencerDrift: 6, } @@ -241,7 +241,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, @@ -257,7 +257,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time + 1, // 1 too high + Timestamp: (l2A1.Time + 1) * 1000, // 1 too high Transactions: nil, }, }, @@ -273,13 +273,13 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A0.Time, // repeating the same time + Timestamp: l2A0.MillisecondTimestamp(), // repeating the same time Transactions: nil, }, }, Expected: BatchDrop, }, - { + { // TODO: Name: "misaligned timestamp", L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, L2SafeHead: l2A0, @@ -289,7 +289,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time - 1, // block time is 2, so this is 1 too low + Timestamp: (l2A1.Time - 1) * 1000, // block time is 2, so this is 1 too low Transactions: nil, }, }, @@ -305,7 +305,7 @@ func TestValidBatch(t *testing.T) { ParentHash: testutils.RandomHash(rng), EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + 
Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, @@ -321,7 +321,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, @@ -337,7 +337,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.Hash, // build on top of safe head to continue EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2B0.Time + defaultBlockTime, // pass the timestamp check to get too epoch check + Timestamp: (l2B0.Time + defaultBlockTime) * 1000, // pass the timestamp check to get too epoch check Transactions: nil, }, }, @@ -353,7 +353,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, @@ -369,7 +369,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C EpochHash: l1C.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, @@ -385,7 +385,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l1A.Hash, // invalid, epoch hash should be l1B - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, @@ -401,7 +401,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, }, }, @@ -417,7 +417,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: 
rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: []hexutil.Bytes{[]byte("sequencer should include this tx")}, }, }, @@ -434,7 +434,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2Y0.ParentHash, EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time + Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, }, }, @@ -450,7 +450,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, @@ -466,7 +466,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2Y0.ParentHash, EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time + Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time Transactions: nil, }, }, @@ -482,7 +482,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, @@ -498,7 +498,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, @@ -514,7 +514,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{}, // empty tx 
data }, @@ -532,7 +532,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{types.DepositTxType, 0}, // piece of data alike to a deposit }, @@ -550,7 +550,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{0x02, 0x42, 0x13, 0x37}, []byte{0x02, 0xde, 0xad, 0xbe, 0xef}, @@ -569,7 +569,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{0x02, 0x42, 0x13, 0x37}, []byte{0x02, 0xde, 0xad, 0xbe, 0xef}, @@ -588,7 +588,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2A2.Time + defaultBlockTime, + Timestamp: l2A2.MillisecondTimestamp() + defaultBlockTime*1000, Transactions: nil, }, }, @@ -607,7 +607,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -627,7 +627,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time + 1, // 1 too high + Timestamp: (l2A1.Time + 1) * 1000, // 1 too high Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -647,7 +647,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: 
rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time - 1, // block time is 2, so this is 1 too low + Timestamp: (l2A1.Time - 1) * 1000, // block time is 2, so this is 1 too low Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -667,7 +667,7 @@ func TestValidBatch(t *testing.T) { ParentHash: testutils.RandomHash(rng), EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -687,7 +687,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -707,13 +707,13 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.Hash, // build on top of safe head to continue EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2B0.Time + defaultBlockTime, // pass the timestamp check to get too epoch check + Timestamp: (l2B0.Time + defaultBlockTime) * 1000, // pass the timestamp check to get too epoch check Transactions: nil, }, { EpochNum: rollup.Epoch(l1B.Number), EpochHash: l1B.Hash, // pass the l1 origin check - Timestamp: l2B0.Time + defaultBlockTime*2, + Timestamp: (l2B0.Time + defaultBlockTime*2) * 1000, Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -733,7 +733,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -753,14 +753,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A3.ParentHash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + 
Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -780,7 +780,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C EpochHash: l1C.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -800,7 +800,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l1A.Hash, // invalid, epoch hash should be l1B - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -820,14 +820,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A3.ParentHash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l1A.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l1A.Hash, // invalid, epoch hash should be l1B - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -847,7 +847,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -867,7 +867,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), 
big.NewInt(0)), @@ -886,14 +886,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A3.ParentHash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -913,7 +913,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2Y0.ParentHash, EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time + Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -933,7 +933,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -952,14 +952,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2Y0.ParentHash, EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time + Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time Transactions: nil, }, { ParentHash: l2Z0.ParentHash, EpochNum: rollup.Epoch(l2Z0.L1Origin.Number), EpochHash: l2Z0.L1Origin.Hash, - Timestamp: l2Z0.Time, // valid, but more than 6 ahead of l1Y.Time + Timestamp: l2Z0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -979,7 +979,7 @@ func TestValidBatch(t *testing.T) { 
ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -999,14 +999,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A3.ParentHash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1026,7 +1026,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1046,14 +1046,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A3.ParentHash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1073,7 +1073,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{}, // empty tx data }, @@ -1095,7 +1095,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: 
rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{types.DepositTxType, 0}, // piece of data alike to a deposit }, @@ -1117,7 +1117,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -1136,7 +1136,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -1155,7 +1155,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2A2.Time + defaultBlockTime, + Timestamp: (l2A2.Time + defaultBlockTime) * 1000, Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1175,14 +1175,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, { // we build l2B0, which starts a new epoch too early ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2A2.Time + defaultBlockTime, + Timestamp: (l2A2.Time + defaultBlockTime) * 1000, Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1202,14 +1202,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A2.Hash, EpochNum: 
rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1228,21 +1228,21 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.Hash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1261,14 +1261,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.Hash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1288,14 +1288,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1315,14 +1315,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number) + 1, EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: 
l2A2.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1342,14 +1342,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1369,21 +1369,21 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.ParentHash, EpochNum: rollup.Epoch(l2A0.L1Origin.Number), EpochHash: l2A0.L1Origin.Hash, - Timestamp: l2A0.Time, + Timestamp: l2A0.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A0.Hash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1403,14 +1403,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.Hash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A0.Time + 1, + Timestamp: (l2A0.Time + 1) * 1000, Transactions: nil, }, { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A1.Time + 1, + Timestamp: (l2A1.Time + 1) * 1000, Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1430,14 +1430,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.Hash, EpochNum: 
rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A0.Time - 1, + Timestamp: (l2A0.Time - 1) * 1000, Transactions: nil, }, { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1457,14 +1457,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A3.Hash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1483,7 +1483,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, @@ -1501,7 +1501,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -1520,7 +1520,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, @@ -1538,7 +1538,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), 
big.NewInt(0)), @@ -1582,6 +1582,7 @@ func TestValidBatch(t *testing.T) { if mod := testCase.ConfigMod; mod != nil { mod(rcfg) } + // TODO validity := CheckBatch(ctx, rcfg, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch, &l2Client) require.Equal(t, testCase.Expected, validity, "batch check must return expected validity level") if expLog := testCase.ExpectedLog; expLog != "" { @@ -1644,14 +1645,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.Hash, EpochNum: rollup.Epoch(l2B1.L1Origin.Number), EpochHash: l2B1.L1Origin.Hash, - Timestamp: l2B1.Time, + Timestamp: l2B1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, // Random generated TX that does not match overlapping block }, { ParentHash: l2B1.Hash, EpochNum: rollup.Epoch(l2B2.L1Origin.Number), EpochHash: l2B2.L1Origin.Hash, - Timestamp: l2B2.Time, + Timestamp: l2B2.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1689,14 +1690,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.Hash, EpochNum: rollup.Epoch(l2B1.L1Origin.Number), EpochHash: l2B1.L1Origin.Hash, - Timestamp: l2B1.Time, + Timestamp: l2B1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, { ParentHash: l2B1.Hash, EpochNum: rollup.Epoch(l2B2.L1Origin.Number), EpochHash: l2B2.L1Origin.Hash, - Timestamp: l2B2.Time, + Timestamp: l2B2.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), diff --git a/op-node/rollup/derive/engine_queue.go b/op-node/rollup/derive/engine_queue.go index c89456585f..3d987b3fcf 100644 --- a/op-node/rollup/derive/engine_queue.go +++ b/op-node/rollup/derive/engine_queue.go @@ -284,9 +284,9 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System if err != nil { return NewTemporaryError(fmt.Errorf("failed to fetch the new L1 progress: origin: %v; err: %w", safe.L1Origin, err)) } - if safe.Time < l1Origin.Time { + if safe.MillisecondTimestamp() < l1Origin.MilliTimestamp() { return 
NewResetError(fmt.Errorf("cannot reset block derivation to start at L2 block %s with time %d older than its L1 origin %s with time %d, time invariant is broken", - safe, safe.Time, l1Origin, l1Origin.Time)) + safe, safe.MillisecondTimestamp(), l1Origin, l1Origin.MilliTimestamp())) } // Walk back L2 chain to find the L1 origin that is old enough to start buffering channel data from. diff --git a/op-node/rollup/derive/l1_block_info_test.go b/op-node/rollup/derive/l1_block_info_test.go index 81b550250c..75f2374c5d 100644 --- a/op-node/rollup/derive/l1_block_info_test.go +++ b/op-node/rollup/derive/l1_block_info_test.go @@ -133,6 +133,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { require.Equal(t, L1InfoEcotoneLen, len(depTx.Data)) }) t.Run("first-block ecotone", func(t *testing.T) { + // TODO: rng := rand.New(rand.NewSource(1234)) info := testutils.MakeBlockInfo(nil)(rng) zero := uint64(2) @@ -154,7 +155,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg := rollup.Config{ RegolithTime: &zero, EcotoneTime: &zero, - BlockTime: 2, + BlockTime: 2000, } depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 0) require.NoError(t, err) diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index 8370661bd8..871028f885 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -557,7 +557,7 @@ func (b *SpanBatch) ToRawSpanBatch() (*RawSpanBatch, error) { return &RawSpanBatch{ spanBatchPrefix: spanBatchPrefix{ - relTimestamp: span_start.Timestamp - b.GenesisTimestamp, + relTimestamp: span_start.Timestamp - b.GenesisTimestamp*1000, l1OriginNum: uint64(span_end.EpochNum), parentCheck: b.ParentCheck, l1OriginCheck: b.L1OriginCheck, @@ -578,7 +578,7 @@ func (b *SpanBatch) GetSingularBatches(l1Origins []eth.L1BlockRef, l2SafeHead et var singularBatches []*SingularBatch originIdx := 0 for _, batch := range b.Batches { - if batch.Timestamp <= l2SafeHead.Time { + 
if batch.Timestamp <= l2SafeHead.MillisecondTimestamp() { continue } singularBatch := SingularBatch{ diff --git a/op-node/rollup/derive/span_batch_test.go b/op-node/rollup/derive/span_batch_test.go index 6f9fe1e475..8c39696bfb 100644 --- a/op-node/rollup/derive/span_batch_test.go +++ b/op-node/rollup/derive/span_batch_test.go @@ -337,13 +337,13 @@ func TestSpanBatchDerive(t *testing.T) { rng := rand.New(rand.NewSource(0xbab0bab0)) chainID := new(big.Int).SetUint64(rng.Uint64()) - l2BlockTime := uint64(2000) + l2BlockTime := uint64(2) * 1000 //ms for originChangedBit := 0; originChangedBit < 2; originChangedBit++ { singularBatches := RandomValidConsecutiveSingularBatches(rng, chainID) safeL2Head := testutils.RandomL2BlockRef(rng) safeL2Head.Hash = common.BytesToHash(singularBatches[0].ParentHash[:]) - genesisTimeStamp := 1 + singularBatches[0].Timestamp - 128 + genesisTimeStamp := 1 + singularBatches[0].Timestamp/1000 - 128 // second spanBatch := initializedSpanBatch(singularBatches, genesisTimeStamp, chainID) // set originChangedBit to match the original test implementation @@ -408,7 +408,7 @@ func TestSpanBatchMerge(t *testing.T) { require.NoError(t, err) // check span batch prefix - require.Equal(t, rawSpanBatch.relTimestamp, singularBatches[0].Timestamp-genesisTimeStamp, "invalid relative timestamp") + require.Equal(t, rawSpanBatch.relTimestamp, singularBatches[0].Timestamp-genesisTimeStamp*1000, "invalid relative timestamp") require.Equal(t, rollup.Epoch(rawSpanBatch.l1OriginNum), singularBatches[blockCount-1].EpochNum) require.Equal(t, rawSpanBatch.parentCheck[:], singularBatches[0].ParentHash.Bytes()[:20], "invalid parent check") require.Equal(t, rawSpanBatch.l1OriginCheck[:], singularBatches[blockCount-1].EpochHash.Bytes()[:20], "invalid l1 origin check") @@ -444,8 +444,8 @@ func TestSpanBatchToSingularBatch(t *testing.T) { singularBatches := RandomValidConsecutiveSingularBatches(rng, chainID) safeL2Head := testutils.RandomL2BlockRef(rng) safeL2Head.Hash = 
common.BytesToHash(singularBatches[0].ParentHash[:]) - safeL2Head.Time = singularBatches[0].Timestamp - 2 - genesisTimeStamp := 1 + singularBatches[0].Timestamp - 128 + safeL2Head.Time = singularBatches[0].Timestamp/1000 - 2 + genesisTimeStamp := 1 + singularBatches[0].Timestamp/1000 - 128 // second spanBatch := initializedSpanBatch(singularBatches, genesisTimeStamp, chainID) // set originChangedBit to match the original test implementation diff --git a/op-node/rollup/driver/origin_selector.go b/op-node/rollup/driver/origin_selector.go index 8f4ecf7746..cc22135e35 100644 --- a/op-node/rollup/driver/origin_selector.go +++ b/op-node/rollup/driver/origin_selector.go @@ -46,13 +46,13 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc if err != nil { return eth.L1BlockRef{}, err } - msd := los.spec.MaxSequencerDrift(currentOrigin.Time) + msd := los.spec.MaxSequencerDrift(currentOrigin.Time) * 1000 // ms log := los.log.New("current", currentOrigin, "current_time", currentOrigin.Time, - "l2_head", l2Head, "l2_head_time", l2Head.Time, "max_seq_drift", msd) + "l2_head", l2Head, "l2_head_time_ms", l2Head.MillisecondTimestamp(), "max_seq_drift_ms", msd) // If we are past the sequencer depth, we may want to advance the origin, but need to still // check the time of the next origin. - pastSeqDrift := l2Head.Time+los.cfg.BlockTime > currentOrigin.Time+msd + pastSeqDrift := l2Head.MillisecondTimestamp()+los.cfg.BlockTime > currentOrigin.MilliTimestamp()+msd // Limit the time to fetch next origin block by default refCtx, refCancel := context.WithTimeout(ctx, 100*time.Millisecond) defer refCancel() @@ -93,7 +93,7 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc // of slack. For simplicity, we implement our Sequencer to always start building on the latest // L1 block when we can. // If not pastSeqDrift and next origin receipts not cached, fallback to current origin. 
- if l2Head.Time+los.cfg.BlockTime >= nextOrigin.Time && (pastSeqDrift || receiptsCached) { + if l2Head.MillisecondTimestamp()+los.cfg.BlockTime >= nextOrigin.MilliTimestamp() && (pastSeqDrift || receiptsCached) { return nextOrigin, nil } diff --git a/op-node/rollup/driver/origin_selector_test.go b/op-node/rollup/driver/origin_selector_test.go index fa3a9c12fe..b02f4f44e0 100644 --- a/op-node/rollup/driver/origin_selector_test.go +++ b/op-node/rollup/driver/origin_selector_test.go @@ -25,7 +25,7 @@ func TestOriginSelectorAdvances(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -67,7 +67,7 @@ func TestOriginSelectorRespectsOriginTiming(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -108,7 +108,7 @@ func TestOriginSelectorRespectsConfDepth(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -151,7 +151,7 @@ func TestOriginSelectorStrictConfDepth(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -191,7 +191,7 @@ func TestOriginSelector_FjordSeqDrift(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2, + BlockTime: 2000, FjordTime: u64ptr(20), // a's timestamp } l1 := &testutils.MockL1Source{} @@ -225,7 +225,7 @@ func TestOriginSelectorSeqDriftRespectsNextOriginTime(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2, + BlockTime: 
2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -268,7 +268,7 @@ func TestOriginSelectorHandlesLateL1Blocks(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) diff --git a/op-node/rollup/driver/sequencer.go b/op-node/rollup/driver/sequencer.go index 100c2d2ee1..e4ae1ae280 100644 --- a/op-node/rollup/driver/sequencer.go +++ b/op-node/rollup/driver/sequencer.go @@ -99,7 +99,7 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error { // empty blocks (other than the L1 info deposit and any user deposits). We handle this by // setting NoTxPool to true, which will cause the Sequencer to not include any transactions // from the transaction pool. - attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.spec.MaxSequencerDrift(l1Origin.Time) + attrs.NoTxPool = attrs.MilliTimestamp() > l1Origin.MilliTimestamp()+d.spec.MaxSequencerDrift(l1Origin.Time)*1000 // For the Ecotone activation block we shouldn't include any sequencer transactions. 
if d.rollupCfg.IsEcotoneActivationBlock(uint64(attrs.Timestamp)) { @@ -167,7 +167,7 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration { } blockTime := time.Duration(d.rollupCfg.BlockTime) * time.Millisecond - payloadTime := time.UnixMilli(int64(head.Time + d.rollupCfg.BlockTime)) + payloadTime := time.UnixMilli(int64(head.MillisecondTimestamp() + d.rollupCfg.BlockTime)) remainingTime := payloadTime.Sub(now) // If we started building a block already, and if that work is still consistent, diff --git a/op-node/rollup/driver/sequencer_test.go b/op-node/rollup/driver/sequencer_test.go index 9e67563b62..923dfc0768 100644 --- a/op-node/rollup/driver/sequencer_test.go +++ b/op-node/rollup/driver/sequencer_test.go @@ -259,7 +259,7 @@ func TestSequencerChaosMonkey(t *testing.T) { testGasLimit := eth.Uint64Quantity(10_000_000) return ð.PayloadAttributes{ - Timestamp: eth.Uint64Quantity(l2Parent.Time + cfg.BlockTime), + Timestamp: eth.Uint64Quantity(l2Parent.Time + cfg.BlockTime/1000), PrevRandao: eth.Bytes32{}, SuggestedFeeRecipient: common.Address{}, Transactions: []eth.Data{infoDep}, @@ -268,7 +268,7 @@ func TestSequencerChaosMonkey(t *testing.T) { }, nil }) - maxL1BlockTimeGap := uint64(100) + maxL1BlockTimeGap := uint64(100) * 1000 // ms // The origin selector just generates random L1 blocks based on RNG var originErr error originSelector := testOriginSelectorFn(func(ctx context.Context, l2Head eth.L2BlockRef) (eth.L1BlockRef, error) { @@ -282,11 +282,11 @@ func TestSequencerChaosMonkey(t *testing.T) { Time: l1Times[l2Head.L1Origin], } // randomly make a L1 origin appear, if we can even select it - nextL2Time := l2Head.Time + cfg.BlockTime - if nextL2Time <= origin.Time { + nextL2MilliTime := l2Head.MillisecondTimestamp() + cfg.BlockTime + if nextL2MilliTime <= origin.MilliTimestamp() { return origin, nil } - maxTimeIncrement := nextL2Time - origin.Time + maxTimeIncrement := nextL2MilliTime - origin.MilliTimestamp() if maxTimeIncrement > maxL1BlockTimeGap { 
maxTimeIncrement = maxL1BlockTimeGap } @@ -295,7 +295,7 @@ func TestSequencerChaosMonkey(t *testing.T) { Hash: mockL1Hash(origin.Number + 1), Number: origin.Number + 1, ParentHash: origin.Hash, - Time: origin.Time + 1 + uint64(rng.Int63n(int64(maxTimeIncrement))), + Time: origin.Time + 1 + uint64(rng.Int63n(int64(maxTimeIncrement/1000))), } l1Times[nextOrigin.ID()] = nextOrigin.Time return nextOrigin, nil @@ -375,7 +375,7 @@ func TestSequencerChaosMonkey(t *testing.T) { l2Head := engControl.UnsafeL2Head() t.Logf("avg build time: %s, clock timestamp: %d, L2 head time: %d, L1 origin time: %d, avg txs per block: %f", engControl.avgBuildingTime(), clockFn().Unix(), l2Head.Time, l1Times[l2Head.L1Origin], engControl.avgTxsPerBlock()) require.Equal(t, engControl.totalBuiltBlocks, desiredBlocks, "persist through random errors and build the desired blocks") - require.Equal(t, l2Head.Time, cfg.Genesis.L2Time+uint64(desiredBlocks)*cfg.BlockTime, "reached desired L2 block timestamp") + require.Equal(t, l2Head.MillisecondTimestamp(), cfg.Genesis.L2Time*1000+uint64(desiredBlocks)*cfg.BlockTime, "reached desired L2 block timestamp") require.GreaterOrEqual(t, l2Head.Time, l1Times[l2Head.L1Origin], "the L2 time >= the L1 time") require.Less(t, l2Head.Time-l1Times[l2Head.L1Origin], uint64(100), "The L1 origin time is close to the L2 time") require.Less(t, clockTime.Sub(time.Unix(int64(l2Head.Time), 0)).Abs(), 2*time.Second, "L2 time is accurate, within 2 seconds of wallclock") diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 57be9273b5..1c38fdf570 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -428,8 +428,8 @@ func (c *Config) IsFjord(timestamp uint64) bool { // Fjord upgrade. 
func (c *Config) IsFjordActivationBlock(l2BlockTime uint64) bool { return c.IsFjord(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsFjord(l2BlockTime-c.BlockTime) + l2BlockTime >= c.BlockTime/1000 && + !c.IsFjord(l2BlockTime-c.BlockTime/1000) } // IsInterop returns true if the Interop hardfork is active at or past the given timestamp. @@ -439,34 +439,34 @@ func (c *Config) IsInterop(timestamp uint64) bool { func (c *Config) IsRegolithActivationBlock(l2BlockTime uint64) bool { return c.IsRegolith(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsRegolith(l2BlockTime-c.BlockTime) + l2BlockTime >= c.BlockTime/1000 && + !c.IsRegolith(l2BlockTime-c.BlockTime/1000) } func (c *Config) IsCanyonActivationBlock(l2BlockTime uint64) bool { return c.IsCanyon(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsCanyon(l2BlockTime-c.BlockTime) + l2BlockTime >= c.BlockTime/1000 && + !c.IsCanyon(l2BlockTime-c.BlockTime/1000) } func (c *Config) IsDeltaActivationBlock(l2BlockTime uint64) bool { return c.IsDelta(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsDelta(l2BlockTime-c.BlockTime) + l2BlockTime >= c.BlockTime/1000 && + !c.IsDelta(l2BlockTime-c.BlockTime/1000) } // IsEcotoneActivationBlock returns whether the specified block is the first block subject to the // Ecotone upgrade. Ecotone activation at genesis does not count. func (c *Config) IsEcotoneActivationBlock(l2BlockTime uint64) bool { return c.IsEcotone(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsEcotone(l2BlockTime-c.BlockTime) + l2BlockTime >= c.BlockTime/1000 && + !c.IsEcotone(l2BlockTime-c.BlockTime/1000) } func (c *Config) IsInteropActivationBlock(l2BlockTime uint64) bool { return c.IsInterop(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsInterop(l2BlockTime-c.BlockTime) + l2BlockTime >= c.BlockTime/1000 && + !c.IsInterop(l2BlockTime-c.BlockTime/1000) } // ForkchoiceUpdatedVersion returns the EngineAPIMethod suitable for the chain hard fork version. 
diff --git a/op-service/eth/types.go b/op-service/eth/types.go index 290cffd3e7..7a6fb6fee0 100644 --- a/op-service/eth/types.go +++ b/op-service/eth/types.go @@ -328,6 +328,11 @@ type PayloadAttributes struct { GasLimit *Uint64Quantity `json:"gasLimit,omitempty"` } +func (pa *PayloadAttributes) MilliTimestamp() uint64 { + // TODO: + return uint64(pa.Timestamp) * 1000 +} + type ExecutePayloadStatus string const ( From 8520a2f1fc25fe5ad5ad32aced66bcaf423299bb Mon Sep 17 00:00:00 2001 From: 2020xibao <2020xibao@gmail.com> Date: Fri, 28 Feb 2025 22:25:11 +0800 Subject: [PATCH 03/30] fix: make e2e succeed --- op-chain-ops/genesis/config.go | 8 ++++++-- op-e2e/actions/blocktime_test.go | 6 +++--- op-e2e/actions/dencun_fork_test.go | 5 ++++- op-e2e/actions/l2_sequencer_test.go | 4 ++-- op-e2e/actions/user_test.go | 8 ++++++-- op-e2e/e2eutils/setup.go | 5 +++++ op-e2e/op_geth.go | 2 +- op-e2e/op_geth_test.go | 3 +++ op-e2e/setup.go | 4 ++++ op-e2e/system_adminrpc_test.go | 2 +- op-node/p2p/sync.go | 2 +- op-node/rollup/derive/batches.go | 2 +- op-node/rollup/driver/origin_selector.go | 6 +++++- op-node/rollup/types.go | 12 ++++++------ op-node/rollup/types_test.go | 6 +++--- op-program/client/l2/engineapi/block_processor.go | 2 ++ op-service/sources/l2_client.go | 4 ++-- 17 files changed, 55 insertions(+), 26 deletions(-) diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index e8234d978a..2b614d66be 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -60,7 +60,7 @@ type DeployConfig struct { L1ChainID uint64 `json:"l1ChainID"` // L2ChainID is the chain ID of the L2 chain. L2ChainID uint64 `json:"l2ChainID"` - // L2BlockTime is the number of seconds between each L2 block. + // L2BlockTime is the number of seconds between each L2 block. // millisecond L2BlockTime uint64 `json:"l2BlockTime"` // FinalizationPeriodSeconds represents the number of seconds before an output is considered // finalized. 
This impacts the amount of time that withdrawals take to finalize and is @@ -434,8 +434,12 @@ func (d *DeployConfig) Check() error { return fmt.Errorf("%w: GovernanceToken owner cannot be address(0)", ErrInvalidDeployConfig) } } + if d.L2BlockTime <= 3 { + // convert ms l2 time interval + d.L2BlockTime = d.L2BlockTime * 1000 + } // L2 block time must always be smaller than L1 block time - if d.L1BlockTime < d.L2BlockTime { + if d.L1BlockTime*1000 < d.L2BlockTime { // TODO: tmp adjust, l1 interval is second timstamp and l2 interval is millisecond. return fmt.Errorf("L2 block time (%d) is larger than L1 block time (%d)", d.L2BlockTime, d.L1BlockTime) } if d.RequiredProtocolVersion == (params.ProtocolVersion{}) { diff --git a/op-e2e/actions/blocktime_test.go b/op-e2e/actions/blocktime_test.go index 90a38be9a3..2d75c68732 100644 --- a/op-e2e/actions/blocktime_test.go +++ b/op-e2e/actions/blocktime_test.go @@ -48,9 +48,9 @@ func BatchInLastPossibleBlocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) applyDeltaTimeOffset(dp, deltaTimeOffset) dp.DeployConfig.SequencerWindowSize = 4 - dp.DeployConfig.L2BlockTime = 2 + dp.DeployConfig.L2BlockTime = 2 // second - sd := e2eutils.Setup(t, dp, defaultAlloc) + sd := e2eutils.Setup(t, dp, defaultAlloc) // dp.DeployConfig.L2BlockTime will be changed to 2000 log := testlog.Logger(t, log.LevelDebug) sd, _, miner, sequencer, sequencerEngine, _, _, batcher := setupReorgTestActors(t, dp, sd, log) @@ -162,7 +162,7 @@ func LargeL1Gaps(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { dp.DeployConfig.SequencerWindowSize = 4 dp.DeployConfig.MaxSequencerDrift = 32 applyDeltaTimeOffset(dp, deltaTimeOffset) - sd := e2eutils.Setup(t, dp, defaultAlloc) + sd := e2eutils.Setup(t, dp, defaultAlloc) // dp.DeployConfig.L2BlockTime = 2000 log := testlog.Logger(t, log.LevelDebug) sd, _, miner, sequencer, sequencerEngine, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) diff 
--git a/op-e2e/actions/dencun_fork_test.go b/op-e2e/actions/dencun_fork_test.go index 5e0f1706cb..6a607ecc7f 100644 --- a/op-e2e/actions/dencun_fork_test.go +++ b/op-e2e/actions/dencun_fork_test.go @@ -124,7 +124,10 @@ func TestDencunL2ForkAfterGenesis(gt *testing.T) { cancunOffset := hexutil.Uint64(0) dp.DeployConfig.L1CancunTimeOffset = &cancunOffset // This test wil fork on the second block - offset := hexutil.Uint64(dp.DeployConfig.L2BlockTime * 2) + if dp.DeployConfig.L2BlockTime <= 3 { + dp.DeployConfig.L2BlockTime = dp.DeployConfig.L2BlockTime * 1000 // millisecond + } + offset := hexutil.Uint64(dp.DeployConfig.L2BlockTime / 1000 * 2) // second dp.DeployConfig.L2GenesisCanyonTimeOffset = &offset dp.DeployConfig.L2GenesisDeltaTimeOffset = &offset dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset diff --git a/op-e2e/actions/l2_sequencer_test.go b/op-e2e/actions/l2_sequencer_test.go index 05fa0242d1..745a27855e 100644 --- a/op-e2e/actions/l2_sequencer_test.go +++ b/op-e2e/actions/l2_sequencer_test.go @@ -98,7 +98,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { origin := miner.l1Chain.CurrentBlock() // L2 makes blocks to catch up - for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time { + for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime/1000 < origin.Time { makeL2BlockWithAliceTx() require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches") } @@ -111,7 +111,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { sequencer.ActL1HeadSignal(t) // Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin - for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) { + for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime/1000 <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) { sequencer.ActL2KeepL1Origin(t) makeL2BlockWithAliceTx() 
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin") diff --git a/op-e2e/actions/user_test.go b/op-e2e/actions/user_test.go index c9692c91f0..4f28c9f7fa 100644 --- a/op-e2e/actions/user_test.go +++ b/op-e2e/actions/user_test.go @@ -118,11 +118,15 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { dp.DeployConfig.L2GenesisEcotoneTimeOffset = test.ecotoneTime dp.DeployConfig.L2GenesisFjordTimeOffset = test.fjordTime + if dp.DeployConfig.L2BlockTime <= 3 { + dp.DeployConfig.L2BlockTime = dp.DeployConfig.L2BlockTime * 1000 + } + if test.canyonTime != nil { - require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2BlockTime), "canyon fork must be aligned") + require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2BlockTime/1000), "canyon fork must be aligned") } if test.ecotoneTime != nil { - require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2BlockTime), "ecotone fork must be aligned") + require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2BlockTime/1000), "ecotone fork must be aligned") } sd := e2eutils.Setup(t, dp, defaultAlloc) diff --git a/op-e2e/e2eutils/setup.go b/op-e2e/e2eutils/setup.go index a86f28c5da..7f42ed56de 100644 --- a/op-e2e/e2eutils/setup.go +++ b/op-e2e/e2eutils/setup.go @@ -187,6 +187,11 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) * PlasmaConfig: pcfg, } + if rollupCfg.BlockTime <= 3 { + // covert to ms timestamp + rollupCfg.BlockTime = rollupCfg.BlockTime * 1000 + } + require.NoError(t, rollupCfg.Check()) // Sanity check that the config is correct diff --git a/op-e2e/op_geth.go b/op-e2e/op_geth.go index 7cea17d43a..1422a23a55 100644 --- a/op-e2e/op_geth.go +++ b/op-e2e/op_geth.go @@ -212,7 +212,7 @@ func (d *OpGeth) StartBlockBuilding(ctx context.Context, attrs *eth.PayloadAttri // CreatePayloadAttributes creates a valid PayloadAttributes containing a L1Info deposit transaction 
followed by the supplied transactions. func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.PayloadAttributes, error) { timestamp := d.L2Head.Timestamp + 2 - l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, uint64(timestamp)) + l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, uint64(timestamp*1000) /*ms*/) if err != nil { return nil, err } diff --git a/op-e2e/op_geth_test.go b/op-e2e/op_geth_test.go index 90b117084c..a65bfe333c 100644 --- a/op-e2e/op_geth_test.go +++ b/op-e2e/op_geth_test.go @@ -435,6 +435,9 @@ func TestRegolith(t *testing.T) { // We also need to setup a L1 Genesis to create the rollup genesis. cfg := DefaultSystemConfig(t) cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime + if cfg.DeployConfig.L2BlockTime <= 3 { + cfg.DeployConfig.L2BlockTime = cfg.DeployConfig.L2BlockTime * 1000 // ms + } ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() diff --git a/op-e2e/setup.go b/op-e2e/setup.go index b0c969639a..acbb423df6 100644 --- a/op-e2e/setup.go +++ b/op-e2e/setup.go @@ -547,6 +547,10 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste return nil, err } sys.RollupConfig = &defaultConfig + if sys.RollupConfig.BlockTime <= 3 { + // covert ms timestamp + sys.RollupConfig.BlockTime = sys.RollupConfig.BlockTime * 1000 + } // Create a fake Beacon node to hold on to blobs created by the L1 miner, and to serve them to L2 bcn := fakebeacon.NewBeacon(testlog.Logger(t, log.LevelInfo).New("role", "l1_cl"), diff --git a/op-e2e/system_adminrpc_test.go b/op-e2e/system_adminrpc_test.go index e7c0af673b..f5227b26db 100644 --- a/op-e2e/system_adminrpc_test.go +++ b/op-e2e/system_adminrpc_test.go @@ -56,7 +56,7 @@ func TestStopStartSequencer(t *testing.T) { require.False(t, active, "sequencer should be inactive") blockBefore := latestBlock(t, 
l2Seq) - time.Sleep(time.Duration(cfg.DeployConfig.L2BlockTime+1) * time.Second) + time.Sleep(time.Duration(cfg.DeployConfig.L2BlockTime/1000+1) * time.Second) blockAfter := latestBlock(t, l2Seq) require.Equal(t, blockAfter, blockBefore, "Chain advanced after stopping sequencer") diff --git a/op-node/p2p/sync.go b/op-node/p2p/sync.go index c46eb6b365..3872526be0 100644 --- a/op-node/p2p/sync.go +++ b/op-node/p2p/sync.go @@ -878,7 +878,7 @@ func (srv *ReqRespServer) handleSyncRequest(ctx context.Context, stream network. if req < srv.cfg.Genesis.L2.Number { return req, fmt.Errorf("cannot serve request for L2 block %d before genesis %d: %w", req, srv.cfg.Genesis.L2.Number, invalidRequestErr) } - max, err := srv.cfg.TargetBlockNumber(uint64(time.Now().Unix())) + max, err := srv.cfg.TargetBlockNumber(uint64(time.Now().UnixMilli())) if err != nil { return req, fmt.Errorf("cannot determine max target block number to verify request: %w", invalidRequestErr) } diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index 584943eb33..a60cb089bd 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -73,7 +73,7 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo return BatchFuture } if batch.Timestamp < nextMilliTimestamp { - log.Warn("dropping batch with old timestamp", "min_timestamp", nextMilliTimestamp) + log.Warn("dropping batch with old timestamp", "batch_timestamp", batch.Timestamp, "min_timestamp", nextMilliTimestamp) return BatchDrop } diff --git a/op-node/rollup/driver/origin_selector.go b/op-node/rollup/driver/origin_selector.go index cc22135e35..5539d77f7d 100644 --- a/op-node/rollup/driver/origin_selector.go +++ b/op-node/rollup/driver/origin_selector.go @@ -57,7 +57,11 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc refCtx, refCancel := context.WithTimeout(ctx, 100*time.Millisecond) defer refCancel() if pastSeqDrift { - log.Warn("Next L2 
block time is past the sequencer drift + current origin time") + log.Warn("Next L2 block time is past the sequencer drift + current origin time", + "l2_head_ms_timestamp", l2Head.MillisecondTimestamp(), + "l2_block_ms_interval", los.cfg.BlockTime, + "l1_origin_ms_timestamp", currentOrigin.MilliTimestamp(), + "max_ms_drift", msd) // Must fetch next L1 block as long as it may take, cause we are pastSeqDrift refCtx = ctx } diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 1c38fdf570..839d111aa0 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -193,18 +193,18 @@ func (cfg *Config) ValidateL2Config(ctx context.Context, client L2Client, skipL2 } func (cfg *Config) TimestampForBlock(blockNumber uint64) uint64 { - return cfg.Genesis.L2Time + ((blockNumber - cfg.Genesis.L2.Number) * cfg.BlockTime) + return cfg.Genesis.L2Time + ((blockNumber - cfg.Genesis.L2.Number) * cfg.BlockTime / 1000) } -func (cfg *Config) TargetBlockNumber(timestamp uint64) (num uint64, err error) { +func (cfg *Config) TargetBlockNumber(milliTimestamp uint64) (num uint64, err error) { // subtract genesis time from timestamp to get the time elapsed since genesis, and then divide that // difference by the block time to get the expected L2 block number at the current time. If the // unsafe head does not have this block number, then there is a gap in the queue. - genesisTimestamp := cfg.Genesis.L2Time - if timestamp < genesisTimestamp { - return 0, fmt.Errorf("did not reach genesis time (%d) yet", genesisTimestamp) + genesisMilliTimestamp := cfg.Genesis.L2Time * 1000 + if milliTimestamp < genesisMilliTimestamp { + return 0, fmt.Errorf("did not reach genesis time (%d) yet", genesisMilliTimestamp) } - wallClockGenesisDiff := timestamp - genesisTimestamp + wallClockGenesisDiff := milliTimestamp - genesisMilliTimestamp // Note: round down, we should not request blocks into the future. 
blocksSinceGenesis := wallClockGenesisDiff / cfg.BlockTime return cfg.Genesis.L2.Number + blocksSinceGenesis, nil diff --git a/op-node/rollup/types_test.go b/op-node/rollup/types_test.go index 248fe3eaad..d427104a58 100644 --- a/op-node/rollup/types_test.go +++ b/op-node/rollup/types_test.go @@ -534,7 +534,7 @@ func TestTimestampForBlock(t *testing.T) { name: "FirstBlock", genesisTime: 100, genesisBlock: 0, - blockTime: 2, + blockTime: 2000, blockNum: 0, expectedBlockTime: 100, }, @@ -542,7 +542,7 @@ func TestTimestampForBlock(t *testing.T) { name: "SecondBlock", genesisTime: 100, genesisBlock: 0, - blockTime: 2, + blockTime: 2000, blockNum: 1, expectedBlockTime: 102, }, @@ -550,7 +550,7 @@ func TestTimestampForBlock(t *testing.T) { name: "NBlock", genesisTime: 100, genesisBlock: 0, - blockTime: 2, + blockTime: 2000, blockNum: 25, expectedBlockTime: 150, }, diff --git a/op-program/client/l2/engineapi/block_processor.go b/op-program/client/l2/engineapi/block_processor.go index d81212db20..b8850f86e2 100644 --- a/op-program/client/l2/engineapi/block_processor.go +++ b/op-program/client/l2/engineapi/block_processor.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) @@ -70,6 +71,7 @@ func NewBlockProcessorFromHeader(provider BlockDataProvider, h *types.Header) (* } parentHeader := provider.GetHeaderByHash(header.ParentHash) if header.Time <= parentHeader.Time { + log.Error("invalid timestamp", "header", header, "parent_header", parentHeader) return nil, errors.New("invalid timestamp") } statedb, err := provider.StateAt(parentHeader.Root) diff --git a/op-service/sources/l2_client.go b/op-service/sources/l2_client.go index 812ddcab7c..7c60087464 100644 --- a/op-service/sources/l2_client.go +++ b/op-service/sources/l2_client.go @@ -32,9 +32,9 @@ func L2ClientDefaultConfig(config 
*rollup.Config, trustRPC bool) *L2ClientConfig span := int(config.SeqWindowSize) * 3 / 2 // Estimate number of L2 blocks in this span of L1 blocks // (there's always one L2 block per L1 block, L1 is thus the minimum, even if block time is very high) - if config.BlockTime < 12 && config.BlockTime > 0 { + if config.BlockTime/1000 < 12 && config.BlockTime/1000 > 0 { span *= 12 - span /= int(config.BlockTime) + span /= int(config.BlockTime / 1000) } fullSpan := span if span > 1000 { // sanity cap. If a large sequencing window is configured, do not make the cache too large From a717b8cca43a6fe2d19d0d46bea6aa96004d37f5 Mon Sep 17 00:00:00 2001 From: flywukong <2229306838@qq.com> Date: Mon, 3 Mar 2025 18:58:44 +0800 Subject: [PATCH 04/30] support millisecond-level timestamps for L1 block --- op-node/rollup/derive/attributes.go | 4 ++-- op-node/service.go | 10 ++++++++-- op-service/eth/block_info.go | 29 ++++++++++++++++++++++------- op-service/eth/heads.go | 11 ++++++++++- op-service/eth/id.go | 7 +++---- op-service/sources/types.go | 12 +++++++++--- op-service/testutils/l1info.go | 7 ++++++- 7 files changed, 60 insertions(+), 20 deletions(-) diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index 2dc32e09cf..d245643f42 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -126,9 +126,9 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex // Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2 nextL2MilliTime := l2Parent.MillisecondTimestamp() + ba.rollupCfg.BlockTime - if nextL2MilliTime < l1Info.MilliTime() { + if nextL2MilliTime < l1Info.MillTimestamp() { return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d", - l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MilliTime())) + l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MillTimestamp())) } 
var upgradeTxs []hexutil.Bytes diff --git a/op-node/service.go b/op-node/service.go index fa54cf0bca..e36001fe59 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -27,6 +27,12 @@ import ( opflags "github.com/ethereum-optimism/optimism/op-service/flags" ) +const ( + MinBlockTimeSeconds = 1 + MaxBlockTimeSeconds = 3 + MaxBlockTimeMs = 750 +) + // NewConfig creates a Config from the provided flags or environment variables. func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { if err := flags.CheckRequired(ctx); err != nil { @@ -44,11 +50,11 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { } { - if rollupConfig.BlockTime >= 1 && rollupConfig.BlockTime <= 3 { + if rollupConfig.BlockTime >= MinBlockTimeSeconds && rollupConfig.BlockTime <= MaxBlockTimeSeconds { // Convert legacy second-level timestamp to millisecond timestamp, // This is a compatibility behavior. rollupConfig.BlockTime = rollupConfig.BlockTime * 1000 - } else if rollupConfig.BlockTime%50 != 0 && rollupConfig.BlockTime > 750 { + } else if rollupConfig.BlockTime%50 != 0 && rollupConfig.BlockTime > MaxBlockTimeMs { return nil, fmt.Errorf("block time is invalid, block_time: %v", rollupConfig.BlockTime) } // rollupConfig.BlockTime is millisecond block interval diff --git a/op-service/eth/block_info.go b/op-service/eth/block_info.go index f712beeeda..3a3454d71c 100644 --- a/op-service/eth/block_info.go +++ b/op-service/eth/block_info.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" + "github.com/holiman/uint256" ) type BlockInfo interface { @@ -16,7 +17,8 @@ type BlockInfo interface { Root() common.Hash // state-root NumberU64() uint64 Time() uint64 - MilliTime() uint64 + MillTimestamp() uint64 + MillSeconds() uint64 // MixDigest field, reused for randomness after The Merge (Bellatrix hardfork) MixDigest() common.Hash BaseFee() *big.Int @@ 
-39,6 +41,7 @@ func InfoToL1BlockRef(info BlockInfo) L1BlockRef { Number: info.NumberU64(), ParentHash: info.ParentHash(), Time: info.Time(), + MsTime: info.MillSeconds(), } } @@ -73,9 +76,15 @@ func (b blockInfo) ParentBeaconRoot() *common.Hash { return b.Block.BeaconRoot() } -func (b blockInfo) MilliTime() uint64 { - // TODO: adapt L1 timestamp - return b.Block.Time() * 1000 +func (b blockInfo) MillTimestamp() uint64 { + return b.Block.Time()*1000 + b.MillSeconds() +} + +func (b blockInfo) MillSeconds() uint64 { + if b.MixDigest() == (common.Hash{}) { + return 0 + } + return uint256.NewInt(0).SetBytes32(b.MixDigest().Bytes()).Uint64() } func BlockToInfo(b *types.Block) BlockInfo { @@ -108,9 +117,15 @@ func (h headerBlockInfo) Time() uint64 { return h.Header.Time } -func (h headerBlockInfo) MilliTime() uint64 { - // TODO: adapt L1 timestamp - return h.Header.Time * 1000 +func (h headerBlockInfo) MillTimestamp() uint64 { + return h.Header.Time*1000 + h.MillSeconds() +} + +func (h headerBlockInfo) MillSeconds() uint64 { + if h.MixDigest() == (common.Hash{}) { + return 0 + } + return uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() } func (h headerBlockInfo) MixDigest() common.Hash { diff --git a/op-service/eth/heads.go b/op-service/eth/heads.go index db837cbbed..48915229b7 100644 --- a/op-service/eth/heads.go +++ b/op-service/eth/heads.go @@ -5,9 +5,11 @@ import ( "time" "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" + "github.com/holiman/uint256" ) // HeadSignalFn is used as callback function to accept head-signals @@ -43,11 +45,18 @@ func WatchHeadChanges(ctx context.Context, src NewHeadSource, fn HeadSignalFn) ( for { select { case header := <-headChanges: + var mTime uint64 + if header.MixDigest == (common.Hash{}) { + mTime = header.Time + } else { + mTime = 
uint256.NewInt(0).SetBytes32(header.MixDigest[:]).Uint64() + } fn(eventsCtx, L1BlockRef{ Hash: header.Hash(), Number: header.Number.Uint64(), ParentHash: header.ParentHash, - Time: header.Time, + Time: mTime, + MsTime: mTime, }) case <-eventsCtx.Done(): return nil diff --git a/op-service/eth/id.go b/op-service/eth/id.go index e221508e31..6fdb35885c 100644 --- a/op-service/eth/id.go +++ b/op-service/eth/id.go @@ -58,13 +58,12 @@ type L1BlockRef struct { Hash common.Hash `json:"hash"` Number uint64 `json:"number"` ParentHash common.Hash `json:"parentHash"` - Time uint64 `json:"timestamp"` - // TODO: + Time uint64 `json:"timestamp"` // second timestamp + MsTime uint64 `json:"msTimestamp"` // support millisecond } func (id L1BlockRef) MilliTimestamp() uint64 { - // TODO: adapt L1 - return id.Time * 1000 + return id.Time*1000 + id.MsTime } func (id L1BlockRef) String() string { diff --git a/op-service/sources/types.go b/op-service/sources/types.go index b57beb7db8..05237e8e0d 100644 --- a/op-service/sources/types.go +++ b/op-service/sources/types.go @@ -63,9 +63,15 @@ func (h headerInfo) Time() uint64 { return h.Header.Time } -func (h headerInfo) MilliTime() uint64 { - // TODO: adapt L1 timestamp - return h.Header.Time * 1000 +func (h headerInfo) MillTimestamp() uint64 { + return h.Header.Time*1000 + h.MillSeconds() +} + +func (h headerInfo) MillSeconds() uint64 { + if h.MixDigest() == (common.Hash{}) { + return 0 + } + return uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() } func (h headerInfo) MixDigest() common.Hash { diff --git a/op-service/testutils/l1info.go b/op-service/testutils/l1info.go index dcc45604df..c5d0cc1b34 100644 --- a/op-service/testutils/l1info.go +++ b/op-service/testutils/l1info.go @@ -21,6 +21,7 @@ type MockBlockInfo struct { InfoRoot common.Hash InfoNum uint64 InfoTime uint64 + InfoMTime uint64 InfoMixDigest [32]byte InfoBaseFee *big.Int InfoBlobBaseFee *big.Int @@ -56,10 +57,14 @@ func (l *MockBlockInfo) Time() uint64 { return 
l.InfoTime } -func (l *MockBlockInfo) MilliTime() uint64 { +func (l *MockBlockInfo) MillTimestamp() uint64 { return l.InfoTime * 1000 } +func (l *MockBlockInfo) MillSeconds() uint64 { + return l.InfoMTime +} + func (l *MockBlockInfo) MixDigest() common.Hash { return l.InfoMixDigest } From 618dbe04ae8e7bc431fc83b6638e9ca303742ba3 Mon Sep 17 00:00:00 2001 From: flywukong <2229306838@qq.com> Date: Tue, 4 Mar 2025 11:05:11 +0800 Subject: [PATCH 05/30] fix l1 header subscription --- op-node/rollup/derive/attributes.go | 4 ++-- op-node/rollup/driver/sequencer.go | 7 +++--- op-service/eth/block_info.go | 36 ++++++++++++++--------------- op-service/eth/heads.go | 8 +++---- op-service/sources/types.go | 13 ++++------- op-service/testutils/l1info.go | 8 ++----- 6 files changed, 33 insertions(+), 43 deletions(-) diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index d245643f42..bb012389eb 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -126,9 +126,9 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex // Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2 nextL2MilliTime := l2Parent.MillisecondTimestamp() + ba.rollupCfg.BlockTime - if nextL2MilliTime < l1Info.MillTimestamp() { + if nextL2MilliTime < l1Info.MilliTimestamp() { return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d", - l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MillTimestamp())) + l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MilliTimestamp())) } var upgradeTxs []hexutil.Bytes diff --git a/op-node/rollup/driver/sequencer.go b/op-node/rollup/driver/sequencer.go index e4ae1ae280..c3092bf661 100644 --- a/op-node/rollup/driver/sequencer.go +++ b/op-node/rollup/driver/sequencer.go @@ -231,22 +231,23 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip 
async.As } envelope, err := d.CompleteBuildingBlock(ctx, agossip, sequencerConductor) if err != nil { + backoffTime := time.Millisecond * time.Duration(d.rollupCfg.BlockTime) if errors.Is(err, derive.ErrCritical) { return nil, err // bubble up critical errors. } else if errors.Is(err, derive.ErrReset) { d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) d.metrics.RecordSequencerReset() - d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.BlockTime)) // hold off from sequencing for a full block + d.nextAction = d.timeNow().Add(backoffTime) // hold off from sequencing for a full block d.CancelBuildingBlock(ctx) return nil, err } else if errors.Is(err, derive.ErrTemporary) { d.log.Error("sequencer failed temporarily to seal new block", "err", err) - d.nextAction = d.timeNow().Add(time.Second) + d.nextAction = d.timeNow().Add(backoffTime) // We don't explicitly cancel block building jobs upon temporary errors: we may still finish the block. // Any unfinished block building work eventually times out, and will be cleaned up that way. 
} else { d.log.Error("sequencer failed to seal block with unclassified error", "err", err) - d.nextAction = d.timeNow().Add(time.Second) + d.nextAction = d.timeNow().Add(backoffTime) d.CancelBuildingBlock(ctx) } return nil, nil diff --git a/op-service/eth/block_info.go b/op-service/eth/block_info.go index 3a3454d71c..8a0e42c7c4 100644 --- a/op-service/eth/block_info.go +++ b/op-service/eth/block_info.go @@ -17,8 +17,7 @@ type BlockInfo interface { Root() common.Hash // state-root NumberU64() uint64 Time() uint64 - MillTimestamp() uint64 - MillSeconds() uint64 + MilliTimestamp() uint64 // MixDigest field, reused for randomness after The Merge (Bellatrix hardfork) MixDigest() common.Hash BaseFee() *big.Int @@ -36,12 +35,16 @@ type BlockInfo interface { } func InfoToL1BlockRef(info BlockInfo) L1BlockRef { + milliseconds := uint64(0) + if info.MixDigest() != (common.Hash{}) { + milliseconds = uint256.NewInt(0).SetBytes32(info.MixDigest().Bytes()).Uint64() + } return L1BlockRef{ Hash: info.Hash(), Number: info.NumberU64(), ParentHash: info.ParentHash(), Time: info.Time(), - MsTime: info.MillSeconds(), + MsTime: milliseconds, } } @@ -76,15 +79,12 @@ func (b blockInfo) ParentBeaconRoot() *common.Hash { return b.Block.BeaconRoot() } -func (b blockInfo) MillTimestamp() uint64 { - return b.Block.Time()*1000 + b.MillSeconds() -} - -func (b blockInfo) MillSeconds() uint64 { - if b.MixDigest() == (common.Hash{}) { - return 0 +func (b blockInfo) MilliTimestamp() uint64 { + milliseconds := uint64(0) + if b.MixDigest() != (common.Hash{}) { + milliseconds = uint256.NewInt(0).SetBytes32(b.MixDigest().Bytes()).Uint64() } - return uint256.NewInt(0).SetBytes32(b.MixDigest().Bytes()).Uint64() + return b.Block.Time()*1000 + milliseconds } func BlockToInfo(b *types.Block) BlockInfo { @@ -117,15 +117,13 @@ func (h headerBlockInfo) Time() uint64 { return h.Header.Time } -func (h headerBlockInfo) MillTimestamp() uint64 { - return h.Header.Time*1000 + h.MillSeconds() -} - -func (h 
headerBlockInfo) MillSeconds() uint64 { - if h.MixDigest() == (common.Hash{}) { - return 0 +func (h headerBlockInfo) MilliTimestamp() uint64 { + milliseconds := uint64(0) + if h.MixDigest() != (common.Hash{}) { + milliseconds = uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() } - return uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() + + return h.Header.Time*1000 + milliseconds } func (h headerBlockInfo) MixDigest() common.Hash { diff --git a/op-service/eth/heads.go b/op-service/eth/heads.go index 48915229b7..871590af91 100644 --- a/op-service/eth/heads.go +++ b/op-service/eth/heads.go @@ -45,17 +45,15 @@ func WatchHeadChanges(ctx context.Context, src NewHeadSource, fn HeadSignalFn) ( for { select { case header := <-headChanges: - var mTime uint64 - if header.MixDigest == (common.Hash{}) { - mTime = header.Time - } else { + mTime := uint64(0) + if header.MixDigest != (common.Hash{}) { mTime = uint256.NewInt(0).SetBytes32(header.MixDigest[:]).Uint64() } fn(eventsCtx, L1BlockRef{ Hash: header.Hash(), Number: header.Number.Uint64(), ParentHash: header.ParentHash, - Time: mTime, + Time: header.Time, MsTime: mTime, }) case <-eventsCtx.Done(): diff --git a/op-service/sources/types.go b/op-service/sources/types.go index 05237e8e0d..2a38f47a9f 100644 --- a/op-service/sources/types.go +++ b/op-service/sources/types.go @@ -63,15 +63,12 @@ func (h headerInfo) Time() uint64 { return h.Header.Time } -func (h headerInfo) MillTimestamp() uint64 { - return h.Header.Time*1000 + h.MillSeconds() -} - -func (h headerInfo) MillSeconds() uint64 { - if h.MixDigest() == (common.Hash{}) { - return 0 +func (h headerInfo) MilliTimestamp() uint64 { + milliseconds := uint64(0) + if h.MixDigest() != (common.Hash{}) { + milliseconds = uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() } - return uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() + return h.Header.Time*1000 + milliseconds } func (h headerInfo) MixDigest() common.Hash { diff --git 
a/op-service/testutils/l1info.go b/op-service/testutils/l1info.go index c5d0cc1b34..aaa9feb755 100644 --- a/op-service/testutils/l1info.go +++ b/op-service/testutils/l1info.go @@ -57,12 +57,8 @@ func (l *MockBlockInfo) Time() uint64 { return l.InfoTime } -func (l *MockBlockInfo) MillTimestamp() uint64 { - return l.InfoTime * 1000 -} - -func (l *MockBlockInfo) MillSeconds() uint64 { - return l.InfoMTime +func (l *MockBlockInfo) MilliTimestamp() uint64 { + return l.InfoTime*1000 + l.InfoMTime } func (l *MockBlockInfo) MixDigest() common.Hash { From 7115b2a952977e34b03945d0c39db0c4f32e5287 Mon Sep 17 00:00:00 2001 From: will-2012 <117156346+will-2012@users.noreply.github.com> Date: Tue, 4 Mar 2025 18:45:26 +0800 Subject: [PATCH 06/30] chore: polish millisecond-related code by review tips (#267) Co-authored-by: 2020xibao <2020xibao@gmail.com> --- op-batcher/batcher/channel_builder.go | 2 +- op-batcher/batcher/channel_manager.go | 2 +- op-chain-ops/genesis/config.go | 31 ++++++++++++-- op-e2e/actions/blocktime_test.go | 6 +-- op-e2e/actions/dencun_fork_test.go | 5 +-- op-e2e/actions/l2_sequencer_test.go | 4 +- op-e2e/actions/user_test.go | 8 +--- op-e2e/op_geth.go | 10 ++--- op-e2e/op_geth_test.go | 3 -- op-e2e/setup.go | 1 + op-e2e/system_adminrpc_test.go | 2 +- op-node/p2p/host_test.go | 1 + op-node/rollup/derive/attributes.go | 8 ++-- op-node/rollup/derive/attributes_queue.go | 2 +- op-node/rollup/derive/batch_queue.go | 8 ++-- op-node/rollup/derive/batch_queue_test.go | 14 +++---- op-node/rollup/derive/batches.go | 18 ++++---- op-node/rollup/derive/engine_queue.go | 4 +- op-node/rollup/derive/l1_block_info_test.go | 1 - op-node/rollup/derive/l2block_util.go | 2 +- op-node/rollup/derive/payload_util.go | 2 +- op-node/rollup/derive/span_batch.go | 5 ++- op-node/rollup/driver/origin_selector.go | 6 +-- op-node/rollup/driver/sequencer.go | 14 +++---- op-node/rollup/driver/sequencer_test.go | 6 +-- op-node/rollup/driver/state.go | 2 +- op-node/rollup/types.go | 46 
++++++++++++++------- op-node/service.go | 10 ++--- op-service/eth/block_info.go | 24 +++++------ op-service/eth/heads.go | 2 +- op-service/eth/id.go | 14 +++---- op-service/eth/types.go | 4 ++ op-service/sources/l2_client.go | 4 +- op-service/sources/types.go | 8 ++-- op-service/testutils/l1info.go | 6 +-- 35 files changed, 163 insertions(+), 122 deletions(-) diff --git a/op-batcher/batcher/channel_builder.go b/op-batcher/batcher/channel_builder.go index d63e1d45b5..bed788e766 100644 --- a/op-batcher/batcher/channel_builder.go +++ b/op-batcher/batcher/channel_builder.go @@ -76,7 +76,7 @@ type ChannelBuilder struct { outputBytes int } -// newChannelBuilder creates a new channel builder or returns an error if the +// NewChannelBuilder creates a new channel builder or returns an error if the // channel out could not be created. // it acts as a factory for either a span or singular channel out func NewChannelBuilder(cfg ChannelConfig, rollupCfg rollup.Config, latestL1OriginBlockNum uint64) (*ChannelBuilder, error) { diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index d2e9d86a02..c9fa50b4cf 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -368,7 +368,7 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo Number: block.NumberU64(), ParentHash: block.ParentHash(), Time: block.Time(), - MilliPartTime: uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), // adapts millisecond part + MilliTime: uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), // adapts millisecond part L1Origin: eth.BlockID{Hash: l1info.BlockHash, Number: l1info.Number}, SequenceNumber: l1info.SequenceNumber, } diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index 2b614d66be..2adf5e62fc 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -60,7 +60,7 @@ type DeployConfig struct { L1ChainID 
uint64 `json:"l1ChainID"` // L2ChainID is the chain ID of the L2 chain. L2ChainID uint64 `json:"l2ChainID"` - // L2BlockTime is the number of seconds between each L2 block. // millisecond + // L2BlockTime is the number of seconds between each L2 block. L2BlockTime uint64 `json:"l2BlockTime"` // FinalizationPeriodSeconds represents the number of seconds before an output is considered // finalized. This impacts the amount of time that withdrawals take to finalize and is @@ -305,6 +305,29 @@ type DeployConfig struct { UseInterop bool `json:"useInterop,omitempty"` } +func (d *DeployConfig) L1MillisecondBlockInterval() uint64 { + // convert second to millisecond + return d.L1BlockTime * 1000 +} + +func (d *DeployConfig) L2MillisecondBlockInterval() uint64 { + if d.L2BlockTime > 3 { + // has been millisecond + return d.L2BlockTime + } + // convert second to millisecond + return d.L2BlockTime * 1000 +} + +func (d *DeployConfig) L2SecondBlockInterval() uint64 { + if d.L2BlockTime <= 3 { + // has been second + return d.L2BlockTime + } + // convert millisecond to second + return d.L2BlockTime / 1000 +} + // Copy will deeply copy the DeployConfig. This does a JSON roundtrip to copy // which makes it easier to maintain, we do not need efficiency in this case. func (d *DeployConfig) Copy() *DeployConfig { @@ -435,12 +458,14 @@ func (d *DeployConfig) Check() error { } } if d.L2BlockTime <= 3 { + // TODO: too many tests depend it, tmp work around it // convert ms l2 time interval d.L2BlockTime = d.L2BlockTime * 1000 } + // L2 block time must always be smaller than L1 block time - if d.L1BlockTime*1000 < d.L2BlockTime { // TODO: tmp adjust, l1 interval is second timstamp and l2 interval is millisecond. 
- return fmt.Errorf("L2 block time (%d) is larger than L1 block time (%d)", d.L2BlockTime, d.L1BlockTime) + if d.L1MillisecondBlockInterval() < d.L2MillisecondBlockInterval() { + return fmt.Errorf("L2 block time (%d) is larger than L1 block time (%d)", d.L2MillisecondBlockInterval(), d.L1MillisecondBlockInterval()) } if d.RequiredProtocolVersion == (params.ProtocolVersion{}) { log.Warn("RequiredProtocolVersion is empty") diff --git a/op-e2e/actions/blocktime_test.go b/op-e2e/actions/blocktime_test.go index 2d75c68732..90a38be9a3 100644 --- a/op-e2e/actions/blocktime_test.go +++ b/op-e2e/actions/blocktime_test.go @@ -48,9 +48,9 @@ func BatchInLastPossibleBlocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) applyDeltaTimeOffset(dp, deltaTimeOffset) dp.DeployConfig.SequencerWindowSize = 4 - dp.DeployConfig.L2BlockTime = 2 // second + dp.DeployConfig.L2BlockTime = 2 - sd := e2eutils.Setup(t, dp, defaultAlloc) // dp.DeployConfig.L2BlockTime will be changed to 2000 + sd := e2eutils.Setup(t, dp, defaultAlloc) log := testlog.Logger(t, log.LevelDebug) sd, _, miner, sequencer, sequencerEngine, _, _, batcher := setupReorgTestActors(t, dp, sd, log) @@ -162,7 +162,7 @@ func LargeL1Gaps(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { dp.DeployConfig.SequencerWindowSize = 4 dp.DeployConfig.MaxSequencerDrift = 32 applyDeltaTimeOffset(dp, deltaTimeOffset) - sd := e2eutils.Setup(t, dp, defaultAlloc) // dp.DeployConfig.L2BlockTime = 2000 + sd := e2eutils.Setup(t, dp, defaultAlloc) log := testlog.Logger(t, log.LevelDebug) sd, _, miner, sequencer, sequencerEngine, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) diff --git a/op-e2e/actions/dencun_fork_test.go b/op-e2e/actions/dencun_fork_test.go index 6a607ecc7f..ead5a098ac 100644 --- a/op-e2e/actions/dencun_fork_test.go +++ b/op-e2e/actions/dencun_fork_test.go @@ -124,10 +124,7 @@ func TestDencunL2ForkAfterGenesis(gt *testing.T) { cancunOffset := 
hexutil.Uint64(0) dp.DeployConfig.L1CancunTimeOffset = &cancunOffset // This test wil fork on the second block - if dp.DeployConfig.L2BlockTime <= 3 { - dp.DeployConfig.L2BlockTime = dp.DeployConfig.L2BlockTime * 1000 // millisecond - } - offset := hexutil.Uint64(dp.DeployConfig.L2BlockTime / 1000 * 2) // second + offset := hexutil.Uint64(dp.DeployConfig.L2SecondBlockInterval() * 2) dp.DeployConfig.L2GenesisCanyonTimeOffset = &offset dp.DeployConfig.L2GenesisDeltaTimeOffset = &offset dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset diff --git a/op-e2e/actions/l2_sequencer_test.go b/op-e2e/actions/l2_sequencer_test.go index 745a27855e..dc38421fc4 100644 --- a/op-e2e/actions/l2_sequencer_test.go +++ b/op-e2e/actions/l2_sequencer_test.go @@ -98,7 +98,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { origin := miner.l1Chain.CurrentBlock() // L2 makes blocks to catch up - for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime/1000 < origin.Time { + for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.SecondBlockInterval() < origin.Time { makeL2BlockWithAliceTx() require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches") } @@ -111,7 +111,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { sequencer.ActL1HeadSignal(t) // Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin - for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime/1000 <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) { + for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.SecondBlockInterval() <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) { sequencer.ActL2KeepL1Origin(t) makeL2BlockWithAliceTx() require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin") diff --git a/op-e2e/actions/user_test.go b/op-e2e/actions/user_test.go index 4f28c9f7fa..53b227777a 100644 --- a/op-e2e/actions/user_test.go +++ 
b/op-e2e/actions/user_test.go @@ -118,15 +118,11 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { dp.DeployConfig.L2GenesisEcotoneTimeOffset = test.ecotoneTime dp.DeployConfig.L2GenesisFjordTimeOffset = test.fjordTime - if dp.DeployConfig.L2BlockTime <= 3 { - dp.DeployConfig.L2BlockTime = dp.DeployConfig.L2BlockTime * 1000 - } - if test.canyonTime != nil { - require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2BlockTime/1000), "canyon fork must be aligned") + require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2SecondBlockInterval()), "canyon fork must be aligned") } if test.ecotoneTime != nil { - require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2BlockTime/1000), "ecotone fork must be aligned") + require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2SecondBlockInterval()), "ecotone fork must be aligned") } sd := e2eutils.Setup(t, dp, defaultAlloc) diff --git a/op-e2e/op_geth.go b/op-e2e/op_geth.go index 1422a23a55..d356068cf2 100644 --- a/op-e2e/op_geth.go +++ b/op-e2e/op_geth.go @@ -211,8 +211,8 @@ func (d *OpGeth) StartBlockBuilding(ctx context.Context, attrs *eth.PayloadAttri // CreatePayloadAttributes creates a valid PayloadAttributes containing a L1Info deposit transaction followed by the supplied transactions. 
func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.PayloadAttributes, error) { - timestamp := d.L2Head.Timestamp + 2 - l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, uint64(timestamp*1000) /*ms*/) + milliTimestamp := d.L2Head.MillisecondTimestamp() + 2*1000 // 2000 millisecond block interval + l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, milliTimestamp) if err != nil { return nil, err } @@ -228,17 +228,17 @@ func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.Payloa } var withdrawals *types.Withdrawals - if d.L2ChainConfig.IsCanyon(uint64(timestamp)) { + if d.L2ChainConfig.IsCanyon(milliTimestamp / 1000) { withdrawals = &types.Withdrawals{} } var parentBeaconBlockRoot *common.Hash - if d.L2ChainConfig.IsEcotone(uint64(timestamp)) { + if d.L2ChainConfig.IsEcotone(milliTimestamp / 1000) { parentBeaconBlockRoot = d.L1Head.ParentBeaconRoot() } attrs := eth.PayloadAttributes{ - Timestamp: timestamp, + Timestamp: eth.Uint64Quantity(milliTimestamp / 1000), Transactions: txBytes, NoTxPool: true, GasLimit: (*eth.Uint64Quantity)(&d.SystemConfig.GasLimit), diff --git a/op-e2e/op_geth_test.go b/op-e2e/op_geth_test.go index a65bfe333c..90b117084c 100644 --- a/op-e2e/op_geth_test.go +++ b/op-e2e/op_geth_test.go @@ -435,9 +435,6 @@ func TestRegolith(t *testing.T) { // We also need to setup a L1 Genesis to create the rollup genesis. 
cfg := DefaultSystemConfig(t) cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime - if cfg.DeployConfig.L2BlockTime <= 3 { - cfg.DeployConfig.L2BlockTime = cfg.DeployConfig.L2BlockTime * 1000 // ms - } ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() diff --git a/op-e2e/setup.go b/op-e2e/setup.go index acbb423df6..3d8659bbac 100644 --- a/op-e2e/setup.go +++ b/op-e2e/setup.go @@ -548,6 +548,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste } sys.RollupConfig = &defaultConfig if sys.RollupConfig.BlockTime <= 3 { + // TODO: too many tests depend on this; temporary workaround // covert ms timestamp sys.RollupConfig.BlockTime = sys.RollupConfig.BlockTime * 1000 } diff --git a/op-e2e/system_adminrpc_test.go b/op-e2e/system_adminrpc_test.go index f5227b26db..e29e9c7606 100644 --- a/op-e2e/system_adminrpc_test.go +++ b/op-e2e/system_adminrpc_test.go @@ -56,7 +56,7 @@ func TestStopStartSequencer(t *testing.T) { require.False(t, active, "sequencer should be inactive") blockBefore := latestBlock(t, l2Seq) - time.Sleep(time.Duration(cfg.DeployConfig.L2BlockTime/1000+1) * time.Second) + time.Sleep(time.Duration(cfg.DeployConfig.L2SecondBlockInterval()+1) * time.Second) blockAfter := latestBlock(t, l2Seq) require.Equal(t, blockAfter, blockBefore, "Chain advanced after stopping sequencer") diff --git a/op-node/p2p/host_test.go b/op-node/p2p/host_test.go index 3fcfb7714c..986cce2cd3 100644 --- a/op-node/p2p/host_test.go +++ b/op-node/p2p/host_test.go @@ -261,6 +261,7 @@ func TestP2PFull(t *testing.T) { require.NoError(t, p2pClientA.ProtectPeer(ctx, hostB.ID())) require.NoError(t, p2pClientA.UnprotectPeer(ctx, hostB.ID())) + // TODO: } func TestDiscovery(t *testing.T) { diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index bb012389eb..d46409381b 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -108,7 +108,7 @@ func (ba 
*FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex // Calculate bsc block base fee var l1BaseFee *big.Int - if ba.rollupCfg.IsSnow((l2Parent.MillisecondTimestamp() + ba.rollupCfg.BlockTime) / 1000) { + if ba.rollupCfg.IsSnow((l2Parent.MillisecondTimestamp() + ba.rollupCfg.MillisecondBlockInterval()) / 1000) { l1BaseFee, err = SnowL1GasPrice(ctx, ba, epoch) if err != nil { return nil, err @@ -125,10 +125,10 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex l1Info = bsc.NewBlockInfoBSCWrapper(l1Info, l1BaseFee) // Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2 - nextL2MilliTime := l2Parent.MillisecondTimestamp() + ba.rollupCfg.BlockTime - if nextL2MilliTime < l1Info.MilliTimestamp() { + nextL2MilliTime := l2Parent.MillisecondTimestamp() + ba.rollupCfg.MillisecondBlockInterval() + if nextL2MilliTime < l1Info.MillisecondTimestamp() { return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d", - l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MilliTimestamp())) + l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MillisecondTimestamp())) } var upgradeTxs []hexutil.Bytes diff --git a/op-node/rollup/derive/attributes_queue.go b/op-node/rollup/derive/attributes_queue.go index 0361ea707c..e05fd2b5dc 100644 --- a/op-node/rollup/derive/attributes_queue.go +++ b/op-node/rollup/derive/attributes_queue.go @@ -88,7 +88,7 @@ func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *Sing return nil, NewResetError(fmt.Errorf("valid batch has bad parent hash %s, expected %s", batch.ParentHash, l2SafeHead.Hash)) } // sanity check timestamp - if expected := l2SafeHead.MillisecondTimestamp() + aq.config.BlockTime; expected != batch.Timestamp { + if expected := l2SafeHead.MillisecondTimestamp() + aq.config.MillisecondBlockInterval(); expected != batch.Timestamp { return nil, 
NewResetError(fmt.Errorf("valid batch has bad timestamp %d, expected %d", batch.Timestamp, expected)) } fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second) diff --git a/op-node/rollup/derive/batch_queue.go b/op-node/rollup/derive/batch_queue.go index fb796c0af3..b923b1ff84 100644 --- a/op-node/rollup/derive/batch_queue.go +++ b/op-node/rollup/derive/batch_queue.go @@ -96,7 +96,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, parent eth.L2BlockRef) (*Si if len(bq.nextSpan) > 0 { // There are cached singular batches derived from the span batch. // Check if the next cached batch matches the given parent block. - if bq.nextSpan[0].Timestamp == parent.MillisecondTimestamp()+bq.config.BlockTime { + if bq.nextSpan[0].Timestamp == parent.MillisecondTimestamp()+bq.config.MillisecondBlockInterval() { // Pop first one and return. nextBatch := bq.popNextBatch(parent) // len(bq.nextSpan) == 0 means it's the last batch of the span. @@ -257,7 +257,7 @@ func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, paren // Find the first-seen batch that matches all validity conditions. // We may not have sufficient information to proceed filtering, and then we stop. // There may be none: in that case we force-create an empty batch - nextMilliTimestamp := parent.MillisecondTimestamp() + bq.config.BlockTime + nextMilliTimestamp := parent.MillisecondTimestamp() + bq.config.MillisecondBlockInterval() var nextBatch *BatchWithL1InclusionBlock // Go over all batches, in order of inclusion, and find the first batch we can accept. @@ -321,7 +321,7 @@ batchLoop: // Fill with empty L2 blocks of the same epoch until we meet the time of the next L1 origin, // to preserve that L2 time >= L1 time. If this is the first block of the epoch, always generate a // batch to ensure that we at least have one batch per epoch. 
- if nextMilliTimestamp < nextEpoch.MilliTimestamp() || firstOfEpoch { + if nextMilliTimestamp < nextEpoch.MillisecondTimestamp() || firstOfEpoch { bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextMilliTimestamp) return &SingularBatch{ ParentHash: parent.Hash, @@ -334,7 +334,7 @@ batchLoop: // At this point we have auto generated every batch for the current epoch // that we can, so we can advance to the next epoch. - bq.log.Trace("Advancing internal L1 blocks", "next_ms_timestamp", nextMilliTimestamp, "next_epoch_ms_time", nextEpoch.MilliTimestamp()) + bq.log.Trace("Advancing internal L1 blocks", "next_ms_timestamp", nextMilliTimestamp, "next_epoch_ms_time", nextEpoch.MillisecondTimestamp()) bq.l1Blocks = bq.l1Blocks[1:] return nil, io.EOF } diff --git a/op-node/rollup/derive/batch_queue_test.go b/op-node/rollup/derive/batch_queue_test.go index 7a390e5709..68eb182461 100644 --- a/op-node/rollup/derive/batch_queue_test.go +++ b/op-node/rollup/derive/batch_queue_test.go @@ -298,7 +298,7 @@ func BatchQueueEager(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime / 1000 + safeHead.Time += cfg.SecondBlockInterval() safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } @@ -435,7 +435,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { Number: 0, ParentHash: common.Hash{}, Time: 10, - MilliPartTime: 0, + MilliTime: 0, L1Origin: l1[0].ID(), SequenceNumber: 0, } @@ -623,7 +623,7 @@ func BatchQueueAdvancedEpoch(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime / 1000 + safeHead.Time += cfg.SecondBlockInterval() safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } @@ -722,7 +722,7 @@ func BatchQueueShuffle(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime / 
1000 + safeHead.Time += cfg.SecondBlockInterval() safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } @@ -825,7 +825,7 @@ func TestBatchQueueOverlappingSpanBatch(t *testing.T) { } else { require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime / 1000 + safeHead.Time += cfg.SecondBlockInterval() safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } @@ -943,7 +943,7 @@ func TestBatchQueueComplex(t *testing.T) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime / 1000 + safeHead.Time += cfg.SecondBlockInterval() safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } @@ -1001,7 +1001,7 @@ func TestBatchQueueResetSpan(t *testing.T) { // This NextBatch() will return the second singular batch. safeHead.Number += 1 - safeHead.Time += cfg.BlockTime / 1000 + safeHead.Time += cfg.SecondBlockInterval() safeHead.Hash = mockHash(nextBatch.Timestamp/1000, 2) safeHead.L1Origin = nextBatch.Epoch() nextBatch, _, err = bq.NextBatch(context.Background(), safeHead) diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index a60cb089bd..f42bdedf4a 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -67,7 +67,7 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo } epoch := l1Blocks[0] - nextMilliTimestamp := l2SafeHead.MillisecondTimestamp() + cfg.BlockTime + nextMilliTimestamp := l2SafeHead.MillisecondTimestamp() + cfg.MillisecondBlockInterval() if batch.Timestamp > nextMilliTimestamp { log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextMilliTimestamp) return BatchFuture @@ -118,8 +118,8 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo return BatchDrop } - if batch.Timestamp < batchOrigin.MilliTimestamp() { - log.Warn("batch timestamp is less than 
L1 origin timestamp", "l2_ms_timestamp", batch.Timestamp, "l1_ms_timestamp", batchOrigin.MilliTimestamp(), "origin", batchOrigin.ID()) + if batch.Timestamp < batchOrigin.MillisecondTimestamp() { + log.Warn("batch timestamp is less than L1 origin timestamp", "l2_ms_timestamp", batch.Timestamp, "l1_ms_timestamp", batchOrigin.MillisecondTimestamp(), "origin", batchOrigin.ID()) return BatchDrop } @@ -136,7 +136,7 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo return BatchUndecided } nextOrigin := l1Blocks[1] - if batch.Timestamp >= nextOrigin.MilliTimestamp() { // check if the next L1 origin could have been adopted + if batch.Timestamp >= nextOrigin.MillisecondTimestamp() { // check if the next L1 origin could have been adopted log.Info("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid") return BatchDrop } else { @@ -194,7 +194,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B return BatchDrop } - nextMilliTimestamp := l2SafeHead.MillisecondTimestamp() + cfg.BlockTime + nextMilliTimestamp := l2SafeHead.MillisecondTimestamp() + cfg.MillisecondBlockInterval() if batch.GetTimestamp() > nextMilliTimestamp { log.Trace("received out-of-order batch for future processing after next batch", "next_ms_timestamp", nextMilliTimestamp) @@ -215,11 +215,11 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B log.Warn("batch has misaligned timestamp, block time is too short") return BatchDrop } - if (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())%cfg.BlockTime != 0 { + if (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())%cfg.MillisecondBlockInterval() != 0 { log.Warn("batch has misaligned timestamp, not overlapped exactly") return BatchDrop } - parentNum = l2SafeHead.Number - (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())/cfg.BlockTime - 1 + parentNum = l2SafeHead.Number - 
(l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())/cfg.MillisecondBlockInterval() - 1 var err error parentBlock, err = l2Fetcher.L2BlockRefByNumber(ctx, parentNum) if err != nil { @@ -290,7 +290,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B } } blockTimestamp := batch.GetBlockTimestamp(i) - if blockTimestamp < l1Origin.MilliTimestamp() { + if blockTimestamp < l1Origin.MillisecondTimestamp() { log.Warn("block timestamp is less than L1 origin timestamp", "l2_timestamp", blockTimestamp, "l1_timestamp", l1Origin.Time, "origin", l1Origin.ID()) return BatchDrop } @@ -307,7 +307,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B log.Info("without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid") return BatchUndecided } - if blockTimestamp >= l1Blocks[originIdx+1].MilliTimestamp() { // check if the next L1 origin could have been adopted + if blockTimestamp >= l1Blocks[originIdx+1].MillisecondTimestamp() { // check if the next L1 origin could have been adopted log.Info("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid") return BatchDrop } else { diff --git a/op-node/rollup/derive/engine_queue.go b/op-node/rollup/derive/engine_queue.go index 3d987b3fcf..bfce6dc8ff 100644 --- a/op-node/rollup/derive/engine_queue.go +++ b/op-node/rollup/derive/engine_queue.go @@ -284,9 +284,9 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System if err != nil { return NewTemporaryError(fmt.Errorf("failed to fetch the new L1 progress: origin: %v; err: %w", safe.L1Origin, err)) } - if safe.MillisecondTimestamp() < l1Origin.MilliTimestamp() { + if safe.MillisecondTimestamp() < l1Origin.MillisecondTimestamp() { return NewResetError(fmt.Errorf("cannot reset block derivation to start at L2 block %s with time %d older than its L1 origin %s with time %d, time invariant is 
broken", - safe, safe.MillisecondTimestamp(), l1Origin, l1Origin.MilliTimestamp())) + safe, safe.MillisecondTimestamp(), l1Origin, l1Origin.MillisecondTimestamp())) } // Walk back L2 chain to find the L1 origin that is old enough to start buffering channel data from. diff --git a/op-node/rollup/derive/l1_block_info_test.go b/op-node/rollup/derive/l1_block_info_test.go index 75f2374c5d..b87c9108d6 100644 --- a/op-node/rollup/derive/l1_block_info_test.go +++ b/op-node/rollup/derive/l1_block_info_test.go @@ -133,7 +133,6 @@ func TestParseL1InfoDepositTxData(t *testing.T) { require.Equal(t, L1InfoEcotoneLen, len(depTx.Data)) }) t.Run("first-block ecotone", func(t *testing.T) { - // TODO: rng := rand.New(rand.NewSource(1234)) info := testutils.MakeBlockInfo(nil)(rng) zero := uint64(2) diff --git a/op-node/rollup/derive/l2block_util.go b/op-node/rollup/derive/l2block_util.go index b1e6c4713b..aa44117fc0 100644 --- a/op-node/rollup/derive/l2block_util.go +++ b/op-node/rollup/derive/l2block_util.go @@ -61,7 +61,7 @@ func L2BlockToBlockRef(rollupCfg *rollup.Config, block L2BlockRefSource) (eth.L2 Number: number, ParentHash: block.ParentHash(), Time: block.Time(), - MilliPartTime: uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), + MilliTime: uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), L1Origin: l1Origin, SequenceNumber: sequenceNumber, }, nil diff --git a/op-node/rollup/derive/payload_util.go b/op-node/rollup/derive/payload_util.go index 5328d9f612..d5b47f25da 100644 --- a/op-node/rollup/derive/payload_util.go +++ b/op-node/rollup/derive/payload_util.go @@ -46,7 +46,7 @@ func PayloadToBlockRef(rollupCfg *rollup.Config, payload *eth.ExecutionPayload) Number: uint64(payload.BlockNumber), ParentHash: payload.ParentHash, Time: uint64(payload.Timestamp), - MilliPartTime: uint256.NewInt(0).SetBytes32(payload.PrevRandao[:]).Uint64(), // adapts millisecond part + MilliTime: uint256.NewInt(0).SetBytes32(payload.PrevRandao[:]).Uint64(), // 
adapts millisecond part L1Origin: l1Origin, SequenceNumber: sequenceNumber, }, nil diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index 871028f885..9e785a94e3 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -557,7 +557,7 @@ func (b *SpanBatch) ToRawSpanBatch() (*RawSpanBatch, error) { return &RawSpanBatch{ spanBatchPrefix: spanBatchPrefix{ - relTimestamp: span_start.Timestamp - b.GenesisTimestamp*1000, + relTimestamp: span_start.Timestamp - b.MillisecondGenesisTimestamp(), l1OriginNum: uint64(span_end.EpochNum), parentCheck: b.ParentCheck, l1OriginCheck: b.L1OriginCheck, @@ -570,6 +570,9 @@ func (b *SpanBatch) ToRawSpanBatch() (*RawSpanBatch, error) { }, }, nil } +func (b *SpanBatch) MillisecondGenesisTimestamp() uint64 { + return b.GenesisTimestamp * 1000 +} // GetSingularBatches converts SpanBatchElements after L2 safe head to SingularBatches. // Since SpanBatchElement does not contain EpochHash, set EpochHash from the given L1 blocks. diff --git a/op-node/rollup/driver/origin_selector.go b/op-node/rollup/driver/origin_selector.go index 5539d77f7d..73f87f4eb9 100644 --- a/op-node/rollup/driver/origin_selector.go +++ b/op-node/rollup/driver/origin_selector.go @@ -52,7 +52,7 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc // If we are past the sequencer depth, we may want to advance the origin, but need to still // check the time of the next origin. 
- pastSeqDrift := l2Head.MillisecondTimestamp()+los.cfg.BlockTime > currentOrigin.MilliTimestamp()+msd + pastSeqDrift := l2Head.MillisecondTimestamp()+los.cfg.BlockTime > currentOrigin.MillisecondTimestamp()+msd // Limit the time to fetch next origin block by default refCtx, refCancel := context.WithTimeout(ctx, 100*time.Millisecond) defer refCancel() @@ -60,7 +60,7 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc log.Warn("Next L2 block time is past the sequencer drift + current origin time", "l2_head_ms_timestamp", l2Head.MillisecondTimestamp(), "l2_block_ms_interval", los.cfg.BlockTime, - "l1_origin_ms_timestamp", currentOrigin.MilliTimestamp(), + "l1_origin_ms_timestamp", currentOrigin.MillisecondTimestamp(), "max_ms_drift", msd) // Must fetch next L1 block as long as it may take, cause we are pastSeqDrift refCtx = ctx @@ -97,7 +97,7 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc // of slack. For simplicity, we implement our Sequencer to always start building on the latest // L1 block when we can. // If not pastSeqDrift and next origin receipts not cached, fallback to current origin. - if l2Head.MillisecondTimestamp()+los.cfg.BlockTime >= nextOrigin.MilliTimestamp() && (pastSeqDrift || receiptsCached) { + if l2Head.MillisecondTimestamp()+los.cfg.MillisecondBlockInterval() >= nextOrigin.MillisecondTimestamp() && (pastSeqDrift || receiptsCached) { return nextOrigin, nil } diff --git a/op-node/rollup/driver/sequencer.go b/op-node/rollup/driver/sequencer.go index c3092bf661..5ed04092da 100644 --- a/op-node/rollup/driver/sequencer.go +++ b/op-node/rollup/driver/sequencer.go @@ -99,7 +99,7 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error { // empty blocks (other than the L1 info deposit and any user deposits). We handle this by // setting NoTxPool to true, which will cause the Sequencer to not include any transactions // from the transaction pool. 
- attrs.NoTxPool = attrs.MilliTimestamp() > l1Origin.MilliTimestamp()+d.spec.MaxSequencerDrift(l1Origin.Time)*1000 + attrs.NoTxPool = attrs.MilliTimestamp() > l1Origin.MillisecondTimestamp()+d.spec.MaxSequencerDrift(l1Origin.Time)*1000 // For the Ecotone activation block we shouldn't include any sequencer transactions. if d.rollupCfg.IsEcotoneActivationBlock(uint64(attrs.Timestamp)) { @@ -154,7 +154,7 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration { if safe { d.log.Warn("delaying sequencing to not interrupt safe-head changes", "onto", buildingOnto, "onto_time", buildingOnto.Time) // approximates the worst-case time it takes to build a block, to reattempt sequencing after. - return time.Millisecond * time.Duration(d.rollupCfg.BlockTime) + return time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval()) } head := d.engine.UnsafeL2Head() @@ -166,8 +166,8 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration { return delay } - blockTime := time.Duration(d.rollupCfg.BlockTime) * time.Millisecond - payloadTime := time.UnixMilli(int64(head.MillisecondTimestamp() + d.rollupCfg.BlockTime)) + blockTime := time.Duration(d.rollupCfg.MillisecondBlockInterval()) * time.Millisecond + payloadTime := time.UnixMilli(int64(head.MillisecondTimestamp() + d.rollupCfg.MillisecondBlockInterval())) remainingTime := payloadTime.Sub(now) // If we started building a block already, and if that work is still consistent, @@ -226,12 +226,12 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As if safe { d.log.Warn("avoiding sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time) // approximates the worst-case time it takes to build a block, to reattempt sequencing after. 
- d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.BlockTime)) + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval())) return nil, nil } envelope, err := d.CompleteBuildingBlock(ctx, agossip, sequencerConductor) if err != nil { - backoffTime := time.Millisecond * time.Duration(d.rollupCfg.BlockTime) + backoffTime := time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval()) if errors.Is(err, derive.ErrCritical) { return nil, err // bubble up critical errors. } else if errors.Is(err, derive.ErrReset) { @@ -266,7 +266,7 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As } else if errors.Is(err, derive.ErrReset) { d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) d.metrics.RecordSequencerReset() - d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.BlockTime)) // hold off from sequencing for a full block + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval())) // hold off from sequencing for a full block return nil, err } else if errors.Is(err, derive.ErrTemporary) { d.log.Error("sequencer temporarily failed to start building new block", "err", err) diff --git a/op-node/rollup/driver/sequencer_test.go b/op-node/rollup/driver/sequencer_test.go index 923dfc0768..0b04ffe1c1 100644 --- a/op-node/rollup/driver/sequencer_test.go +++ b/op-node/rollup/driver/sequencer_test.go @@ -259,7 +259,7 @@ func TestSequencerChaosMonkey(t *testing.T) { testGasLimit := eth.Uint64Quantity(10_000_000) return ð.PayloadAttributes{ - Timestamp: eth.Uint64Quantity(l2Parent.Time + cfg.BlockTime/1000), + Timestamp: eth.Uint64Quantity(l2Parent.Time + cfg.SecondBlockInterval()), PrevRandao: eth.Bytes32{}, SuggestedFeeRecipient: common.Address{}, Transactions: []eth.Data{infoDep}, @@ -283,10 +283,10 @@ func TestSequencerChaosMonkey(t *testing.T) { } // randomly 
make a L1 origin appear, if we can even select it nextL2MilliTime := l2Head.MillisecondTimestamp() + cfg.BlockTime - if nextL2MilliTime <= origin.MilliTimestamp() { + if nextL2MilliTime <= origin.MillisecondTimestamp() { return origin, nil } - maxTimeIncrement := nextL2MilliTime - origin.MilliTimestamp() + maxTimeIncrement := nextL2MilliTime - origin.MillisecondTimestamp() if maxTimeIncrement > maxL1BlockTimeGap { maxTimeIncrement = maxL1BlockTimeGap } diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index 004c70ff42..b004a91916 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -269,7 +269,7 @@ func (s *Driver) eventLoop() { // Create a ticker to check if there is a gap in the engine queue. Whenever // there is, we send requests to sync source to retrieve the missing payloads. - syncCheckInterval := time.Duration(s.config.BlockTime) * time.Millisecond * 2 + syncCheckInterval := time.Duration(s.config.MillisecondBlockInterval()) * time.Millisecond * 2 altSyncTicker := time.NewTicker(syncCheckInterval) defer altSyncTicker.Stop() lastUnsafeL2 := s.engineController.UnsafeL2Head() diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 839d111aa0..7fc128b111 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -159,6 +159,24 @@ type Config struct { LegacyUsePlasma bool `json:"use_plasma,omitempty"` } +// MillisecondBlockInterval returns millisecond block interval, which has compatible conversions. +// Mainly used to support millisecond block intervals. +func (cfg *Config) MillisecondBlockInterval() uint64 { + if cfg.BlockTime > 3 { + return cfg.BlockTime + } + return cfg.BlockTime * 1000 +} + +// SecondBlockInterval returns second block interval, which has compatible conversions. +// Mainly used to stay compatible with historical fork times. 
+func (cfg *Config) SecondBlockInterval() uint64 { + if cfg.BlockTime <= 3 { + return cfg.BlockTime + } + return cfg.BlockTime / 1000 +} + // ValidateL1Config checks L1 config variables for errors. func (cfg *Config) ValidateL1Config(ctx context.Context, client L1Client) error { // Validate the L1 Client Chain ID @@ -193,7 +211,7 @@ func (cfg *Config) ValidateL2Config(ctx context.Context, client L2Client, skipL2 } func (cfg *Config) TimestampForBlock(blockNumber uint64) uint64 { - return cfg.Genesis.L2Time + ((blockNumber - cfg.Genesis.L2.Number) * cfg.BlockTime / 1000) + return cfg.Genesis.L2Time + ((blockNumber - cfg.Genesis.L2.Number) * cfg.SecondBlockInterval()) } func (cfg *Config) TargetBlockNumber(milliTimestamp uint64) (num uint64, err error) { @@ -206,7 +224,7 @@ func (cfg *Config) TargetBlockNumber(milliTimestamp uint64) (num uint64, err err } wallClockGenesisDiff := milliTimestamp - genesisMilliTimestamp // Note: round down, we should not request blocks into the future. - blocksSinceGenesis := wallClockGenesisDiff / cfg.BlockTime + blocksSinceGenesis := wallClockGenesisDiff / cfg.MillisecondBlockInterval() return cfg.Genesis.L2.Number + blocksSinceGenesis, nil } @@ -428,8 +446,8 @@ func (c *Config) IsFjord(timestamp uint64) bool { // Fjord upgrade. func (c *Config) IsFjordActivationBlock(l2BlockTime uint64) bool { return c.IsFjord(l2BlockTime) && - l2BlockTime >= c.BlockTime/1000 && - !c.IsFjord(l2BlockTime-c.BlockTime/1000) + l2BlockTime >= c.SecondBlockInterval() && + !c.IsFjord(l2BlockTime-c.SecondBlockInterval()) } // IsInterop returns true if the Interop hardfork is active at or past the given timestamp. 
@@ -439,34 +457,34 @@ func (c *Config) IsInterop(timestamp uint64) bool { func (c *Config) IsRegolithActivationBlock(l2BlockTime uint64) bool { return c.IsRegolith(l2BlockTime) && - l2BlockTime >= c.BlockTime/1000 && - !c.IsRegolith(l2BlockTime-c.BlockTime/1000) + l2BlockTime >= c.SecondBlockInterval() && + !c.IsRegolith(l2BlockTime-c.SecondBlockInterval()) } func (c *Config) IsCanyonActivationBlock(l2BlockTime uint64) bool { return c.IsCanyon(l2BlockTime) && - l2BlockTime >= c.BlockTime/1000 && - !c.IsCanyon(l2BlockTime-c.BlockTime/1000) + l2BlockTime >= c.SecondBlockInterval() && + !c.IsCanyon(l2BlockTime-c.SecondBlockInterval()) } func (c *Config) IsDeltaActivationBlock(l2BlockTime uint64) bool { return c.IsDelta(l2BlockTime) && - l2BlockTime >= c.BlockTime/1000 && - !c.IsDelta(l2BlockTime-c.BlockTime/1000) + l2BlockTime >= c.SecondBlockInterval() && + !c.IsDelta(l2BlockTime-c.SecondBlockInterval()) } // IsEcotoneActivationBlock returns whether the specified block is the first block subject to the // Ecotone upgrade. Ecotone activation at genesis does not count. func (c *Config) IsEcotoneActivationBlock(l2BlockTime uint64) bool { return c.IsEcotone(l2BlockTime) && - l2BlockTime >= c.BlockTime/1000 && - !c.IsEcotone(l2BlockTime-c.BlockTime/1000) + l2BlockTime >= c.SecondBlockInterval() && + !c.IsEcotone(l2BlockTime-c.SecondBlockInterval()) } func (c *Config) IsInteropActivationBlock(l2BlockTime uint64) bool { return c.IsInterop(l2BlockTime) && - l2BlockTime >= c.BlockTime/1000 && - !c.IsInterop(l2BlockTime-c.BlockTime/1000) + l2BlockTime >= c.SecondBlockInterval() && + !c.IsInterop(l2BlockTime-c.SecondBlockInterval()) } // ForkchoiceUpdatedVersion returns the EngineAPIMethod suitable for the chain hard fork version. 
diff --git a/op-node/service.go b/op-node/service.go index e36001fe59..b6df938f75 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -28,9 +28,9 @@ import ( ) const ( - MinBlockTimeSeconds = 1 - MaxBlockTimeSeconds = 3 - MaxBlockTimeMs = 750 + minSecondBlockInterval = 1 + maxSecondBlockInterval = 3 + maxMillisecondBlockInterval = 750 ) // NewConfig creates a Config from the provided flags or environment variables. @@ -50,11 +50,11 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { } { - if rollupConfig.BlockTime >= MinBlockTimeSeconds && rollupConfig.BlockTime <= MaxBlockTimeSeconds { + if rollupConfig.BlockTime >= minSecondBlockInterval && rollupConfig.BlockTime <= maxSecondBlockInterval { // Convert legacy second-level timestamp to millisecond timestamp, // This is a compatibility behavior. rollupConfig.BlockTime = rollupConfig.BlockTime * 1000 - } else if rollupConfig.BlockTime%50 != 0 && rollupConfig.BlockTime > MaxBlockTimeMs { + } else if rollupConfig.BlockTime%50 != 0 && rollupConfig.BlockTime > maxMillisecondBlockInterval { return nil, fmt.Errorf("block time is invalid, block_time: %v", rollupConfig.BlockTime) } // rollupConfig.BlockTime is millisecond block interval diff --git a/op-service/eth/block_info.go b/op-service/eth/block_info.go index 8a0e42c7c4..24b28611ae 100644 --- a/op-service/eth/block_info.go +++ b/op-service/eth/block_info.go @@ -17,7 +17,7 @@ type BlockInfo interface { Root() common.Hash // state-root NumberU64() uint64 Time() uint64 - MilliTimestamp() uint64 + MillisecondTimestamp() uint64 // MixDigest field, reused for randomness after The Merge (Bellatrix hardfork) MixDigest() common.Hash BaseFee() *big.Int @@ -35,16 +35,16 @@ type BlockInfo interface { } func InfoToL1BlockRef(info BlockInfo) L1BlockRef { - milliseconds := uint64(0) + milliPart := uint64(0) if info.MixDigest() != (common.Hash{}) { - milliseconds = uint256.NewInt(0).SetBytes32(info.MixDigest().Bytes()).Uint64() + milliPart = 
uint256.NewInt(0).SetBytes32(info.MixDigest().Bytes()).Uint64() } return L1BlockRef{ Hash: info.Hash(), Number: info.NumberU64(), ParentHash: info.ParentHash(), Time: info.Time(), - MsTime: milliseconds, + MilliTime: milliPart, } } @@ -79,12 +79,12 @@ func (b blockInfo) ParentBeaconRoot() *common.Hash { return b.Block.BeaconRoot() } -func (b blockInfo) MilliTimestamp() uint64 { - milliseconds := uint64(0) +func (b blockInfo) MillisecondTimestamp() uint64 { + milliPart := uint64(0) if b.MixDigest() != (common.Hash{}) { - milliseconds = uint256.NewInt(0).SetBytes32(b.MixDigest().Bytes()).Uint64() + milliPart = uint256.NewInt(0).SetBytes32(b.MixDigest().Bytes()).Uint64() } - return b.Block.Time()*1000 + milliseconds + return b.Block.Time()*1000 + milliPart } func BlockToInfo(b *types.Block) BlockInfo { @@ -117,13 +117,13 @@ func (h headerBlockInfo) Time() uint64 { return h.Header.Time } -func (h headerBlockInfo) MilliTimestamp() uint64 { - milliseconds := uint64(0) +func (h headerBlockInfo) MillisecondTimestamp() uint64 { + milliPart := uint64(0) if h.MixDigest() != (common.Hash{}) { - milliseconds = uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() + milliPart = uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() } - return h.Header.Time*1000 + milliseconds + return h.Header.Time*1000 + milliPart } func (h headerBlockInfo) MixDigest() common.Hash { diff --git a/op-service/eth/heads.go b/op-service/eth/heads.go index 871590af91..f241dc4af8 100644 --- a/op-service/eth/heads.go +++ b/op-service/eth/heads.go @@ -54,7 +54,7 @@ func WatchHeadChanges(ctx context.Context, src NewHeadSource, fn HeadSignalFn) ( Number: header.Number.Uint64(), ParentHash: header.ParentHash, Time: header.Time, - MsTime: mTime, + MilliTime: mTime, }) case <-eventsCtx.Done(): return nil diff --git a/op-service/eth/id.go b/op-service/eth/id.go index 6fdb35885c..56d0e2831f 100644 --- a/op-service/eth/id.go +++ b/op-service/eth/id.go @@ -34,14 +34,14 @@ type L2BlockRef struct { 
Hash common.Hash `json:"hash"` Number uint64 `json:"number"` ParentHash common.Hash `json:"parentHash"` - Time uint64 `json:"timestamp"` // second timestamp - MilliPartTime uint64 `json:"milliparttime"` // support millisecond + Time uint64 `json:"timestamp"` // second timestamp + MilliTime uint64 `json:"millitimestamp"` // support millisecond L1Origin BlockID `json:"l1origin"` SequenceNumber uint64 `json:"sequenceNumber"` // distance to first block of epoch } func (id L2BlockRef) MillisecondTimestamp() uint64 { - return id.Time*1000 + id.MilliPartTime + return id.Time*1000 + id.MilliTime } func (id L2BlockRef) String() string { @@ -58,12 +58,12 @@ type L1BlockRef struct { Hash common.Hash `json:"hash"` Number uint64 `json:"number"` ParentHash common.Hash `json:"parentHash"` - Time uint64 `json:"timestamp"` // second timestamp - MsTime uint64 `json:"msTimestamp"` // support millisecond + Time uint64 `json:"timestamp"` // second timestamp + MilliTime uint64 `json:"millisecondTimestamp"` // support millisecond } -func (id L1BlockRef) MilliTimestamp() uint64 { - return id.Time*1000 + id.MsTime +func (id L1BlockRef) MillisecondTimestamp() uint64 { + return id.Time*1000 + id.MilliTime } func (id L1BlockRef) String() string { diff --git a/op-service/eth/types.go b/op-service/eth/types.go index 7a6fb6fee0..66d4822ef3 100644 --- a/op-service/eth/types.go +++ b/op-service/eth/types.go @@ -196,6 +196,10 @@ type ExecutionPayload struct { ExcessBlobGas *Uint64Quantity `json:"excessBlobGas,omitempty"` } +func (payload *ExecutionPayload) MillisecondTimestamp() uint64 { + return uint64(payload.Timestamp) * 1000 +} + func (payload *ExecutionPayload) ID() BlockID { return BlockID{Hash: payload.BlockHash, Number: uint64(payload.BlockNumber)} } diff --git a/op-service/sources/l2_client.go b/op-service/sources/l2_client.go index 7c60087464..f27fbd5744 100644 --- a/op-service/sources/l2_client.go +++ b/op-service/sources/l2_client.go @@ -32,9 +32,9 @@ func L2ClientDefaultConfig(config 
*rollup.Config, trustRPC bool) *L2ClientConfig span := int(config.SeqWindowSize) * 3 / 2 // Estimate number of L2 blocks in this span of L1 blocks // (there's always one L2 block per L1 block, L1 is thus the minimum, even if block time is very high) - if config.BlockTime/1000 < 12 && config.BlockTime/1000 > 0 { + if config.SecondBlockInterval() < 12 && config.SecondBlockInterval() > 0 { span *= 12 - span /= int(config.BlockTime / 1000) + span /= int(config.SecondBlockInterval()) } fullSpan := span if span > 1000 { // sanity cap. If a large sequencing window is configured, do not make the cache too large diff --git a/op-service/sources/types.go b/op-service/sources/types.go index 2a38f47a9f..5329ab27c1 100644 --- a/op-service/sources/types.go +++ b/op-service/sources/types.go @@ -63,12 +63,12 @@ func (h headerInfo) Time() uint64 { return h.Header.Time } -func (h headerInfo) MilliTimestamp() uint64 { - milliseconds := uint64(0) +func (h headerInfo) MillisecondTimestamp() uint64 { + milliPart := uint64(0) if h.MixDigest() != (common.Hash{}) { - milliseconds = uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() + milliPart = uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() } - return h.Header.Time*1000 + milliseconds + return h.Header.Time*1000 + milliPart } func (h headerInfo) MixDigest() common.Hash { diff --git a/op-service/testutils/l1info.go b/op-service/testutils/l1info.go index aaa9feb755..b7c86e9a3d 100644 --- a/op-service/testutils/l1info.go +++ b/op-service/testutils/l1info.go @@ -21,7 +21,7 @@ type MockBlockInfo struct { InfoRoot common.Hash InfoNum uint64 InfoTime uint64 - InfoMTime uint64 + InfoMilliTime uint64 InfoMixDigest [32]byte InfoBaseFee *big.Int InfoBlobBaseFee *big.Int @@ -57,8 +57,8 @@ func (l *MockBlockInfo) Time() uint64 { return l.InfoTime } -func (l *MockBlockInfo) MilliTimestamp() uint64 { - return l.InfoTime*1000 + l.InfoMTime +func (l *MockBlockInfo) MillisecondTimestamp() uint64 { + return l.InfoTime*1000 + 
l.InfoMilliTime } func (l *MockBlockInfo) MixDigest() common.Hash { From 6ef4b4a61bb940836d577a1b41f9b16796636b2e Mon Sep 17 00:00:00 2001 From: will-2012 <117156346+will-2012@users.noreply.github.com> Date: Wed, 5 Mar 2025 21:43:36 +0800 Subject: [PATCH 07/30] chore: refine l2 attribute millisecond timestamp setter (#268) Co-authored-by: 2020xibao <2020xibao@gmail.com> --- op-batcher/batcher/channel_manager.go | 2 +- op-chain-ops/genesis/config.go | 4 +++- op-node/p2p/host_test.go | 1 - op-node/rollup/derive/attributes.go | 17 ++++++++--------- op-node/rollup/derive/batch_queue_test.go | 3 --- op-node/rollup/derive/batches.go | 4 ++-- op-node/rollup/derive/channel_out.go | 4 ++-- op-node/rollup/derive/engine_update.go | 2 +- op-node/rollup/driver/sequencer.go | 2 +- op-service/eth/types.go | 19 +++++++++++++++---- 10 files changed, 33 insertions(+), 25 deletions(-) diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index c9fa50b4cf..67c404a138 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -368,7 +368,7 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo Number: block.NumberU64(), ParentHash: block.ParentHash(), Time: block.Time(), - MilliTime: uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), // adapts millisecond part + MilliTime: uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), // adapts l1 millisecond part L1Origin: eth.BlockID{Hash: l1info.BlockHash, Number: l1info.Number}, SequenceNumber: l1info.SequenceNumber, } diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index 2adf5e62fc..a9c290327f 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -319,6 +319,8 @@ func (d *DeployConfig) L2MillisecondBlockInterval() uint64 { return d.L2BlockTime * 1000 } +// L2SecondBlockInterval is just used by ut&e2e test. 
+// TODO: ut&e2e need to be refined later. func (d *DeployConfig) L2SecondBlockInterval() uint64 { if d.L2BlockTime <= 3 { // has been second @@ -465,7 +467,7 @@ func (d *DeployConfig) Check() error { // L2 block time must always be smaller than L1 block time if d.L1MillisecondBlockInterval() < d.L2MillisecondBlockInterval() { - return fmt.Errorf("L2 block time (%d) is larger than L1 block time (%d)", d.L2MillisecondBlockInterval(), d.L1MillisecondBlockInterval()) + return fmt.Errorf("L2 block interval ms (%d) is larger than L1 block interval ms (%d)", d.L2MillisecondBlockInterval(), d.L1MillisecondBlockInterval()) } if d.RequiredProtocolVersion == (params.ProtocolVersion{}) { log.Warn("RequiredProtocolVersion is empty") diff --git a/op-node/p2p/host_test.go b/op-node/p2p/host_test.go index 986cce2cd3..3fcfb7714c 100644 --- a/op-node/p2p/host_test.go +++ b/op-node/p2p/host_test.go @@ -261,7 +261,6 @@ func TestP2PFull(t *testing.T) { require.NoError(t, p2pClientA.ProtectPeer(ctx, hostB.ID())) require.NoError(t, p2pClientA.UnprotectPeer(ctx, hostB.ID())) - // TODO: } func TestDiscovery(t *testing.T) { diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index d46409381b..3518980cac 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -5,15 +5,13 @@ import ( "fmt" "math/big" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/holiman/uint256" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/bsc" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/predeploys" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" ) var ( @@ -170,16 +168,17 @@ func (ba *FetchingAttributesBuilder) 
PreparePayloadAttributes(ctx context.Contex } } - return ð.PayloadAttributes{ - Timestamp: hexutil.Uint64(nextL2MilliTime / 1000), // second part - PrevRandao: uint256.NewInt(nextL2MilliTime % 1000).Bytes32(), // millisecond part + pa := ð.PayloadAttributes{ + PrevRandao: eth.Bytes32(l1Info.MixDigest()), SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr, Transactions: txs, NoTxPool: true, GasLimit: (*eth.Uint64Quantity)(&sysConfig.GasLimit), Withdrawals: withdrawals, ParentBeaconBlockRoot: parentBeaconRoot, - }, nil + } + pa.SetMillisecondTimestamp(nextL2MilliTime) + return pa, nil } func (ba *FetchingAttributesBuilder) CachePayloadByHash(payload *eth.ExecutionPayloadEnvelope) bool { diff --git a/op-node/rollup/derive/batch_queue_test.go b/op-node/rollup/derive/batch_queue_test.go index 68eb182461..d0e5af675c 100644 --- a/op-node/rollup/derive/batch_queue_test.go +++ b/op-node/rollup/derive/batch_queue_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/binary" "errors" - "fmt" "io" "math/big" "math/rand" @@ -290,8 +289,6 @@ func BatchQueueEager(t *testing.T, batchType int) { for i := 0; i < len(expectedOutputBatches); i++ { b, _, e := bq.NextBatch(context.Background(), safeHead) - log.Info("DEBUG: ", ", i=", i, ", b=", b, ", safe_head=", safeHead) - fmt.Printf("DEBUG: i=%v, b=%v, safehead=%v\n", i, b, safeHead) require.ErrorIs(t, e, expectedOutputErrors[i]) if b == nil { require.Nil(t, expectedOutputBatches[i]) diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index f42bdedf4a..a3b8b31e03 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -125,7 +125,7 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo spec := rollup.NewChainSpec(cfg) // Check if we ran out of sequencer time drift - if max := (batchOrigin.Time + spec.MaxSequencerDrift(batchOrigin.Time)) * 1000; batch.Timestamp > max { + if max := batchOrigin.MillisecondTimestamp() + 
spec.MaxSequencerDrift(batchOrigin.Time)*1000; batch.Timestamp > max { if len(batch.Transactions) == 0 { // If the sequencer is co-operating by producing an empty batch, // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant. @@ -297,7 +297,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B spec := rollup.NewChainSpec(cfg) // Check if we ran out of sequencer time drift - if max := (l1Origin.Time + spec.MaxSequencerDrift(l1Origin.Time)) * 1000; blockTimestamp > max { + if max := l1Origin.MillisecondTimestamp() + spec.MaxSequencerDrift(l1Origin.Time)*1000; blockTimestamp > max { if len(batch.GetBlockTransactions(i)) == 0 { // If the sequencer is co-operating by producing an empty batch, // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant. diff --git a/op-node/rollup/derive/channel_out.go b/op-node/rollup/derive/channel_out.go index 33d9e4318b..f8b91f3319 100644 --- a/op-node/rollup/derive/channel_out.go +++ b/op-node/rollup/derive/channel_out.go @@ -8,11 +8,11 @@ import ( "io" "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" - "github.com/holiman/uint256" ) var ( @@ -243,7 +243,7 @@ func BlockToSingularBatch(rollupCfg *rollup.Config, block *types.Block) (*Singul milliPart := uint64(0) if block.MixDigest() != (common.Hash{}) { - milliPart = uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64() + milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1]) } milliTimestamp := block.Time()*1000 + milliPart diff --git a/op-node/rollup/derive/engine_update.go b/op-node/rollup/derive/engine_update.go index 656e52474b..4b067165b7 100644 --- 
a/op-node/rollup/derive/engine_update.go +++ b/op-node/rollup/derive/engine_update.go @@ -203,7 +203,7 @@ func confirmPayload( } metrics.RecordSequencerStepTime("forkChoiceUpdateHeads", time.Since(start)) log.Info("inserted block", "hash", payload.BlockHash, "number", uint64(payload.BlockNumber), - "state_root", payload.StateRoot, "timestamp", uint64(payload.Timestamp), "parent", payload.ParentHash, + "state_root", payload.StateRoot, "timestamp_ms", payload.MillisecondTimestamp(), "parent", payload.ParentHash, "prev_randao", payload.PrevRandao, "fee_recipient", payload.FeeRecipient, "txs", len(payload.Transactions), "update_safe", updateSafe) return envelope, BlockInsertOK, nil diff --git a/op-node/rollup/driver/sequencer.go b/op-node/rollup/driver/sequencer.go index 5ed04092da..9fc36aec31 100644 --- a/op-node/rollup/driver/sequencer.go +++ b/op-node/rollup/driver/sequencer.go @@ -99,7 +99,7 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error { // empty blocks (other than the L1 info deposit and any user deposits). We handle this by // setting NoTxPool to true, which will cause the Sequencer to not include any transactions // from the transaction pool. - attrs.NoTxPool = attrs.MilliTimestamp() > l1Origin.MillisecondTimestamp()+d.spec.MaxSequencerDrift(l1Origin.Time)*1000 + attrs.NoTxPool = attrs.MillisecondTimestamp() > l1Origin.MillisecondTimestamp()+d.spec.MaxSequencerDrift(l1Origin.Time)*1000 // For the Ecotone activation block we shouldn't include any sequencer transactions. 
if d.rollupCfg.IsEcotoneActivationBlock(uint64(attrs.Timestamp)) { diff --git a/op-service/eth/types.go b/op-service/eth/types.go index 66d4822ef3..ba71dfc05c 100644 --- a/op-service/eth/types.go +++ b/op-service/eth/types.go @@ -197,7 +197,8 @@ type ExecutionPayload struct { } func (payload *ExecutionPayload) MillisecondTimestamp() uint64 { - return uint64(payload.Timestamp) * 1000 + milliPart := uint64(payload.PrevRandao[0])*256 + uint64(payload.PrevRandao[1]) + return uint64(payload.Timestamp)*1000 + milliPart } func (payload *ExecutionPayload) ID() BlockID { @@ -332,9 +333,19 @@ type PayloadAttributes struct { GasLimit *Uint64Quantity `json:"gasLimit,omitempty"` } -func (pa *PayloadAttributes) MilliTimestamp() uint64 { - // TODO: - return uint64(pa.Timestamp) * 1000 +func (pa *PayloadAttributes) MillisecondTimestamp() uint64 { + milliPart := uint64(pa.PrevRandao[0])*256 + uint64(pa.PrevRandao[1]) + return uint64(pa.Timestamp)*1000 + milliPart +} + +// SetMillisecondTimestamp is used to set millisecond timestamp. +// [32]byte PrevRandao +// [0][1] represent l2 millisecond's mill part. 
+func (pa *PayloadAttributes) SetMillisecondTimestamp(ts uint64) { + pa.Timestamp = hexutil.Uint64(ts / 1000) + milliPartBytes := uint256.NewInt(ts % 1000).Bytes32() + pa.PrevRandao[0] = milliPartBytes[30] + pa.PrevRandao[1] = milliPartBytes[31] } type ExecutePayloadStatus string From 7a188bca995a83a70ae2f292c833d47108f8b6c0 Mon Sep 17 00:00:00 2001 From: will-2012 <117156346+will-2012@users.noreply.github.com> Date: Thu, 6 Mar 2025 10:13:22 +0800 Subject: [PATCH 08/30] fix: fix channel manager l2 milli-part parse (#269) Co-authored-by: 2020xibao <2020xibao@gmail.com> --- op-batcher/batcher/channel_manager.go | 9 +++++++-- op-node/rollup/derive/l2block_util.go | 14 +++++++++----- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 67c404a138..7d0f10e29a 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -14,7 +14,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" - "github.com/holiman/uint256" ) var ErrReorg = errors.New("block does not extend existing chain") @@ -363,12 +362,18 @@ func (s *channelManager) AddL2Block(block *types.Block) error { } func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo) eth.L2BlockRef { + milliPart := uint64(0) + if block.MixDigest() != (common.Hash{}) { + // adapts l2 millisecond, highest 2 bytes as milli-part. 
+ milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1]) + } + return eth.L2BlockRef{ Hash: block.Hash(), Number: block.NumberU64(), ParentHash: block.ParentHash(), Time: block.Time(), - MilliTime: uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), // adapts l1 millisecond part + MilliTime: milliPart, L1Origin: eth.BlockID{Hash: l1info.BlockHash, Number: l1info.Number}, SequenceNumber: l1info.SequenceNumber, } diff --git a/op-node/rollup/derive/l2block_util.go b/op-node/rollup/derive/l2block_util.go index aa44117fc0..6d96478ed9 100644 --- a/op-node/rollup/derive/l2block_util.go +++ b/op-node/rollup/derive/l2block_util.go @@ -3,12 +3,10 @@ package derive import ( "fmt" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/holiman/uint256" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" ) // L2BlockRefSource is a source for the generation of a L2BlockRef. E.g. a @@ -56,12 +54,18 @@ func L2BlockToBlockRef(rollupCfg *rollup.Config, block L2BlockRefSource) (eth.L2 sequenceNumber = info.SequenceNumber } + milliPart := uint64(0) + if block.MixDigest() != (common.Hash{}) { + // adapts l2 millisecond, highest 2 bytes as milli-part. 
+ milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1]) + } + return eth.L2BlockRef{ Hash: hash, Number: number, ParentHash: block.ParentHash(), Time: block.Time(), - MilliTime: uint256.NewInt(0).SetBytes32(block.MixDigest().Bytes()[:]).Uint64(), + MilliTime: milliPart, L1Origin: l1Origin, SequenceNumber: sequenceNumber, }, nil From a67eb4ba790118160ea419487dbca3a658691867 Mon Sep 17 00:00:00 2001 From: flywukong <2229306838@qq.com> Date: Tue, 4 Mar 2025 20:34:38 +0800 Subject: [PATCH 09/30] fix l1 origin time --- op-chain-ops/genesis/config.go | 1 + op-node/rollup/derive/attributes.go | 1 + op-node/rollup/derive/l1_traversal.go | 2 ++ op-node/rollup/driver/origin_selector.go | 1 + op-node/rollup/driver/sequencer.go | 7 +++---- 5 files changed, 8 insertions(+), 4 deletions(-) diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index a9c290327f..2aefa799bc 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -616,6 +616,7 @@ func (d *DeployConfig) DeltaTime(genesisTime uint64) *uint64 { return &v } +// TODO judge if it is need to use milliseconds timestamp with the fork information func (d *DeployConfig) EcotoneTime(genesisTime uint64) *uint64 { if d.L2GenesisEcotoneTimeOffset == nil { return nil diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index 3518980cac..0f20f765e6 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -84,6 +84,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex return nil, NewCriticalError(fmt.Errorf("failed to derive some deposits: %w", err)) } // apply sysCfg changes + // TODO: may need to pass l1origin milli-timestamp later if IsEcotone() use the milli-timestamp if err := UpdateSystemConfigWithL1Receipts(&sysConfig, receipts, ba.rollupCfg, info.Time()); err != nil { return nil, NewCriticalError(fmt.Errorf("failed to apply 
derived L1 sysCfg updates: %w", err)) } diff --git a/op-node/rollup/derive/l1_traversal.go b/op-node/rollup/derive/l1_traversal.go index 84d2fbc970..be87cf11c5 100644 --- a/op-node/rollup/derive/l1_traversal.go +++ b/op-node/rollup/derive/l1_traversal.go @@ -76,6 +76,8 @@ func (l1t *L1Traversal) AdvanceL1Block(ctx context.Context) error { if err != nil { return NewTemporaryError(fmt.Errorf("failed to fetch receipts of L1 block %s (parent: %s) for L1 sysCfg update: %w", nextL1Origin, origin, err)) } + + // TODO: may need to pass l1origin milli-timestamp later if IsEcotone() use the milli-timestamp if err := UpdateSystemConfigWithL1Receipts(&l1t.sysCfg, receipts, l1t.cfg, nextL1Origin.Time); err != nil { // the sysCfg changes should always be formatted correctly. return NewCriticalError(fmt.Errorf("failed to update L1 sysCfg with receipts from block %s: %w", nextL1Origin, err)) diff --git a/op-node/rollup/driver/origin_selector.go b/op-node/rollup/driver/origin_selector.go index 73f87f4eb9..1216c084f1 100644 --- a/op-node/rollup/driver/origin_selector.go +++ b/op-node/rollup/driver/origin_selector.go @@ -46,6 +46,7 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc if err != nil { return eth.L1BlockRef{}, err } + // TODO: may need to pass l1origin milli-timestamp later if IsFjord() use the milli-timestamp msd := los.spec.MaxSequencerDrift(currentOrigin.Time) * 1000 // ms log := los.log.New("current", currentOrigin, "current_time", currentOrigin.Time, "l2_head", l2Head, "l2_head_time_ms", l2Head.MillisecondTimestamp(), "max_seq_drift_ms", msd) diff --git a/op-node/rollup/driver/sequencer.go b/op-node/rollup/driver/sequencer.go index 9fc36aec31..04fc95fd04 100644 --- a/op-node/rollup/driver/sequencer.go +++ b/op-node/rollup/driver/sequencer.go @@ -231,23 +231,22 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As } envelope, err := d.CompleteBuildingBlock(ctx, agossip, sequencerConductor) if err != nil { 
- backoffTime := time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval()) if errors.Is(err, derive.ErrCritical) { return nil, err // bubble up critical errors. } else if errors.Is(err, derive.ErrReset) { d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) d.metrics.RecordSequencerReset() - d.nextAction = d.timeNow().Add(backoffTime) // hold off from sequencing for a full block + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval())) // hold off from sequencing for a full block d.CancelBuildingBlock(ctx) return nil, err } else if errors.Is(err, derive.ErrTemporary) { d.log.Error("sequencer failed temporarily to seal new block", "err", err) - d.nextAction = d.timeNow().Add(backoffTime) + d.nextAction = d.timeNow().Add(time.Second) // We don't explicitly cancel block building jobs upon temporary errors: we may still finish the block. // Any unfinished block building work eventually times out, and will be cleaned up that way. 
} else { d.log.Error("sequencer failed to seal block with unclassified error", "err", err) - d.nextAction = d.timeNow().Add(backoffTime) + d.nextAction = d.timeNow().Add(time.Second) d.CancelBuildingBlock(ctx) } return nil, nil From 374a5c7874e07fc33ae744e4632251813c59d546 Mon Sep 17 00:00:00 2001 From: VM Date: Tue, 11 Mar 2025 11:23:16 +0800 Subject: [PATCH 10/30] fix: change L2OutputOracle contract l2 timestamp to millisecond --- packages/contracts-bedrock/src/L1/L2OutputOracle.sol | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol index fe2c7dd7c8..d127b35ac8 100644 --- a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol +++ b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol @@ -25,7 +25,7 @@ contract L2OutputOracle is Initializable, ISemver { /// @custom:network-specific uint256 public submissionInterval; - /// @notice The time between L2 blocks in seconds. Once set, this value MUST NOT be modified. + /// @notice The time between L2 blocks in milliseconds. Once set, this value MUST NOT be modified. /// @custom:network-specific uint256 public l2BlockTime; @@ -202,7 +202,7 @@ contract L2OutputOracle is Initializable, ISemver { ); require( - computeL2Timestamp(_l2BlockNumber) < block.timestamp, + computeL2Timestamp(_l2BlockNumber) < (block.timestamp * 1000), "L2OutputOracle: cannot propose L2 output in the future" ); @@ -311,6 +311,6 @@ contract L2OutputOracle is Initializable, ISemver { /// @param _l2BlockNumber The L2 block number of the target block. /// @return L2 timestamp of the given block. 
function computeL2Timestamp(uint256 _l2BlockNumber) public view returns (uint256) { - return startingTimestamp + ((_l2BlockNumber - startingBlockNumber) * l2BlockTime); + return startingTimestamp*1000 + ((_l2BlockNumber - startingBlockNumber) * l2BlockTime); } } From 09bf99007a6287e0f4d3e91546989516ea22244d Mon Sep 17 00:00:00 2001 From: will-2012 <117156346+will-2012@users.noreply.github.com> Date: Fri, 14 Mar 2025 18:08:30 +0800 Subject: [PATCH 11/30] revert: revert contract change (#270) Co-authored-by: 2020xibao <2020xibao@gmail.com> --- packages/contracts-bedrock/src/L1/L2OutputOracle.sol | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol index d127b35ac8..fe2c7dd7c8 100644 --- a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol +++ b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol @@ -25,7 +25,7 @@ contract L2OutputOracle is Initializable, ISemver { /// @custom:network-specific uint256 public submissionInterval; - /// @notice The time between L2 blocks in milliseconds. Once set, this value MUST NOT be modified. + /// @notice The time between L2 blocks in seconds. Once set, this value MUST NOT be modified. /// @custom:network-specific uint256 public l2BlockTime; @@ -202,7 +202,7 @@ contract L2OutputOracle is Initializable, ISemver { ); require( - computeL2Timestamp(_l2BlockNumber) < (block.timestamp * 1000), + computeL2Timestamp(_l2BlockNumber) < block.timestamp, "L2OutputOracle: cannot propose L2 output in the future" ); @@ -311,6 +311,6 @@ contract L2OutputOracle is Initializable, ISemver { /// @param _l2BlockNumber The L2 block number of the target block. /// @return L2 timestamp of the given block. 
function computeL2Timestamp(uint256 _l2BlockNumber) public view returns (uint256) { - return startingTimestamp*1000 + ((_l2BlockNumber - startingBlockNumber) * l2BlockTime); + return startingTimestamp + ((_l2BlockNumber - startingBlockNumber) * l2BlockTime); } } From 32712e3bd425dd47d69cc2b8af9b8892dd0cf3b4 Mon Sep 17 00:00:00 2001 From: will-2012 <117156346+will-2012@users.noreply.github.com> Date: Fri, 14 Mar 2025 18:37:35 +0800 Subject: [PATCH 12/30] chore: fix some timestamp bug by self test (#271) Co-authored-by: 2020xibao <2020xibao@gmail.com> --- op-node/rollup/derive/payload_util.go | 7 +++++-- op-service/eth/id.go | 2 +- op-service/eth/types.go | 4 ++++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/op-node/rollup/derive/payload_util.go b/op-node/rollup/derive/payload_util.go index d5b47f25da..70fd30a355 100644 --- a/op-node/rollup/derive/payload_util.go +++ b/op-node/rollup/derive/payload_util.go @@ -7,7 +7,6 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/core/types" - "github.com/holiman/uint256" ) // PayloadToBlockRef extracts the essential L2BlockRef information from an execution payload, @@ -41,12 +40,16 @@ func PayloadToBlockRef(rollupCfg *rollup.Config, payload *eth.ExecutionPayload) sequenceNumber = info.SequenceNumber } + // adapts millisecond part + milliPart := uint64(0) + milliPart = uint64(payload.PrevRandao[0])*256 + uint64(payload.PrevRandao[1]) + return eth.L2BlockRef{ Hash: payload.BlockHash, Number: uint64(payload.BlockNumber), ParentHash: payload.ParentHash, Time: uint64(payload.Timestamp), - MilliTime: uint256.NewInt(0).SetBytes32(payload.PrevRandao[:]).Uint64(), // adapts millisecond part + MilliTime: milliPart, L1Origin: l1Origin, SequenceNumber: sequenceNumber, }, nil diff --git a/op-service/eth/id.go b/op-service/eth/id.go index 56d0e2831f..dedbdfde65 100644 --- a/op-service/eth/id.go +++ b/op-service/eth/id.go 
@@ -45,7 +45,7 @@ func (id L2BlockRef) MillisecondTimestamp() uint64 { } func (id L2BlockRef) String() string { - return fmt.Sprintf("%s:%d", id.Hash.String(), id.Number) + return fmt.Sprintf("%s:%d:%d", id.Hash.String(), id.Number, id.MillisecondTimestamp()) } // TerminalString implements log.TerminalStringer, formatting a string for console diff --git a/op-service/eth/types.go index ba71dfc05c..9bf83e1863 100644 --- a/op-service/eth/types.go +++ b/op-service/eth/types.go @@ -346,6 +346,10 @@ func (pa *PayloadAttributes) SetMillisecondTimestamp(ts uint64) { milliPartBytes := uint256.NewInt(ts % 1000).Bytes32() pa.PrevRandao[0] = milliPartBytes[30] pa.PrevRandao[1] = milliPartBytes[31] + + // It is just a marker byte to ensure the PrevRandao field is not all-zero; + // op-geth relies on it being non-empty to determine that a millisecond timestamp was passed in. + pa.PrevRandao[2] = 1 } type ExecutePayloadStatus string From d51fed47273249e1b7e0a0823029d778bfcc6764 Mon Sep 17 00:00:00 2001 From: 2020xibao <2020xibao@gmail.com> Date: Mon, 17 Mar 2025 21:48:11 +0800 Subject: [PATCH 13/30] chore: add volta fork conf --- op-node/rollup/types.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 7fc128b111..b05fa8a9b3 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -128,6 +128,10 @@ type Config struct { // Active if SnowTime != nil && L2 block timestamp >= *SnowTime, inactive otherwise. SnowTime *uint64 `json:"snow_time,omitempty"` + // VoltaTime sets the activation time of the Volta network upgrade. + // Active if VoltaTime != nil && L2 block timestamp >= *VoltaTime, inactive otherwise. + VoltaTime *uint64 `json:"volta_time,omitempty"` + // Note: below addresses are part of the block-derivation process, // and required to be the same network-wide to stay in consensus. 
@@ -159,6 +163,19 @@ type Config struct { LegacyUsePlasma bool `json:"use_plasma,omitempty"` } +const millisecondBlockIntervalVolta = 500 + +func (cfg *Config) MillisecondBlockIntervalV2(secondTimeStamp uint64) uint64 { + if cfg.IsVolta(secondTimeStamp) { + return millisecondBlockIntervalVolta + } + return cfg.BlockTime * 1000 +} + +func (c *Config) IsVolta(timestamp uint64) bool { + return c.VoltaTime != nil && timestamp >= *c.VoltaTime +} + // MillisecondBlockInterval returns millisecond block interval, which has compatible conversions. // Mainly used to support milli block interval. func (cfg *Config) MillisecondBlockInterval() uint64 { From 725388625c5a14b56e50725679b3b2b2f624d73f Mon Sep 17 00:00:00 2001 From: joeylichang Date: Tue, 18 Mar 2025 10:09:40 +0800 Subject: [PATCH 14/30] feat: add rollup config base func for hard fork --- op-node/p2p/sync.go | 2 +- op-node/rollup/chain_spec.go | 9 ++- op-node/rollup/types.go | 138 +++++++++++++++++++++++------------ 3 files changed, 102 insertions(+), 47 deletions(-) diff --git a/op-node/p2p/sync.go b/op-node/p2p/sync.go index 3872526be0..58569f326c 100644 --- a/op-node/p2p/sync.go +++ b/op-node/p2p/sync.go @@ -683,7 +683,7 @@ func (s *SyncClient) doRequest(ctx context.Context, id peer.ID, expectedBlockNum } version := binary.LittleEndian.Uint32(versionData[:]) - isCanyon := s.cfg.IsCanyon(s.cfg.TimestampForBlock(expectedBlockNum)) + isCanyon := s.cfg.IsCanyon(s.cfg.MillisecondTimestampForBlock(expectedBlockNum) / 1000) envelope, err := readExecutionPayload(version, data, isCanyon) if err != nil { return err diff --git a/op-node/rollup/chain_spec.go b/op-node/rollup/chain_spec.go index f11f8c78b1..b43a5dc913 100644 --- a/op-node/rollup/chain_spec.go +++ b/op-node/rollup/chain_spec.go @@ -42,6 +42,7 @@ const ( Ecotone ForkName = "ecotone" Fjord ForkName = "fjord" Interop ForkName = "interop" + Volta ForkName = "volta" None ForkName = "none" ) @@ -52,7 +53,8 @@ var nextFork = map[ForkName]ForkName{ Delta: Ecotone, 
Ecotone: Fjord, Fjord: Interop, - Interop: None, + Interop: Volta, + Volta: None, } type ChainSpec struct { @@ -134,6 +136,9 @@ func (s *ChainSpec) CheckForkActivation(log log.Logger, block eth.L2BlockRef) { if s.config.IsInterop(block.Time) { s.currentFork = Interop } + if s.config.IsVolta(block.Time) { + s.currentFork = Volta + } log.Info("Current hardfork version detected", "forkName", s.currentFork) return } @@ -153,6 +158,8 @@ func (s *ChainSpec) CheckForkActivation(log log.Logger, block eth.L2BlockRef) { foundActivationBlock = s.config.IsFjordActivationBlock(block.Time) case Interop: foundActivationBlock = s.config.IsInteropActivationBlock(block.Time) + case Volta: + foundActivationBlock = s.config.IsVoltaActivationBlock(block.MillisecondTimestamp()) } if foundActivationBlock { diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index b05fa8a9b3..4525f3f43a 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -35,6 +35,11 @@ var ( ErrL2ChainIDNotPositive = errors.New("L2 chain ID must be non-zero and positive") ) +var ( + VoltBlockTime uint64 = 500 + BeforeVoltBlockTime uint64 = 1000 +) + // NetworkNames are user friendly names to use in the chain spec banner. var NetworkNames = map[string]string{ "56": "bscMainnet", @@ -74,7 +79,8 @@ type Config struct { Genesis Genesis `json:"genesis"` // BlockTime is the interval configuration of L2 block; // which supports the new millisecond unit and is compatible with the legacy second unit. - BlockTime uint64 `json:"block_time"` + // Temp delete, will reset after developing + //BlockTime uint64 `json:"block_time"` // Sequencer batches may not be more than MaxSequencerDrift seconds after // the L1 timestamp of the sequencing window end. 
// @@ -163,36 +169,62 @@ type Config struct { LegacyUsePlasma bool `json:"use_plasma,omitempty"` } -const millisecondBlockIntervalVolta = 500 +//const millisecondBlockIntervalVolta = 500 -func (cfg *Config) MillisecondBlockIntervalV2(secondTimeStamp uint64) uint64 { - if cfg.IsVolta(secondTimeStamp) { - return millisecondBlockIntervalVolta +func (cfg *Config) MillisecondBlockInterval(millisecondTimestamp uint64) uint64 { + if cfg.IsVolta(millisecondTimestamp / 1000) { + return VoltBlockTime } - return cfg.BlockTime * 1000 + return BeforeVoltBlockTime +} + +func (cfg *Config) SecondBlockInterval(millisecondTimestamp uint64) uint64 { + return cfg.MillisecondBlockInterval(millisecondTimestamp) / 1000 +} + +func (cfg *Config) NextMillisecondBlockTime(millisecondTimestamp uint64) uint64 { + return millisecondTimestamp + cfg.MillisecondBlockInterval(millisecondTimestamp) +} + +func (cfg *Config) NextSecondBlockTime(millisecondTimestamp uint64) uint64 { + return cfg.NextMillisecondBlockTime(millisecondTimestamp) / 1000 } func (c *Config) IsVolta(timestamp uint64) bool { return c.VoltaTime != nil && timestamp >= *c.VoltaTime } -// MillisecondBlockInterval returns millisecond block interval, which has compatible conversions. -// Mainly used to support milli block interval. -func (cfg *Config) MillisecondBlockInterval() uint64 { - if cfg.BlockTime > 3 { - return cfg.BlockTime +func (c *Config) VoltaBlocNumber() uint64 { + return (*c.VoltaTime-c.Genesis.L2Time)/(BeforeVoltBlockTime/1000) + c.Genesis.L2.Number +} + +func (c *Config) IsVoltaActivationBlock(l2BlockMillisecondTime uint64) bool { + if l2BlockMillisecondTime/1000 != 0 { + return false } - return cfg.BlockTime * 1000 + l2BlockTime := l2BlockMillisecondTime / 1000 + return c.IsVolta(l2BlockTime) && + l2BlockTime >= BeforeVoltBlockTime/1000 && + !c.IsVolta(l2BlockTime-BeforeVoltBlockTime/1000) } +// MillisecondBlockInterval returns millisecond block interval, which has compatible conversions. 
+// Mainly used to support milli block interval. +//func (cfg *Config) MillisecondBlockInterval(millisecondTimestamp uint64) uint64 { +// if cfg.BlockTime > 3 { +// return cfg.BlockTime +// } +// return cfg.BlockTime * 1000 +//} + // SecondBlockInterval returns second block interval, which has compatible conversions. // Mainly used to compatible to history fork time. -func (cfg *Config) SecondBlockInterval() uint64 { - if cfg.BlockTime <= 3 { - return cfg.BlockTime - } - return cfg.BlockTime / 1000 -} +//func (cfg *Config) SecondBlockInterval() uint64 { +// if cfg.BlockTime <= 3 { +// return cfg.BlockTime +// } +// return cfg.BlockTime / 1000 +//} // ValidateL1Config checks L1 config variables for errors. func (cfg *Config) ValidateL1Config(ctx context.Context, client L1Client) error { @@ -227,22 +259,35 @@ func (cfg *Config) ValidateL2Config(ctx context.Context, client L2Client, skipL2 return nil } -func (cfg *Config) TimestampForBlock(blockNumber uint64) uint64 { - return cfg.Genesis.L2Time + ((blockNumber - cfg.Genesis.L2.Number) * cfg.SecondBlockInterval()) +func (cfg *Config) MillisecondTimestampForBlock(blockNumber uint64) uint64 { + voltaBlockNumber := cfg.VoltaBlocNumber() + if blockNumber <= voltaBlockNumber { + return cfg.Genesis.L2Time*1000 + ((blockNumber - cfg.Genesis.L2.Number) * BeforeVoltBlockTime) + } else { + return voltaBlockNumber + *cfg.VoltaTime*1000 + (blockNumber-voltaBlockNumber)*VoltBlockTime + } } func (cfg *Config) TargetBlockNumber(milliTimestamp uint64) (num uint64, err error) { - // subtract genesis time from timestamp to get the time elapsed since genesis, and then divide that - // difference by the block time to get the expected L2 block number at the current time. If the - // unsafe head does not have this block number, then there is a gap in the queue. 
- genesisMilliTimestamp := cfg.Genesis.L2Time * 1000 - if milliTimestamp < genesisMilliTimestamp { - return 0, fmt.Errorf("did not reach genesis time (%d) yet", genesisMilliTimestamp) + if milliTimestamp <= *cfg.VoltaTime*1000 { + // subtract genesis time from timestamp to get the time elapsed since genesis, and then divide that + // difference by the block time to get the expected L2 block number at the current time. If the + // unsafe head does not have this block number, then there is a gap in the queue. + genesisMilliTimestamp := cfg.Genesis.L2Time * 1000 + if milliTimestamp < genesisMilliTimestamp { + return 0, fmt.Errorf("did not reach genesis time (%d) yet", genesisMilliTimestamp) + } + wallClockGenesisDiff := milliTimestamp - genesisMilliTimestamp + // Note: round down, we should not request blocks into the future. + blocksSinceGenesis := wallClockGenesisDiff / BeforeVoltBlockTime + return cfg.Genesis.L2.Number + blocksSinceGenesis, nil + } else { + voltaBlockNumber := cfg.VoltaBlocNumber() + voltaMilliTimestamp := *cfg.VoltaTime * 1000 + wallClockGenesisDiff := milliTimestamp - voltaMilliTimestamp + blocksSinceVolta := wallClockGenesisDiff / VoltBlockTime + return voltaBlockNumber + blocksSinceVolta, nil } - wallClockGenesisDiff := milliTimestamp - genesisMilliTimestamp - // Note: round down, we should not request blocks into the future. 
- blocksSinceGenesis := wallClockGenesisDiff / cfg.MillisecondBlockInterval() - return cfg.Genesis.L2.Number + blocksSinceGenesis, nil } type L1Client interface { @@ -305,9 +350,9 @@ func (cfg *Config) CheckL2GenesisBlockHash(ctx context.Context, client L2Client) // Check verifies that the given configuration makes sense func (cfg *Config) Check() error { - if cfg.BlockTime == 0 { - return ErrBlockTimeZero - } + //if cfg.BlockTime == 0 { + // return ErrBlockTimeZero + //} if cfg.ChannelTimeout == 0 { return ErrMissingChannelTimeout } @@ -372,6 +417,9 @@ func (cfg *Config) Check() error { if err := checkFork(cfg.EcotoneTime, cfg.FjordTime, Ecotone, Fjord); err != nil { return err } + if err := checkFork(cfg.FjordTime, cfg.VoltaTime, Fjord, Volta); err != nil { + return err + } return nil } @@ -463,8 +511,8 @@ func (c *Config) IsFjord(timestamp uint64) bool { // Fjord upgrade. func (c *Config) IsFjordActivationBlock(l2BlockTime uint64) bool { return c.IsFjord(l2BlockTime) && - l2BlockTime >= c.SecondBlockInterval() && - !c.IsFjord(l2BlockTime-c.SecondBlockInterval()) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsFjord(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } // IsInterop returns true if the Interop hardfork is active at or past the given timestamp. 
@@ -474,34 +522,34 @@ func (c *Config) IsInterop(timestamp uint64) bool { func (c *Config) IsRegolithActivationBlock(l2BlockTime uint64) bool { return c.IsRegolith(l2BlockTime) && - l2BlockTime >= c.SecondBlockInterval() && - !c.IsRegolith(l2BlockTime-c.SecondBlockInterval()) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsRegolith(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } func (c *Config) IsCanyonActivationBlock(l2BlockTime uint64) bool { return c.IsCanyon(l2BlockTime) && - l2BlockTime >= c.SecondBlockInterval() && - !c.IsCanyon(l2BlockTime-c.SecondBlockInterval()) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsCanyon(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } func (c *Config) IsDeltaActivationBlock(l2BlockTime uint64) bool { return c.IsDelta(l2BlockTime) && - l2BlockTime >= c.SecondBlockInterval() && - !c.IsDelta(l2BlockTime-c.SecondBlockInterval()) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsDelta(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } // IsEcotoneActivationBlock returns whether the specified block is the first block subject to the // Ecotone upgrade. Ecotone activation at genesis does not count. func (c *Config) IsEcotoneActivationBlock(l2BlockTime uint64) bool { return c.IsEcotone(l2BlockTime) && - l2BlockTime >= c.SecondBlockInterval() && - !c.IsEcotone(l2BlockTime-c.SecondBlockInterval()) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsEcotone(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } func (c *Config) IsInteropActivationBlock(l2BlockTime uint64) bool { return c.IsInterop(l2BlockTime) && - l2BlockTime >= c.SecondBlockInterval() && - !c.IsInterop(l2BlockTime-c.SecondBlockInterval()) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsInterop(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } // ForkchoiceUpdatedVersion returns the EngineAPIMethod suitable for the chain hard fork version. 
From 82404d6a5312c8b535f27af86a5bd6f1fd46a2ae Mon Sep 17 00:00:00 2001 From: joeylichang Date: Tue, 18 Mar 2025 10:36:16 +0800 Subject: [PATCH 15/30] fix: no config volta hard fork for funcs --- op-node/rollup/types.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 4525f3f43a..7537c175e9 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -195,6 +195,9 @@ func (c *Config) IsVolta(timestamp uint64) bool { } func (c *Config) VoltaBlocNumber() uint64 { + if c.VoltaTime == nil || *c.VoltaTime == 0 { + return 0 + } return (*c.VoltaTime-c.Genesis.L2Time)/(BeforeVoltBlockTime/1000) + c.Genesis.L2.Number } @@ -261,7 +264,7 @@ func (cfg *Config) ValidateL2Config(ctx context.Context, client L2Client, skipL2 func (cfg *Config) MillisecondTimestampForBlock(blockNumber uint64) uint64 { voltaBlockNumber := cfg.VoltaBlocNumber() - if blockNumber <= voltaBlockNumber { + if voltaBlockNumber == 0 || blockNumber <= voltaBlockNumber { return cfg.Genesis.L2Time*1000 + ((blockNumber - cfg.Genesis.L2.Number) * BeforeVoltBlockTime) } else { return voltaBlockNumber + *cfg.VoltaTime*1000 + (blockNumber-voltaBlockNumber)*VoltBlockTime @@ -269,7 +272,8 @@ func (cfg *Config) MillisecondTimestampForBlock(blockNumber uint64) uint64 { } func (cfg *Config) TargetBlockNumber(milliTimestamp uint64) (num uint64, err error) { - if milliTimestamp <= *cfg.VoltaTime*1000 { + voltaBlockNumber := cfg.VoltaBlocNumber() + if voltaBlockNumber == 0 || milliTimestamp <= *cfg.VoltaTime*1000 { // subtract genesis time from timestamp to get the time elapsed since genesis, and then divide that // difference by the block time to get the expected L2 block number at the current time. If the // unsafe head does not have this block number, then there is a gap in the queue. 
@@ -282,7 +286,6 @@ func (cfg *Config) TargetBlockNumber(milliTimestamp uint64) (num uint64, err err blocksSinceGenesis := wallClockGenesisDiff / BeforeVoltBlockTime return cfg.Genesis.L2.Number + blocksSinceGenesis, nil } else { - voltaBlockNumber := cfg.VoltaBlocNumber() voltaMilliTimestamp := *cfg.VoltaTime * 1000 wallClockGenesisDiff := milliTimestamp - voltaMilliTimestamp blocksSinceVolta := wallClockGenesisDiff / VoltBlockTime From 638b0d89fadc7755430fc32e0e4ab9cf633d4542 Mon Sep 17 00:00:00 2001 From: joeylichang Date: Tue, 18 Mar 2025 11:57:08 +0800 Subject: [PATCH 16/30] fix: compile error --- op-chain-ops/genesis/config.go | 2 +- op-node/chaincfg/chains.go | 6 ++-- op-node/p2p/app_params.go | 2 +- op-node/p2p/peer_params.go | 2 +- op-node/rollup/chain_spec.go | 2 +- op-node/rollup/derive/attributes.go | 4 +-- op-node/rollup/derive/attributes_queue.go | 2 +- op-node/rollup/derive/batch_queue.go | 4 +-- op-node/rollup/derive/batches.go | 14 ++++++--- op-node/rollup/derive/channel_in_reader.go | 4 ++- op-node/rollup/driver/metered_engine.go | 2 +- op-node/rollup/driver/origin_selector.go | 13 +++++++-- op-node/rollup/driver/sequencer.go | 33 +++++++++++++++++----- op-node/rollup/driver/state.go | 2 +- op-node/rollup/superchain.go | 2 +- op-node/service.go | 20 ++++++------- op-service/sources/l2_client.go | 4 +-- 17 files changed, 76 insertions(+), 42 deletions(-) diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index 2aefa799bc..0242c7587e 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -720,7 +720,7 @@ func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHas GasLimit: uint64(d.L2GenesisBlockGasLimit), }, }, - BlockTime: d.L2BlockTime, + //BlockTime: d.L2BlockTime, MaxSequencerDrift: d.MaxSequencerDrift, SeqWindowSize: d.SequencerWindowSize, ChannelTimeout: d.ChannelTimeout, diff --git a/op-node/chaincfg/chains.go b/op-node/chaincfg/chains.go index d7256bad94..7eebf2c304 
100644 --- a/op-node/chaincfg/chains.go +++ b/op-node/chaincfg/chains.go @@ -127,7 +127,7 @@ var OPBNBMainnet = rollup.Config{ GasLimit: 100000000, }, }, - BlockTime: 1, + //BlockTime: 1, MaxSequencerDrift: 600, SeqWindowSize: 14400, ChannelTimeout: 1200, @@ -163,7 +163,7 @@ var OPBNBTestnet = rollup.Config{ GasLimit: 100000000, }, }, - BlockTime: 1, + //BlockTime: 1, MaxSequencerDrift: 600, SeqWindowSize: 14400, ChannelTimeout: 1200, @@ -199,7 +199,7 @@ var OPBNBQANet = rollup.Config{ GasLimit: 100000000, }, }, - BlockTime: 1, + //BlockTime: 1, MaxSequencerDrift: 600, SeqWindowSize: 14400, ChannelTimeout: 1200, diff --git a/op-node/p2p/app_params.go b/op-node/p2p/app_params.go index e8bea84725..695b006744 100644 --- a/op-node/p2p/app_params.go +++ b/op-node/p2p/app_params.go @@ -24,7 +24,7 @@ type ApplicationScoreParams struct { } func LightApplicationScoreParams(cfg *rollup.Config) ApplicationScoreParams { - slot := time.Duration(cfg.BlockTime) * time.Second + slot := time.Duration(rollup.VoltBlockTime) * time.Millisecond if slot == 0 { slot = 2 * time.Second } diff --git a/op-node/p2p/peer_params.go b/op-node/p2p/peer_params.go index 6a60e2160b..6c76025b26 100644 --- a/op-node/p2p/peer_params.go +++ b/op-node/p2p/peer_params.go @@ -33,7 +33,7 @@ func ScoreDecay(duration time.Duration, slot time.Duration) float64 { // // [PeerScoreParams]: https://pkg.go.dev/github.com/libp2p/go-libp2p-pubsub@v0.8.1#PeerScoreParams func LightPeerScoreParams(cfg *rollup.Config) pubsub.PeerScoreParams { - slot := time.Duration(cfg.BlockTime) * time.Second + slot := time.Duration(rollup.VoltBlockTime) * time.Millisecond if slot == 0 { slot = 2 * time.Second } diff --git a/op-node/rollup/chain_spec.go b/op-node/rollup/chain_spec.go index b43a5dc913..b0e0657e9c 100644 --- a/op-node/rollup/chain_spec.go +++ b/op-node/rollup/chain_spec.go @@ -30,7 +30,7 @@ const SafeMaxRLPBytesPerChannel = maxRLPBytesPerChannelBedrock // the rollup config. 
// From Fjord, the max sequencer drift for a given block timestamp should be learned via the // ChainSpec instead of reading the rollup configuration field directly. -const maxSequencerDriftFjord = 1800 +const maxSequencerDriftFjord = 3600 type ForkName string diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index 0f20f765e6..da1afea7e4 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -107,7 +107,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex // Calculate bsc block base fee var l1BaseFee *big.Int - if ba.rollupCfg.IsSnow((l2Parent.MillisecondTimestamp() + ba.rollupCfg.MillisecondBlockInterval()) / 1000) { + if ba.rollupCfg.IsSnow(ba.rollupCfg.NextSecondBlockTime(l2Parent.MillisecondTimestamp())) { l1BaseFee, err = SnowL1GasPrice(ctx, ba, epoch) if err != nil { return nil, err @@ -124,7 +124,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex l1Info = bsc.NewBlockInfoBSCWrapper(l1Info, l1BaseFee) // Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2 - nextL2MilliTime := l2Parent.MillisecondTimestamp() + ba.rollupCfg.MillisecondBlockInterval() + nextL2MilliTime := ba.rollupCfg.NextMillisecondBlockTime(l2Parent.MillisecondTimestamp()) if nextL2MilliTime < l1Info.MillisecondTimestamp() { return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d", l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MillisecondTimestamp())) diff --git a/op-node/rollup/derive/attributes_queue.go b/op-node/rollup/derive/attributes_queue.go index e05fd2b5dc..3e5218a7f7 100644 --- a/op-node/rollup/derive/attributes_queue.go +++ b/op-node/rollup/derive/attributes_queue.go @@ -88,7 +88,7 @@ func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *Sing return nil, NewResetError(fmt.Errorf("valid batch has bad parent 
hash %s, expected %s", batch.ParentHash, l2SafeHead.Hash)) } // sanity check timestamp - if expected := l2SafeHead.MillisecondTimestamp() + aq.config.MillisecondBlockInterval(); expected != batch.Timestamp { + if expected := aq.config.NextMillisecondBlockTime(l2SafeHead.MillisecondTimestamp()); expected != batch.Timestamp { return nil, NewResetError(fmt.Errorf("valid batch has bad timestamp %d, expected %d", batch.Timestamp, expected)) } fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second) diff --git a/op-node/rollup/derive/batch_queue.go b/op-node/rollup/derive/batch_queue.go index b923b1ff84..566c6694cc 100644 --- a/op-node/rollup/derive/batch_queue.go +++ b/op-node/rollup/derive/batch_queue.go @@ -96,7 +96,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, parent eth.L2BlockRef) (*Si if len(bq.nextSpan) > 0 { // There are cached singular batches derived from the span batch. // Check if the next cached batch matches the given parent block. - if bq.nextSpan[0].Timestamp == parent.MillisecondTimestamp()+bq.config.MillisecondBlockInterval() { + if bq.nextSpan[0].Timestamp == bq.config.NextMillisecondBlockTime(parent.MillisecondTimestamp()) { // Pop first one and return. nextBatch := bq.popNextBatch(parent) // len(bq.nextSpan) == 0 means it's the last batch of the span. @@ -257,7 +257,7 @@ func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, paren // Find the first-seen batch that matches all validity conditions. // We may not have sufficient information to proceed filtering, and then we stop. // There may be none: in that case we force-create an empty batch - nextMilliTimestamp := parent.MillisecondTimestamp() + bq.config.MillisecondBlockInterval() + nextMilliTimestamp := bq.config.NextMillisecondBlockTime(parent.MillisecondTimestamp()) var nextBatch *BatchWithL1InclusionBlock // Go over all batches, in order of inclusion, and find the first batch we can accept. 
diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index a3b8b31e03..70eaccf538 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -67,7 +67,7 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo } epoch := l1Blocks[0] - nextMilliTimestamp := l2SafeHead.MillisecondTimestamp() + cfg.MillisecondBlockInterval() + nextMilliTimestamp := cfg.NextMillisecondBlockTime(l2SafeHead.MillisecondTimestamp()) if batch.Timestamp > nextMilliTimestamp { log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextMilliTimestamp) return BatchFuture @@ -194,7 +194,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B return BatchDrop } - nextMilliTimestamp := l2SafeHead.MillisecondTimestamp() + cfg.MillisecondBlockInterval() + nextMilliTimestamp := cfg.NextMillisecondBlockTime(l2SafeHead.MillisecondTimestamp()) if batch.GetTimestamp() > nextMilliTimestamp { log.Trace("received out-of-order batch for future processing after next batch", "next_ms_timestamp", nextMilliTimestamp) @@ -215,12 +215,18 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B log.Warn("batch has misaligned timestamp, block time is too short") return BatchDrop } - if (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())%cfg.MillisecondBlockInterval() != 0 { + if (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())%rollup.VoltBlockTime != 0 { log.Warn("batch has misaligned timestamp, not overlapped exactly") return BatchDrop } - parentNum = l2SafeHead.Number - (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())/cfg.MillisecondBlockInterval() - 1 + //parentNum = l2SafeHead.Number - (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())/cfg.MillisecondBlockInterval() - 1 var err error + parentNum, err = cfg.TargetBlockNumber(batch.GetTimestamp()) + if err != nil { + log.Warn("failed to computer batch 
parent number", "batch_ms_time", batch.GetTimestamp(), "err", err) + // unable to validate the batch for now. retry later. + return BatchUndecided + } parentBlock, err = l2Fetcher.L2BlockRefByNumber(ctx, parentNum) if err != nil { log.Warn("failed to fetch L2 block", "number", parentNum, "err", err) diff --git a/op-node/rollup/derive/channel_in_reader.go b/op-node/rollup/derive/channel_in_reader.go index f7dde867bc..bc944c0488 100644 --- a/op-node/rollup/derive/channel_in_reader.go +++ b/op-node/rollup/derive/channel_in_reader.go @@ -105,7 +105,9 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { // This is just for early dropping invalid batches as soon as possible. return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time)) } - batch.Batch, err = DeriveSpanBatch(batchData, cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID) + // double check + // temp VoltBlockTime + batch.Batch, err = DeriveSpanBatch(batchData, rollup.VoltBlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID) if err != nil { return nil, err } diff --git a/op-node/rollup/driver/metered_engine.go b/op-node/rollup/driver/metered_engine.go index 29f2c7e4c8..01b5fc4f46 100644 --- a/op-node/rollup/driver/metered_engine.go +++ b/op-node/rollup/driver/metered_engine.go @@ -74,7 +74,7 @@ func (m *MeteredEngine) ConfirmPayload(ctx context.Context, agossip async.AsyncG sealTime := now.Sub(sealingStart) buildTime := now.Sub(m.buildingStartTime) m.metrics.RecordSequencerSealingTime(sealTime) - m.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(m.cfg.BlockTime)*time.Second) + m.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(rollup.VoltBlockTime)*time.Millisecond) txnCount := len(payload.ExecutionPayload.Transactions) m.metrics.CountSequencedTxs(txnCount) diff --git a/op-node/rollup/driver/origin_selector.go b/op-node/rollup/driver/origin_selector.go index 1216c084f1..263f6f5785 100644 
--- a/op-node/rollup/driver/origin_selector.go +++ b/op-node/rollup/driver/origin_selector.go @@ -53,14 +53,14 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc // If we are past the sequencer depth, we may want to advance the origin, but need to still // check the time of the next origin. - pastSeqDrift := l2Head.MillisecondTimestamp()+los.cfg.BlockTime > currentOrigin.MillisecondTimestamp()+msd + pastSeqDrift := los.cfg.NextMillisecondBlockTime(l2Head.MillisecondTimestamp()) > currentOrigin.MillisecondTimestamp()+msd // Limit the time to fetch next origin block by default refCtx, refCancel := context.WithTimeout(ctx, 100*time.Millisecond) defer refCancel() if pastSeqDrift { log.Warn("Next L2 block time is past the sequencer drift + current origin time", "l2_head_ms_timestamp", l2Head.MillisecondTimestamp(), - "l2_block_ms_interval", los.cfg.BlockTime, + "l2_block_ms_interval", los.cfg.NextMillisecondBlockTime(l2Head.MillisecondTimestamp()), "l1_origin_ms_timestamp", currentOrigin.MillisecondTimestamp(), "max_ms_drift", msd) // Must fetch next L1 block as long as it may take, cause we are pastSeqDrift @@ -98,8 +98,15 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc // of slack. For simplicity, we implement our Sequencer to always start building on the latest // L1 block when we can. // If not pastSeqDrift and next origin receipts not cached, fallback to current origin. 
- if l2Head.MillisecondTimestamp()+los.cfg.MillisecondBlockInterval() >= nextOrigin.MillisecondTimestamp() && (pastSeqDrift || receiptsCached) { + if los.cfg.NextMillisecondBlockTime(l2Head.MillisecondTimestamp()) >= nextOrigin.MillisecondTimestamp() && (pastSeqDrift || receiptsCached) { return nextOrigin, nil + } else { + log.Warn("select l1 origin, give up next new origin", + "l2_head_ms_timestamp", l2Head.MillisecondTimestamp(), + "next_l2_head_ms_timestamp", los.cfg.NextMillisecondBlockTime(l2Head.MillisecondTimestamp()), + "next_l1_origin_ms_timestamp", nextOrigin.MillisecondTimestamp(), + "l2_past_seq_drift", pastSeqDrift, + "l1_receipts_cached", receiptsCached) } return currentOrigin, nil diff --git a/op-node/rollup/driver/sequencer.go b/op-node/rollup/driver/sequencer.go index 04fc95fd04..e9d98ae932 100644 --- a/op-node/rollup/driver/sequencer.go +++ b/op-node/rollup/driver/sequencer.go @@ -154,7 +154,11 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration { if safe { d.log.Warn("delaying sequencing to not interrupt safe-head changes", "onto", buildingOnto, "onto_time", buildingOnto.Time) // approximates the worst-case time it takes to build a block, to reattempt sequencing after. 
- return time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval()) + + if buildingOnto == (eth.L2BlockRef{}) { + return time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval(uint64(time.Now().UnixMilli()))) + } + return time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval(buildingOnto.MillisecondTimestamp())) } head := d.engine.UnsafeL2Head() @@ -166,8 +170,15 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration { return delay } - blockTime := time.Duration(d.rollupCfg.MillisecondBlockInterval()) * time.Millisecond - payloadTime := time.UnixMilli(int64(head.MillisecondTimestamp() + d.rollupCfg.MillisecondBlockInterval())) + var blockInterval uint64 + if buildingOnto == (eth.L2BlockRef{}) { + blockInterval = d.rollupCfg.MillisecondBlockInterval(uint64(time.Now().UnixMilli())) + } else { + blockInterval = d.rollupCfg.MillisecondBlockInterval(buildingOnto.MillisecondTimestamp()) + } + + blockTime := time.Millisecond * time.Duration(blockInterval) + payloadTime := time.UnixMilli(int64(head.MillisecondTimestamp() + blockInterval)) remainingTime := payloadTime.Sub(now) // If we started building a block already, and if that work is still consistent, @@ -222,11 +233,19 @@ func (d *Sequencer) BuildingOnto() eth.L2BlockRef { // If the engine is currently building safe blocks, then that building is not interrupted, and sequencing is delayed. 
func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { // if the engine returns a non-empty payload, OR if the async gossiper already has a payload, we can CompleteBuildingBlock - if onto, buildingID, safe := d.engine.BuildingPayload(); buildingID != (eth.PayloadID{}) || agossip.Get() != nil { + onto, buildingID, safe := d.engine.BuildingPayload() + var blockInterval uint64 + if onto == (eth.L2BlockRef{}) { + blockInterval = d.rollupCfg.MillisecondBlockInterval(uint64(time.Now().UnixMilli())) + } else { + blockInterval = d.rollupCfg.MillisecondBlockInterval(onto.MillisecondTimestamp()) + } + + if buildingID != (eth.PayloadID{}) || agossip.Get() != nil { if safe { d.log.Warn("avoiding sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time) // approximates the worst-case time it takes to build a block, to reattempt sequencing after. - d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval())) + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(blockInterval)) return nil, nil } envelope, err := d.CompleteBuildingBlock(ctx, agossip, sequencerConductor) @@ -236,7 +255,7 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As } else if errors.Is(err, derive.ErrReset) { d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) d.metrics.RecordSequencerReset() - d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval())) // hold off from sequencing for a full block + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(blockInterval)) // hold off from sequencing for a full block d.CancelBuildingBlock(ctx) return nil, err } else if errors.Is(err, derive.ErrTemporary) { @@ -265,7 +284,7 @@ func (d *Sequencer) RunNextSequencerAction(ctx 
context.Context, agossip async.As } else if errors.Is(err, derive.ErrReset) { d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) d.metrics.RecordSequencerReset() - d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval())) // hold off from sequencing for a full block + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(blockInterval)) // hold off from sequencing for a full block return nil, err } else if errors.Is(err, derive.ErrTemporary) { d.log.Error("sequencer temporarily failed to start building new block", "err", err) diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index b004a91916..4ec74258e0 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -269,7 +269,7 @@ func (s *Driver) eventLoop() { // Create a ticker to check if there is a gap in the engine queue. Whenever // there is, we send requests to sync source to retrieve the missing payloads. - syncCheckInterval := time.Duration(s.config.MillisecondBlockInterval()) * time.Millisecond * 2 + syncCheckInterval := time.Duration(rollup.VoltBlockTime) * time.Millisecond * 2 altSyncTicker := time.NewTicker(syncCheckInterval) defer altSyncTicker.Stop() lastUnsafeL2 := s.engineController.UnsafeL2Head() diff --git a/op-node/rollup/superchain.go b/op-node/rollup/superchain.go index acb5daa538..3714dce1f4 100644 --- a/op-node/rollup/superchain.go +++ b/op-node/rollup/superchain.go @@ -75,7 +75,7 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) { // but since none of the superchain chains differ, it's not represented in the superchain-registry yet. // This restriction on superchain-chains may change in the future. // Test/Alt configurations can still load custom rollup-configs when necessary. 
- BlockTime: 2, + //BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 3600, ChannelTimeout: 300, diff --git a/op-node/service.go b/op-node/service.go index b6df938f75..0422ef42ab 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -49,16 +49,16 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { rollupConfig.ProtocolVersionsAddress = common.Address{} } - { - if rollupConfig.BlockTime >= minSecondBlockInterval && rollupConfig.BlockTime <= maxSecondBlockInterval { - // Convert legacy second-level timestamp to millisecond timestamp, - // This is a compatibility behavior. - rollupConfig.BlockTime = rollupConfig.BlockTime * 1000 - } else if rollupConfig.BlockTime%50 != 0 && rollupConfig.BlockTime > maxMillisecondBlockInterval { - return nil, fmt.Errorf("block time is invalid, block_time: %v", rollupConfig.BlockTime) - } - // rollupConfig.BlockTime is millisecond block interval - } + //{ + // if rollupConfig.BlockTime >= minSecondBlockInterval && rollupConfig.BlockTime <= maxSecondBlockInterval { + // // Convert legacy second-level timestamp to millisecond timestamp, + // // This is a compatibility behavior. 
+ // rollupConfig.BlockTime = rollupConfig.BlockTime * 1000 + // } else if rollupConfig.BlockTime%50 != 0 && rollupConfig.BlockTime > maxMillisecondBlockInterval { + // return nil, fmt.Errorf("block time is invalid, block_time: %v", rollupConfig.BlockTime) + // } + // // rollupConfig.BlockTime is millisecond block interval + //} configPersistence := NewConfigPersistence(ctx) diff --git a/op-service/sources/l2_client.go b/op-service/sources/l2_client.go index f27fbd5744..7a22f81954 100644 --- a/op-service/sources/l2_client.go +++ b/op-service/sources/l2_client.go @@ -32,9 +32,9 @@ func L2ClientDefaultConfig(config *rollup.Config, trustRPC bool) *L2ClientConfig span := int(config.SeqWindowSize) * 3 / 2 // Estimate number of L2 blocks in this span of L1 blocks // (there's always one L2 block per L1 block, L1 is thus the minimum, even if block time is very high) - if config.SecondBlockInterval() < 12 && config.SecondBlockInterval() > 0 { + if config.SecondBlockInterval(0) < 12 && config.SecondBlockInterval(0) > 0 { span *= 12 - span /= int(config.SecondBlockInterval()) + span /= int(config.SecondBlockInterval(0)) } fullSpan := span if span > 1000 { // sanity cap. 
If a large sequencing window is configured, do not make the cache too large From b4cded2005b15bbdeb90cf56605d6711a6dddf60 Mon Sep 17 00:00:00 2001 From: joeylichang Date: Tue, 18 Mar 2025 14:10:14 +0800 Subject: [PATCH 17/30] feat: derive span batch with hard fork --- op-node/rollup/derive/attributes.go | 2 +- op-node/rollup/derive/channel_in_reader.go | 2 +- op-node/rollup/derive/span_batch.go | 26 +++++++++++++++++----- op-service/eth/types.go | 18 ++++++++------- 4 files changed, 32 insertions(+), 16 deletions(-) diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index da1afea7e4..28fe7e7c65 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -178,7 +178,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex Withdrawals: withdrawals, ParentBeaconBlockRoot: parentBeaconRoot, } - pa.SetMillisecondTimestamp(nextL2MilliTime) + pa.SetMillisecondTimestamp(nextL2MilliTime, ba.rollupCfg.IsVolta(nextL2MilliTime/1000)) return pa, nil } diff --git a/op-node/rollup/derive/channel_in_reader.go b/op-node/rollup/derive/channel_in_reader.go index bc944c0488..959f767ac2 100644 --- a/op-node/rollup/derive/channel_in_reader.go +++ b/op-node/rollup/derive/channel_in_reader.go @@ -107,7 +107,7 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { } // double check // temp VoltBlockTime - batch.Batch, err = DeriveSpanBatch(batchData, rollup.VoltBlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID) + batch.Batch, err = DeriveSpanBatch(batchData, cr.cfg, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID) if err != nil { return nil, err } diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index 9e785a94e3..8e32038cdb 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -340,7 +340,7 @@ func (b *RawSpanBatch) encode(w io.Writer) error { // derive converts RawSpanBatch into SpanBatch, which has 
a list of SpanBatchElement. // We need chain config constants to derive values for making payload attributes. -func (b *RawSpanBatch) derive(milliBlockInterval, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { +func (b *RawSpanBatch) derive(rollupCfg *rollup.Config, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { if b.blockCount == 0 { return nil, ErrEmptySpanBatch } @@ -361,6 +361,14 @@ func (b *RawSpanBatch) derive(milliBlockInterval, genesisTimestamp uint64, chain return nil, err } + var blockInterval uint64 + var millisecondTimestamp bool + if rollupCfg.IsVolta(b.relTimestamp) { + blockInterval = rollup.VoltBlockTime + millisecondTimestamp = true + } else { + blockInterval = rollup.BeforeVoltBlockTime + } spanBatch := SpanBatch{ ParentCheck: b.parentCheck, L1OriginCheck: b.l1OriginCheck, @@ -368,7 +376,13 @@ func (b *RawSpanBatch) derive(milliBlockInterval, genesisTimestamp uint64, chain txIdx := 0 for i := 0; i < int(b.blockCount); i++ { batch := SpanBatchElement{} - batch.Timestamp = genesisTimestamp*1000 + b.relTimestamp + milliBlockInterval*uint64(i) + if millisecondTimestamp { + // relTimestamp and blockInterval has changed to millisecond + batch.Timestamp = genesisTimestamp*1000 + b.relTimestamp + blockInterval*uint64(i) + } else { + // relTimestamp is second timestamp before volta + batch.Timestamp = genesisTimestamp*1000 + b.relTimestamp*1000 + blockInterval*uint64(i) + } batch.EpochNum = rollup.Epoch(blockOriginNums[i]) for j := 0; j < int(b.blockTxCounts[i]); j++ { batch.Transactions = append(batch.Transactions, fullTxs[txIdx]) @@ -381,8 +395,8 @@ func (b *RawSpanBatch) derive(milliBlockInterval, genesisTimestamp uint64, chain // ToSpanBatch converts RawSpanBatch to SpanBatch, // which implements a wrapper of derive method of RawSpanBatch -func (b *RawSpanBatch) ToSpanBatch(blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { - spanBatch, err := b.derive(blockTime, genesisTimestamp, chainID) 
+func (b *RawSpanBatch) ToSpanBatch(rollupCfg *rollup.Config, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { + spanBatch, err := b.derive(rollupCfg, genesisTimestamp, chainID) if err != nil { return nil, err } @@ -619,13 +633,13 @@ func NewSpanBatch(genesisTimestamp uint64, chainID *big.Int) *SpanBatch { } // DeriveSpanBatch derives SpanBatch from BatchData. -func DeriveSpanBatch(batchData *BatchData, blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { +func DeriveSpanBatch(batchData *BatchData, rollupCfg *rollup.Config, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { rawSpanBatch, ok := batchData.inner.(*RawSpanBatch) if !ok { return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch")) } // If the batch type is Span batch, derive block inputs from RawSpanBatch. - return rawSpanBatch.ToSpanBatch(blockTime, genesisTimestamp, chainID) + return rawSpanBatch.ToSpanBatch(rollupCfg, genesisTimestamp, chainID) } // ReadTxData reads raw RLP tx data from reader and returns txData and txType diff --git a/op-service/eth/types.go b/op-service/eth/types.go index 9bf83e1863..e8df2c205b 100644 --- a/op-service/eth/types.go +++ b/op-service/eth/types.go @@ -341,15 +341,17 @@ func (pa *PayloadAttributes) MillisecondTimestamp() uint64 { // SetMillisecondTimestamp is used to set millisecond timestamp. // [32]byte PrevRandao // [0][1] represent l2 millisecond's mill part. -func (pa *PayloadAttributes) SetMillisecondTimestamp(ts uint64) { +func (pa *PayloadAttributes) SetMillisecondTimestamp(ts uint64, updateMilliSecond bool) { pa.Timestamp = hexutil.Uint64(ts / 1000) - milliPartBytes := uint256.NewInt(ts % 1000).Bytes32() - pa.PrevRandao[0] = milliPartBytes[30] - pa.PrevRandao[1] = milliPartBytes[31] - - // It is just a marker byte to ensure that the whole is not empty; - // op-geth relies on non-empty to determine that the passed in millisecond timestamp. 
- pa.PrevRandao[2] = 1 + if updateMilliSecond { + milliPartBytes := uint256.NewInt(ts % 1000).Bytes32() + pa.PrevRandao[0] = milliPartBytes[30] + pa.PrevRandao[1] = milliPartBytes[31] + + // It is just a marker byte to ensure that the whole is not empty; + // op-geth relies on non-empty to determine that the passed in millisecond timestamp. + pa.PrevRandao[2] = 1 + } } type ExecutePayloadStatus string From 6abc63a166ff523d90f61cc8746178bb7501c5a3 Mon Sep 17 00:00:00 2001 From: joeylichang Date: Tue, 18 Mar 2025 14:45:12 +0800 Subject: [PATCH 18/30] fix: span batche delta real time --- op-node/rollup/derive/span_batch.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index 8e32038cdb..a600e2a24f 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -363,12 +363,18 @@ func (b *RawSpanBatch) derive(rollupCfg *rollup.Config, genesisTimestamp uint64, var blockInterval uint64 var millisecondTimestamp bool - if rollupCfg.IsVolta(b.relTimestamp) { - blockInterval = rollup.VoltBlockTime - millisecondTimestamp = true + if rollupCfg.VoltaTime != nil && *rollupCfg.VoltaTime > genesisTimestamp { + secondSinceVolta := *rollupCfg.VoltaTime - genesisTimestamp + if b.relTimestamp >= secondSinceVolta { + blockInterval = rollup.VoltBlockTime + millisecondTimestamp = true + } else { + blockInterval = rollup.BeforeVoltBlockTime + } } else { blockInterval = rollup.BeforeVoltBlockTime } + spanBatch := SpanBatch{ ParentCheck: b.parentCheck, L1OriginCheck: b.l1OriginCheck, From d56ecb38dec4abd57a4b89bbd4f1859062b30795 Mon Sep 17 00:00:00 2001 From: joeylichang Date: Tue, 18 Mar 2025 15:03:14 +0800 Subject: [PATCH 19/30] fix: max sequencer window config --- op-node/rollup/chain_spec.go | 2 +- op-node/rollup/driver/origin_selector.go | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/op-node/rollup/chain_spec.go 
b/op-node/rollup/chain_spec.go index b0e0657e9c..b43a5dc913 100644 --- a/op-node/rollup/chain_spec.go +++ b/op-node/rollup/chain_spec.go @@ -30,7 +30,7 @@ const SafeMaxRLPBytesPerChannel = maxRLPBytesPerChannelBedrock // the rollup config. // From Fjord, the max sequencer drift for a given block timestamp should be learned via the // ChainSpec instead of reading the rollup configuration field directly. -const maxSequencerDriftFjord = 3600 +const maxSequencerDriftFjord = 1800 type ForkName string diff --git a/op-node/rollup/driver/origin_selector.go b/op-node/rollup/driver/origin_selector.go index 263f6f5785..525bed5046 100644 --- a/op-node/rollup/driver/origin_selector.go +++ b/op-node/rollup/driver/origin_selector.go @@ -101,11 +101,13 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc if los.cfg.NextMillisecondBlockTime(l2Head.MillisecondTimestamp()) >= nextOrigin.MillisecondTimestamp() && (pastSeqDrift || receiptsCached) { return nextOrigin, nil } else { - log.Warn("select l1 origin, give up next new origin", - "l2_head_ms_timestamp", l2Head.MillisecondTimestamp(), + log.Warn("select l1 old origin, give up next origin", + "current_l2_head_ms_timestamp", l2Head.MillisecondTimestamp(), "next_l2_head_ms_timestamp", los.cfg.NextMillisecondBlockTime(l2Head.MillisecondTimestamp()), + "current_l1_origin_ms_timestamp", currentOrigin.MillisecondTimestamp(), "next_l1_origin_ms_timestamp", nextOrigin.MillisecondTimestamp(), "l2_past_seq_drift", pastSeqDrift, + "max_ms_drift", msd, "l1_receipts_cached", receiptsCached) } From 5310e17da55117be2f42f207af4fed3605e4c66a Mon Sep 17 00:00:00 2001 From: 2020xibao <2020xibao@gmail.com> Date: Tue, 18 Mar 2025 17:37:30 +0800 Subject: [PATCH 20/30] chore: batcher support volta fork --- op-batcher/batcher/channel_builder.go | 2 +- op-batcher/batcher/channel_manager.go | 17 +++++++++++++++++ op-node/rollup/derive/channel_out.go | 23 ++++++++++++++--------- op-node/rollup/derive/span_batch.go | 17 
+++++++++++++++-- op-node/rollup/derive/span_channel_out.go | 7 +++---- op-service/eth/id.go | 2 +- 6 files changed, 51 insertions(+), 17 deletions(-) diff --git a/op-batcher/batcher/channel_builder.go b/op-batcher/batcher/channel_builder.go index bed788e766..cdb42845ca 100644 --- a/op-batcher/batcher/channel_builder.go +++ b/op-batcher/batcher/channel_builder.go @@ -156,7 +156,7 @@ func (c *ChannelBuilder) AddBlock(block *types.Block) (*derive.L1BlockInfo, erro return l1info, fmt.Errorf("converting block to batch: %w", err) } - if err = c.co.AddSingularBatch(batch, l1info.SequenceNumber); errors.Is(err, derive.ErrTooManyRLPBytes) || errors.Is(err, derive.ErrCompressorFull) { + if err = c.co.AddSingularBatch(&c.rollupCfg, batch, l1info.SequenceNumber); errors.Is(err, derive.ErrTooManyRLPBytes) || errors.Is(err, derive.ErrCompressorFull) { c.setFullErr(err) return l1info, c.FullErr() } else if err != nil { diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 7d0f10e29a..0b0d523e09 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -48,6 +48,8 @@ type channelManager struct { // if set to true, prevents production of any new channel frames closed bool + + isVolta bool } func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, rollupCfg *rollup.Config) *channelManager { @@ -261,6 +263,14 @@ func (s *channelManager) processBlocks() error { latestL2ref eth.L2BlockRef ) for i, block := range s.blocks { + if !s.isVolta && s.rollupCfg.IsVolta(block.Time()) && s.currentChannel.HasTxData() { + // the current channel is before volta fork. 
+ s.currentChannel.Close() + s.isVolta = true + log.Info("before volta fork channel", "channel_id", s.currentChannel.ID(), "block_time", block.Time()) + break + } + l1info, err := s.currentChannel.AddBlock(block) if errors.As(err, &_chFullErr) { // current block didn't get added because channel is already full @@ -298,6 +308,7 @@ func (s *channelManager) processBlocks() error { "channel_full", s.currentChannel.IsFull(), "input_bytes", s.currentChannel.InputBytes(), "ready_bytes", s.currentChannel.ReadyBytes(), + "is_volta", s.isVolta, ) return nil } @@ -354,6 +365,12 @@ func (s *channelManager) AddL2Block(block *types.Block) error { return ErrReorg } + if s.tip == (common.Hash{}) && s.rollupCfg.IsVolta(block.Time()) { + // set volta flag at startup + s.isVolta = true + log.Info("succeed to set is volta flag", "block_time", block.Time()) + } + s.metr.RecordL2BlockInPendingQueue(block) s.blocks = append(s.blocks, block) s.tip = block.Hash() diff --git a/op-node/rollup/derive/channel_out.go b/op-node/rollup/derive/channel_out.go index f8b91f3319..f6ced4dd74 100644 --- a/op-node/rollup/derive/channel_out.go +++ b/op-node/rollup/derive/channel_out.go @@ -56,7 +56,7 @@ type ChannelOut interface { ID() ChannelID Reset() error AddBlock(*rollup.Config, *types.Block) error - AddSingularBatch(*SingularBatch, uint64) error + AddSingularBatch(*rollup.Config, *SingularBatch, uint64) error InputBytes() int ReadyBytes() int Flush() error @@ -119,7 +119,7 @@ func (co *SingularChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Bl if err != nil { return err } - return co.AddSingularBatch(batch, l1Info.SequenceNumber) + return co.AddSingularBatch(rollupCfg, batch, l1Info.SequenceNumber) } // AddSingularBatch adds a batch to the channel. 
It returns the RLP encoded byte size @@ -130,7 +130,7 @@ func (co *SingularChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Bl // AddSingularBatch should be used together with BlockToBatch if you need to access the // BatchData before adding a block to the channel. It isn't possible to access // the batch data with AddBlock. -func (co *SingularChannelOut) AddSingularBatch(batch *SingularBatch, _ uint64) error { +func (co *SingularChannelOut) AddSingularBatch(cfg *rollup.Config, batch *SingularBatch, _ uint64) error { if co.closed { return ErrChannelOutAlreadyClosed } @@ -241,18 +241,23 @@ func BlockToSingularBatch(rollupCfg *rollup.Config, block *types.Block) (*Singul return nil, l1Info, fmt.Errorf("could not parse the L1 Info deposit: %w", err) } - milliPart := uint64(0) - if block.MixDigest() != (common.Hash{}) { - milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1]) + ts := uint64(0) + isVolta := rollupCfg.IsVolta(block.Time()) + if isVolta { // after volta fork + milliPart := uint64(0) + if block.MixDigest() != (common.Hash{}) { + milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1]) + } + ts = block.Time()*1000 + milliPart + } else { // before volta fork + ts = block.Time() } - milliTimestamp := block.Time()*1000 + milliPart - return &SingularBatch{ ParentHash: block.ParentHash(), EpochNum: rollup.Epoch(l1Info.Number), EpochHash: l1Info.BlockHash, - Timestamp: milliTimestamp, // has changed to milli timestamp + Timestamp: ts, Transactions: opaqueTxs, }, l1Info, nil } diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index a600e2a24f..275225be7a 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -440,6 +440,8 @@ type SpanBatch struct { originBits *big.Int blockTxCounts []uint64 sbtxs *spanBatchTxs + + cfg *rollup.Config } func (b *SpanBatch) AsSingularBatch() (*SingularBatch, 
bool) { return nil, false } @@ -568,16 +570,27 @@ func (b *SpanBatch) AppendSingularBatch(singularBatch *SingularBatch, seqNum uin } // ToRawSpanBatch merges SingularBatch List and initialize single RawSpanBatch -func (b *SpanBatch) ToRawSpanBatch() (*RawSpanBatch, error) { +func (b *SpanBatch) ToRawSpanBatch(cfg *rollup.Config) (*RawSpanBatch, error) { if len(b.Batches) == 0 { return nil, errors.New("cannot merge empty singularBatch list") } span_start := b.Batches[0] span_end := b.Batches[len(b.Batches)-1] + relTs := uint64(0) + if cfg.IsVolta(span_start.Timestamp) { + relTs = span_start.Timestamp - b.MillisecondGenesisTimestamp() + } else { + relTs = span_start.Timestamp + b.GenesisTimestamp + } + log.Info("succeed to make raw span_batch", + "span_start_timestamp", span_start.Timestamp, + "rel_timestamp", relTs, + "genesis_timestamp", b.GenesisTimestamp) + return &RawSpanBatch{ spanBatchPrefix: spanBatchPrefix{ - relTimestamp: span_start.Timestamp - b.MillisecondGenesisTimestamp(), + relTimestamp: relTs, l1OriginNum: uint64(span_end.EpochNum), parentCheck: b.ParentCheck, l1OriginCheck: b.L1OriginCheck, diff --git a/op-node/rollup/derive/span_channel_out.go b/op-node/rollup/derive/span_channel_out.go index 8e02b55378..55afd2f4f6 100644 --- a/op-node/rollup/derive/span_channel_out.go +++ b/op-node/rollup/derive/span_channel_out.go @@ -2,7 +2,6 @@ package derive import ( "bytes" - "crypto/rand" "fmt" "io" @@ -108,14 +107,14 @@ func (co *SpanChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Block) if err != nil { return err } - return co.AddSingularBatch(batch, l1Info.SequenceNumber) + return co.AddSingularBatch(rollupCfg, batch, l1Info.SequenceNumber) } // AddSingularBatch adds a SingularBatch to the channel, compressing the data if necessary. 
// if the new batch would make the channel exceed the target size, the last batch is reverted, // and the compression happens on the previous RLP buffer instead // if the input is too small to need compression, data is accumulated but not compressed -func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64) error { +func (co *SpanChannelOut) AddSingularBatch(cfg *rollup.Config, batch *SingularBatch, seqNum uint64) error { // sentinel error for closed or full channel if co.closed { return ErrChannelOutAlreadyClosed @@ -129,7 +128,7 @@ func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64) return fmt.Errorf("failed to append SingularBatch to SpanBatch: %w", err) } // convert Span batch to RawSpanBatch - rawSpanBatch, err := co.spanBatch.ToRawSpanBatch() + rawSpanBatch, err := co.spanBatch.ToRawSpanBatch(cfg) if err != nil { return fmt.Errorf("failed to convert SpanBatch into RawSpanBatch: %w", err) } diff --git a/op-service/eth/id.go b/op-service/eth/id.go index dedbdfde65..14b356a3e8 100644 --- a/op-service/eth/id.go +++ b/op-service/eth/id.go @@ -67,7 +67,7 @@ func (id L1BlockRef) MillisecondTimestamp() uint64 { } func (id L1BlockRef) String() string { - return fmt.Sprintf("%s:%d", id.Hash.String(), id.Number) + return fmt.Sprintf("%s:%d:%d", id.Hash.String(), id.Number, id.MillisecondTimestamp()) } // TerminalString implements log.TerminalStringer, formatting a string for console From 85ed5e21d9e067cdfbd1f7041dfb60d45ef1e3ff Mon Sep 17 00:00:00 2001 From: flywukong <2229306838@qq.com> Date: Wed, 19 Mar 2025 13:06:53 +0800 Subject: [PATCH 21/30] add some log to print milliseonds timestamp info --- op-batcher/batcher/channel_manager.go | 5 ++++- op-node/rollup/derive/attributes.go | 10 +++++++++- op-node/rollup/derive/channel_out.go | 4 ++++ op-node/rollup/derive/payload_util.go | 3 +++ op-node/rollup/derive/span_batch.go | 4 ++++ op-node/rollup/driver/sequencer.go | 7 +++++-- op-node/rollup/driver/state.go | 2 +- 7 
files changed, 30 insertions(+), 5 deletions(-) diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 0b0d523e09..0f5730d494 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -368,7 +368,8 @@ func (s *channelManager) AddL2Block(block *types.Block) error { if s.tip == (common.Hash{}) && s.rollupCfg.IsVolta(block.Time()) { // set volta flag at startup s.isVolta = true - log.Info("succeed to set is volta flag", "block_time", block.Time()) + log.Info("succeed to set is volta flag", "block_time", block.Time(), + "l2 block num", block.Number()) } s.metr.RecordL2BlockInPendingQueue(block) @@ -385,6 +386,8 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1]) } + log.Debug("generate l2 block ref:", "milli-timestamp", milliPart, + "seconds-timestamp", block.Time(), "l2 block number", block.Number()) return eth.L2BlockRef{ Hash: block.Hash(), Number: block.NumberU64(), diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index 28fe7e7c65..50a82ba804 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" ) var ( @@ -178,7 +179,14 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex Withdrawals: withdrawals, ParentBeaconBlockRoot: parentBeaconRoot, } - pa.SetMillisecondTimestamp(nextL2MilliTime, ba.rollupCfg.IsVolta(nextL2MilliTime/1000)) + + isVoltaTime := ba.rollupCfg.IsVolta(nextL2MilliTime / 1000) + pa.SetMillisecondTimestamp(nextL2MilliTime, isVoltaTime) + if isVoltaTime { + log.Debug("succeed to build payload attributes after fork", + "timestamp_ms", 
nextL2MilliTime, "seconds-timestamp", pa.Timestamp, + "l1 origin", l1Info.NumberU64(), "l2 parent block", l2Parent.Number) + } return pa, nil } diff --git a/op-node/rollup/derive/channel_out.go b/op-node/rollup/derive/channel_out.go index f6ced4dd74..da48d54177 100644 --- a/op-node/rollup/derive/channel_out.go +++ b/op-node/rollup/derive/channel_out.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -249,6 +250,9 @@ func BlockToSingularBatch(rollupCfg *rollup.Config, block *types.Block) (*Singul milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1]) } ts = block.Time()*1000 + milliPart + log.Debug("succeed to transform singular batch after fork", + "timestamp_ms", milliPart, "seconds-timestamp", block.Time(), + "l2 block", block.Number(), "l1 origin", l1Info.Number) } else { // before volta fork ts = block.Time() } diff --git a/op-node/rollup/derive/payload_util.go b/op-node/rollup/derive/payload_util.go index 70fd30a355..17feb82994 100644 --- a/op-node/rollup/derive/payload_util.go +++ b/op-node/rollup/derive/payload_util.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" ) // PayloadToBlockRef extracts the essential L2BlockRef information from an execution payload, @@ -43,6 +44,8 @@ func PayloadToBlockRef(rollupCfg *rollup.Config, payload *eth.ExecutionPayload) // adapts millisecond part milliPart := uint64(0) milliPart = uint64(payload.PrevRandao[0])*256 + uint64(payload.PrevRandao[1]) + log.Debug("generate l2 block ref by payload", "timestamp_ms", milliPart, + "payload timestamp", payload.Timestamp, "block num", payload.BlockNumber) return 
eth.L2BlockRef{ Hash: payload.BlockHash, diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index 275225be7a..763f2fcca1 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -396,6 +396,10 @@ func (b *RawSpanBatch) derive(rollupCfg *rollup.Config, genesisTimestamp uint64, } spanBatch.Batches = append(spanBatch.Batches, &batch) } + if millisecondTimestamp { + log.Debug("succeed to build span batch with milliseconds timestamp", "rel timestamp", b.relTimestamp, + "first l1 origin", spanBatch.GetStartEpochNum(), "block count", spanBatch.GetBlockCount()) + } return &spanBatch, nil } diff --git a/op-node/rollup/driver/sequencer.go b/op-node/rollup/driver/sequencer.go index e9d98ae932..76bd91192a 100644 --- a/op-node/rollup/driver/sequencer.go +++ b/op-node/rollup/driver/sequencer.go @@ -253,7 +253,9 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As if errors.Is(err, derive.ErrCritical) { return nil, err // bubble up critical errors. 
} else if errors.Is(err, derive.ErrReset) { - d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) + d.log.Error("sequencer failed to seal new block, requiring derivation reset", + "L2 block number", onto.Number, "L1 origin", onto.L1Origin.Number, "timestamp_ms", + onto.MilliTime, "timestamp_second", onto.Time, "err", err) d.metrics.RecordSequencerReset() d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(blockInterval)) // hold off from sequencing for a full block d.CancelBuildingBlock(ctx) @@ -282,7 +284,8 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As if errors.Is(err, derive.ErrCritical) { return nil, err } else if errors.Is(err, derive.ErrReset) { - d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) + d.log.Error("sequencer failed to seal new block, requiring derivation reset", + "L2 block number", onto.Number, "timestamp_ms", onto.MilliTime, "timestamp_second", onto.Time, "err", err) d.metrics.RecordSequencerReset() d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(blockInterval)) // hold off from sequencing for a full block return nil, err diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index 4ec74258e0..f1c5bd5f1b 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -438,7 +438,7 @@ func (s *Driver) eventLoop() { continue } else if err != nil && errors.Is(err, derive.ErrReset) { // If the pipeline corrupts, e.g. 
due to a reorg, simply reset it - s.log.Warn("Derivation pipeline is reset", "err", err) + s.log.Warn("Derivation pipeline is reset", "l1 origin", s.derivation.Origin().Number, "err", err) s.derivation.Reset() s.metrics.RecordPipelineReset() continue From c1866420a807d9ae84d7e463300925cb22e19262 Mon Sep 17 00:00:00 2001 From: VM Date: Thu, 20 Mar 2025 15:36:09 +0800 Subject: [PATCH 22/30] feat: L2OutputOracle uses millisecond to compute l2 timestamp after volta hardfork --- .../src/L1/L2OutputOracle.sol | 42 ++++++++++++++++--- 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol index fe2c7dd7c8..47736c77b1 100644 --- a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol +++ b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol @@ -25,10 +25,14 @@ contract L2OutputOracle is Initializable, ISemver { /// @custom:network-specific uint256 public submissionInterval; - /// @notice The time between L2 blocks in seconds. Once set, this value MUST NOT be modified. + /// @notice The time between L2 blocks in seconds before Volta Hardfork. Once set, this value MUST NOT be modified. /// @custom:network-specific uint256 public l2BlockTime; + /// @notice The time between L2 blocks in milliseconds) after Volta Hardfork. + /// @custom:network-specific + uint256 public constant l2MillisecondsBlockTime = 500; + /// @notice The address of the challenger. Can be updated via upgrade. /// @custom:network-specific address public challenger; @@ -41,6 +45,11 @@ contract L2OutputOracle is Initializable, ISemver { /// @custom:network-specific uint256 public finalizationPeriodSeconds; + // TODO: compute accurate hardfork block number + /// @notice The block number of Volta Hardfork. + /// @custom:network-specific + uint256 public constant voltaBlockNumber = 1000; + /// @notice Emitted when an output is proposed. /// @param outputRoot The output root. 
/// @param l2OutputIndex The index of the output in the l2Outputs array. @@ -201,10 +210,7 @@ contract L2OutputOracle is Initializable, ISemver { "L2OutputOracle: block number must be equal to next expected block number" ); - require( - computeL2Timestamp(_l2BlockNumber) < block.timestamp, - "L2OutputOracle: cannot propose L2 output in the future" - ); + require(isL2TimestampValid(_l2BlockNumber), "L2OutputOracle: cannot propose L2 output in the future"); require(_outputRoot != bytes32(0), "L2OutputOracle: L2 output proposal cannot be the zero hash"); @@ -307,10 +313,34 @@ contract L2OutputOracle is Initializable, ISemver { return latestBlockNumber() + submissionInterval; } + /// @notice Checks the given l2 block number is valid. + /// @param _l2BlockNumber The L2 block number of the target block. + /// @return True that can submit output root, otherwise false. + function isL2TimestampValid(uint256 _l2BlockNumber) public view returns (uint256) { + uint256 l2Timestamp = block.number <= voltaBlockNumber + ? computeL2Timestamp(_l2BlockNumber) + : computeL2TimestampAfterVolta(_l2BlockNumber); + + uint256 currentTimestamp = block.number <= voltaBlockNumber ? block.timestamp : block.timestamp * 1000; + + return l2Timestamp < currentTimestamp; + } + /// @notice Returns the L2 timestamp corresponding to a given L2 block number. /// @param _l2BlockNumber The L2 block number of the target block. - /// @return L2 timestamp of the given block. + /// @return L2 timestamp of the given block in seconds. function computeL2Timestamp(uint256 _l2BlockNumber) public view returns (uint256) { return startingTimestamp + ((_l2BlockNumber - startingBlockNumber) * l2BlockTime); } + + /// @notice Returns the L2 timestamp corresponding to a given L2 block number after Volta Hardfork. + /// @param _l2BlockNumber The L2 block number of the target block. + /// @return L2 timestamp of the given block in milliseconds. 
+ function computeL2TimestampAfterVolta(uint256 _l2BlockNumber) public view returns (uint256) { + uint256 beforeVoltaBlockTime = (voltaBlockNumber - startingBlockNumber) * l2BlockTime * 1000; + uint256 afterVoltaBlockTime = (_l2BlockNumber - voltaBlockNumber) * l2MillisecondsBlockTime; + uint256 totalPassedBlockTime = beforeVoltaBlockTime + afterVoltaBlockTime; + + return (startingTimestamp * 1000) + totalPassedBlockTime; + } } From 6e44689877fc146e2e012318dc265fc5c6ba8267 Mon Sep 17 00:00:00 2001 From: will-2012 <117156346+will-2012@users.noreply.github.com> Date: Thu, 20 Mar 2025 15:38:39 +0800 Subject: [PATCH 23/30] fix: fix batcher reltime wrong calculate method (#274) Co-authored-by: 2020xibao <2020xibao@gmail.com> --- op-node/rollup/derive/span_batch.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index 763f2fcca1..6bbc0ff333 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -585,12 +585,13 @@ func (b *SpanBatch) ToRawSpanBatch(cfg *rollup.Config) (*RawSpanBatch, error) { if cfg.IsVolta(span_start.Timestamp) { relTs = span_start.Timestamp - b.MillisecondGenesisTimestamp() } else { - relTs = span_start.Timestamp + b.GenesisTimestamp + relTs = span_start.Timestamp - b.GenesisTimestamp } - log.Info("succeed to make raw span_batch", + log.Debug("succeed to make raw span_batch", "span_start_timestamp", span_start.Timestamp, "rel_timestamp", relTs, - "genesis_timestamp", b.GenesisTimestamp) + "genesis_timestamp", b.GenesisTimestamp, + "is_volta", cfg.IsVolta(span_start.Timestamp)) return &RawSpanBatch{ spanBatchPrefix: spanBatchPrefix{ From 8db9ba64046f166515da3afee166579caa652ca4 Mon Sep 17 00:00:00 2001 From: VM Date: Thu, 20 Mar 2025 15:47:18 +0800 Subject: [PATCH 24/30] fix: fix constant comments --- packages/contracts-bedrock/src/L1/L2OutputOracle.sol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol index 47736c77b1..2adcefd632 100644 --- a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol +++ b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol @@ -29,7 +29,7 @@ contract L2OutputOracle is Initializable, ISemver { /// @custom:network-specific uint256 public l2BlockTime; - /// @notice The time between L2 blocks in milliseconds) after Volta Hardfork. + /// @notice The time between L2 blocks in milliseconds after Volta Hardfork. /// @custom:network-specific uint256 public constant l2MillisecondsBlockTime = 500; From fedbd97570c7f857ec07ea738371f4fd0592fb54 Mon Sep 17 00:00:00 2001 From: VM Date: Thu, 20 Mar 2025 19:31:47 +0800 Subject: [PATCH 25/30] fix: update return type --- packages/contracts-bedrock/src/L1/L2OutputOracle.sol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol index 2adcefd632..901b3fc537 100644 --- a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol +++ b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol @@ -316,7 +316,7 @@ contract L2OutputOracle is Initializable, ISemver { /// @notice Checks the given l2 block number is valid. /// @param _l2BlockNumber The L2 block number of the target block. /// @return True that can submit output root, otherwise false. - function isL2TimestampValid(uint256 _l2BlockNumber) public view returns (uint256) { + function isL2TimestampValid(uint256 _l2BlockNumber) public view returns (bool) { uint256 l2Timestamp = block.number <= voltaBlockNumber ? 
computeL2Timestamp(_l2BlockNumber) : computeL2TimestampAfterVolta(_l2BlockNumber); From 82d902db8f19ef7d240636550864c73a08c354b1 Mon Sep 17 00:00:00 2001 From: joeylichang Date: Thu, 20 Mar 2025 20:35:27 +0800 Subject: [PATCH 26/30] fix: parent number compute error --- op-node/rollup/derive/batches.go | 1 + 1 file changed, 1 insertion(+) diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index 70eaccf538..4f116f27b8 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -227,6 +227,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B // unable to validate the batch for now. retry later. return BatchUndecided } + parentNum = parentNum - 1 parentBlock, err = l2Fetcher.L2BlockRefByNumber(ctx, parentNum) if err != nil { log.Warn("failed to fetch L2 block", "number", parentNum, "err", err) From 23eaf6cd2d84e9e4f167d4a62a3327845d4cd032 Mon Sep 17 00:00:00 2001 From: joeylichang Date: Thu, 20 Mar 2025 20:38:35 +0800 Subject: [PATCH 27/30] fix: cr comments --- op-node/rollup/derive/span_batch.go | 2 -- op-node/rollup/types.go | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index 6bbc0ff333..cbef43407d 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -444,8 +444,6 @@ type SpanBatch struct { originBits *big.Int blockTxCounts []uint64 sbtxs *spanBatchTxs - - cfg *rollup.Config } func (b *SpanBatch) AsSingularBatch() (*SingularBatch, bool) { return nil, false } diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 7537c175e9..4bcd086031 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -267,7 +267,7 @@ func (cfg *Config) MillisecondTimestampForBlock(blockNumber uint64) uint64 { if voltaBlockNumber == 0 || blockNumber <= voltaBlockNumber { return cfg.Genesis.L2Time*1000 + ((blockNumber - cfg.Genesis.L2.Number) * 
BeforeVoltBlockTime) } else { - return voltaBlockNumber + *cfg.VoltaTime*1000 + (blockNumber-voltaBlockNumber)*VoltBlockTime + return *cfg.VoltaTime*1000 + (blockNumber-voltaBlockNumber)*VoltBlockTime } } From 393cd534552ed5d1d1c46081ad8e6a056829bc0c Mon Sep 17 00:00:00 2001 From: joeylichang Date: Fri, 21 Mar 2025 11:03:57 +0800 Subject: [PATCH 28/30] fix: batcher channel has data judgment --- op-batcher/batcher/channel_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 0f5730d494..885a60f8f1 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -263,7 +263,7 @@ func (s *channelManager) processBlocks() error { latestL2ref eth.L2BlockRef ) for i, block := range s.blocks { - if !s.isVolta && s.rollupCfg.IsVolta(block.Time()) && s.currentChannel.HasTxData() { + if !s.isVolta && s.rollupCfg.IsVolta(block.Time()) && s.currentChannel.InputBytes() != 0 { // the current channel is before volta fork. s.currentChannel.Close() s.isVolta = true From c53df4bd952e9ef118c4605fae8fb5276dcd9c0a Mon Sep 17 00:00:00 2001 From: VM Date: Fri, 21 Mar 2025 19:28:53 +0800 Subject: [PATCH 29/30] fix: update the logics --- .../src/L1/L2OutputOracle.sol | 10 +++---- .../test/L1/L2OutputOracle.t.sol | 29 +++++++++++++++++++ 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol index 901b3fc537..849385efa9 100644 --- a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol +++ b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol @@ -30,7 +30,6 @@ contract L2OutputOracle is Initializable, ISemver { uint256 public l2BlockTime; /// @notice The time between L2 blocks in milliseconds after Volta Hardfork. - /// @custom:network-specific uint256 public constant l2MillisecondsBlockTime = 500; /// @notice The address of the challenger. 
Can be updated via upgrade. @@ -46,9 +45,9 @@ contract L2OutputOracle is Initializable, ISemver { uint256 public finalizationPeriodSeconds; // TODO: compute accurate hardfork block number - /// @notice The block number of Volta Hardfork. + /// @notice The L2 block number of Volta Hardfork. /// @custom:network-specific - uint256 public constant voltaBlockNumber = 1000; + uint256 public constant voltaBlockNumber = 138901; /// @notice Emitted when an output is proposed. /// @param outputRoot The output root. @@ -317,11 +316,11 @@ contract L2OutputOracle is Initializable, ISemver { /// @param _l2BlockNumber The L2 block number of the target block. /// @return True that can submit output root, otherwise false. function isL2TimestampValid(uint256 _l2BlockNumber) public view returns (bool) { - uint256 l2Timestamp = block.number <= voltaBlockNumber + uint256 l2Timestamp = _l2BlockNumber <= voltaBlockNumber ? computeL2Timestamp(_l2BlockNumber) : computeL2TimestampAfterVolta(_l2BlockNumber); - uint256 currentTimestamp = block.number <= voltaBlockNumber ? block.timestamp : block.timestamp * 1000; + uint256 currentTimestamp = _l2BlockNumber <= voltaBlockNumber ? block.timestamp : block.timestamp * 1000; return l2Timestamp < currentTimestamp; } @@ -330,6 +329,7 @@ contract L2OutputOracle is Initializable, ISemver { /// @param _l2BlockNumber The L2 block number of the target block. /// @return L2 timestamp of the given block in seconds. 
function computeL2Timestamp(uint256 _l2BlockNumber) public view returns (uint256) { + // _l2BlockNumber: 480, startingBlockNumber: 0, l2BlockTime: 1 return startingTimestamp + ((_l2BlockNumber - startingBlockNumber) * l2BlockTime); } diff --git a/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol b/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol index 3d89809e9b..821041f004 100644 --- a/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol +++ b/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol @@ -197,6 +197,35 @@ contract L2OutputOracle_getter_Test is CommonTest { l2OutputOracle.computeL2Timestamp(startingBlockNumber + 96024), startingTimestamp + l2BlockTime * 96024 ); } + + function test_isL2TimestampValid_before_hardfork_succeeds() external { + uint256 startingBlockNumber = deploy.cfg().l2OutputOracleStartingBlockNumber(); + assertEq(startingBlockNumber, 1); + uint256 startingTimestamp = deploy.cfg().l2OutputOracleStartingTimestamp(); + assertEq(startingTimestamp, 1); + uint256 l2BlockTime = deploy.cfg().l2BlockTime(); + assertEq(l2BlockTime, 2); + + vm.warp(138901 * 2 + 1); + l2OutputOracle.isL2TimestampValid(138901); + } + + function test_isL2TimestampValid_after_hardfork_succeeds() external { + uint256 startingBlockNumber = deploy.cfg().l2OutputOracleStartingBlockNumber(); + uint256 startingTimestamp = deploy.cfg().l2OutputOracleStartingTimestamp(); + uint256 l2BlockTime = deploy.cfg().l2BlockTime(); + + vm.roll(140100); + l2OutputOracle.isL2TimestampValid(140000); + } + + function test_computeL2TimestampAfterVolta_succeeds() external { + uint256 startingBlockNumber = deploy.cfg().l2OutputOracleStartingBlockNumber(); + uint256 startingTimestamp = deploy.cfg().l2OutputOracleStartingTimestamp(); + uint256 l2BlockTime = deploy.cfg().l2BlockTime(); + + l2OutputOracle.computeL2TimestampAfterVolta(140000); + } } contract L2OutputOracle_proposeL2Output_Test is CommonTest { From a37968f41c5e420f7d15b4e122897db7e93f5074 Mon Sep 17 00:00:00 2001 
From: VM Date: Fri, 21 Mar 2025 23:36:43 +0800 Subject: [PATCH 30/30] fix: update voltaBlockNumber to 0 --- packages/contracts-bedrock/src/L1/L2OutputOracle.sol | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol index 849385efa9..748901f196 100644 --- a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol +++ b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol @@ -29,9 +29,6 @@ contract L2OutputOracle is Initializable, ISemver { /// @custom:network-specific uint256 public l2BlockTime; - /// @notice The time between L2 blocks in milliseconds after Volta Hardfork. - uint256 public constant l2MillisecondsBlockTime = 500; - /// @notice The address of the challenger. Can be updated via upgrade. /// @custom:network-specific address public challenger; @@ -44,10 +41,12 @@ contract L2OutputOracle is Initializable, ISemver { /// @custom:network-specific uint256 public finalizationPeriodSeconds; + /// @notice The time between L2 blocks in milliseconds after Volta Hardfork. + uint256 public constant l2MillisecondsBlockTime = 500; + // TODO: compute accurate hardfork block number /// @notice The L2 block number of Volta Hardfork. - /// @custom:network-specific - uint256 public constant voltaBlockNumber = 138901; + uint256 public constant voltaBlockNumber = 0; /// @notice Emitted when an output is proposed. /// @param outputRoot The output root. @@ -329,7 +328,6 @@ contract L2OutputOracle is Initializable, ISemver { /// @param _l2BlockNumber The L2 block number of the target block. /// @return L2 timestamp of the given block in seconds. function computeL2Timestamp(uint256 _l2BlockNumber) public view returns (uint256) { - // _l2BlockNumber: 480, startingBlockNumber: 0, l2BlockTime: 1 return startingTimestamp + ((_l2BlockNumber - startingBlockNumber) * l2BlockTime); }